//===-- RISCVInstrInfo.cpp - RISC-V Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVInstrInfo.h"
#include "MCTargetDesc/RISCVBaseInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

#define DEBUG_TYPE "riscv-instr-info"
STATISTIC(NumVRegSpilled,
          "Number of registers within vector register groups spilled");
STATISTIC(NumVRegReloaded,
          "Number of registers within vector register groups reloaded");

static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));

static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
    "riscv-force-machine-combiner-strategy", cl::Hidden,
    cl::desc("Force machine combiner to use a specific strategy for machine "
             "trace metrics evaluation."),
    cl::init(MachineTraceStrategy::TS_NumStrategies),
    cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
                          "Local strategy."),
               clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
                          "MinInstrCount strategy.")));

namespace llvm::RISCVVPseudosTable {

using namespace RISCV;

#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace llvm::RISCVVPseudosTable

namespace llvm::RISCV {

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // end namespace llvm::RISCV

RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
    : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
      STI(STI) {}

#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"

MCInst RISCVInstrInfo::getNop() const {
  if (STI.hasStdExtZca())
    return MCInstBuilder(RISCV::C_NOP);
  return MCInstBuilder(RISCV::ADDI)
      .addReg(RISCV::X0)
      .addReg(RISCV::X0)
      .addImm(0);
}

Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  TypeSize Dummy = TypeSize::getZero();
  return isLoadFromStackSlot(MI, FrameIndex, Dummy);
}

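// Map an RVV whole-register load/store opcode (VL<N>RE<SEW>_V / VS<N>R_V) to
// the number of vector registers it transfers, or std::nullopt for any other
// opcode.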
static std::optional<unsigned> getLMULForRVVWholeLoadStore(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::VS1R_V:
  case RISCV::VL1RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL1RE64_V:
    return 1;
  case RISCV::VS2R_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL2RE64_V:
    return 2;
  case RISCV::VS4R_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL4RE64_V:
    return 4;
  case RISCV::VS8R_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL8RE64_V:
    return 8;
  }
}

Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex,
                                             TypeSize &MemBytes) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::LB:
  case RISCV::LBU:
    MemBytes = TypeSize::getFixed(1);
    break;
  case RISCV::LH:
  case RISCV::LH_INX:
  case RISCV::LHU:
  case RISCV::FLH:
    MemBytes = TypeSize::getFixed(2);
    break;
  case RISCV::LW:
  case RISCV::LW_INX:
  case RISCV::FLW:
  case RISCV::LWU:
    MemBytes = TypeSize::getFixed(4);
    break;
  case RISCV::LD:
  case RISCV::LD_RV32:
  case RISCV::FLD:
    MemBytes = TypeSize::getFixed(8);
    break;
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    unsigned LMUL = *getLMULForRVVWholeLoadStore(MI.getOpcode());
    MemBytes = TypeSize::getScalable(RISCV::RVVBytesPerBlock * LMUL);
    return MI.getOperand(0).getReg();
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}

Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  TypeSize Dummy = TypeSize::getZero();
  return isStoreToStackSlot(MI, FrameIndex, Dummy);
}

Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex,
                                            TypeSize &MemBytes) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  case RISCV::SB:
    MemBytes = TypeSize::getFixed(1);
    break;
  case RISCV::SH:
  case RISCV::SH_INX:
  case RISCV::FSH:
    MemBytes = TypeSize::getFixed(2);
    break;
  case RISCV::SW:
  case RISCV::SW_INX:
  case RISCV::FSW:
    MemBytes = TypeSize::getFixed(4);
    break;
  case RISCV::SD:
  case RISCV::SD_RV32:
  case RISCV::FSD:
    MemBytes = TypeSize::getFixed(8);
    break;
  case RISCV::VS1R_V:
  case RISCV::VS2R_V:
  case RISCV::VS4R_V:
  case RISCV::VS8R_V:
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    unsigned LMUL = *getLMULForRVVWholeLoadStore(MI.getOpcode());
    MemBytes = TypeSize::getScalable(RISCV::RVVBytesPerBlock * LMUL);
    return MI.getOperand(0).getReg();
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}

bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
  case RISCV::VMV_V_X:
  case RISCV::VFMV_V_F:
  case RISCV::VMV_V_I:
  case RISCV::VMV_S_X:
  case RISCV::VFMV_S_F:
  case RISCV::VID_V:
    return MI.getOperand(1).isUndef();
  default:
    return TargetInstrInfo::isReallyTriviallyReMaterializable(MI);
  }
}

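// Return true if copying a NumRegs-register tuple from SrcReg to DstReg in
// ascending order would overwrite source registers before they are read,
// i.e. the destination range starts inside the source range.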
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}

static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
                                   const MachineBasicBlock &MBB,
                                   MachineBasicBlock::const_iterator MBBI,
                                   MachineBasicBlock::const_iterator &DefMBBI,
                                   RISCVVType::VLMUL LMul) {
  if (PreferWholeRegisterMove)
    return false;

  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");
  Register SrcReg = MBBI->getOperand(1).getReg();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
  while (MBBI != MBB.begin()) {
    --MBBI;
    if (MBBI->isMetaInstruction())
      continue;

    if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {
      // There is a vsetvli between the COPY and the source defining
      // instruction:
      // vy = def_vop ...  (producing instruction)
      // ...
      // vsetvli
      // ...
      // vx = COPY vy
      if (!FoundDef) {
        if (!FirstVSetVLI) {
          FirstVSetVLI = true;
          unsigned FirstVType = MBBI->getOperand(2).getImm();
          RISCVVType::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
          FirstSEW = RISCVVType::getSEW(FirstVType);
          // The first encountered vsetvli must have the same LMUL as the
          // register class of the COPY.
          if (FirstLMul != LMul)
            return false;
        }
        // Only permit `vsetvli x0, x0, vtype` between the COPY and the source
        // defining instruction.
        if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))
          return false;
        continue;
      }

      // MBBI is the first vsetvli before the producing instruction.
      unsigned VType = MBBI->getOperand(2).getImm();
      // If there is a vsetvli between the COPY and the producing instruction.
      if (FirstVSetVLI) {
        // If SEW is different, return false.
        if (RISCVVType::getSEW(VType) != FirstSEW)
          return false;
      }

      // If the vsetvli is tail undisturbed, keep the whole register move.
      if (!RISCVVType::isTailAgnostic(VType))
        return false;

      // The check is conservative. We only have register classes for
      // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
      // for fractional LMUL operations. However, we cannot use the vsetvli
      // LMUL for widening operations, because the result of a widening
      // operation is 2 x LMUL.
      return LMul == RISCVVType::getVLMUL(VType);
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
      return false;
    } else if (MBBI->getNumDefs()) {
      // Check all the instructions which will change VL.
      // For example, vleff has an implicit def of VL.
      if (MBBI->modifiesRegister(RISCV::VL, /*TRI=*/nullptr))
        return false;

      // Only convert whole register copies to vmv.v.v when the defining
      // value appears in the explicit operands.
      for (const MachineOperand &MO : MBBI->explicit_operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
          // We only permit the source of the COPY to have the same LMUL as the
          // defined operand.
          // There are cases where we need to keep the whole register copy if
          // the LMUL is different.
          // For example,
          // $x0 = PseudoVSETIVLI 4, 73 // vsetivli zero, 4, e16,m2,ta,m
          // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
          // # The COPY may be created by the vlmul_trunc intrinsic.
          // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
          //
          // After widening, the valid value will be 4 x e32 elements. If we
          // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
          // FIXME: A COPY of a subregister of a Zvlsseg register will not be
          // convertible to vmv.v.[v|i] under this constraint.
          if (MO.getReg() != SrcReg)
            return false;

          // For widening reduction instructions with an LMUL_1 input vector,
          // checking only the LMUL is insufficient because the reduction
          // result is always LMUL_1.
          // For example,
          // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
          // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
          // $v26 = COPY killed renamable $v8
          // After widening, the valid value will be 1 x e16 elements. If we
          // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
          uint64_t TSFlags = MBBI->getDesc().TSFlags;
          if (RISCVII::isRVVWideningReduction(TSFlags))
            return false;

          // If the producing instruction does not depend on vsetvli, do not
          // convert the COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
          if (!RISCVII::hasSEWOp(TSFlags) || !RISCVII::hasVLOp(TSFlags))
            return false;

          // Found the definition.
          FoundDef = true;
          DefMBBI = MBBI;
          break;
        }
      }
    }
  }

  return false;
}

void RISCVInstrInfo::copyPhysRegVector(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc,
    const TargetRegisterClass *RegClass) const {
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  RISCVVType::VLMUL LMul = RISCVRI::getLMul(RegClass->TSFlags);
  unsigned NF = RISCVRI::getNF(RegClass->TSFlags);

  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
  auto [LMulVal, Fractional] = RISCVVType::decodeVLMUL(LMul);
  assert(!Fractional && "It is impossible to have a fractional LMUL here.");
  unsigned NumRegs = NF * LMulVal;
  bool ReversedCopy =
      forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NumRegs);
  if (ReversedCopy) {
    // If the src and dest overlap when copying a tuple, we need to copy the
    // registers in reverse.
    SrcEncoding += NumRegs - 1;
    DstEncoding += NumRegs - 1;
  }

  unsigned I = 0;
  auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
      -> std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned,
                    unsigned, unsigned> {
    if (ReversedCopy) {
      // For a reversed copy, if there are enough aligned registers (8/4/2), we
      // can do a larger copy (LMUL8/4/2).
      // Besides, we already know from forwardCopyWillClobberTuple that
      // DstEncoding is larger than SrcEncoding, so the difference between
      // DstEncoding and SrcEncoding must be >= the LMUL value we try to use
      // to avoid clobbering.
      uint16_t Diff = DstEncoding - SrcEncoding;
      if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
          DstEncoding % 8 == 7)
        return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
                RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
      if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
          DstEncoding % 4 == 3)
        return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
                RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
      if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
          DstEncoding % 2 == 1)
        return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
                RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
      // Otherwise we should do an LMUL1 copy.
      return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
              RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
    }

    // For a forward copy, if the source and destination register encodings are
    // aligned to 8/4/2, we can do an LMUL8/4/2 copy.
    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
      return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
      return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
      return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
    // Otherwise we should do an LMUL1 copy.
    return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
  };
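  // Map a raw register encoding back to the register of \p RegClass whose
  // first LMUL1 subregister has that encoding.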
  auto FindRegWithEncoding = [TRI](const TargetRegisterClass &RegClass,
                                   uint16_t Encoding) {
    MCRegister Reg = RISCV::V0 + Encoding;
    if (RISCVRI::getLMul(RegClass.TSFlags) == RISCVVType::LMUL_1)
      return Reg;
    return TRI->getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
  };
  while (I != NumRegs) {
    // For non-segment copying, we only do this once as the registers are
    // always aligned.
    // For segment copying, we may do this several times. If the registers are
    // aligned to a larger LMUL, we can eliminate some copies.
    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
        GetCopyInfo(SrcEncoding, DstEncoding);
    auto [NumCopied, _] = RISCVVType::decodeVLMUL(LMulCopied);

    MachineBasicBlock::const_iterator DefMBBI;
    if (LMul == LMulCopied &&
        isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
      Opc = VVOpc;
      if (DefMBBI->getOpcode() == VIOpc)
        Opc = VIOpc;
    }

    // Emit the actual copy.
    // For a reversed copy, the encoding should be decreased.
    MCRegister ActualSrcReg = FindRegWithEncoding(
        RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
    MCRegister ActualDstReg = FindRegWithEncoding(
        RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);

    auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), ActualDstReg);
    bool UseVMV_V_I = RISCV::getRVVMCOpcode(Opc) == RISCV::VMV_V_I;
    bool UseVMV = UseVMV_V_I || RISCV::getRVVMCOpcode(Opc) == RISCV::VMV_V_V;
    if (UseVMV)
      MIB.addReg(ActualDstReg, RegState::Undef);
    if (UseVMV_V_I)
      MIB = MIB.add(DefMBBI->getOperand(2));
    else
      MIB = MIB.addReg(ActualSrcReg, getKillRegState(KillSrc));
    if (UseVMV) {
      const MCInstrDesc &Desc = DefMBBI->getDesc();
      MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
      unsigned Log2SEW =
          DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc)).getImm();
      MIB.addImm(Log2SEW ? Log2SEW : 3); // SEW
      MIB.addImm(0);                     // tu, mu
      MIB.addReg(RISCV::VL, RegState::Implicit);
      MIB.addReg(RISCV::VTYPE, RegState::Implicit);
    }
    // Add an implicit read of the original source to silence the verifier
    // in the cases where some of the smaller VRs we're copying from might be
    // undef, caused by the fact that the original, larger source VR might not
    // be fully initialized at the time this COPY happens.
    MIB.addReg(SrcReg, RegState::Implicit);

    // If we are copying in reverse, we should decrease the encodings.
    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    I += NumCopied;
  }
}

void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, Register DstReg,
                                 Register SrcReg, bool KillSrc,
                                 bool RenamableDest, bool RenamableSrc) const {
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  unsigned KillFlag = getKillRegState(KillSrc);

  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, KillFlag | getRenamableRegState(RenamableSrc))
        .addImm(0);
    return;
  }

  if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::PseudoMV_FPR16INX), DstReg)
        .addReg(SrcReg, KillFlag | getRenamableRegState(RenamableSrc));
    return;
  }

  if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::PseudoMV_FPR32INX), DstReg)
        .addReg(SrcReg, KillFlag | getRenamableRegState(RenamableSrc));
    return;
  }

  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
    MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
    // We need to correct the odd register of X0_Pair.
    if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
      OddReg = RISCV::X0;
    assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");

    // Emit an ADDI for both parts of GPRPair.
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(EvenReg, KillFlag)
        .addImm(0);
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
        .addReg(OddReg, KillFlag)
        .addImm(0);
    return;
  }

  // Handle copy from csr
  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
        .addReg(RISCV::X0);
    return;
  }

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    unsigned Opc;
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
    } else {
      assert(STI.hasStdExtF() &&
             (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
             "Unexpected extensions");
      // Zfhmin/Zfbfmin doesn't have FSGNJ_H, replace FSGNJ_H with FSGNJ_S.
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
    }
    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_S), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_D), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_W_X), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_W), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_D_X), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_D), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  // VR->VR copies.
  const TargetRegisterClass *RegClass =
      TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
  if (RISCVRegisterInfo::isRVVRegClass(RegClass)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RegClass);
    return;
  }

  llvm_unreachable("Impossible reg-to-reg copy");
}

void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg,
                                         MachineInstr::MIFlag Flags) const {
  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();

  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::SW : RISCV::SD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  else
    llvm_unreachable("Can't store this register to stack slot");

  if (RISCVRegisterInfo::isRVVRegClass(RC)) {
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
        TypeSize::getScalable(MFI.getObjectSize(FI)), MFI.getObjectAlign(FI));

    MFI.setStackID(FI, TargetStackID::ScalableVector);
    BuildMI(MBB, I, DebugLoc(), get(Opcode))
        .addReg(SrcReg, getKillRegState(IsKill))
        .addFrameIndex(FI)
        .addMemOperand(MMO)
        .setMIFlag(Flags);
    NumVRegSpilled += TRI->getRegSizeInBits(*RC) / RISCV::RVVBitsPerBlock;
  } else {
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

    BuildMI(MBB, I, DebugLoc(), get(Opcode))
        .addReg(SrcReg, getKillRegState(IsKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .setMIFlag(Flags);
  }
}

void RISCVInstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DstReg,
    int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
    Register VReg, MachineInstr::MIFlag Flags) const {
  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();
  DebugLoc DL =
      Flags & MachineInstr::FrameDestroy ? MBB.findDebugLoc(I) : DebugLoc();

  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::LW : RISCV::LD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
  else
    llvm_unreachable("Can't load this register from stack slot");

  if (RISCVRegisterInfo::isRVVRegClass(RC)) {
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
        TypeSize::getScalable(MFI.getObjectSize(FI)), MFI.getObjectAlign(FI));

    MFI.setStackID(FI, TargetStackID::ScalableVector);
    BuildMI(MBB, I, DL, get(Opcode), DstReg)
        .addFrameIndex(FI)
        .addMemOperand(MMO)
        .setMIFlag(Flags);
    NumVRegReloaded += TRI->getRegSizeInBits(*RC) / RISCV::RVVBitsPerBlock;
  } else {
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

    BuildMI(MBB, I, DL, get(Opcode), DstReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .setMIFlag(Flags);
  }
}
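
// Return the load opcode to use when folding a reload from a stack slot into
// \p MI (whose operand at index Ops[0] would come from the slot), or
// std::nullopt if \p MI cannot be folded this way.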
std::optional<unsigned> getFoldedOpcode(MachineFunction &MF, MachineInstr &MI,
                                        ArrayRef<unsigned> Ops,
                                        const RISCVSubtarget &ST) {

  // The below optimizations narrow the load so they are only valid for little
  // endian.
  // TODO: Support big endian by adding an offset into the frame object?
  if (MF.getDataLayout().isBigEndian())
    return std::nullopt;

  // Fold load from stack followed by sext.b/sext.h/sext.w/zext.b/zext.h/zext.w.
  if (Ops.size() != 1 || Ops[0] != 1)
    return std::nullopt;

  switch (MI.getOpcode()) {
  default:
    if (RISCVInstrInfo::isSEXT_W(MI))
      return RISCV::LW;
    if (RISCVInstrInfo::isZEXT_W(MI))
      return RISCV::LWU;
    if (RISCVInstrInfo::isZEXT_B(MI))
      return RISCV::LBU;
    break;
  case RISCV::SEXT_H:
    return RISCV::LH;
  case RISCV::SEXT_B:
    return RISCV::LB;
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    return RISCV::LHU;
  }

  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
  default:
    return std::nullopt;
  case RISCV::VMV_X_S: {
    unsigned Log2SEW =
        MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm();
    if (ST.getXLen() < (1U << Log2SEW))
      return std::nullopt;
    switch (Log2SEW) {
    case 3:
      return RISCV::LB;
    case 4:
      return RISCV::LH;
    case 5:
      return RISCV::LW;
    case 6:
      return RISCV::LD;
    default:
      llvm_unreachable("Unexpected SEW");
    }
  }
  case RISCV::VFMV_F_S: {
    unsigned Log2SEW =
        MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm();
    switch (Log2SEW) {
    case 4:
      return RISCV::FLH;
    case 5:
      return RISCV::FLW;
    case 6:
      return RISCV::FLD;
    default:
      llvm_unreachable("Unexpected SEW");
    }
  }
  }
}

// This is the version used during inline spilling
MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {

  std::optional<unsigned> LoadOpc = getFoldedOpcode(MF, MI, Ops, STI);
  if (!LoadOpc)
    return nullptr;
  Register DstReg = MI.getOperand(0).getReg();
  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),
                 DstReg)
      .addFrameIndex(FrameIndex)
      .addImm(0);
}

void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag, bool DstRenamable,
                            bool DstIsDead) const {
  Register SrcReg = RISCV::X0;

  // For RV32, allow a signed or unsigned 32-bit value.
  if (!STI.is64Bit() && !isInt<32>(Val)) {
    // If we have a uimm32, it will still fit in a register, so we can allow it.
    if (!isUInt<32>(Val))
      report_fatal_error("Should only materialize 32-bit constants for RV32");

    // Sign extend for generateInstSeq.
    Val = SignExtend64<32>(Val);
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);
  assert(!Seq.empty());

  bool SrcRenamable = false;
  unsigned Num = 0;

  for (const RISCVMatInt::Inst &Inst : Seq) {
    bool LastItem = ++Num == Seq.size();
    unsigned DstRegState = getDeadRegState(DstIsDead && LastItem) |
                           getRenamableRegState(DstRenamable);
    unsigned SrcRegState = getKillRegState(SrcReg != RISCV::X0) |
                           getRenamableRegState(SrcRenamable);
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
          .addReg(DstReg, RegState::Define | DstRegState)
          .addImm(Inst.getImm())
          .setMIFlag(Flag);
      break;
    case RISCVMatInt::RegX0:
      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
          .addReg(DstReg, RegState::Define | DstRegState)
          .addReg(SrcReg, SrcRegState)
          .addReg(RISCV::X0)
          .setMIFlag(Flag);
      break;
    case RISCVMatInt::RegReg:
      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
          .addReg(DstReg, RegState::Define | DstRegState)
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
          .setMIFlag(Flag);
      break;
    case RISCVMatInt::RegImm:
      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
          .addReg(DstReg, RegState::Define | DstRegState)
          .addReg(SrcReg, SrcRegState)
          .addImm(Inst.getImm())
          .setMIFlag(Flag);
      break;
    }

    // Only the first instruction has X0 as its source.
    SrcReg = DstReg;
    SrcRenamable = DstRenamable;
  }
}

RISCVCC::CondCode RISCVInstrInfo::getCondFromBranchOpc(unsigned Opc) {
  switch (Opc) {
  default:
    return RISCVCC::COND_INVALID;
  case RISCV::BEQ:
  case RISCV::CV_BEQIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_E_BEQI:
  case RISCV::NDS_BBC:
  case RISCV::NDS_BEQC:
    return RISCVCC::COND_EQ;
  case RISCV::BNE:
  case RISCV::QC_BNEI:
  case RISCV::QC_E_BNEI:
  case RISCV::CV_BNEIMM:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BNEC:
    return RISCVCC::COND_NE;
  case RISCV::BLT:
  case RISCV::QC_BLTI:
  case RISCV::QC_E_BLTI:
    return RISCVCC::COND_LT;
  case RISCV::BGE:
  case RISCV::QC_BGEI:
  case RISCV::QC_E_BGEI:
    return RISCVCC::COND_GE;
  case RISCV::BLTU:
  case RISCV::QC_BLTUI:
  case RISCV::QC_E_BLTUI:
    return RISCVCC::COND_LTU;
  case RISCV::BGEU:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BGEUI:
    return RISCVCC::COND_GEU;
  }
}

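// Evaluate condition code \p CC on the constant operands \p C0 and \p C1,
// e.g. when folding a conditional branch whose inputs are known immediates.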
bool RISCVInstrInfo::evaluateCondBranch(RISCVCC::CondCode CC, int64_t C0,
                                        int64_t C1) {
  switch (CC) {
  default:
    llvm_unreachable("Unexpected CC");
  case RISCVCC::COND_EQ:
    return C0 == C1;
  case RISCVCC::COND_NE:
    return C0 != C1;
  case RISCVCC::COND_LT:
    return C0 < C1;
  case RISCVCC::COND_GE:
    return C0 >= C1;
  case RISCVCC::COND_LTU:
    return (uint64_t)C0 < (uint64_t)C1;
  case RISCVCC::COND_GEU:
    return (uint64_t)C0 >= (uint64_t)C1;
  }
}

// The contents of values added to Cond are not examined outside of
// RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
// push BranchOpcode, Reg1, Reg2.
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  assert(LastInst.getDesc().isConditionalBranch() &&
         "Unknown conditional branch");
  Target = LastInst.getOperand(2).getMBB();
  Cond.push_back(MachineOperand::CreateImm(LastInst.getOpcode()));
  Cond.push_back(LastInst.getOperand(0));
  Cond.push_back(LastInst.getOperand(1));
}

unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC, unsigned SelectOpc) {
  switch (SelectOpc) {
  default:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected condition code!");
    case RISCVCC::COND_EQ:
      return RISCV::BEQ;
    case RISCVCC::COND_NE:
      return RISCV::BNE;
    case RISCVCC::COND_LT:
      return RISCV::BLT;
    case RISCVCC::COND_GE:
      return RISCV::BGE;
    case RISCVCC::COND_LTU:
      return RISCV::BLTU;
    case RISCVCC::COND_GEU:
      return RISCV::BGEU;
    }
    break;
  case RISCV::Select_GPR_Using_CC_SImm5_CV:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected condition code!");
    case RISCVCC::COND_EQ:
      return RISCV::CV_BEQIMM;
    case RISCVCC::COND_NE:
      return RISCV::CV_BNEIMM;
    }
    break;
  case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected condition code!");
    case RISCVCC::COND_EQ:
      return RISCV::QC_BEQI;
    case RISCVCC::COND_NE:
      return RISCV::QC_BNEI;
    case RISCVCC::COND_LT:
      return RISCV::QC_BLTI;
    case RISCVCC::COND_GE:
      return RISCV::QC_BGEI;
    }
    break;
  case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected condition code!");
    case RISCVCC::COND_LTU:
      return RISCV::QC_BLTUI;
    case RISCVCC::COND_GEU:
      return RISCV::QC_BGEUI;
    }
    break;
  case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected condition code!");
    case RISCVCC::COND_EQ:
      return RISCV::QC_E_BEQI;
    case RISCVCC::COND_NE:
      return RISCV::QC_E_BNEI;
    case RISCVCC::COND_LT:
      return RISCV::QC_E_BLTI;
    case RISCVCC::COND_GE:
      return RISCV::QC_E_BGEI;
    }
    break;
  case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected condition code!");
    case RISCVCC::COND_LTU:
      return RISCV::QC_E_BLTUI;
    case RISCVCC::COND_GEU:
      return RISCV::QC_E_BGEUI;
    }
    break;
  case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected condition code!");
    case RISCVCC::COND_EQ:
      return RISCV::NDS_BBC;
    case RISCVCC::COND_NE:
      return RISCV::NDS_BBS;
    }
    break;
  case RISCV::Select_GPR_Using_CC_UImm7_NDS:
    switch (CC) {
    default:
      llvm_unreachable("Unexpected condition code!");
    case RISCVCC::COND_EQ:
      return RISCV::NDS_BEQC;
    case RISCVCC::COND_NE:
      return RISCV::NDS_BNEC;
    }
    break;
  }
}

RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unrecognized conditional branch");
  case RISCVCC::COND_EQ:
    return RISCVCC::COND_NE;
  case RISCVCC::COND_NE:
    return RISCVCC::COND_EQ;
  case RISCVCC::COND_LT:
    return RISCVCC::COND_GE;
  case RISCVCC::COND_GE:
    return RISCVCC::COND_LT;
  case RISCVCC::COND_LTU:
    return RISCVCC::COND_GEU;
  case RISCVCC::COND_GEU:
    return RISCVCC::COND_LTU;
  }
}

bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after
  // FirstUncondOrIndirectBR.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle Generic branch opcodes from Global ISel.
  if (I->isPreISelOpcode())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = getBranchDestBlock(*I);
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(*std::prev(I), TBB, Cond);
    FBB = getBranchDestBlock(*I);
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}

unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  if (BytesRemoved)
    *BytesRemoved = 0;
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();
  return 2;
}

// Inserts a branch into the end of the specific MachineBasicBlock, returning
// the number of instructions inserted.
unsigned RISCVInstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  if (BytesAdded)
    *BytesAdded = 0;

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISC-V branch conditions have two components!");

  // Unconditional branch.
  if (Cond.empty()) {
    MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
    if (BytesAdded)
      *BytesAdded += getInstSizeInBytes(MI);
    return 1;
  }

  // Either a one or two-way conditional branch.
  MachineInstr &CondMI = *BuildMI(&MBB, DL, get(Cond[0].getImm()))
                              .add(Cond[1])
                              .add(Cond[2])
                              .addMBB(TBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(CondMI);

  // One-way conditional branch.
  if (!FBB)
    return 1;

  // Two-way conditional branch.
  MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
  if (BytesAdded)
    *BytesAdded += getInstSizeInBytes(MI);
  return 2;
}

void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                          MachineBasicBlock &DestBB,
                                          MachineBasicBlock &RestoreBB,
                                          const DebugLoc &DL, int64_t BrOffset,
                                          RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();

  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // FIXME: A virtual register must be used initially, as the register
  // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
  // uses the same workaround).
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
  auto II = MBB.end();
  // We may also update the jump target to RestoreBB later.
  MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
                          .addReg(ScratchReg, RegState::Define | RegState::Dead)
                          .addMBB(&DestBB, RISCVII::MO_CALL);

  RS->enterBasicBlockEnd(MBB);
  Register TmpGPR =
      RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(),
                                    /*RestoreAfter=*/false, /*SpAdj=*/0,
                                    /*AllowSpill=*/false);
  if (TmpGPR != RISCV::NoRegister)
    RS->setRegUsed(TmpGPR);
  else {
    // The case when there is no scavenged register needs special handling.

    // Pick s11 (or s1 for RVE) because it doesn't make a difference.
    TmpGPR = STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;

    int FrameIndex = RVFI->getBranchRelaxationScratchFrameIndex();
    if (FrameIndex == -1)
      report_fatal_error("underestimated function size");

    storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
                        &RISCV::GPRRegClass, TRI, Register());
    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
                             /*SpAdj=*/0, /*FIOperandNum=*/1);

    MI.getOperand(1).setMBB(&RestoreBB);

    loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
                         &RISCV::GPRRegClass, TRI, Register());
    TRI->eliminateFrameIndex(RestoreBB.back(),
                             /*SpAdj=*/0, /*FIOperandNum=*/1);
  }

  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
}

bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  switch (Cond[0].getImm()) {
  default:
    llvm_unreachable("Unknown conditional branch!");
  case RISCV::BEQ:
    Cond[0].setImm(RISCV::BNE);
    break;
  case RISCV::BNE:
    Cond[0].setImm(RISCV::BEQ);
    break;
  case RISCV::BLT:
    Cond[0].setImm(RISCV::BGE);
    break;
  case RISCV::BGE:
    Cond[0].setImm(RISCV::BLT);
    break;
  case RISCV::BLTU:
    Cond[0].setImm(RISCV::BGEU);
    break;
  case RISCV::BGEU:
    Cond[0].setImm(RISCV::BLTU);
    break;
  case RISCV::CV_BEQIMM:
    Cond[0].setImm(RISCV::CV_BNEIMM);
    break;
  case RISCV::CV_BNEIMM:
    Cond[0].setImm(RISCV::CV_BEQIMM);
    break;
  case RISCV::QC_BEQI:
    Cond[0].setImm(RISCV::QC_BNEI);
    break;
  case RISCV::QC_BNEI:
    Cond[0].setImm(RISCV::QC_BEQI);
    break;
  case RISCV::QC_BGEI:
    Cond[0].setImm(RISCV::QC_BLTI);
    break;
  case RISCV::QC_BLTI:
    Cond[0].setImm(RISCV::QC_BGEI);
    break;
  case RISCV::QC_BGEUI:
    Cond[0].setImm(RISCV::QC_BLTUI);
    break;
  case RISCV::QC_BLTUI:
    Cond[0].setImm(RISCV::QC_BGEUI);
    break;
  case RISCV::QC_E_BEQI:
    Cond[0].setImm(RISCV::QC_E_BNEI);
    break;
  case RISCV::QC_E_BNEI:
    Cond[0].setImm(RISCV::QC_E_BEQI);
    break;
  case RISCV::QC_E_BGEI:
    Cond[0].setImm(RISCV::QC_E_BLTI);
    break;
  case RISCV::QC_E_BLTI:
    Cond[0].setImm(RISCV::QC_E_BGEI);
    break;
  case RISCV::QC_E_BGEUI:
    Cond[0].setImm(RISCV::QC_E_BLTUI);
    break;
  case RISCV::QC_E_BLTUI:
    Cond[0].setImm(RISCV::QC_E_BGEUI);
    break;
  case RISCV::NDS_BBC:
    Cond[0].setImm(RISCV::NDS_BBS);
    break;
  case RISCV::NDS_BBS:
    Cond[0].setImm(RISCV::NDS_BBC);
    break;
  case RISCV::NDS_BEQC:
    Cond[0].setImm(RISCV::NDS_BNEC);
    break;
  case RISCV::NDS_BNEC:
    Cond[0].setImm(RISCV::NDS_BEQC);
    break;
  }

  return false;
}

// Return true if the instruction is a load immediate instruction (i.e.
// ADDI rd, x0, imm).
static bool isLoadImm(const MachineInstr *MI, int64_t &Imm) {
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0) {
    Imm = MI->getOperand(2).getImm();
    return true;
  }
  return false;
}

bool RISCVInstrInfo::isFromLoadImm(const MachineRegisterInfo &MRI,
                                   const MachineOperand &Op, int64_t &Imm) {
  // The operand is either defined by a load immediate instruction or is X0.
  if (!Op.isReg())
    return false;

  Register Reg = Op.getReg();
  if (Reg == RISCV::X0) {
    Imm = 0;
    return true;
  }
  return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);
}

bool RISCVInstrInfo::optimizeCondBranch(MachineInstr &MI) const {
  bool IsSigned = false;
  bool IsEquality = false;
  switch (MI.getOpcode()) {
  default:
    return false;
  case RISCV::BEQ:
  case RISCV::BNE:
    IsEquality = true;
    break;
  case RISCV::BGE:
  case RISCV::BLT:
    IsSigned = true;
    break;
  case RISCV::BGEU:
  case RISCV::BLTU:
    break;
  }

  MachineBasicBlock *MBB = MI.getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  const MachineOperand &LHS = MI.getOperand(0);
  const MachineOperand &RHS = MI.getOperand(1);
  MachineBasicBlock *TBB = MI.getOperand(2).getMBB();

  RISCVCC::CondCode CC = getCondFromBranchOpc(MI.getOpcode());
  assert(CC != RISCVCC::COND_INVALID);

  // Canonicalize conditional branches which can be constant folded into
  // beqz or bnez. We can't modify the CFG here.
  int64_t C0, C1;
  if (isFromLoadImm(MRI, LHS, C0) && isFromLoadImm(MRI, RHS, C1)) {
    unsigned NewOpc = evaluateCondBranch(CC, C0, C1) ? RISCV::BEQ : RISCV::BNE;
    // Build the new branch and remove the old one.
    BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
        .addReg(RISCV::X0)
        .addReg(RISCV::X0)
        .addMBB(TBB);
    MI.eraseFromParent();
    return true;
  }

  if (IsEquality)
    return false;

  // For two constants C0 and C1 from
  // ```
  // li Y, C0
  // li Z, C1
  // ```
  // 1. if C1 = C0 + 1
  // we can turn:
  // (a) blt Y, X -> bge X, Z
  // (b) bge Y, X -> blt X, Z
  //
  // 2. if C1 = C0 - 1
  // we can turn:
  // (a) blt X, Y -> bge Z, X
  // (b) bge X, Y -> blt Z, X
  //
  // To make sure this optimization is really beneficial, we only
  // optimize for cases where Y had only one use (i.e. only used by the branch).
  // Try to find the register for constant Z; return
  // invalid register otherwise.
  auto searchConst = [&](int64_t C1) -> Register {
    MachineBasicBlock::reverse_iterator II(&MI), E = MBB->rend();
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
      int64_t Imm;
      return isLoadImm(&I, Imm) && Imm == C1 &&
             I.getOperand(0).getReg().isVirtual();
    });
    if (DefC1 != E)
      return DefC1->getOperand(0).getReg();

    return Register();
  };

  unsigned NewOpc = RISCVCC::getBrCond(getOppositeBranchCondition(CC));

  // Might be case 1.
  // Don't change 0 to 1 since we can use x0.
  // For unsigned cases changing -1U to 0 would be incorrect.
  // The incorrect case for signed would be INT_MAX, but isFromLoadImm can't
  // return that.
  if (isFromLoadImm(MRI, LHS, C0) && C0 != 0 && LHS.getReg().isVirtual() &&
      MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
    assert(isInt<12>(C0) && "Unexpected immediate");
    if (Register RegZ = searchConst(C0 + 1)) {
      BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
          .add(RHS)
          .addReg(RegZ)
          .addMBB(TBB);
      // We might extend the live range of Z, clear its kill flag to
      // account for this.
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();
      return true;
    }
  }

  // Might be case 2.
  // For signed cases we don't want to change 0 since we can use x0.
  // For unsigned cases changing 0 to -1U would be incorrect.
  // The incorrect case for signed would be INT_MIN, but isFromLoadImm can't
  // return that.
  if (isFromLoadImm(MRI, RHS, C0) && C0 != 0 && RHS.getReg().isVirtual() &&
      MRI.hasOneUse(RHS.getReg())) {
    assert(isInt<12>(C0) && "Unexpected immediate");
    if (Register RegZ = searchConst(C0 - 1)) {
      BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
          .addReg(RegZ)
          .add(LHS)
          .addMBB(TBB);
      // We might extend the live range of Z, clear its kill flag to
      // account for this.
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();
      return true;
    }
  }

  return false;
}

1592MachineBasicBlock *
1593RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
1594 assert(MI.getDesc().isBranch() && "Unexpected opcode!");
1595 // The branch target is always the last operand.
1596 int NumOp = MI.getNumExplicitOperands();
1597 return MI.getOperand(i: NumOp - 1).getMBB();
1598}
1599
1600bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
1601 int64_t BrOffset) const {
1602 unsigned XLen = STI.getXLen();
1603 // Ideally we could determine the supported branch offset from the
1604 // RISCVII::FormMask, but this can't be used for Pseudo instructions like
1605 // PseudoBR.
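 // For reference, the conditional branches below use a 13-bit signed,
 // 2-byte-aligned offset (roughly +/-4 KiB), while JAL/PseudoBR use a 21-bit
 // signed offset (roughly +/-1 MiB); hence the isInt<13> and isInt<21> checks.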
1606 switch (BranchOp) {
1607 default:
1608 llvm_unreachable("Unexpected opcode!");
1609 case RISCV::NDS_BBC:
1610 case RISCV::NDS_BBS:
1611 case RISCV::NDS_BEQC:
1612 case RISCV::NDS_BNEC:
1613 return isInt<11>(x: BrOffset);
1614 case RISCV::BEQ:
1615 case RISCV::BNE:
1616 case RISCV::BLT:
1617 case RISCV::BGE:
1618 case RISCV::BLTU:
1619 case RISCV::BGEU:
1620 case RISCV::CV_BEQIMM:
1621 case RISCV::CV_BNEIMM:
1622 case RISCV::QC_BEQI:
1623 case RISCV::QC_BNEI:
1624 case RISCV::QC_BGEI:
1625 case RISCV::QC_BLTI:
1626 case RISCV::QC_BLTUI:
1627 case RISCV::QC_BGEUI:
1628 case RISCV::QC_E_BEQI:
1629 case RISCV::QC_E_BNEI:
1630 case RISCV::QC_E_BGEI:
1631 case RISCV::QC_E_BLTI:
1632 case RISCV::QC_E_BLTUI:
1633 case RISCV::QC_E_BGEUI:
1634 return isInt<13>(x: BrOffset);
1635 case RISCV::JAL:
1636 case RISCV::PseudoBR:
1637 return isInt<21>(x: BrOffset);
1638 case RISCV::PseudoJump:
1639 return isInt<32>(x: SignExtend64(X: BrOffset + 0x800, B: XLen));
1640 }
1641}
1642
1643// If the operation has a predicated pseudo instruction, return the pseudo
1644// instruction opcode. Otherwise, return RISCV::INSTRUCTION_LIST_END.
1645// TODO: Support more operations.
1646 static unsigned getPredicatedOpcode(unsigned Opcode) {
1647 switch (Opcode) {
1648 case RISCV::ADD: return RISCV::PseudoCCADD; break;
1649 case RISCV::SUB: return RISCV::PseudoCCSUB; break;
1650 case RISCV::SLL: return RISCV::PseudoCCSLL; break;
1651 case RISCV::SRL: return RISCV::PseudoCCSRL; break;
1652 case RISCV::SRA: return RISCV::PseudoCCSRA; break;
1653 case RISCV::AND: return RISCV::PseudoCCAND; break;
1654 case RISCV::OR: return RISCV::PseudoCCOR; break;
1655 case RISCV::XOR: return RISCV::PseudoCCXOR; break;
1656
1657 case RISCV::ADDI: return RISCV::PseudoCCADDI; break;
1658 case RISCV::SLLI: return RISCV::PseudoCCSLLI; break;
1659 case RISCV::SRLI: return RISCV::PseudoCCSRLI; break;
1660 case RISCV::SRAI: return RISCV::PseudoCCSRAI; break;
1661 case RISCV::ANDI: return RISCV::PseudoCCANDI; break;
1662 case RISCV::ORI: return RISCV::PseudoCCORI; break;
1663 case RISCV::XORI: return RISCV::PseudoCCXORI; break;
1664
1665 case RISCV::ADDW: return RISCV::PseudoCCADDW; break;
1666 case RISCV::SUBW: return RISCV::PseudoCCSUBW; break;
1667 case RISCV::SLLW: return RISCV::PseudoCCSLLW; break;
1668 case RISCV::SRLW: return RISCV::PseudoCCSRLW; break;
1669 case RISCV::SRAW: return RISCV::PseudoCCSRAW; break;
1670
1671 case RISCV::ADDIW: return RISCV::PseudoCCADDIW; break;
1672 case RISCV::SLLIW: return RISCV::PseudoCCSLLIW; break;
1673 case RISCV::SRLIW: return RISCV::PseudoCCSRLIW; break;
1674 case RISCV::SRAIW: return RISCV::PseudoCCSRAIW; break;
1675
1676 case RISCV::ANDN: return RISCV::PseudoCCANDN; break;
1677 case RISCV::ORN: return RISCV::PseudoCCORN; break;
1678 case RISCV::XNOR: return RISCV::PseudoCCXNOR; break;
1679
1680 case RISCV::NDS_BFOS: return RISCV::PseudoCCNDS_BFOS; break;
1681 case RISCV::NDS_BFOZ: return RISCV::PseudoCCNDS_BFOZ; break;
1682 }
1683
1684 return RISCV::INSTRUCTION_LIST_END;
1685}
1686
1687/// Identify instructions that can be folded into a CCMOV instruction, and
1688/// return the defining instruction.
1689static MachineInstr *canFoldAsPredicatedOp(Register Reg,
1690 const MachineRegisterInfo &MRI,
1691 const TargetInstrInfo *TII) {
1692 if (!Reg.isVirtual())
1693 return nullptr;
1694 if (!MRI.hasOneNonDBGUse(RegNo: Reg))
1695 return nullptr;
1696 MachineInstr *MI = MRI.getVRegDef(Reg);
1697 if (!MI)
1698 return nullptr;
1699 // Check if MI can be predicated and folded into the CCMOV.
1700 if (getPredicatedOpcode(Opcode: MI->getOpcode()) == RISCV::INSTRUCTION_LIST_END)
1701 return nullptr;
1702 // Don't predicate the li idiom (ADDI rd, x0, imm).
1703 if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(i: 1).isReg() &&
1704 MI->getOperand(i: 1).getReg() == RISCV::X0)
1705 return nullptr;
1706 // Check if MI has any other defs or physreg uses.
1707 for (const MachineOperand &MO : llvm::drop_begin(RangeOrContainer: MI->operands())) {
1708 // Reject frame index operands, PEI can't handle the predicated pseudos.
1709 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1710 return nullptr;
1711 if (!MO.isReg())
1712 continue;
1713 // MI can't have any tied operands; that would conflict with predication.
1714 if (MO.isTied())
1715 return nullptr;
1716 if (MO.isDef())
1717 return nullptr;
1718 // Allow constant physregs.
1719 if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(PhysReg: MO.getReg()))
1720 return nullptr;
1721 }
1722 bool DontMoveAcrossStores = true;
1723 if (!MI->isSafeToMove(SawStore&: DontMoveAcrossStores))
1724 return nullptr;
1725 return MI;
1726}
1727
1728bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
1729 SmallVectorImpl<MachineOperand> &Cond,
1730 unsigned &TrueOp, unsigned &FalseOp,
1731 bool &Optimizable) const {
1732 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1733 "Unknown select instruction");
1734 // CCMOV operands:
1735 // 0: Def.
1736 // 1: LHS of compare.
1737 // 2: RHS of compare.
1738 // 3: Condition code.
1739 // 4: False use.
1740 // 5: True use.
1741 TrueOp = 5;
1742 FalseOp = 4;
1743 Cond.push_back(Elt: MI.getOperand(i: 1));
1744 Cond.push_back(Elt: MI.getOperand(i: 2));
1745 Cond.push_back(Elt: MI.getOperand(i: 3));
1746 // We can only fold when we support short forward branch opt.
1747 Optimizable = STI.hasShortForwardBranchOpt();
1748 return false;
1749}
1750
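// A rough sketch of the fold performed by optimizeSelect below (symbolic
// virtual registers; assumes short forward branch support): given
//   %a = ADD %b, %c
//   %d = PseudoCCMOVGPR %lhs, %rhs, cc, %false, %a
// the ADD is folded into a predicated pseudo
//   %d = PseudoCCADD %lhs, %rhs, cc, %false, %b, %c
// so the addition only takes effect when the condition selects the true
// operand; otherwise %false is used.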
1751MachineInstr *
1752RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
1753 SmallPtrSetImpl<MachineInstr *> &SeenMIs,
1754 bool PreferFalse) const {
1755 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1756 "Unknown select instruction");
1757 if (!STI.hasShortForwardBranchOpt())
1758 return nullptr;
1759
1760 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1761 MachineInstr *DefMI =
1762 canFoldAsPredicatedOp(Reg: MI.getOperand(i: 5).getReg(), MRI, TII: this);
1763 bool Invert = !DefMI;
1764 if (!DefMI)
1765 DefMI = canFoldAsPredicatedOp(Reg: MI.getOperand(i: 4).getReg(), MRI, TII: this);
1766 if (!DefMI)
1767 return nullptr;
1768
1769 // Find new register class to use.
1770 MachineOperand FalseReg = MI.getOperand(i: Invert ? 5 : 4);
1771 Register DestReg = MI.getOperand(i: 0).getReg();
1772 const TargetRegisterClass *PreviousClass = MRI.getRegClass(Reg: FalseReg.getReg());
1773 if (!MRI.constrainRegClass(Reg: DestReg, RC: PreviousClass))
1774 return nullptr;
1775
1776 unsigned PredOpc = getPredicatedOpcode(Opcode: DefMI->getOpcode());
1777 assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
1778
1779 // Create a new predicated version of DefMI.
1780 MachineInstrBuilder NewMI =
1781 BuildMI(BB&: *MI.getParent(), I&: MI, MIMD: MI.getDebugLoc(), MCID: get(Opcode: PredOpc), DestReg);
1782
1783 // Copy the condition portion.
1784 NewMI.add(MO: MI.getOperand(i: 1));
1785 NewMI.add(MO: MI.getOperand(i: 2));
1786
1787 // Add condition code, inverting if necessary.
1788 auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(i: 3).getImm());
1789 if (Invert)
1790 CC = RISCVCC::getOppositeBranchCondition(CC);
1791 NewMI.addImm(Val: CC);
1792
1793 // Copy the false register.
1794 NewMI.add(MO: FalseReg);
1795
1796 // Copy all the DefMI operands.
1797 const MCInstrDesc &DefDesc = DefMI->getDesc();
1798 for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e; ++i)
1799 NewMI.add(MO: DefMI->getOperand(i));
1800
1801 // Update SeenMIs set: register newly created MI and erase removed DefMI.
1802 SeenMIs.insert(Ptr: NewMI);
1803 SeenMIs.erase(Ptr: DefMI);
1804
1805 // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
1806 // DefMI would be invalid when transferred inside the loop. Checking for a
1807 // loop is expensive, but at least remove kill flags if they are in different
1808 // BBs.
1809 if (DefMI->getParent() != MI.getParent())
1810 NewMI->clearKillInfo();
1811
1812 // The caller will erase MI, but not DefMI.
1813 DefMI->eraseFromParent();
1814 return NewMI;
1815}
1816
1817unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
1818 if (MI.isMetaInstruction())
1819 return 0;
1820
1821 unsigned Opcode = MI.getOpcode();
1822
1823 if (Opcode == TargetOpcode::INLINEASM ||
1824 Opcode == TargetOpcode::INLINEASM_BR) {
1825 const MachineFunction &MF = *MI.getParent()->getParent();
1826 return getInlineAsmLength(Str: MI.getOperand(i: 0).getSymbolName(),
1827 MAI: *MF.getTarget().getMCAsmInfo());
1828 }
1829
1830 if (!MI.memoperands_empty()) {
1831 MachineMemOperand *MMO = *(MI.memoperands_begin());
1832 if (STI.hasStdExtZihintntl() && MMO->isNonTemporal()) {
1833 if (STI.hasStdExtZca()) {
1834 if (isCompressibleInst(MI, STI))
1835 return 4; // c.ntl.all + c.load/c.store
1836 return 6; // c.ntl.all + load/store
1837 }
1838 return 8; // ntl.all + load/store
1839 }
1840 }
1841
1842 if (Opcode == TargetOpcode::BUNDLE)
1843 return getInstBundleLength(MI);
1844
1845 if (MI.getParent() && MI.getParent()->getParent()) {
1846 if (isCompressibleInst(MI, STI))
1847 return 2;
1848 }
1849
1850 switch (Opcode) {
1851 case RISCV::PseudoMV_FPR16INX:
1852 case RISCV::PseudoMV_FPR32INX:
1853 // MV is always compressible to either c.mv or c.li rd, 0.
1854 return STI.hasStdExtZca() ? 2 : 4;
1855 case TargetOpcode::STACKMAP:
1856 // The upper bound for a stackmap intrinsic is the full length of its shadow
1857 return StackMapOpers(&MI).getNumPatchBytes();
1858 case TargetOpcode::PATCHPOINT:
1859 // The size of the patchpoint intrinsic is the number of bytes requested
1860 return PatchPointOpers(&MI).getNumPatchBytes();
1861 case TargetOpcode::STATEPOINT: {
1862 // The size of the statepoint intrinsic is the number of bytes requested
1863 unsigned NumBytes = StatepointOpers(&MI).getNumPatchBytes();
1864 // If no patch bytes are requested, at most a PseudoCall (8 bytes) is emitted.
1865 return std::max(a: NumBytes, b: 8U);
1866 }
1867 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
1868 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
1869 case TargetOpcode::PATCHABLE_TAIL_CALL: {
1870 const MachineFunction &MF = *MI.getParent()->getParent();
1871 const Function &F = MF.getFunction();
1872 if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
1873 F.hasFnAttribute(Kind: "patchable-function-entry")) {
1874 unsigned Num;
1875 if (F.getFnAttribute(Kind: "patchable-function-entry")
1876 .getValueAsString()
1877 .getAsInteger(Radix: 10, Result&: Num))
1878 return get(Opcode).getSize();
1879
1880 // Number of C.NOP or NOP
1881 return (STI.hasStdExtZca() ? 2 : 4) * Num;
1882 }
1883 // XRay uses C.JAL + 21 or 33 C.NOP for each sled in RV32 and RV64,
1884 // respectively.
1885 return STI.is64Bit() ? 68 : 44;
1886 }
1887 default:
1888 return get(Opcode).getSize();
1889 }
1890}
1891
1892unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
1893 unsigned Size = 0;
1894 MachineBasicBlock::const_instr_iterator I = MI.getIterator();
1895 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
1896 while (++I != E && I->isInsideBundle()) {
1897 assert(!I->isBundle() && "No nested bundle!");
1898 Size += getInstSizeInBytes(MI: *I);
1899 }
1900 return Size;
1901}
1902
1903bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
1904 const unsigned Opcode = MI.getOpcode();
1905 switch (Opcode) {
1906 default:
1907 break;
1908 case RISCV::FSGNJ_D:
1909 case RISCV::FSGNJ_S:
1910 case RISCV::FSGNJ_H:
1911 case RISCV::FSGNJ_D_INX:
1912 case RISCV::FSGNJ_D_IN32X:
1913 case RISCV::FSGNJ_S_INX:
1914 case RISCV::FSGNJ_H_INX:
1915 // The canonical floating-point move is fsgnj rd, rs, rs.
1916 return MI.getOperand(i: 1).isReg() && MI.getOperand(i: 2).isReg() &&
1917 MI.getOperand(i: 1).getReg() == MI.getOperand(i: 2).getReg();
1918 case RISCV::ADDI:
1919 case RISCV::ORI:
1920 case RISCV::XORI:
1921 return (MI.getOperand(i: 1).isReg() &&
1922 MI.getOperand(i: 1).getReg() == RISCV::X0) ||
1923 (MI.getOperand(i: 2).isImm() && MI.getOperand(i: 2).getImm() == 0);
1924 }
1925 return MI.isAsCheapAsAMove();
1926}
1927
1928std::optional<DestSourcePair>
1929RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
1930 if (MI.isMoveReg())
1931 return DestSourcePair{MI.getOperand(i: 0), MI.getOperand(i: 1)};
1932 switch (MI.getOpcode()) {
1933 default:
1934 break;
1935 case RISCV::ADD:
1936 case RISCV::OR:
1937 case RISCV::XOR:
1938 if (MI.getOperand(i: 1).isReg() && MI.getOperand(i: 1).getReg() == RISCV::X0 &&
1939 MI.getOperand(i: 2).isReg())
1940 return DestSourcePair{MI.getOperand(i: 0), MI.getOperand(i: 2)};
1941 if (MI.getOperand(i: 2).isReg() && MI.getOperand(i: 2).getReg() == RISCV::X0 &&
1942 MI.getOperand(i: 1).isReg())
1943 return DestSourcePair{MI.getOperand(i: 0), MI.getOperand(i: 1)};
1944 break;
1945 case RISCV::ADDI:
1946 // Operand 1 can be a frameindex but callers expect registers
1947 if (MI.getOperand(i: 1).isReg() && MI.getOperand(i: 2).isImm() &&
1948 MI.getOperand(i: 2).getImm() == 0)
1949 return DestSourcePair{MI.getOperand(i: 0), MI.getOperand(i: 1)};
1950 break;
1951 case RISCV::SUB:
1952 if (MI.getOperand(i: 2).isReg() && MI.getOperand(i: 2).getReg() == RISCV::X0 &&
1953 MI.getOperand(i: 1).isReg())
1954 return DestSourcePair{MI.getOperand(i: 0), MI.getOperand(i: 1)};
1955 break;
1956 case RISCV::SH1ADD:
1957 case RISCV::SH1ADD_UW:
1958 case RISCV::SH2ADD:
1959 case RISCV::SH2ADD_UW:
1960 case RISCV::SH3ADD:
1961 case RISCV::SH3ADD_UW:
1962 if (MI.getOperand(i: 1).isReg() && MI.getOperand(i: 1).getReg() == RISCV::X0 &&
1963 MI.getOperand(i: 2).isReg())
1964 return DestSourcePair{MI.getOperand(i: 0), MI.getOperand(i: 2)};
1965 break;
1966 case RISCV::FSGNJ_D:
1967 case RISCV::FSGNJ_S:
1968 case RISCV::FSGNJ_H:
1969 case RISCV::FSGNJ_D_INX:
1970 case RISCV::FSGNJ_D_IN32X:
1971 case RISCV::FSGNJ_S_INX:
1972 case RISCV::FSGNJ_H_INX:
1973 // The canonical floating-point move is fsgnj rd, rs, rs.
1974 if (MI.getOperand(i: 1).isReg() && MI.getOperand(i: 2).isReg() &&
1975 MI.getOperand(i: 1).getReg() == MI.getOperand(i: 2).getReg())
1976 return DestSourcePair{MI.getOperand(i: 0), MI.getOperand(i: 1)};
1977 break;
1978 }
1979 return std::nullopt;
1980}
1981
1982MachineTraceStrategy RISCVInstrInfo::getMachineCombinerTraceStrategy() const {
1983 if (ForceMachineCombinerStrategy.getNumOccurrences() == 0) {
1984 // The option is unused. Choose the Local strategy only for in-order cores.
1985 // When the scheduling model is unspecified, use the MinInstrCount strategy
1986 // as the more generic one.
1987 const auto &SchedModel = STI.getSchedModel();
1988 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
1989 ? MachineTraceStrategy::TS_MinInstrCount
1990 : MachineTraceStrategy::TS_Local;
1991 }
1992 // The strategy was forced by the option.
1993 return ForceMachineCombinerStrategy;
1994}
1995
1996void RISCVInstrInfo::finalizeInsInstrs(
1997 MachineInstr &Root, unsigned &Pattern,
1998 SmallVectorImpl<MachineInstr *> &InsInstrs) const {
1999 int16_t FrmOpIdx =
2000 RISCV::getNamedOperandIdx(Opcode: Root.getOpcode(), Name: RISCV::OpName::frm);
2001 if (FrmOpIdx < 0) {
2002 assert(all_of(InsInstrs,
2003 [](MachineInstr *MI) {
2004 return RISCV::getNamedOperandIdx(MI->getOpcode(),
2005 RISCV::OpName::frm) < 0;
2006 }) &&
2007 "New instructions require FRM whereas the old one does not have it");
2008 return;
2009 }
2010
2011 const MachineOperand &FRM = Root.getOperand(i: FrmOpIdx);
2012 MachineFunction &MF = *Root.getMF();
2013
2014 for (auto *NewMI : InsInstrs) {
2015 // We've already added the FRM operand to this instruction; skip it.
2016 if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
2017 Opcode: NewMI->getOpcode(), Name: RISCV::OpName::frm)) != NewMI->getNumOperands())
2018 continue;
2019 MachineInstrBuilder MIB(MF, NewMI);
2020 MIB.add(MO: FRM);
2021 if (FRM.getImm() == RISCVFPRndMode::DYN)
2022 MIB.addUse(RegNo: RISCV::FRM, Flags: RegState::Implicit);
2023 }
2024}
2025
2026static bool isFADD(unsigned Opc) {
2027 switch (Opc) {
2028 default:
2029 return false;
2030 case RISCV::FADD_H:
2031 case RISCV::FADD_S:
2032 case RISCV::FADD_D:
2033 return true;
2034 }
2035}
2036
2037static bool isFSUB(unsigned Opc) {
2038 switch (Opc) {
2039 default:
2040 return false;
2041 case RISCV::FSUB_H:
2042 case RISCV::FSUB_S:
2043 case RISCV::FSUB_D:
2044 return true;
2045 }
2046}
2047
2048static bool isFMUL(unsigned Opc) {
2049 switch (Opc) {
2050 default:
2051 return false;
2052 case RISCV::FMUL_H:
2053 case RISCV::FMUL_S:
2054 case RISCV::FMUL_D:
2055 return true;
2056 }
2057}
2058
2059bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
2060 bool Invert) const {
2061#define OPCODE_LMUL_CASE(OPC) \
2062 case RISCV::OPC##_M1: \
2063 case RISCV::OPC##_M2: \
2064 case RISCV::OPC##_M4: \
2065 case RISCV::OPC##_M8: \
2066 case RISCV::OPC##_MF2: \
2067 case RISCV::OPC##_MF4: \
2068 case RISCV::OPC##_MF8
2069
2070#define OPCODE_LMUL_MASK_CASE(OPC) \
2071 case RISCV::OPC##_M1_MASK: \
2072 case RISCV::OPC##_M2_MASK: \
2073 case RISCV::OPC##_M4_MASK: \
2074 case RISCV::OPC##_M8_MASK: \
2075 case RISCV::OPC##_MF2_MASK: \
2076 case RISCV::OPC##_MF4_MASK: \
2077 case RISCV::OPC##_MF8_MASK
2078
2079 unsigned Opcode = Inst.getOpcode();
2080 if (Invert) {
2081 if (auto InvOpcode = getInverseOpcode(Opcode))
2082 Opcode = *InvOpcode;
2083 else
2084 return false;
2085 }
2086
2087 // clang-format off
2088 switch (Opcode) {
2089 default:
2090 return false;
2091 OPCODE_LMUL_CASE(PseudoVADD_VV):
2092 OPCODE_LMUL_MASK_CASE(PseudoVADD_VV):
2093 OPCODE_LMUL_CASE(PseudoVMUL_VV):
2094 OPCODE_LMUL_MASK_CASE(PseudoVMUL_VV):
2095 return true;
2096 }
2097 // clang-format on
2098
2099#undef OPCODE_LMUL_MASK_CASE
2100#undef OPCODE_LMUL_CASE
2101}
2102
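// A simplified sketch of what makes two RVV pseudos reassociable below: Root
// and Prev must have equal or inverse opcodes and identical vtype state, i.e.
// the same passthru, SEW, VL, policy and (for masked forms) the same V0 mask
// source. For example (symbolic operands, unmasked form):
//   Prev: %t = PseudoVADD_VV_M1 %passthru, %a, %b, %avl, sew, policy
//   Root: %r = PseudoVADD_VV_M1 %passthru, %t, %c, %avl, sew, policy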
2103bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
2104 const MachineInstr &Prev) const {
2105 if (!areOpcodesEqualOrInverse(Opcode1: Root.getOpcode(), Opcode2: Prev.getOpcode()))
2106 return false;
2107
2108 assert(Root.getMF() == Prev.getMF());
2109 const MachineRegisterInfo *MRI = &Root.getMF()->getRegInfo();
2110 const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
2111
2112 // Make sure vtype operands are also the same.
2113 const MCInstrDesc &Desc = get(Opcode: Root.getOpcode());
2114 const uint64_t TSFlags = Desc.TSFlags;
2115
2116 auto checkImmOperand = [&](unsigned OpIdx) {
2117 return Root.getOperand(i: OpIdx).getImm() == Prev.getOperand(i: OpIdx).getImm();
2118 };
2119
2120 auto checkRegOperand = [&](unsigned OpIdx) {
2121 return Root.getOperand(i: OpIdx).getReg() == Prev.getOperand(i: OpIdx).getReg();
2122 };
2123
2124 // PassThru
2125 // TODO: Potentially we could loosen this to consider Root reassociable
2126 // with Prev when Root has NoReg as its passthru, in which case the check
2127 // on the vector policy operands would also need to be relaxed.
2128 if (!checkRegOperand(1))
2129 return false;
2130
2131 // SEW
2132 if (RISCVII::hasSEWOp(TSFlags) &&
2133 !checkImmOperand(RISCVII::getSEWOpNum(Desc)))
2134 return false;
2135
2136 // Mask
2137 if (RISCVII::usesMaskPolicy(TSFlags)) {
2138 const MachineBasicBlock *MBB = Root.getParent();
2139 const MachineBasicBlock::const_reverse_iterator It1(&Root);
2140 const MachineBasicBlock::const_reverse_iterator It2(&Prev);
2141 Register MI1VReg;
2142
2143 bool SeenMI2 = false;
2144 for (auto End = MBB->rend(), It = It1; It != End; ++It) {
2145 if (It == It2) {
2146 SeenMI2 = true;
2147 if (!MI1VReg.isValid())
2148 // There is no V0 def between Root and Prev; they're sharing the
2149 // same V0.
2150 break;
2151 }
2152
2153 if (It->modifiesRegister(Reg: RISCV::V0, TRI)) {
2154 Register SrcReg = It->getOperand(i: 1).getReg();
2155 // If it's not a virtual register it'll be more difficult to track its
2156 // defs, so bail out here just to be safe.
2157 if (!SrcReg.isVirtual())
2158 return false;
2159
2160 if (!MI1VReg.isValid()) {
2161 // This is the V0 def for Root.
2162 MI1VReg = SrcReg;
2163 continue;
2164 }
2165
2166 // An unrelated mask (V0) update; keep scanning.
2167 if (!SeenMI2)
2168 continue;
2169
2170 // This is the V0 def for Prev; check if it's the same as that of
2171 // Root.
2172 if (MI1VReg != SrcReg)
2173 return false;
2174 else
2175 break;
2176 }
2177 }
2178
2179 // If we haven't encountered Prev, this function was likely called
2180 // incorrectly (e.g. Root appears before Prev).
2181 assert(SeenMI2 && "Prev is expected to appear before Root");
2182 }
2183
2184 // Tail / Mask policies
2185 if (RISCVII::hasVecPolicyOp(TSFlags) &&
2186 !checkImmOperand(RISCVII::getVecPolicyOpNum(Desc)))
2187 return false;
2188
2189 // VL
2190 if (RISCVII::hasVLOp(TSFlags)) {
2191 unsigned OpIdx = RISCVII::getVLOpNum(Desc);
2192 const MachineOperand &Op1 = Root.getOperand(i: OpIdx);
2193 const MachineOperand &Op2 = Prev.getOperand(i: OpIdx);
2194 if (Op1.getType() != Op2.getType())
2195 return false;
2196 switch (Op1.getType()) {
2197 case MachineOperand::MO_Register:
2198 if (Op1.getReg() != Op2.getReg())
2199 return false;
2200 break;
2201 case MachineOperand::MO_Immediate:
2202 if (Op1.getImm() != Op2.getImm())
2203 return false;
2204 break;
2205 default:
2206 llvm_unreachable("Unrecognized VL operand type");
2207 }
2208 }
2209
2210 // Rounding modes
2211 if (RISCVII::hasRoundModeOp(TSFlags) &&
2212 !checkImmOperand(RISCVII::getVLOpNum(Desc) - 1))
2213 return false;
2214
2215 return true;
2216}
2217
2218 // Most of our RVV pseudos have a passthru operand, so the real source
2219 // operands start at index 2.
2220bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
2221 bool &Commuted) const {
2222 const MachineBasicBlock *MBB = Inst.getParent();
2223 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2224 assert(RISCVII::isFirstDefTiedToFirstUse(get(Inst.getOpcode())) &&
2225 "Expect the presence of a passthru operand.");
2226 MachineInstr *MI1 = MRI.getUniqueVRegDef(Reg: Inst.getOperand(i: 2).getReg());
2227 MachineInstr *MI2 = MRI.getUniqueVRegDef(Reg: Inst.getOperand(i: 3).getReg());
2228
2229 // If only one operand has the same or inverse opcode and it's the second
2230 // source operand, the operands must be commuted.
2231 Commuted = !areRVVInstsReassociable(Root: Inst, Prev: *MI1) &&
2232 areRVVInstsReassociable(Root: Inst, Prev: *MI2);
2233 if (Commuted)
2234 std::swap(a&: MI1, b&: MI2);
2235
2236 return areRVVInstsReassociable(Root: Inst, Prev: *MI1) &&
2237 (isVectorAssociativeAndCommutative(Inst: *MI1) ||
2238 isVectorAssociativeAndCommutative(Inst: *MI1, /* Invert */ true)) &&
2239 hasReassociableOperands(Inst: *MI1, MBB) &&
2240 MRI.hasOneNonDBGUse(RegNo: MI1->getOperand(i: 0).getReg());
2241}
2242
2243bool RISCVInstrInfo::hasReassociableOperands(
2244 const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
2245 if (!isVectorAssociativeAndCommutative(Inst) &&
2246 !isVectorAssociativeAndCommutative(Inst, /*Invert=*/true))
2247 return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
2248
2249 const MachineOperand &Op1 = Inst.getOperand(i: 2);
2250 const MachineOperand &Op2 = Inst.getOperand(i: 3);
2251 const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2252
2253 // We need virtual register definitions for the operands that we will
2254 // reassociate.
2255 MachineInstr *MI1 = nullptr;
2256 MachineInstr *MI2 = nullptr;
2257 if (Op1.isReg() && Op1.getReg().isVirtual())
2258 MI1 = MRI.getUniqueVRegDef(Reg: Op1.getReg());
2259 if (Op2.isReg() && Op2.getReg().isVirtual())
2260 MI2 = MRI.getUniqueVRegDef(Reg: Op2.getReg());
2261
2262 // And at least one operand must be defined in MBB.
2263 return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
2264}
2265
2266void RISCVInstrInfo::getReassociateOperandIndices(
2267 const MachineInstr &Root, unsigned Pattern,
2268 std::array<unsigned, 5> &OperandIndices) const {
2269 TargetInstrInfo::getReassociateOperandIndices(Root, Pattern, OperandIndices);
2270 if (RISCV::getRVVMCOpcode(RVVPseudoOpcode: Root.getOpcode())) {
2271 // Skip the passthrough operand, so increment all indices by one.
2272 for (unsigned I = 0; I < 5; ++I)
2273 ++OperandIndices[I];
2274 }
2275}
2276
2277bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
2278 bool &Commuted) const {
2279 if (isVectorAssociativeAndCommutative(Inst) ||
2280 isVectorAssociativeAndCommutative(Inst, /*Invert=*/true))
2281 return hasReassociableVectorSibling(Inst, Commuted);
2282
2283 if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
2284 return false;
2285
2286 const MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
2287 unsigned OperandIdx = Commuted ? 2 : 1;
2288 const MachineInstr &Sibling =
2289 *MRI.getVRegDef(Reg: Inst.getOperand(i: OperandIdx).getReg());
2290
2291 int16_t InstFrmOpIdx =
2292 RISCV::getNamedOperandIdx(Opcode: Inst.getOpcode(), Name: RISCV::OpName::frm);
2293 int16_t SiblingFrmOpIdx =
2294 RISCV::getNamedOperandIdx(Opcode: Sibling.getOpcode(), Name: RISCV::OpName::frm);
2295
2296 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
2297 RISCV::hasEqualFRM(MI1: Inst, MI2: Sibling);
2298}
2299
2300bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
2301 bool Invert) const {
2302 if (isVectorAssociativeAndCommutative(Inst, Invert))
2303 return true;
2304
2305 unsigned Opc = Inst.getOpcode();
2306 if (Invert) {
2307 auto InverseOpcode = getInverseOpcode(Opcode: Opc);
2308 if (!InverseOpcode)
2309 return false;
2310 Opc = *InverseOpcode;
2311 }
2312
2313 if (isFADD(Opc) || isFMUL(Opc))
2314 return Inst.getFlag(Flag: MachineInstr::MIFlag::FmReassoc) &&
2315 Inst.getFlag(Flag: MachineInstr::MIFlag::FmNsz);
2316
2317 switch (Opc) {
2318 default:
2319 return false;
2320 case RISCV::ADD:
2321 case RISCV::ADDW:
2322 case RISCV::AND:
2323 case RISCV::OR:
2324 case RISCV::XOR:
2325 // From RISC-V ISA spec, if both the high and low bits of the same product
2326 // are required, then the recommended code sequence is:
2327 //
2328 // MULH[[S]U] rdh, rs1, rs2
2329 // MUL rdl, rs1, rs2
2330 // (source register specifiers must be in the same order and rdh cannot be the
2331 // same as rs1 or rs2)
2332 //
2333 // Microarchitectures can then fuse these into a single multiply operation
2334 // instead of performing two separate multiplies.
2335 // MachineCombiner may reassociate MUL operands and lose the fusion
2336 // opportunity.
2337 case RISCV::MUL:
2338 case RISCV::MULW:
2339 case RISCV::MIN:
2340 case RISCV::MINU:
2341 case RISCV::MAX:
2342 case RISCV::MAXU:
2343 case RISCV::FMIN_H:
2344 case RISCV::FMIN_S:
2345 case RISCV::FMIN_D:
2346 case RISCV::FMAX_H:
2347 case RISCV::FMAX_S:
2348 case RISCV::FMAX_D:
2349 return true;
2350 }
2351
2352 return false;
2353}
2354
2355std::optional<unsigned>
2356RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
2357#define RVV_OPC_LMUL_CASE(OPC, INV) \
2358 case RISCV::OPC##_M1: \
2359 return RISCV::INV##_M1; \
2360 case RISCV::OPC##_M2: \
2361 return RISCV::INV##_M2; \
2362 case RISCV::OPC##_M4: \
2363 return RISCV::INV##_M4; \
2364 case RISCV::OPC##_M8: \
2365 return RISCV::INV##_M8; \
2366 case RISCV::OPC##_MF2: \
2367 return RISCV::INV##_MF2; \
2368 case RISCV::OPC##_MF4: \
2369 return RISCV::INV##_MF4; \
2370 case RISCV::OPC##_MF8: \
2371 return RISCV::INV##_MF8
2372
2373#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2374 case RISCV::OPC##_M1_MASK: \
2375 return RISCV::INV##_M1_MASK; \
2376 case RISCV::OPC##_M2_MASK: \
2377 return RISCV::INV##_M2_MASK; \
2378 case RISCV::OPC##_M4_MASK: \
2379 return RISCV::INV##_M4_MASK; \
2380 case RISCV::OPC##_M8_MASK: \
2381 return RISCV::INV##_M8_MASK; \
2382 case RISCV::OPC##_MF2_MASK: \
2383 return RISCV::INV##_MF2_MASK; \
2384 case RISCV::OPC##_MF4_MASK: \
2385 return RISCV::INV##_MF4_MASK; \
2386 case RISCV::OPC##_MF8_MASK: \
2387 return RISCV::INV##_MF8_MASK
2388
2389 switch (Opcode) {
2390 default:
2391 return std::nullopt;
2392 case RISCV::FADD_H:
2393 return RISCV::FSUB_H;
2394 case RISCV::FADD_S:
2395 return RISCV::FSUB_S;
2396 case RISCV::FADD_D:
2397 return RISCV::FSUB_D;
2398 case RISCV::FSUB_H:
2399 return RISCV::FADD_H;
2400 case RISCV::FSUB_S:
2401 return RISCV::FADD_S;
2402 case RISCV::FSUB_D:
2403 return RISCV::FADD_D;
2404 case RISCV::ADD:
2405 return RISCV::SUB;
2406 case RISCV::SUB:
2407 return RISCV::ADD;
2408 case RISCV::ADDW:
2409 return RISCV::SUBW;
2410 case RISCV::SUBW:
2411 return RISCV::ADDW;
2412 // clang-format off
2413 RVV_OPC_LMUL_CASE(PseudoVADD_VV, PseudoVSUB_VV);
2414 RVV_OPC_LMUL_MASK_CASE(PseudoVADD_VV, PseudoVSUB_VV);
2415 RVV_OPC_LMUL_CASE(PseudoVSUB_VV, PseudoVADD_VV);
2416 RVV_OPC_LMUL_MASK_CASE(PseudoVSUB_VV, PseudoVADD_VV);
2417 // clang-format on
2418 }
2419
2420#undef RVV_OPC_LMUL_MASK_CASE
2421#undef RVV_OPC_LMUL_CASE
2422}
2423
2424static bool canCombineFPFusedMultiply(const MachineInstr &Root,
2425 const MachineOperand &MO,
2426 bool DoRegPressureReduce) {
2427 if (!MO.isReg() || !MO.getReg().isVirtual())
2428 return false;
2429 const MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
2430 MachineInstr *MI = MRI.getVRegDef(Reg: MO.getReg());
2431 if (!MI || !isFMUL(Opc: MI->getOpcode()))
2432 return false;
2433
2434 if (!Root.getFlag(Flag: MachineInstr::MIFlag::FmContract) ||
2435 !MI->getFlag(Flag: MachineInstr::MIFlag::FmContract))
2436 return false;
2437
2438 // Try combining even if the fmul has more than one use, as it eliminates
2439 // the dependency between the fadd (or fsub) and the fmul. However, it can
2440 // extend the live ranges of the fmul operands, so reject the transformation
2441 // in register pressure reduction mode.
2442 if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(RegNo: MI->getOperand(i: 0).getReg()))
2443 return false;
2444
2445 // Do not combine instructions from different basic blocks.
2446 if (Root.getParent() != MI->getParent())
2447 return false;
2448 return RISCV::hasEqualFRM(MI1: Root, MI2: *MI);
2449}
2450
2451static bool getFPFusedMultiplyPatterns(MachineInstr &Root,
2452 SmallVectorImpl<unsigned> &Patterns,
2453 bool DoRegPressureReduce) {
2454 unsigned Opc = Root.getOpcode();
2455 bool IsFAdd = isFADD(Opc);
2456 if (!IsFAdd && !isFSUB(Opc))
2457 return false;
2458 bool Added = false;
2459 if (canCombineFPFusedMultiply(Root, MO: Root.getOperand(i: 1),
2460 DoRegPressureReduce)) {
2461 Patterns.push_back(Elt: IsFAdd ? RISCVMachineCombinerPattern::FMADD_AX
2462 : RISCVMachineCombinerPattern::FMSUB);
2463 Added = true;
2464 }
2465 if (canCombineFPFusedMultiply(Root, MO: Root.getOperand(i: 2),
2466 DoRegPressureReduce)) {
2467 Patterns.push_back(Elt: IsFAdd ? RISCVMachineCombinerPattern::FMADD_XA
2468 : RISCVMachineCombinerPattern::FNMSUB);
2469 Added = true;
2470 }
2471 return Added;
2472}
2473
2474static bool getFPPatterns(MachineInstr &Root,
2475 SmallVectorImpl<unsigned> &Patterns,
2476 bool DoRegPressureReduce) {
2477 return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
2478}
2479
2480/// Utility routine that checks if \param MO is defined by an
2481/// \param CombineOpc instruction in the basic block \param MBB
2482static const MachineInstr *canCombine(const MachineBasicBlock &MBB,
2483 const MachineOperand &MO,
2484 unsigned CombineOpc) {
2485 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2486 const MachineInstr *MI = nullptr;
2487
2488 if (MO.isReg() && MO.getReg().isVirtual())
2489 MI = MRI.getUniqueVRegDef(Reg: MO.getReg());
2490 // And it needs to be in the trace (otherwise, it won't have a depth).
2491 if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)
2492 return nullptr;
2493 // It must only be used by the instruction we combine with.
2494 if (!MRI.hasOneNonDBGUse(RegNo: MI->getOperand(i: 0).getReg()))
2495 return nullptr;
2496
2497 return MI;
2498}
2499
2500/// Utility routine that checks if \param MO is defined by a SLLI in \param
2501/// MBB that can be combined by splitting across 2 SHXADD instructions. The
2502/// first SHXADD shift amount is given by \param OuterShiftAmt.
2503static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB,
2504 const MachineOperand &MO,
2505 unsigned OuterShiftAmt) {
2506 const MachineInstr *ShiftMI = canCombine(MBB, MO, CombineOpc: RISCV::SLLI);
2507 if (!ShiftMI)
2508 return false;
2509
2510 unsigned InnerShiftAmt = ShiftMI->getOperand(i: 2).getImm();
2511 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2512 return false;
2513
2514 return true;
2515}
2516
2517// Returns the shift amount from a SHXADD instruction. Returns 0 if the
2518// instruction is not a SHXADD.
2519static unsigned getSHXADDShiftAmount(unsigned Opc) {
2520 switch (Opc) {
2521 default:
2522 return 0;
2523 case RISCV::SH1ADD:
2524 return 1;
2525 case RISCV::SH2ADD:
2526 return 2;
2527 case RISCV::SH3ADD:
2528 return 3;
2529 }
2530}
2531
2532// Returns the shift amount from a SHXADD.UW instruction. Returns 0 if the
2533// instruction is not a SHXADD.UW.
2534static unsigned getSHXADDUWShiftAmount(unsigned Opc) {
2535 switch (Opc) {
2536 default:
2537 return 0;
2538 case RISCV::SH1ADD_UW:
2539 return 1;
2540 case RISCV::SH2ADD_UW:
2541 return 2;
2542 case RISCV::SH3ADD_UW:
2543 return 3;
2544 }
2545}
2546
2547// Look for opportunities to combine (sh3add Z, (add X, (slli Y, 5))) into
2548// (sh3add (sh2add Y, Z), X).
2549static bool getSHXADDPatterns(const MachineInstr &Root,
2550 SmallVectorImpl<unsigned> &Patterns) {
2551 unsigned ShiftAmt = getSHXADDShiftAmount(Opc: Root.getOpcode());
2552 if (!ShiftAmt)
2553 return false;
2554
2555 const MachineBasicBlock &MBB = *Root.getParent();
2556
2557 const MachineInstr *AddMI = canCombine(MBB, MO: Root.getOperand(i: 2), CombineOpc: RISCV::ADD);
2558 if (!AddMI)
2559 return false;
2560
2561 bool Found = false;
2562 if (canCombineShiftIntoShXAdd(MBB, MO: AddMI->getOperand(i: 1), OuterShiftAmt: ShiftAmt)) {
2563 Patterns.push_back(Elt: RISCVMachineCombinerPattern::SHXADD_ADD_SLLI_OP1);
2564 Found = true;
2565 }
2566 if (canCombineShiftIntoShXAdd(MBB, MO: AddMI->getOperand(i: 2), OuterShiftAmt: ShiftAmt)) {
2567 Patterns.push_back(Elt: RISCVMachineCombinerPattern::SHXADD_ADD_SLLI_OP2);
2568 Found = true;
2569 }
2570
2571 return Found;
2572}
2573
2574CombinerObjective RISCVInstrInfo::getCombinerObjective(unsigned Pattern) const {
2575 switch (Pattern) {
2576 case RISCVMachineCombinerPattern::FMADD_AX:
2577 case RISCVMachineCombinerPattern::FMADD_XA:
2578 case RISCVMachineCombinerPattern::FMSUB:
2579 case RISCVMachineCombinerPattern::FNMSUB:
2580 return CombinerObjective::MustReduceDepth;
2581 default:
2582 return TargetInstrInfo::getCombinerObjective(Pattern);
2583 }
2584}
2585
2586bool RISCVInstrInfo::getMachineCombinerPatterns(
2587 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
2588 bool DoRegPressureReduce) const {
2589
2590 if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
2591 return true;
2592
2593 if (getSHXADDPatterns(Root, Patterns))
2594 return true;
2595
2596 return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
2597 DoRegPressureReduce);
2598}
2599
2600static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern) {
2601 switch (RootOpc) {
2602 default:
2603 llvm_unreachable("Unexpected opcode");
2604 case RISCV::FADD_H:
2605 return RISCV::FMADD_H;
2606 case RISCV::FADD_S:
2607 return RISCV::FMADD_S;
2608 case RISCV::FADD_D:
2609 return RISCV::FMADD_D;
2610 case RISCV::FSUB_H:
2611 return Pattern == RISCVMachineCombinerPattern::FMSUB ? RISCV::FMSUB_H
2612 : RISCV::FNMSUB_H;
2613 case RISCV::FSUB_S:
2614 return Pattern == RISCVMachineCombinerPattern::FMSUB ? RISCV::FMSUB_S
2615 : RISCV::FNMSUB_S;
2616 case RISCV::FSUB_D:
2617 return Pattern == RISCVMachineCombinerPattern::FMSUB ? RISCV::FMSUB_D
2618 : RISCV::FNMSUB_D;
2619 }
2620}
2621
2622static unsigned getAddendOperandIdx(unsigned Pattern) {
2623 switch (Pattern) {
2624 default:
2625 llvm_unreachable("Unexpected pattern");
2626 case RISCVMachineCombinerPattern::FMADD_AX:
2627 case RISCVMachineCombinerPattern::FMSUB:
2628 return 2;
2629 case RISCVMachineCombinerPattern::FMADD_XA:
2630 case RISCVMachineCombinerPattern::FNMSUB:
2631 return 1;
2632 }
2633}
2634
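// Sketch of the FMADD_AX rewrite performed below (both instructions must
// carry the contract fast-math flag and agree on FRM; symbolic registers):
//   %m = FMUL_S %a, %b, frm
//   %r = FADD_S %m, %c, frm
// is replaced by
//   %r = FMADD_S %a, %b, %c
// with the rounding-mode operand appended later by finalizeInsInstrs.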
2635static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
2636 unsigned Pattern,
2637 SmallVectorImpl<MachineInstr *> &InsInstrs,
2638 SmallVectorImpl<MachineInstr *> &DelInstrs) {
2639 MachineFunction *MF = Root.getMF();
2640 MachineRegisterInfo &MRI = MF->getRegInfo();
2641 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2642
2643 MachineOperand &Mul1 = Prev.getOperand(i: 1);
2644 MachineOperand &Mul2 = Prev.getOperand(i: 2);
2645 MachineOperand &Dst = Root.getOperand(i: 0);
2646 MachineOperand &Addend = Root.getOperand(i: getAddendOperandIdx(Pattern));
2647
2648 Register DstReg = Dst.getReg();
2649 unsigned FusedOpc = getFPFusedMultiplyOpcode(RootOpc: Root.getOpcode(), Pattern);
2650 uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
2651 DebugLoc MergedLoc =
2652 DILocation::getMergedLocation(LocA: Root.getDebugLoc(), LocB: Prev.getDebugLoc());
2653
2654 bool Mul1IsKill = Mul1.isKill();
2655 bool Mul2IsKill = Mul2.isKill();
2656 bool AddendIsKill = Addend.isKill();
2657
2658 // We need to clear kill flags since we may be extending the live range past
2659 // a kill. If the mul had kill flags, we can preserve those since we know
2660 // where the previous range stopped.
2661 MRI.clearKillFlags(Reg: Mul1.getReg());
2662 MRI.clearKillFlags(Reg: Mul2.getReg());
2663
2664 MachineInstrBuilder MIB =
2665 BuildMI(MF&: *MF, MIMD: MergedLoc, MCID: TII->get(Opcode: FusedOpc), DestReg: DstReg)
2666 .addReg(RegNo: Mul1.getReg(), flags: getKillRegState(B: Mul1IsKill))
2667 .addReg(RegNo: Mul2.getReg(), flags: getKillRegState(B: Mul2IsKill))
2668 .addReg(RegNo: Addend.getReg(), flags: getKillRegState(B: AddendIsKill))
2669 .setMIFlags(IntersectedFlags);
2670
2671 InsInstrs.push_back(Elt: MIB);
2672 if (MRI.hasOneNonDBGUse(RegNo: Prev.getOperand(i: 0).getReg()))
2673 DelInstrs.push_back(Elt: &Prev);
2674 DelInstrs.push_back(Elt: &Root);
2675}
2676
2677// Combine patterns like (sh3add Z, (add X, (slli Y, 5))) to
2678// (sh3add (sh2add Y, Z), X) if the shift amount can be split across two
2679// shXadd instructions. The outer shXadd keeps its original opcode.
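// For example, with Root = SH3ADD (OuterShiftAmt = 3) and an inner SLLI by 5,
// the difference 5 - 3 = 2 selects SH2ADD as the inner opcode:
//   %s = SLLI %y, 5
//   %a = ADD %x, %s
//   %r = SH3ADD %z, %a          ; (Z << 3) + X + (Y << 5)
// becomes
//   %t = SH2ADD %y, %z          ; (Y << 2) + Z
//   %r = SH3ADD %t, %x          ; (T << 3) + X = (Y << 5) + (Z << 3) + X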
2680static void
2681genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx,
2682 SmallVectorImpl<MachineInstr *> &InsInstrs,
2683 SmallVectorImpl<MachineInstr *> &DelInstrs,
2684 DenseMap<Register, unsigned> &InstrIdxForVirtReg) {
2685 MachineFunction *MF = Root.getMF();
2686 MachineRegisterInfo &MRI = MF->getRegInfo();
2687 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2688
2689 unsigned OuterShiftAmt = getSHXADDShiftAmount(Opc: Root.getOpcode());
2690 assert(OuterShiftAmt != 0 && "Unexpected opcode");
2691
2692 MachineInstr *AddMI = MRI.getUniqueVRegDef(Reg: Root.getOperand(i: 2).getReg());
2693 MachineInstr *ShiftMI =
2694 MRI.getUniqueVRegDef(Reg: AddMI->getOperand(i: AddOpIdx).getReg());
2695
2696 unsigned InnerShiftAmt = ShiftMI->getOperand(i: 2).getImm();
2697 assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");
2698
2699 unsigned InnerOpc;
2700 switch (InnerShiftAmt - OuterShiftAmt) {
2701 default:
2702 llvm_unreachable("Unexpected shift amount");
2703 case 0:
2704 InnerOpc = RISCV::ADD;
2705 break;
2706 case 1:
2707 InnerOpc = RISCV::SH1ADD;
2708 break;
2709 case 2:
2710 InnerOpc = RISCV::SH2ADD;
2711 break;
2712 case 3:
2713 InnerOpc = RISCV::SH3ADD;
2714 break;
2715 }
2716
2717 const MachineOperand &X = AddMI->getOperand(i: 3 - AddOpIdx);
2718 const MachineOperand &Y = ShiftMI->getOperand(i: 1);
2719 const MachineOperand &Z = Root.getOperand(i: 1);
2720
2721 Register NewVR = MRI.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
2722
2723 auto MIB1 = BuildMI(MF&: *MF, MIMD: MIMetadata(Root), MCID: TII->get(Opcode: InnerOpc), DestReg: NewVR)
2724 .addReg(RegNo: Y.getReg(), flags: getKillRegState(B: Y.isKill()))
2725 .addReg(RegNo: Z.getReg(), flags: getKillRegState(B: Z.isKill()));
2726 auto MIB2 = BuildMI(MF&: *MF, MIMD: MIMetadata(Root), MCID: TII->get(Opcode: Root.getOpcode()),
2727 DestReg: Root.getOperand(i: 0).getReg())
2728 .addReg(RegNo: NewVR, flags: RegState::Kill)
2729 .addReg(RegNo: X.getReg(), flags: getKillRegState(B: X.isKill()));
2730
2731 InstrIdxForVirtReg.insert(KV: std::make_pair(x&: NewVR, y: 0));
2732 InsInstrs.push_back(Elt: MIB1);
2733 InsInstrs.push_back(Elt: MIB2);
2734 DelInstrs.push_back(Elt: ShiftMI);
2735 DelInstrs.push_back(Elt: AddMI);
2736 DelInstrs.push_back(Elt: &Root);
2737}
2738
2739void RISCVInstrInfo::genAlternativeCodeSequence(
2740 MachineInstr &Root, unsigned Pattern,
2741 SmallVectorImpl<MachineInstr *> &InsInstrs,
2742 SmallVectorImpl<MachineInstr *> &DelInstrs,
2743 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
2744 MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
2745 switch (Pattern) {
2746 default:
2747 TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
2748 DelInstrs, InstIdxForVirtReg&: InstrIdxForVirtReg);
2749 return;
2750 case RISCVMachineCombinerPattern::FMADD_AX:
2751 case RISCVMachineCombinerPattern::FMSUB: {
2752 MachineInstr &Prev = *MRI.getVRegDef(Reg: Root.getOperand(i: 1).getReg());
2753 combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
2754 return;
2755 }
2756 case RISCVMachineCombinerPattern::FMADD_XA:
2757 case RISCVMachineCombinerPattern::FNMSUB: {
2758 MachineInstr &Prev = *MRI.getVRegDef(Reg: Root.getOperand(i: 2).getReg());
2759 combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
2760 return;
2761 }
2762 case RISCVMachineCombinerPattern::SHXADD_ADD_SLLI_OP1:
2763 genShXAddAddShift(Root, AddOpIdx: 1, InsInstrs, DelInstrs, InstrIdxForVirtReg);
2764 return;
2765 case RISCVMachineCombinerPattern::SHXADD_ADD_SLLI_OP2:
2766 genShXAddAddShift(Root, AddOpIdx: 2, InsInstrs, DelInstrs, InstrIdxForVirtReg);
2767 return;
2768 }
2769}
2770
2771bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
2772 StringRef &ErrInfo) const {
2773 MCInstrDesc const &Desc = MI.getDesc();
2774
2775 for (const auto &[Index, Operand] : enumerate(First: Desc.operands())) {
2776 unsigned OpType = Operand.OperandType;
2777 if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
2778 OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
2779 const MachineOperand &MO = MI.getOperand(i: Index);
2780 if (MO.isReg()) {
2781 ErrInfo = "Expected a non-register operand.";
2782 return false;
2783 }
2784 if (MO.isImm()) {
2785 int64_t Imm = MO.getImm();
2786 bool Ok;
2787 switch (OpType) {
2788 default:
2789 llvm_unreachable("Unexpected operand type");
2790
2791 // clang-format off
2792#define CASE_OPERAND_UIMM(NUM) \
2793 case RISCVOp::OPERAND_UIMM##NUM: \
2794 Ok = isUInt<NUM>(Imm); \
2795 break;
2796#define CASE_OPERAND_SIMM(NUM) \
2797 case RISCVOp::OPERAND_SIMM##NUM: \
2798 Ok = isInt<NUM>(Imm); \
2799 break;
2800 CASE_OPERAND_UIMM(1)
2801 CASE_OPERAND_UIMM(2)
2802 CASE_OPERAND_UIMM(3)
2803 CASE_OPERAND_UIMM(4)
2804 CASE_OPERAND_UIMM(5)
2805 CASE_OPERAND_UIMM(6)
2806 CASE_OPERAND_UIMM(7)
2807 CASE_OPERAND_UIMM(8)
2808 CASE_OPERAND_UIMM(9)
2809 CASE_OPERAND_UIMM(10)
2810 CASE_OPERAND_UIMM(12)
2811 CASE_OPERAND_UIMM(16)
2812 CASE_OPERAND_UIMM(20)
2813 CASE_OPERAND_UIMM(32)
2814 CASE_OPERAND_UIMM(48)
2815 CASE_OPERAND_UIMM(64)
2816 // clang-format on
2817 case RISCVOp::OPERAND_UIMM2_LSB0:
2818 Ok = isShiftedUInt<1, 1>(x: Imm);
2819 break;
2820 case RISCVOp::OPERAND_UIMM5_LSB0:
2821 Ok = isShiftedUInt<4, 1>(x: Imm);
2822 break;
2823 case RISCVOp::OPERAND_UIMM5_NONZERO:
2824 Ok = isUInt<5>(x: Imm) && (Imm != 0);
2825 break;
2826 case RISCVOp::OPERAND_UIMM5_PLUS1:
2827 Ok = (isUInt<5>(x: Imm) && (Imm != 0)) || (Imm == 32);
2828 break;
2829 case RISCVOp::OPERAND_UIMM6_LSB0:
2830 Ok = isShiftedUInt<5, 1>(x: Imm);
2831 break;
2832 case RISCVOp::OPERAND_UIMM7_LSB00:
2833 Ok = isShiftedUInt<5, 2>(x: Imm);
2834 break;
2835 case RISCVOp::OPERAND_UIMM7_LSB000:
2836 Ok = isShiftedUInt<4, 3>(x: Imm);
2837 break;
2838 case RISCVOp::OPERAND_UIMM8_LSB00:
2839 Ok = isShiftedUInt<6, 2>(x: Imm);
2840 break;
2841 case RISCVOp::OPERAND_UIMM8_LSB000:
2842 Ok = isShiftedUInt<5, 3>(x: Imm);
2843 break;
2844 case RISCVOp::OPERAND_UIMM8_GE32:
2845 Ok = isUInt<8>(x: Imm) && Imm >= 32;
2846 break;
2847 case RISCVOp::OPERAND_UIMM9_LSB000:
2848 Ok = isShiftedUInt<6, 3>(x: Imm);
2849 break;
2850 case RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO:
2851 Ok = isShiftedInt<6, 4>(x: Imm) && (Imm != 0);
2852 break;
2853 case RISCVOp::OPERAND_UIMM10_LSB00_NONZERO:
2854 Ok = isShiftedUInt<8, 2>(x: Imm) && (Imm != 0);
2855 break;
2856 case RISCVOp::OPERAND_UIMM16_NONZERO:
2857 Ok = isUInt<16>(x: Imm) && (Imm != 0);
2858 break;
2859 case RISCVOp::OPERAND_ZERO:
2860 Ok = Imm == 0;
2861 break;
2862 case RISCVOp::OPERAND_THREE:
2863 Ok = Imm == 3;
2864 break;
2865 case RISCVOp::OPERAND_FOUR:
2866 Ok = Imm == 4;
2867 break;
2868 // clang-format off
2869 CASE_OPERAND_SIMM(5)
2870 CASE_OPERAND_SIMM(6)
2871 CASE_OPERAND_SIMM(11)
2872 CASE_OPERAND_SIMM(12)
2873 CASE_OPERAND_SIMM(26)
2874 // clang-format on
2875 case RISCVOp::OPERAND_SIMM5_PLUS1:
2876 Ok = (isInt<5>(x: Imm) && Imm != -16) || Imm == 16;
2877 break;
2878 case RISCVOp::OPERAND_SIMM5_NONZERO:
2879 Ok = isInt<5>(x: Imm) && (Imm != 0);
2880 break;
2881 case RISCVOp::OPERAND_SIMM6_NONZERO:
2882 Ok = Imm != 0 && isInt<6>(x: Imm);
2883 break;
2884 case RISCVOp::OPERAND_VTYPEI10:
2885 Ok = isUInt<10>(x: Imm);
2886 break;
2887 case RISCVOp::OPERAND_VTYPEI11:
2888 Ok = isUInt<11>(x: Imm);
2889 break;
2890 case RISCVOp::OPERAND_SIMM12_LSB00000:
2891 Ok = isShiftedInt<7, 5>(x: Imm);
2892 break;
2893 case RISCVOp::OPERAND_SIMM16_NONZERO:
2894 Ok = isInt<16>(x: Imm) && (Imm != 0);
2895 break;
2896 case RISCVOp::OPERAND_SIMM20_LI:
2897 Ok = isInt<20>(x: Imm);
2898 break;
2899 case RISCVOp::OPERAND_BARE_SIMM32:
2900 Ok = isInt<32>(x: Imm);
2901 break;
2902 case RISCVOp::OPERAND_UIMMLOG2XLEN:
2903 Ok = STI.is64Bit() ? isUInt<6>(x: Imm) : isUInt<5>(x: Imm);
2904 break;
2905 case RISCVOp::OPERAND_UIMMLOG2XLEN_NONZERO:
2906 Ok = STI.is64Bit() ? isUInt<6>(x: Imm) : isUInt<5>(x: Imm);
2907 Ok = Ok && Imm != 0;
2908 break;
2909 case RISCVOp::OPERAND_CLUI_IMM:
2910 Ok = (isUInt<5>(x: Imm) && Imm != 0) ||
2911 (Imm >= 0xfffe0 && Imm <= 0xfffff);
2912 break;
2913 case RISCVOp::OPERAND_RVKRNUM:
2914 Ok = Imm >= 0 && Imm <= 10;
2915 break;
2916 case RISCVOp::OPERAND_RVKRNUM_0_7:
2917 Ok = Imm >= 0 && Imm <= 7;
2918 break;
2919 case RISCVOp::OPERAND_RVKRNUM_1_10:
2920 Ok = Imm >= 1 && Imm <= 10;
2921 break;
2922 case RISCVOp::OPERAND_RVKRNUM_2_14:
2923 Ok = Imm >= 2 && Imm <= 14;
2924 break;
2925 case RISCVOp::OPERAND_RLIST:
2926 Ok = Imm >= RISCVZC::RA && Imm <= RISCVZC::RA_S0_S11;
2927 break;
2928 case RISCVOp::OPERAND_RLIST_S0:
2929 Ok = Imm >= RISCVZC::RA_S0 && Imm <= RISCVZC::RA_S0_S11;
2930 break;
2931 case RISCVOp::OPERAND_STACKADJ:
2932 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
2933 break;
2934 case RISCVOp::OPERAND_FRMARG:
2935 Ok = RISCVFPRndMode::isValidRoundingMode(Mode: Imm);
2936 break;
2937 case RISCVOp::OPERAND_RTZARG:
2938 Ok = Imm == RISCVFPRndMode::RTZ;
2939 break;
2940 case RISCVOp::OPERAND_COND_CODE:
2941 Ok = Imm >= 0 && Imm < RISCVCC::COND_INVALID;
2942 break;
2943 case RISCVOp::OPERAND_VEC_POLICY:
2944 Ok = (Imm &
2945 (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC)) == Imm;
2946 break;
2947 case RISCVOp::OPERAND_SEW:
2948 Ok = (isUInt<5>(x: Imm) && RISCVVType::isValidSEW(SEW: 1 << Imm));
2949 break;
2950 case RISCVOp::OPERAND_SEW_MASK:
2951 Ok = Imm == 0;
2952 break;
2953 case RISCVOp::OPERAND_VEC_RM:
2954 assert(RISCVII::hasRoundModeOp(Desc.TSFlags));
2955 if (RISCVII::usesVXRM(TSFlags: Desc.TSFlags))
2956 Ok = isUInt<2>(x: Imm);
2957 else
2958 Ok = RISCVFPRndMode::isValidRoundingMode(Mode: Imm);
2959 break;
2960 }
2961 if (!Ok) {
2962 ErrInfo = "Invalid immediate";
2963 return false;
2964 }
2965 }
2966 }
2967 }
2968
2969 const uint64_t TSFlags = Desc.TSFlags;
2970 if (RISCVII::hasVLOp(TSFlags)) {
2971 const MachineOperand &Op = MI.getOperand(i: RISCVII::getVLOpNum(Desc));
2972 if (!Op.isImm() && !Op.isReg()) {
2973 ErrInfo = "Invalid operand type for VL operand";
2974 return false;
2975 }
2976 if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
2977 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2978 auto *RC = MRI.getRegClass(Reg: Op.getReg());
2979 if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
2980 ErrInfo = "Invalid register class for VL operand";
2981 return false;
2982 }
2983 }
2984 if (!RISCVII::hasSEWOp(TSFlags)) {
2985 ErrInfo = "VL operand w/o SEW operand?";
2986 return false;
2987 }
2988 }
2989 if (RISCVII::hasSEWOp(TSFlags)) {
2990 unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
2991 if (!MI.getOperand(i: OpIdx).isImm()) {
2992 ErrInfo = "SEW value expected to be an immediate";
2993 return false;
2994 }
2995 uint64_t Log2SEW = MI.getOperand(i: OpIdx).getImm();
2996 if (Log2SEW > 31) {
2997 ErrInfo = "Unexpected SEW value";
2998 return false;
2999 }
3000 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3001 if (!RISCVVType::isValidSEW(SEW)) {
3002 ErrInfo = "Unexpected SEW value";
3003 return false;
3004 }
3005 }
3006 if (RISCVII::hasVecPolicyOp(TSFlags)) {
3007 unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
3008 if (!MI.getOperand(i: OpIdx).isImm()) {
3009 ErrInfo = "Policy operand expected to be an immediate";
3010 return false;
3011 }
3012 uint64_t Policy = MI.getOperand(i: OpIdx).getImm();
3013 if (Policy > (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC)) {
3014 ErrInfo = "Invalid Policy Value";
3015 return false;
3016 }
3017 if (!RISCVII::hasVLOp(TSFlags)) {
3018 ErrInfo = "policy operand w/o VL operand?";
3019 return false;
3020 }
3021
3022 // VecPolicy operands can only exist on instructions with passthru/merge
3023 // arguments. Note that not all instructions with a passthru have vec policy
3024 // operands; some instructions have implicit policies.
3025 unsigned UseOpIdx;
3026 if (!MI.isRegTiedToUseOperand(DefOpIdx: 0, UseOpIdx: &UseOpIdx)) {
3027 ErrInfo = "policy operand w/o tied operand?";
3028 return false;
3029 }
3030 }
3031
3032 if (int Idx = RISCVII::getFRMOpNum(Desc);
3033 Idx >= 0 && MI.getOperand(i: Idx).getImm() == RISCVFPRndMode::DYN &&
3034 !MI.readsRegister(Reg: RISCV::FRM, /*TRI=*/nullptr)) {
3035 ErrInfo = "dynamic rounding mode should read FRM";
3036 return false;
3037 }
3038
3039 return true;
3040}
3041
3042bool RISCVInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
3043 const MachineInstr &AddrI,
3044 ExtAddrMode &AM) const {
3045 switch (MemI.getOpcode()) {
3046 default:
3047 return false;
3048 case RISCV::LB:
3049 case RISCV::LBU:
3050 case RISCV::LH:
3051 case RISCV::LH_INX:
3052 case RISCV::LHU:
3053 case RISCV::LW:
3054 case RISCV::LW_INX:
3055 case RISCV::LWU:
3056 case RISCV::LD:
3057 case RISCV::LD_RV32:
3058 case RISCV::FLH:
3059 case RISCV::FLW:
3060 case RISCV::FLD:
3061 case RISCV::SB:
3062 case RISCV::SH:
3063 case RISCV::SH_INX:
3064 case RISCV::SW:
3065 case RISCV::SW_INX:
3066 case RISCV::SD:
3067 case RISCV::SD_RV32:
3068 case RISCV::FSH:
3069 case RISCV::FSW:
3070 case RISCV::FSD:
3071 break;
3072 }
3073
3074 if (MemI.getOperand(i: 0).getReg() == Reg)
3075 return false;
3076
3077 if (AddrI.getOpcode() != RISCV::ADDI || !AddrI.getOperand(i: 1).isReg() ||
3078 !AddrI.getOperand(i: 2).isImm())
3079 return false;
3080
3081 int64_t OldOffset = MemI.getOperand(i: 2).getImm();
3082 int64_t Disp = AddrI.getOperand(i: 2).getImm();
3083 int64_t NewOffset = OldOffset + Disp;
3084 if (!STI.is64Bit())
3085 NewOffset = SignExtend64<32>(x: NewOffset);
3086
3087 if (!isInt<12>(x: NewOffset))
3088 return false;
3089
3090 AM.BaseReg = AddrI.getOperand(i: 1).getReg();
3091 AM.ScaledReg = 0;
3092 AM.Scale = 0;
3093 AM.Displacement = NewOffset;
3094 AM.Form = ExtAddrMode::Formula::Basic;
3095 return true;
3096}
3097
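// For example (symbolic registers), canFoldIntoAddrMode above and
// emitLdStWithAddr below cooperate to fold
//   %base = ADDI %p, 16
//   %v    = LW %base, 4
// into
//   %v    = LW %p, 20
// as long as the combined displacement still fits in a signed 12-bit
// immediate.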
3098MachineInstr *RISCVInstrInfo::emitLdStWithAddr(MachineInstr &MemI,
3099 const ExtAddrMode &AM) const {
3100
3101 const DebugLoc &DL = MemI.getDebugLoc();
3102 MachineBasicBlock &MBB = *MemI.getParent();
3103
3104 assert(AM.ScaledReg == 0 && AM.Scale == 0 &&
3105 "Addressing mode not supported for folding");
3106
3107 return BuildMI(BB&: MBB, I&: MemI, MIMD: DL, MCID: get(Opcode: MemI.getOpcode()))
3108 .addReg(RegNo: MemI.getOperand(i: 0).getReg(),
3109 flags: MemI.mayLoad() ? RegState::Define : 0)
3110 .addReg(RegNo: AM.BaseReg)
3111 .addImm(Val: AM.Displacement)
3112 .setMemRefs(MemI.memoperands())
3113 .setMIFlags(MemI.getFlags());
3114}
3115
3116 // TODO: At the moment, only MIPS has introduced pairing of instructions
3117 // operating on words or double words. This should be extended with more
3118 // instructions once more vendors support load/store pairing.
3119bool RISCVInstrInfo::isPairableLdStInstOpc(unsigned Opc) {
3120 switch (Opc) {
3121 default:
3122 return false;
3123 case RISCV::SW:
3124 case RISCV::SD:
3125 case RISCV::LD:
3126 case RISCV::LW:
3127 return true;
3128 }
3129}
3130
3131bool RISCVInstrInfo::isLdStSafeToPair(const MachineInstr &LdSt,
3132 const TargetRegisterInfo *TRI) {
3133 // If this is a volatile or otherwise ordered load/store, don't mess with it.
3134 if (LdSt.hasOrderedMemoryRef() || LdSt.getNumExplicitOperands() != 3)
3135 return false;
3136
3137 if (LdSt.getOperand(i: 1).isFI())
3138 return true;
3139
3140 assert(LdSt.getOperand(1).isReg() && "Expected a reg operand.");
3141 // Can't cluster if the instruction modifies the base register or is an
3142 // update form, e.g. ld x5, 8(x5).
3143 if (LdSt.modifiesRegister(Reg: LdSt.getOperand(i: 1).getReg(), TRI))
3144 return false;
3145
3146 if (!LdSt.getOperand(i: 2).isImm())
3147 return false;
3148
3149 return true;
3150}
3151
3152bool RISCVInstrInfo::getMemOperandsWithOffsetWidth(
3153 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
3154 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
3155 const TargetRegisterInfo *TRI) const {
3156 if (!LdSt.mayLoadOrStore())
3157 return false;
3158
3159 // Conservatively, only handle scalar loads/stores for now.
3160 switch (LdSt.getOpcode()) {
3161 case RISCV::LB:
3162 case RISCV::LBU:
3163 case RISCV::SB:
3164 case RISCV::LH:
3165 case RISCV::LH_INX:
3166 case RISCV::LHU:
3167 case RISCV::FLH:
3168 case RISCV::SH:
3169 case RISCV::SH_INX:
3170 case RISCV::FSH:
3171 case RISCV::LW:
3172 case RISCV::LW_INX:
3173 case RISCV::LWU:
3174 case RISCV::FLW:
3175 case RISCV::SW:
3176 case RISCV::SW_INX:
3177 case RISCV::FSW:
3178 case RISCV::LD:
3179 case RISCV::LD_RV32:
3180 case RISCV::FLD:
3181 case RISCV::SD:
3182 case RISCV::SD_RV32:
3183 case RISCV::FSD:
3184 break;
3185 default:
3186 return false;
3187 }
3188 const MachineOperand *BaseOp;
3189 OffsetIsScalable = false;
3190 if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
3191 return false;
3192 BaseOps.push_back(Elt: BaseOp);
3193 return true;
3194}
3195
3196// TODO: This was copied from SIInstrInfo. Could it be lifted to a common
3197// helper?
3198static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
3199 ArrayRef<const MachineOperand *> BaseOps1,
3200 const MachineInstr &MI2,
3201 ArrayRef<const MachineOperand *> BaseOps2) {
3202 // Only examine the first "base" operand of each instruction, on the
3203 // assumption that it represents the real base address of the memory access.
3204 // Other operands are typically offsets or indices from this base address.
3205 if (BaseOps1.front()->isIdenticalTo(Other: *BaseOps2.front()))
3206 return true;
3207
3208 if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
3209 return false;
3210
3211 auto MO1 = *MI1.memoperands_begin();
3212 auto MO2 = *MI2.memoperands_begin();
3213 if (MO1->getAddrSpace() != MO2->getAddrSpace())
3214 return false;
3215
3216 auto Base1 = MO1->getValue();
3217 auto Base2 = MO2->getValue();
3218 if (!Base1 || !Base2)
3219 return false;
3220 Base1 = getUnderlyingObject(V: Base1);
3221 Base2 = getUnderlyingObject(V: Base2);
3222
3223 if (isa<UndefValue>(Val: Base1) || isa<UndefValue>(Val: Base2))
3224 return false;
3225
3226 return Base1 == Base2;
3227}
3228
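// As a rough numeric illustration of the heuristic below: with the default
// 64-byte cache line and ClusterSize <= 4, two accesses off the same base at
// offsets 8 and 56 are clustered (|8 - 56| = 48 < 64), while offsets 8 and
// 136 are not (128 >= 64).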
3229bool RISCVInstrInfo::shouldClusterMemOps(
3230 ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1,
3231 bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2,
3232 int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,
3233 unsigned NumBytes) const {
3234 // If the mem ops (to be clustered) do not have the same base ptr, then they
3235 // should not be clustered
3236 if (!BaseOps1.empty() && !BaseOps2.empty()) {
3237 const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
3238 const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
3239 if (!memOpsHaveSameBasePtr(MI1: FirstLdSt, BaseOps1, MI2: SecondLdSt, BaseOps2))
3240 return false;
3241 } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
3242 // If only one base op is empty, they do not have the same base ptr
3243 return false;
3244 }
3245
3246 unsigned CacheLineSize =
3247 BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
3248 // Assume a cache line size of 64 bytes if no size is set in RISCVSubtarget.
3249 CacheLineSize = CacheLineSize ? CacheLineSize : 64;
3250 // Cluster if the memory operations are on the same or a neighbouring cache
3251 // line, but limit the maximum ClusterSize to avoid creating too much
3252 // additional register pressure.
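  // E.g. "lw a0, 0(a1)" and "lw a2, 8(a1)" share a base and sit 8 bytes
  // apart, so they may be clustered (as long as at most 4 operations end up
  // in the cluster), while accesses a full cache line or more apart are not.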
3253 return ClusterSize <= 4 && std::abs(i: Offset1 - Offset2) < CacheLineSize;
3254}
3255
3256// Set BaseReg (the base register operand), Offset (the byte offset being
3257// accessed) and the access Width of the passed instruction that reads/writes
3258// memory. Returns false if the instruction does not read/write memory or the
3259// BaseReg/Offset/Width can't be determined. This is not guaranteed to
3260// recognise base operands and offsets in all cases.
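// E.g. for "lw a0, 16(a1)" this sets BaseReg to the a1 operand, Offset to 16
// and Width to 4 bytes; for a frame-index access the base operand is the
// frame index rather than a register.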
3261// TODO: Add an IsScalable bool ref argument (like the equivalent AArch64
3262// function) and set it as appropriate.
3263bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
3264 const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
3265 LocationSize &Width, const TargetRegisterInfo *TRI) const {
3266 if (!LdSt.mayLoadOrStore())
3267 return false;
3268
3269 // Here we assume the standard RISC-V ISA, which uses a base+offset
3270 // addressing mode. You'll need to relax these conditions to support custom
3271 // load/store instructions.
3272 if (LdSt.getNumExplicitOperands() != 3)
3273 return false;
3274 if ((!LdSt.getOperand(i: 1).isReg() && !LdSt.getOperand(i: 1).isFI()) ||
3275 !LdSt.getOperand(i: 2).isImm())
3276 return false;
3277
3278 if (!LdSt.hasOneMemOperand())
3279 return false;
3280
3281 Width = (*LdSt.memoperands_begin())->getSize();
3282 BaseReg = &LdSt.getOperand(i: 1);
3283 Offset = LdSt.getOperand(i: 2).getImm();
3284 return true;
3285}
3286
3287bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
3288 const MachineInstr &MIa, const MachineInstr &MIb) const {
3289 assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
3290 assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
3291
3292 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
3293 MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
3294 return false;
3295
3296  // Retrieve the base register, offset from the base register and width. Width
3297  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If the
3298  // base registers are identical and the offset of the lower memory access plus
3299  // its width does not reach the offset of the higher memory access,
3300  // then the memory accesses cannot overlap.
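  // E.g. "sw a1, 0(a0)" (offset 0, width 4) and "lw a2, 8(a0)" share the base
  // a0 and 0 + 4 <= 8, so the two accesses are trivially disjoint.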
3301 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
3302 const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
3303 int64_t OffsetA = 0, OffsetB = 0;
3304 LocationSize WidthA = LocationSize::precise(Value: 0),
3305 WidthB = LocationSize::precise(Value: 0);
3306 if (getMemOperandWithOffsetWidth(LdSt: MIa, BaseReg&: BaseOpA, Offset&: OffsetA, Width&: WidthA, TRI) &&
3307 getMemOperandWithOffsetWidth(LdSt: MIb, BaseReg&: BaseOpB, Offset&: OffsetB, Width&: WidthB, TRI)) {
3308 if (BaseOpA->isIdenticalTo(Other: *BaseOpB)) {
3309 int LowOffset = std::min(a: OffsetA, b: OffsetB);
3310 int HighOffset = std::max(a: OffsetA, b: OffsetB);
3311 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3312 if (LowWidth.hasValue() &&
3313 LowOffset + (int)LowWidth.getValue() <= HighOffset)
3314 return true;
3315 }
3316 }
3317 return false;
3318}
3319
3320std::pair<unsigned, unsigned>
3321RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
3322 const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
3323 return std::make_pair(x: TF & Mask, y: TF & ~Mask);
3324}
3325
3326ArrayRef<std::pair<unsigned, const char *>>
3327RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
3328 using namespace RISCVII;
3329 static const std::pair<unsigned, const char *> TargetFlags[] = {
3330 {MO_CALL, "riscv-call"},
3331 {MO_LO, "riscv-lo"},
3332 {MO_HI, "riscv-hi"},
3333 {MO_PCREL_LO, "riscv-pcrel-lo"},
3334 {MO_PCREL_HI, "riscv-pcrel-hi"},
3335 {MO_GOT_HI, "riscv-got-hi"},
3336 {MO_TPREL_LO, "riscv-tprel-lo"},
3337 {MO_TPREL_HI, "riscv-tprel-hi"},
3338 {MO_TPREL_ADD, "riscv-tprel-add"},
3339 {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
3340 {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
3341 {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
3342 {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
3343 {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
3344 {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
3345 return ArrayRef(TargetFlags);
3346}
3347bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
3348 MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
3349 const Function &F = MF.getFunction();
3350
3351 // Can F be deduplicated by the linker? If it can, don't outline from it.
3352 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
3353 return false;
3354
3355 // Don't outline from functions with section markings; the program could
3356 // expect that all the code is in the named section.
3357 if (F.hasSection())
3358 return false;
3359
3360 // It's safe to outline from MF.
3361 return true;
3362}
3363
3364bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
3365 unsigned &Flags) const {
3366 // More accurate safety checking is done in getOutliningCandidateInfo.
3367 return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
3368}
3369
3370// Enum values indicating how an outlined call should be constructed.
3371enum MachineOutlinerConstructionID {
3372 MachineOutlinerTailCall,
3373 MachineOutlinerDefault
3374};
3375
3376bool RISCVInstrInfo::shouldOutlineFromFunctionByDefault(
3377 MachineFunction &MF) const {
3378 return MF.getFunction().hasMinSize();
3379}
3380
3381static bool isCandidatePatchable(const MachineBasicBlock &MBB) {
3382 const MachineFunction *MF = MBB.getParent();
3383 const Function &F = MF->getFunction();
3384 return F.getFnAttribute(Kind: "fentry-call").getValueAsBool() ||
3385 F.hasFnAttribute(Kind: "patchable-function-entry");
3386}
3387
3388static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI,
3389 MCRegister RegNo) {
3390 return MI.readsRegister(Reg: RegNo, TRI) ||
3391 MI.getDesc().hasImplicitUseOfPhysReg(Reg: RegNo);
3392}
3393
3394static bool isMIModifiesReg(const MachineInstr &MI,
3395 const TargetRegisterInfo *TRI, MCRegister RegNo) {
3396 return MI.modifiesRegister(Reg: RegNo, TRI) ||
3397 MI.getDesc().hasImplicitDefOfPhysReg(Reg: RegNo);
3398}
3399
3400static bool cannotInsertTailCall(const MachineBasicBlock &MBB) {
3401 if (!MBB.back().isReturn())
3402 return true;
3403 if (isCandidatePatchable(MBB))
3404 return true;
3405
3406  // If the candidate reads the pre-set register
3407  // that can be used for expanding the PseudoTAIL instruction,
3408  // then we cannot insert a tail call.
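  // (The register is chosen by RISCVII::getTailExpandUseRegNo from the
  // enabled features; typically this is t1.)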
3409 const TargetSubtargetInfo &STI = MBB.getParent()->getSubtarget();
3410 MCRegister TailExpandUseRegNo =
3411 RISCVII::getTailExpandUseRegNo(FeatureBits: STI.getFeatureBits());
3412 for (const MachineInstr &MI : MBB) {
3413 if (isMIReadsReg(MI, TRI: STI.getRegisterInfo(), RegNo: TailExpandUseRegNo))
3414 return true;
3415 if (isMIModifiesReg(MI, TRI: STI.getRegisterInfo(), RegNo: TailExpandUseRegNo))
3416 break;
3417 }
3418 return false;
3419}
3420
3421static bool analyzeCandidate(outliner::Candidate &C) {
3422  // If the last instruction is a return, we can rely on
3423  // the verification already performed in getOutliningTypeImpl.
3424 if (C.back().isReturn()) {
3425    assert(!cannotInsertTailCall(*C.getMBB()) &&
3426           "A candidate ending in a return instruction must be outlined "
3427           "using a tail call");
3428 return false;
3429 }
3430
3431  // Filter out candidates where the X5 register (t0) can't be used to set up
3432  // the function call.
3433 const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
3434 if (llvm::any_of(Range&: C, P: [TRI](const MachineInstr &MI) {
3435 return isMIModifiesReg(MI, TRI, RegNo: RISCV::X5);
3436 }))
3437 return true;
3438
3439 return !C.isAvailableAcrossAndOutOfSeq(Reg: RISCV::X5, TRI: *TRI);
3440}
3441
3442std::optional<std::unique_ptr<outliner::OutlinedFunction>>
3443RISCVInstrInfo::getOutliningCandidateInfo(
3444 const MachineModuleInfo &MMI,
3445 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
3446 unsigned MinRepeats) const {
3447
3448 // Analyze each candidate and erase the ones that are not viable.
3449 llvm::erase_if(C&: RepeatedSequenceLocs, P: analyzeCandidate);
3450
3451 // If the sequence doesn't have enough candidates left, then we're done.
3452 if (RepeatedSequenceLocs.size() < MinRepeats)
3453 return std::nullopt;
3454
3455 // Each RepeatedSequenceLoc is identical.
3456 outliner::Candidate &Candidate = RepeatedSequenceLocs[0];
3457 unsigned InstrSizeCExt =
3458 Candidate.getMF()->getSubtarget<RISCVSubtarget>().hasStdExtZca() ? 2 : 4;
3459 unsigned CallOverhead = 0, FrameOverhead = 0;
3460
3461 MachineOutlinerConstructionID MOCI = MachineOutlinerDefault;
3462 if (Candidate.back().isReturn()) {
3463 MOCI = MachineOutlinerTailCall;
3464 // tail call = auipc + jalr in the worst case without linker relaxation.
3465 // FIXME: This code suggests the JALR can be compressed - how?
3466 CallOverhead = 4 + InstrSizeCExt;
3467    // Using a tail call moves the ret instruction from the caller to the callee.
3468 FrameOverhead = 0;
3469 } else {
3470 // call t0, function = 8 bytes.
3471 CallOverhead = 8;
3472 // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
3473 FrameOverhead = InstrSizeCExt;
3474 }
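  // E.g. with Zca an outlined tail call costs 4 + 2 = 6 bytes per call site
  // and no frame overhead, while the default form costs 8 bytes per call site
  // plus a 2-byte c.jr t0 in the outlined frame.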
3475
3476 for (auto &C : RepeatedSequenceLocs)
3477 C.setCallInfo(CID: MOCI, CO: CallOverhead);
3478
3479 unsigned SequenceSize = 0;
3480 for (auto &MI : Candidate)
3481 SequenceSize += getInstSizeInBytes(MI);
3482
3483 return std::make_unique<outliner::OutlinedFunction>(
3484 args&: RepeatedSequenceLocs, args&: SequenceSize, args&: FrameOverhead, args&: MOCI);
3485}
3486
3487outliner::InstrType
3488RISCVInstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
3489 MachineBasicBlock::iterator &MBBI,
3490 unsigned Flags) const {
3491 MachineInstr &MI = *MBBI;
3492 MachineBasicBlock *MBB = MI.getParent();
3493 const TargetRegisterInfo *TRI =
3494 MBB->getParent()->getSubtarget().getRegisterInfo();
3495 const auto &F = MI.getMF()->getFunction();
3496
3497 // We can manually strip out CFI instructions later.
3498 if (MI.isCFIInstruction())
3499    // If the current function has exception handling code, we can't outline and
3500    // strip these CFI instructions since doing so may break the .eh_frame
3501    // section needed for unwinding.
3502 return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
3503 : outliner::InstrType::Invisible;
3504
3505 if (cannotInsertTailCall(MBB: *MBB) &&
3506 (MI.isReturn() || isMIModifiesReg(MI, TRI, RegNo: RISCV::X5)))
3507 return outliner::InstrType::Illegal;
3508
3509 // Make sure the operands don't reference something unsafe.
3510 for (const auto &MO : MI.operands()) {
3511
3512    // pcrel-hi and pcrel-lo can't be put in separate sections, so filter such
3513    // operands out whenever possible.
3514 if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
3515 (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
3516 F.hasSection() || F.getSectionPrefix()))
3517 return outliner::InstrType::Illegal;
3518 }
3519
3520 return outliner::InstrType::Legal;
3521}
3522
3523void RISCVInstrInfo::buildOutlinedFrame(
3524 MachineBasicBlock &MBB, MachineFunction &MF,
3525 const outliner::OutlinedFunction &OF) const {
3526
3527 // Strip out any CFI instructions
3528 bool Changed = true;
3529 while (Changed) {
3530 Changed = false;
3531 auto I = MBB.begin();
3532 auto E = MBB.end();
3533 for (; I != E; ++I) {
3534 if (I->isCFIInstruction()) {
3535 I->removeFromParent();
3536 Changed = true;
3537 break;
3538 }
3539 }
3540 }
3541
3542 if (OF.FrameConstructionID == MachineOutlinerTailCall)
3543 return;
3544
3545 MBB.addLiveIn(PhysReg: RISCV::X5);
3546
3547 // Add in a return instruction to the end of the outlined frame.
3548 MBB.insert(I: MBB.end(), MI: BuildMI(MF, MIMD: DebugLoc(), MCID: get(Opcode: RISCV::JALR))
3549 .addReg(RegNo: RISCV::X0, flags: RegState::Define)
3550 .addReg(RegNo: RISCV::X5)
3551 .addImm(Val: 0));
3552}
3553
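// For the default (non-tail-call) case the emitted pairing is, in effect:
//   call site:       call t0, OUTLINED_FUNCTION_N   ; PseudoCALLReg
//   outlined frame:  <outlined body>
//                    jr t0                          ; JALR x0, x5, 0
// (Illustrative sketch; the outlined function name is whatever the machine
// outliner assigns.)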
3554MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
3555 Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
3556 MachineFunction &MF, outliner::Candidate &C) const {
3557
3558 if (C.CallConstructionID == MachineOutlinerTailCall) {
3559 It = MBB.insert(I: It, MI: BuildMI(MF, MIMD: DebugLoc(), MCID: get(Opcode: RISCV::PseudoTAIL))
3560 .addGlobalAddress(GV: M.getNamedValue(Name: MF.getName()),
3561 /*Offset=*/0, TargetFlags: RISCVII::MO_CALL));
3562 return It;
3563 }
3564
3565 // Add in a call instruction to the outlined function at the given location.
3566 It = MBB.insert(I: It,
3567 MI: BuildMI(MF, MIMD: DebugLoc(), MCID: get(Opcode: RISCV::PseudoCALLReg), DestReg: RISCV::X5)
3568 .addGlobalAddress(GV: M.getNamedValue(Name: MF.getName()), Offset: 0,
3569 TargetFlags: RISCVII::MO_CALL));
3570 return It;
3571}
3572
3573std::optional<RegImmPair> RISCVInstrInfo::isAddImmediate(const MachineInstr &MI,
3574 Register Reg) const {
3575 // TODO: Handle cases where Reg is a super- or sub-register of the
3576 // destination register.
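  // E.g. for "addi a1, a0, 16" a query with Reg == a1 returns {a0, 16}.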
3577 const MachineOperand &Op0 = MI.getOperand(i: 0);
3578 if (!Op0.isReg() || Reg != Op0.getReg())
3579 return std::nullopt;
3580
3581 // Don't consider ADDIW as a candidate because the caller may not be aware
3582 // of its sign extension behaviour.
3583 if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(i: 1).isReg() &&
3584 MI.getOperand(i: 2).isImm())
3585 return RegImmPair{MI.getOperand(i: 1).getReg(), MI.getOperand(i: 2).getImm()};
3586
3587 return std::nullopt;
3588}
3589
3590// MIR printer helper function to annotate Operands with a comment.
3591std::string RISCVInstrInfo::createMIROperandComment(
3592 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
3593 const TargetRegisterInfo *TRI) const {
3594 // Print a generic comment for this operand if there is one.
3595 std::string GenericComment =
3596 TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
3597 if (!GenericComment.empty())
3598 return GenericComment;
3599
3600 // If not, we must have an immediate operand.
3601 if (!Op.isImm())
3602 return std::string();
3603
3604 const MCInstrDesc &Desc = MI.getDesc();
3605 if (OpIdx >= Desc.getNumOperands())
3606 return std::string();
3607
3608 std::string Comment;
3609 raw_string_ostream OS(Comment);
3610
3611 const MCOperandInfo &OpInfo = Desc.operands()[OpIdx];
3612
3613 // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
3614 // operand of vector codegen pseudos.
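  // E.g. a vsetvli VType immediate prints as something like "e32, m1, ta, ma",
  // a SEW operand prints as "e32", and a policy operand prints as "ta, ma".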
3615 switch (OpInfo.OperandType) {
3616 case RISCVOp::OPERAND_VTYPEI10:
3617 case RISCVOp::OPERAND_VTYPEI11: {
3618 unsigned Imm = Op.getImm();
3619 RISCVVType::printVType(VType: Imm, OS);
3620 break;
3621 }
3622 case RISCVOp::OPERAND_SEW:
3623 case RISCVOp::OPERAND_SEW_MASK: {
3624 unsigned Log2SEW = Op.getImm();
3625 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3626 assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
3627 OS << "e" << SEW;
3628 break;
3629 }
3630 case RISCVOp::OPERAND_VEC_POLICY:
3631 unsigned Policy = Op.getImm();
3632 assert(Policy <= (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC) &&
3633 "Invalid Policy Value");
3634 OS << (Policy & RISCVVType::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
3635 << (Policy & RISCVVType::MASK_AGNOSTIC ? "ma" : "mu");
3636 break;
3637 }
3638
3639 return Comment;
3640}
3641
3642// clang-format off
3643#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
3644 RISCV::Pseudo##OP##_##LMUL
3645
3646#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
3647 RISCV::Pseudo##OP##_##LMUL##_MASK
3648
3649#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
3650 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
3651 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
3652
3653#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
3654 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
3655 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
3656 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
3657 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
3658 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
3659 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
3660
3661#define CASE_RVV_OPCODE_UNMASK(OP) \
3662 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3663 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
3664
3665#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
3666 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
3667 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
3668 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
3669 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
3670 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
3671 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
3672
3673#define CASE_RVV_OPCODE_MASK(OP) \
3674 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
3675 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
3676
3677#define CASE_RVV_OPCODE_WIDEN(OP) \
3678 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3679 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
3680
3681#define CASE_RVV_OPCODE(OP) \
3682 CASE_RVV_OPCODE_UNMASK(OP): \
3683 case CASE_RVV_OPCODE_MASK(OP)
3684// clang-format on
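// As an illustration, CASE_RVV_OPCODE(VADD_VV) expands into case labels for
// RISCV::PseudoVADD_VV_MF8 through RISCV::PseudoVADD_VV_M8 plus their _MASK
// variants, so one line in the switch below covers every LMUL and
// masked/unmasked combination of that pseudo.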
3685
3686// clang-format off
3687#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
3688 RISCV::PseudoV##OP##_##TYPE##_##LMUL
3689
3690#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
3691 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
3692 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
3693 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
3694 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
3695 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
3696 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
3697 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
3698
3699// VFMA instructions are SEW specific.
3700#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
3701 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
3702
3703#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
3704 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
3705 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
3706 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
3707 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
3708
3709#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
3710 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
3711 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
3712
3713#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
3714 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
3715 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
3716
3717#define CASE_VFMA_OPCODE_VV(OP) \
3718 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
3719 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
3720 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
3721
3722#define CASE_VFMA_SPLATS(OP) \
3723 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
3724 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
3725 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
3726// clang-format on
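// E.g. CASE_VFMA_OPCODE_VV(FMACC) enumerates PseudoVFMACC_VV_MF4_E16 through
// PseudoVFMACC_VV_M8_E64: the FP pseudos carry the SEW in their names, so
// each LMUL is only paired with the element widths it supports.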
3727
3728bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
3729 unsigned &SrcOpIdx1,
3730 unsigned &SrcOpIdx2) const {
3731 const MCInstrDesc &Desc = MI.getDesc();
3732 if (!Desc.isCommutable())
3733 return false;
3734
3735 switch (MI.getOpcode()) {
3736 case RISCV::TH_MVEQZ:
3737 case RISCV::TH_MVNEZ:
3738 // We can't commute operands if operand 2 (i.e., rs1 in
3739 // mveqz/mvnez rd,rs1,rs2) is the zero-register (as it is
3740 // not valid as the in/out-operand 1).
3741 if (MI.getOperand(i: 2).getReg() == RISCV::X0)
3742 return false;
3743 // Operands 1 and 2 are commutable, if we switch the opcode.
3744 return fixCommutedOpIndices(ResultIdx1&: SrcOpIdx1, ResultIdx2&: SrcOpIdx2, CommutableOpIdx1: 1, CommutableOpIdx2: 2);
3745 case RISCV::TH_MULA:
3746 case RISCV::TH_MULAW:
3747 case RISCV::TH_MULAH:
3748 case RISCV::TH_MULS:
3749 case RISCV::TH_MULSW:
3750 case RISCV::TH_MULSH:
3751 // Operands 2 and 3 are commutable.
3752 return fixCommutedOpIndices(ResultIdx1&: SrcOpIdx1, ResultIdx2&: SrcOpIdx2, CommutableOpIdx1: 2, CommutableOpIdx2: 3);
3753 case RISCV::PseudoCCMOVGPRNoX0:
3754 case RISCV::PseudoCCMOVGPR:
3755 // Operands 4 and 5 are commutable.
3756 return fixCommutedOpIndices(ResultIdx1&: SrcOpIdx1, ResultIdx2&: SrcOpIdx2, CommutableOpIdx1: 4, CommutableOpIdx2: 5);
3757 case CASE_RVV_OPCODE(VADD_VV):
3758 case CASE_RVV_OPCODE(VAND_VV):
3759 case CASE_RVV_OPCODE(VOR_VV):
3760 case CASE_RVV_OPCODE(VXOR_VV):
3761 case CASE_RVV_OPCODE_MASK(VMSEQ_VV):
3762 case CASE_RVV_OPCODE_MASK(VMSNE_VV):
3763 case CASE_RVV_OPCODE(VMIN_VV):
3764 case CASE_RVV_OPCODE(VMINU_VV):
3765 case CASE_RVV_OPCODE(VMAX_VV):
3766 case CASE_RVV_OPCODE(VMAXU_VV):
3767 case CASE_RVV_OPCODE(VMUL_VV):
3768 case CASE_RVV_OPCODE(VMULH_VV):
3769 case CASE_RVV_OPCODE(VMULHU_VV):
3770 case CASE_RVV_OPCODE_WIDEN(VWADD_VV):
3771 case CASE_RVV_OPCODE_WIDEN(VWADDU_VV):
3772 case CASE_RVV_OPCODE_WIDEN(VWMUL_VV):
3773 case CASE_RVV_OPCODE_WIDEN(VWMULU_VV):
3774 case CASE_RVV_OPCODE_WIDEN(VWMACC_VV):
3775 case CASE_RVV_OPCODE_WIDEN(VWMACCU_VV):
3776 case CASE_RVV_OPCODE_UNMASK(VADC_VVM):
3777 case CASE_RVV_OPCODE(VSADD_VV):
3778 case CASE_RVV_OPCODE(VSADDU_VV):
3779 case CASE_RVV_OPCODE(VAADD_VV):
3780 case CASE_RVV_OPCODE(VAADDU_VV):
3781 case CASE_RVV_OPCODE(VSMUL_VV):
3782 // Operands 2 and 3 are commutable.
3783 return fixCommutedOpIndices(ResultIdx1&: SrcOpIdx1, ResultIdx2&: SrcOpIdx2, CommutableOpIdx1: 2, CommutableOpIdx2: 3);
3784 case CASE_VFMA_SPLATS(FMADD):
3785 case CASE_VFMA_SPLATS(FMSUB):
3786 case CASE_VFMA_SPLATS(FMACC):
3787 case CASE_VFMA_SPLATS(FMSAC):
3788 case CASE_VFMA_SPLATS(FNMADD):
3789 case CASE_VFMA_SPLATS(FNMSUB):
3790 case CASE_VFMA_SPLATS(FNMACC):
3791 case CASE_VFMA_SPLATS(FNMSAC):
3792 case CASE_VFMA_OPCODE_VV(FMACC):
3793 case CASE_VFMA_OPCODE_VV(FMSAC):
3794 case CASE_VFMA_OPCODE_VV(FNMACC):
3795 case CASE_VFMA_OPCODE_VV(FNMSAC):
3796 case CASE_VMA_OPCODE_LMULS(MADD, VX):
3797 case CASE_VMA_OPCODE_LMULS(NMSUB, VX):
3798 case CASE_VMA_OPCODE_LMULS(MACC, VX):
3799 case CASE_VMA_OPCODE_LMULS(NMSAC, VX):
3800 case CASE_VMA_OPCODE_LMULS(MACC, VV):
3801 case CASE_VMA_OPCODE_LMULS(NMSAC, VV): {
3802 // If the tail policy is undisturbed we can't commute.
3803 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
3804 if ((MI.getOperand(i: RISCVII::getVecPolicyOpNum(Desc: MI.getDesc())).getImm() &
3805 1) == 0)
3806 return false;
3807
3808 // For these instructions we can only swap operand 1 and operand 3 by
3809 // changing the opcode.
3810 unsigned CommutableOpIdx1 = 1;
3811 unsigned CommutableOpIdx2 = 3;
3812 if (!fixCommutedOpIndices(ResultIdx1&: SrcOpIdx1, ResultIdx2&: SrcOpIdx2, CommutableOpIdx1,
3813 CommutableOpIdx2))
3814 return false;
3815 return true;
3816 }
3817 case CASE_VFMA_OPCODE_VV(FMADD):
3818 case CASE_VFMA_OPCODE_VV(FMSUB):
3819 case CASE_VFMA_OPCODE_VV(FNMADD):
3820 case CASE_VFMA_OPCODE_VV(FNMSUB):
3821 case CASE_VMA_OPCODE_LMULS(MADD, VV):
3822 case CASE_VMA_OPCODE_LMULS(NMSUB, VV): {
3823 // If the tail policy is undisturbed we can't commute.
3824 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
3825 if ((MI.getOperand(i: RISCVII::getVecPolicyOpNum(Desc: MI.getDesc())).getImm() &
3826 1) == 0)
3827 return false;
3828
3829 // For these instructions we have more freedom. We can commute with the
3830 // other multiplicand or with the addend/subtrahend/minuend.
3831
3832 // Any fixed operand must be from source 1, 2 or 3.
3833 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
3834 return false;
3835 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
3836 return false;
3837
3838    // If both ops are fixed, one must be the tied source.
3839 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
3840 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
3841 return false;
3842
3843 // Look for two different register operands assumed to be commutable
3844 // regardless of the FMA opcode. The FMA opcode is adjusted later if
3845 // needed.
3846 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
3847 SrcOpIdx2 == CommuteAnyOperandIndex) {
3848 // At least one of operands to be commuted is not specified and
3849 // this method is free to choose appropriate commutable operands.
3850 unsigned CommutableOpIdx1 = SrcOpIdx1;
3851 if (SrcOpIdx1 == SrcOpIdx2) {
3852 // Both of operands are not fixed. Set one of commutable
3853 // operands to the tied source.
3854 CommutableOpIdx1 = 1;
3855 } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
3856 // Only one of the operands is not fixed.
3857 CommutableOpIdx1 = SrcOpIdx2;
3858 }
3859
3860 // CommutableOpIdx1 is well defined now. Let's choose another commutable
3861 // operand and assign its index to CommutableOpIdx2.
3862 unsigned CommutableOpIdx2;
3863 if (CommutableOpIdx1 != 1) {
3864 // If we haven't already used the tied source, we must use it now.
3865 CommutableOpIdx2 = 1;
3866 } else {
3867 Register Op1Reg = MI.getOperand(i: CommutableOpIdx1).getReg();
3868
3869 // The commuted operands should have different registers.
3870 // Otherwise, the commute transformation does not change anything and
3871 // is useless. We use this as a hint to make our decision.
3872 if (Op1Reg != MI.getOperand(i: 2).getReg())
3873 CommutableOpIdx2 = 2;
3874 else
3875 CommutableOpIdx2 = 3;
3876 }
3877
3878 // Assign the found pair of commutable indices to SrcOpIdx1 and
3879 // SrcOpIdx2 to return those values.
3880 if (!fixCommutedOpIndices(ResultIdx1&: SrcOpIdx1, ResultIdx2&: SrcOpIdx2, CommutableOpIdx1,
3881 CommutableOpIdx2))
3882 return false;
3883 }
3884
3885 return true;
3886 }
3887 }
3888
3889 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
3890}
3891
3892// clang-format off
3893#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
3894 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
3895 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
3896 break;
3897
3898#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
3899 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
3900 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
3901 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
3902 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
3903 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
3904 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
3905 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
3906
3907// VFMA depends on SEW.
3908#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
3909 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
3910 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
3911 break;
3912
3913#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
3914 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
3915 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
3916 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
3917 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
3918
3919#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
3920 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
3921 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
3922
3923#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
3924 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
3925 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
3926
3927#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
3928 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
3929 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
3930 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
3931
3932#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
3933 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
3934 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
3935 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
3936// clang-format on
3937
3938MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
3939 bool NewMI,
3940 unsigned OpIdx1,
3941 unsigned OpIdx2) const {
3942 auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
3943 if (NewMI)
3944 return *MI.getParent()->getParent()->CloneMachineInstr(Orig: &MI);
3945 return MI;
3946 };
3947
3948 switch (MI.getOpcode()) {
3949 case RISCV::TH_MVEQZ:
3950 case RISCV::TH_MVNEZ: {
3951 auto &WorkingMI = cloneIfNew(MI);
3952 WorkingMI.setDesc(get(Opcode: MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
3953 : RISCV::TH_MVEQZ));
3954 return TargetInstrInfo::commuteInstructionImpl(MI&: WorkingMI, NewMI: false, OpIdx1,
3955 OpIdx2);
3956 }
3957 case RISCV::PseudoCCMOVGPRNoX0:
3958 case RISCV::PseudoCCMOVGPR: {
3959 // CCMOV can be commuted by inverting the condition.
3960 auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(i: 3).getImm());
3961 CC = RISCVCC::getOppositeBranchCondition(CC);
3962 auto &WorkingMI = cloneIfNew(MI);
3963 WorkingMI.getOperand(i: 3).setImm(CC);
3964 return TargetInstrInfo::commuteInstructionImpl(MI&: WorkingMI, /*NewMI*/ false,
3965 OpIdx1, OpIdx2);
3966 }
3967 case CASE_VFMA_SPLATS(FMACC):
3968 case CASE_VFMA_SPLATS(FMADD):
3969 case CASE_VFMA_SPLATS(FMSAC):
3970 case CASE_VFMA_SPLATS(FMSUB):
3971 case CASE_VFMA_SPLATS(FNMACC):
3972 case CASE_VFMA_SPLATS(FNMADD):
3973 case CASE_VFMA_SPLATS(FNMSAC):
3974 case CASE_VFMA_SPLATS(FNMSUB):
3975 case CASE_VFMA_OPCODE_VV(FMACC):
3976 case CASE_VFMA_OPCODE_VV(FMSAC):
3977 case CASE_VFMA_OPCODE_VV(FNMACC):
3978 case CASE_VFMA_OPCODE_VV(FNMSAC):
3979 case CASE_VMA_OPCODE_LMULS(MADD, VX):
3980 case CASE_VMA_OPCODE_LMULS(NMSUB, VX):
3981 case CASE_VMA_OPCODE_LMULS(MACC, VX):
3982 case CASE_VMA_OPCODE_LMULS(NMSAC, VX):
3983 case CASE_VMA_OPCODE_LMULS(MACC, VV):
3984 case CASE_VMA_OPCODE_LMULS(NMSAC, VV): {
3985    // It only makes sense to toggle these between clobbering the
3986    // addend/subtrahend/minuend and clobbering one of the multiplicands.
3987 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
3988 assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
3989 unsigned Opc;
3990 switch (MI.getOpcode()) {
3991 default:
3992 llvm_unreachable("Unexpected opcode");
3993 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
3994 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
3995 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSAC, FMSUB)
3996 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSUB, FMSAC)
3997 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMACC, FNMADD)
3998 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
3999 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
4000 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
4001 CASE_VFMA_CHANGE_OPCODE_VV(FMACC, FMADD)
4002 CASE_VFMA_CHANGE_OPCODE_VV(FMSAC, FMSUB)
4003 CASE_VFMA_CHANGE_OPCODE_VV(FNMACC, FNMADD)
4004 CASE_VFMA_CHANGE_OPCODE_VV(FNMSAC, FNMSUB)
4005 CASE_VMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
4006 CASE_VMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
4007 CASE_VMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
4008 CASE_VMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
4009 CASE_VMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
4010 CASE_VMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
4011 }
4012
4013 auto &WorkingMI = cloneIfNew(MI);
4014 WorkingMI.setDesc(get(Opcode: Opc));
4015 return TargetInstrInfo::commuteInstructionImpl(MI&: WorkingMI, /*NewMI=*/false,
4016 OpIdx1, OpIdx2);
4017 }
4018 case CASE_VFMA_OPCODE_VV(FMADD):
4019 case CASE_VFMA_OPCODE_VV(FMSUB):
4020 case CASE_VFMA_OPCODE_VV(FNMADD):
4021 case CASE_VFMA_OPCODE_VV(FNMSUB):
4022 case CASE_VMA_OPCODE_LMULS(MADD, VV):
4023 case CASE_VMA_OPCODE_LMULS(NMSUB, VV): {
4024 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
4025    // If one of the operands is the addend, we need to change the opcode.
4026    // Otherwise we're just swapping two of the multiplicands.
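    // E.g. vfmadd.vv computes vd = vd * vs1 + vs2; swapping vd's multiplicand
    // role with the addend vs2 turns it into vfmacc.vv (vd = vs1 * vs2 + vd),
    // while swapping the two multiplicands needs no opcode change.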
4027 if (OpIdx1 == 3 || OpIdx2 == 3) {
4028 unsigned Opc;
4029 switch (MI.getOpcode()) {
4030 default:
4031 llvm_unreachable("Unexpected opcode");
4032 CASE_VFMA_CHANGE_OPCODE_VV(FMADD, FMACC)
4033 CASE_VFMA_CHANGE_OPCODE_VV(FMSUB, FMSAC)
4034 CASE_VFMA_CHANGE_OPCODE_VV(FNMADD, FNMACC)
4035 CASE_VFMA_CHANGE_OPCODE_VV(FNMSUB, FNMSAC)
4036 CASE_VMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
4037 CASE_VMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
4038 }
4039
4040 auto &WorkingMI = cloneIfNew(MI);
4041 WorkingMI.setDesc(get(Opcode: Opc));
4042 return TargetInstrInfo::commuteInstructionImpl(MI&: WorkingMI, /*NewMI=*/false,
4043 OpIdx1, OpIdx2);
4044 }
4045 // Let the default code handle it.
4046 break;
4047 }
4048 }
4049
4050 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
4051}
4052
4053#undef CASE_VMA_CHANGE_OPCODE_COMMON
4054#undef CASE_VMA_CHANGE_OPCODE_LMULS
4055#undef CASE_VFMA_CHANGE_OPCODE_COMMON
4056#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
4057#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
4058#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
4059#undef CASE_VFMA_CHANGE_OPCODE_VV
4060#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
4061
4062#undef CASE_RVV_OPCODE_UNMASK_LMUL
4063#undef CASE_RVV_OPCODE_MASK_LMUL
4064#undef CASE_RVV_OPCODE_LMUL
4065#undef CASE_RVV_OPCODE_UNMASK_WIDEN
4066#undef CASE_RVV_OPCODE_UNMASK
4067#undef CASE_RVV_OPCODE_MASK_WIDEN
4068#undef CASE_RVV_OPCODE_MASK
4069#undef CASE_RVV_OPCODE_WIDEN
4070#undef CASE_RVV_OPCODE
4071
4072#undef CASE_VMA_OPCODE_COMMON
4073#undef CASE_VMA_OPCODE_LMULS
4074#undef CASE_VFMA_OPCODE_COMMON
4075#undef CASE_VFMA_OPCODE_LMULS_M1
4076#undef CASE_VFMA_OPCODE_LMULS_MF2
4077#undef CASE_VFMA_OPCODE_LMULS_MF4
4078#undef CASE_VFMA_OPCODE_VV
4079#undef CASE_VFMA_SPLATS
4080
4081bool RISCVInstrInfo::simplifyInstruction(MachineInstr &MI) const {
4082 switch (MI.getOpcode()) {
4083 default:
4084 break;
4085 case RISCV::ADD:
4086 case RISCV::OR:
4087 case RISCV::XOR:
4088 // Normalize (so we hit the next if clause).
4089 // add/[x]or rd, zero, rs => add/[x]or rd, rs, zero
4090 if (MI.getOperand(i: 1).getReg() == RISCV::X0)
4091 commuteInstruction(MI);
4092 // add/[x]or rd, rs, zero => addi rd, rs, 0
4093 if (MI.getOperand(i: 2).getReg() == RISCV::X0) {
4094 MI.getOperand(i: 2).ChangeToImmediate(ImmVal: 0);
4095 MI.setDesc(get(Opcode: RISCV::ADDI));
4096 return true;
4097 }
4098 // xor rd, rs, rs => addi rd, zero, 0
4099 if (MI.getOpcode() == RISCV::XOR &&
4100 MI.getOperand(i: 1).getReg() == MI.getOperand(i: 2).getReg()) {
4101 MI.getOperand(i: 1).setReg(RISCV::X0);
4102 MI.getOperand(i: 2).ChangeToImmediate(ImmVal: 0);
4103 MI.setDesc(get(Opcode: RISCV::ADDI));
4104 return true;
4105 }
4106 break;
4107 case RISCV::ORI:
4108 case RISCV::XORI:
4109 // [x]ori rd, zero, N => addi rd, zero, N
4110 if (MI.getOperand(i: 1).getReg() == RISCV::X0) {
4111 MI.setDesc(get(Opcode: RISCV::ADDI));
4112 return true;
4113 }
4114 break;
4115 case RISCV::SUB:
4116 // sub rd, rs, zero => addi rd, rs, 0
4117 if (MI.getOperand(i: 2).getReg() == RISCV::X0) {
4118 MI.getOperand(i: 2).ChangeToImmediate(ImmVal: 0);
4119 MI.setDesc(get(Opcode: RISCV::ADDI));
4120 return true;
4121 }
4122 break;
4123 case RISCV::SUBW:
4124 // subw rd, rs, zero => addiw rd, rs, 0
4125 if (MI.getOperand(i: 2).getReg() == RISCV::X0) {
4126 MI.getOperand(i: 2).ChangeToImmediate(ImmVal: 0);
4127 MI.setDesc(get(Opcode: RISCV::ADDIW));
4128 return true;
4129 }
4130 break;
4131 case RISCV::ADDW:
4132 // Normalize (so we hit the next if clause).
4133 // addw rd, zero, rs => addw rd, rs, zero
4134 if (MI.getOperand(i: 1).getReg() == RISCV::X0)
4135 commuteInstruction(MI);
4136 // addw rd, rs, zero => addiw rd, rs, 0
4137 if (MI.getOperand(i: 2).getReg() == RISCV::X0) {
4138 MI.getOperand(i: 2).ChangeToImmediate(ImmVal: 0);
4139 MI.setDesc(get(Opcode: RISCV::ADDIW));
4140 return true;
4141 }
4142 break;
4143 case RISCV::SH1ADD:
4144 case RISCV::SH1ADD_UW:
4145 case RISCV::SH2ADD:
4146 case RISCV::SH2ADD_UW:
4147 case RISCV::SH3ADD:
4148 case RISCV::SH3ADD_UW:
4149 // shNadd[.uw] rd, zero, rs => addi rd, rs, 0
4150 if (MI.getOperand(i: 1).getReg() == RISCV::X0) {
4151 MI.removeOperand(OpNo: 1);
4152 MI.addOperand(Op: MachineOperand::CreateImm(Val: 0));
4153 MI.setDesc(get(Opcode: RISCV::ADDI));
4154 return true;
4155 }
4156 // shNadd[.uw] rd, rs, zero => slli[.uw] rd, rs, N
4157 if (MI.getOperand(i: 2).getReg() == RISCV::X0) {
4158 MI.removeOperand(OpNo: 2);
4159 unsigned Opc = MI.getOpcode();
4160 if (Opc == RISCV::SH1ADD_UW || Opc == RISCV::SH2ADD_UW ||
4161 Opc == RISCV::SH3ADD_UW) {
4162 MI.addOperand(Op: MachineOperand::CreateImm(Val: getSHXADDUWShiftAmount(Opc)));
4163 MI.setDesc(get(Opcode: RISCV::SLLI_UW));
4164 return true;
4165 }
4166 MI.addOperand(Op: MachineOperand::CreateImm(Val: getSHXADDShiftAmount(Opc)));
4167 MI.setDesc(get(Opcode: RISCV::SLLI));
4168 return true;
4169 }
4170 break;
4171 case RISCV::AND:
4172 case RISCV::MUL:
4173 case RISCV::MULH:
4174 case RISCV::MULHSU:
4175 case RISCV::MULHU:
4176 case RISCV::MULW:
4177 // and rd, zero, rs => addi rd, zero, 0
4178 // mul* rd, zero, rs => addi rd, zero, 0
4179 // and rd, rs, zero => addi rd, zero, 0
4180 // mul* rd, rs, zero => addi rd, zero, 0
4181 if (MI.getOperand(i: 1).getReg() == RISCV::X0 ||
4182 MI.getOperand(i: 2).getReg() == RISCV::X0) {
4183 MI.getOperand(i: 1).setReg(RISCV::X0);
4184 MI.getOperand(i: 2).ChangeToImmediate(ImmVal: 0);
4185 MI.setDesc(get(Opcode: RISCV::ADDI));
4186 return true;
4187 }
4188 break;
4189 case RISCV::ANDI:
4190 // andi rd, zero, C => addi rd, zero, 0
4191 if (MI.getOperand(i: 1).getReg() == RISCV::X0) {
4192 MI.getOperand(i: 2).setImm(0);
4193 MI.setDesc(get(Opcode: RISCV::ADDI));
4194 return true;
4195 }
4196 break;
4197 case RISCV::SLL:
4198 case RISCV::SRL:
4199 case RISCV::SRA:
4200 // shift rd, zero, rs => addi rd, zero, 0
4201 if (MI.getOperand(i: 1).getReg() == RISCV::X0) {
4202 MI.getOperand(i: 2).ChangeToImmediate(ImmVal: 0);
4203 MI.setDesc(get(Opcode: RISCV::ADDI));
4204 return true;
4205 }
4206 // shift rd, rs, zero => addi rd, rs, 0
4207 if (MI.getOperand(i: 2).getReg() == RISCV::X0) {
4208 MI.getOperand(i: 2).ChangeToImmediate(ImmVal: 0);
4209 MI.setDesc(get(Opcode: RISCV::ADDI));
4210 return true;
4211 }
4212 break;
4213 case RISCV::SLLW:
4214 case RISCV::SRLW:
4215 case RISCV::SRAW:
4216 // shiftw rd, zero, rs => addi rd, zero, 0
4217 if (MI.getOperand(i: 1).getReg() == RISCV::X0) {
4218 MI.getOperand(i: 2).ChangeToImmediate(ImmVal: 0);
4219 MI.setDesc(get(Opcode: RISCV::ADDI));
4220 return true;
4221 }
4222 break;
4223 case RISCV::SLLI:
4224 case RISCV::SRLI:
4225 case RISCV::SRAI:
4226 case RISCV::SLLIW:
4227 case RISCV::SRLIW:
4228 case RISCV::SRAIW:
4229 case RISCV::SLLI_UW:
4230 // shiftimm rd, zero, N => addi rd, zero, 0
4231 if (MI.getOperand(i: 1).getReg() == RISCV::X0) {
4232 MI.getOperand(i: 2).setImm(0);
4233 MI.setDesc(get(Opcode: RISCV::ADDI));
4234 return true;
4235 }
4236 break;
4237 case RISCV::SLTU:
4238 case RISCV::ADD_UW:
4239 // sltu rd, zero, zero => addi rd, zero, 0
4240 // add.uw rd, zero, zero => addi rd, zero, 0
4241 if (MI.getOperand(i: 1).getReg() == RISCV::X0 &&
4242 MI.getOperand(i: 2).getReg() == RISCV::X0) {
4243 MI.getOperand(i: 2).ChangeToImmediate(ImmVal: 0);
4244 MI.setDesc(get(Opcode: RISCV::ADDI));
4245 return true;
4246 }
4247 // add.uw rd, zero, rs => addi rd, rs, 0
4248 if (MI.getOpcode() == RISCV::ADD_UW &&
4249 MI.getOperand(i: 1).getReg() == RISCV::X0) {
4250 MI.removeOperand(OpNo: 1);
4251 MI.addOperand(Op: MachineOperand::CreateImm(Val: 0));
4252 MI.setDesc(get(Opcode: RISCV::ADDI));
4253 }
4254 break;
4255 case RISCV::SLTIU:
4256 // sltiu rd, zero, NZC => addi rd, zero, 1
4257 // sltiu rd, zero, 0 => addi rd, zero, 0
4258 if (MI.getOperand(i: 1).getReg() == RISCV::X0) {
4259 MI.getOperand(i: 2).setImm(MI.getOperand(i: 2).getImm() != 0);
4260 MI.setDesc(get(Opcode: RISCV::ADDI));
4261 return true;
4262 }
4263 break;
4264 case RISCV::SEXT_H:
4265 case RISCV::SEXT_B:
4266 case RISCV::ZEXT_H_RV32:
4267 case RISCV::ZEXT_H_RV64:
4268 // sext.[hb] rd, zero => addi rd, zero, 0
4269 // zext.h rd, zero => addi rd, zero, 0
4270 if (MI.getOperand(i: 1).getReg() == RISCV::X0) {
4271 MI.addOperand(Op: MachineOperand::CreateImm(Val: 0));
4272 MI.setDesc(get(Opcode: RISCV::ADDI));
4273 return true;
4274 }
4275 break;
4276 case RISCV::MIN:
4277 case RISCV::MINU:
4278 case RISCV::MAX:
4279 case RISCV::MAXU:
4280 // min|max rd, rs, rs => addi rd, rs, 0
4281 if (MI.getOperand(i: 1).getReg() == MI.getOperand(i: 2).getReg()) {
4282 MI.getOperand(i: 2).ChangeToImmediate(ImmVal: 0);
4283 MI.setDesc(get(Opcode: RISCV::ADDI));
4284 return true;
4285 }
4286 break;
4287 case RISCV::BEQ:
4288 case RISCV::BNE:
4289 // b{eq,ne} zero, rs, imm => b{eq,ne} rs, zero, imm
4290 if (MI.getOperand(i: 0).getReg() == RISCV::X0) {
4291 MachineOperand MO0 = MI.getOperand(i: 0);
4292 MI.removeOperand(OpNo: 0);
4293 MI.insert(InsertBefore: MI.operands_begin() + 1, Ops: {MO0});
4294 }
4295 break;
4296 case RISCV::BLTU:
4297 // bltu zero, rs, imm => bne rs, zero, imm
4298 if (MI.getOperand(i: 0).getReg() == RISCV::X0) {
4299 MachineOperand MO0 = MI.getOperand(i: 0);
4300 MI.removeOperand(OpNo: 0);
4301 MI.insert(InsertBefore: MI.operands_begin() + 1, Ops: {MO0});
4302 MI.setDesc(get(Opcode: RISCV::BNE));
4303 }
4304 break;
4305 case RISCV::BGEU:
4306 // bgeu zero, rs, imm => beq rs, zero, imm
4307 if (MI.getOperand(i: 0).getReg() == RISCV::X0) {
4308 MachineOperand MO0 = MI.getOperand(i: 0);
4309 MI.removeOperand(OpNo: 0);
4310 MI.insert(InsertBefore: MI.operands_begin() + 1, Ops: {MO0});
4311 MI.setDesc(get(Opcode: RISCV::BEQ));
4312 }
4313 break;
4314 }
4315 return false;
4316}
4317
4318// clang-format off
4319#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
4320 RISCV::PseudoV##OP##_##LMUL##_TIED
4321
4322#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4323 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4324 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4325 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4326 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4327 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4328 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4329
4330#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4331 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4332 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4333 break;
4334
4335#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4336 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4337 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4338 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4339 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4340 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4341 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4342
4343// FP widening ops may be SEW aware. Create SEW-aware cases for them.
4344#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4345 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4346
4347#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4348 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4349 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4350 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4351 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4352 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4353 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4354 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4355 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4356 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4357
4358#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4359 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4360 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4361 break;
4362
4363#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4364 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4365 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4366 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4367 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4368 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4369 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4370 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4371 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4372 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4373// clang-format on
4374
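// Convert a "TIED" widening pseudo, whose wide source is tied to its
// destination, into the regular three-address form with an undef passthru,
// roughly:
//   PseudoVWADD_WV_M1_TIED vd, vs2, vs1, vl, sew, policy
//     -> PseudoVWADD_WV_M1 vd, undef, vs2, vs1, vl, sew, policy
// This is only done when the tail policy is agnostic, since an undef
// passthru cannot preserve tail elements.
// (Illustrative sketch; the exact operand order follows the code below.)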
4375MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
4376 LiveVariables *LV,
4377 LiveIntervals *LIS) const {
4378 MachineInstrBuilder MIB;
4379 switch (MI.getOpcode()) {
4380 default:
4381 return nullptr;
4382 case CASE_FP_WIDEOP_OPCODE_LMULS(FWADD_WV):
4383 case CASE_FP_WIDEOP_OPCODE_LMULS(FWSUB_WV): {
4384 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
4385 MI.getNumExplicitOperands() == 7 &&
4386 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
4387 // If the tail policy is undisturbed we can't convert.
4388 if ((MI.getOperand(i: RISCVII::getVecPolicyOpNum(Desc: MI.getDesc())).getImm() &
4389 1) == 0)
4390 return nullptr;
4391 // clang-format off
4392 unsigned NewOpc;
4393 switch (MI.getOpcode()) {
4394 default:
4395 llvm_unreachable("Unexpected opcode");
4396 CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWADD_WV)
4397 CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWSUB_WV)
4398 }
4399 // clang-format on
4400
4401 MachineBasicBlock &MBB = *MI.getParent();
4402 MIB = BuildMI(BB&: MBB, I&: MI, MIMD: MI.getDebugLoc(), MCID: get(Opcode: NewOpc))
4403 .add(MO: MI.getOperand(i: 0))
4404 .addReg(RegNo: MI.getOperand(i: 0).getReg(), flags: RegState::Undef)
4405 .add(MO: MI.getOperand(i: 1))
4406 .add(MO: MI.getOperand(i: 2))
4407 .add(MO: MI.getOperand(i: 3))
4408 .add(MO: MI.getOperand(i: 4))
4409 .add(MO: MI.getOperand(i: 5))
4410 .add(MO: MI.getOperand(i: 6));
4411 break;
4412 }
4413 case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
4414 case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
4415 case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
4416 case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
4417 // If the tail policy is undisturbed we can't convert.
4418 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
4419 MI.getNumExplicitOperands() == 6);
4420 if ((MI.getOperand(i: RISCVII::getVecPolicyOpNum(Desc: MI.getDesc())).getImm() &
4421 1) == 0)
4422 return nullptr;
4423
4424 // clang-format off
4425 unsigned NewOpc;
4426 switch (MI.getOpcode()) {
4427 default:
4428 llvm_unreachable("Unexpected opcode");
4429 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
4430 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
4431 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
4432 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
4433 }
4434 // clang-format on
4435
4436 MachineBasicBlock &MBB = *MI.getParent();
4437 MIB = BuildMI(BB&: MBB, I&: MI, MIMD: MI.getDebugLoc(), MCID: get(Opcode: NewOpc))
4438 .add(MO: MI.getOperand(i: 0))
4439 .addReg(RegNo: MI.getOperand(i: 0).getReg(), flags: RegState::Undef)
4440 .add(MO: MI.getOperand(i: 1))
4441 .add(MO: MI.getOperand(i: 2))
4442 .add(MO: MI.getOperand(i: 3))
4443 .add(MO: MI.getOperand(i: 4))
4444 .add(MO: MI.getOperand(i: 5));
4445 break;
4446 }
4447 }
4448 MIB.copyImplicitOps(OtherMI: MI);
4449
4450 if (LV) {
4451 unsigned NumOps = MI.getNumOperands();
4452 for (unsigned I = 1; I < NumOps; ++I) {
4453 MachineOperand &Op = MI.getOperand(i: I);
4454 if (Op.isReg() && Op.isKill())
4455 LV->replaceKillInstruction(Reg: Op.getReg(), OldMI&: MI, NewMI&: *MIB);
4456 }
4457 }
4458
4459 if (LIS) {
4460 SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, NewMI&: *MIB);
4461
4462 if (MI.getOperand(i: 0).isEarlyClobber()) {
4463      // The use of operand 1 was tied to the early-clobber def operand 0, so its
4464      // live interval could have ended at an early-clobber slot. Now that they
4465      // are no longer tied, we need to update it to the normal register slot.
4466 LiveInterval &LI = LIS->getInterval(Reg: MI.getOperand(i: 1).getReg());
4467 LiveRange::Segment *S = LI.getSegmentContaining(Idx);
4468 if (S->end == Idx.getRegSlot(EC: true))
4469 S->end = Idx.getRegSlot();
4470 }
4471 }
4472
4473 return MIB;
4474}
4475
4476#undef CASE_WIDEOP_OPCODE_COMMON
4477#undef CASE_WIDEOP_OPCODE_LMULS
4478#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
4479#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
4480#undef CASE_FP_WIDEOP_OPCODE_COMMON
4481#undef CASE_FP_WIDEOP_OPCODE_LMULS
4482#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
4483#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
4484
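// Multiply DestReg in place by a constant Amount using shifts and adds where
// possible, e.g.:
//   Amount = 8           slli   DestReg, DestReg, 3
//   Amount = 6  (Zba)    slli   DestReg, DestReg, 1
//                        sh1add DestReg, DestReg, DestReg
//   Amount = 9  (no Zba) slli   Tmp, DestReg, 3
//                        add    DestReg, Tmp, DestReg
//   Amount = 7           slli   Tmp, DestReg, 3
//                        sub    DestReg, Tmp, DestReg
// falling back to a real multiply (Zmmul) or a generic shift-and-add chain
// otherwise. (Illustrative sketch; Tmp stands for a scratch virtual register.)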
4485void RISCVInstrInfo::mulImm(MachineFunction &MF, MachineBasicBlock &MBB,
4486 MachineBasicBlock::iterator II, const DebugLoc &DL,
4487 Register DestReg, uint32_t Amount,
4488 MachineInstr::MIFlag Flag) const {
4489 MachineRegisterInfo &MRI = MF.getRegInfo();
4490 if (llvm::has_single_bit<uint32_t>(Value: Amount)) {
4491 uint32_t ShiftAmount = Log2_32(Value: Amount);
4492 if (ShiftAmount == 0)
4493 return;
4494 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: RISCV::SLLI), DestReg)
4495 .addReg(RegNo: DestReg, flags: RegState::Kill)
4496 .addImm(Val: ShiftAmount)
4497 .setMIFlag(Flag);
4498 } else if (STI.hasStdExtZba() &&
4499 ((Amount % 3 == 0 && isPowerOf2_64(Value: Amount / 3)) ||
4500 (Amount % 5 == 0 && isPowerOf2_64(Value: Amount / 5)) ||
4501 (Amount % 9 == 0 && isPowerOf2_64(Value: Amount / 9)))) {
4502 // We can use Zba SHXADD+SLLI instructions for multiply in some cases.
4503 unsigned Opc;
4504 uint32_t ShiftAmount;
4505 if (Amount % 9 == 0) {
4506 Opc = RISCV::SH3ADD;
4507 ShiftAmount = Log2_64(Value: Amount / 9);
4508 } else if (Amount % 5 == 0) {
4509 Opc = RISCV::SH2ADD;
4510 ShiftAmount = Log2_64(Value: Amount / 5);
4511 } else if (Amount % 3 == 0) {
4512 Opc = RISCV::SH1ADD;
4513 ShiftAmount = Log2_64(Value: Amount / 3);
4514 } else {
4515 llvm_unreachable("implied by if-clause");
4516 }
4517 if (ShiftAmount)
4518 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: RISCV::SLLI), DestReg)
4519 .addReg(RegNo: DestReg, flags: RegState::Kill)
4520 .addImm(Val: ShiftAmount)
4521 .setMIFlag(Flag);
4522 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: Opc), DestReg)
4523 .addReg(RegNo: DestReg, flags: RegState::Kill)
4524 .addReg(RegNo: DestReg)
4525 .setMIFlag(Flag);
4526 } else if (llvm::has_single_bit<uint32_t>(Value: Amount - 1)) {
4527 Register ScaledRegister = MRI.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
4528 uint32_t ShiftAmount = Log2_32(Value: Amount - 1);
4529 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: RISCV::SLLI), DestReg: ScaledRegister)
4530 .addReg(RegNo: DestReg)
4531 .addImm(Val: ShiftAmount)
4532 .setMIFlag(Flag);
4533 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: RISCV::ADD), DestReg)
4534 .addReg(RegNo: ScaledRegister, flags: RegState::Kill)
4535 .addReg(RegNo: DestReg, flags: RegState::Kill)
4536 .setMIFlag(Flag);
4537 } else if (llvm::has_single_bit<uint32_t>(Value: Amount + 1)) {
4538 Register ScaledRegister = MRI.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
4539 uint32_t ShiftAmount = Log2_32(Value: Amount + 1);
4540 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: RISCV::SLLI), DestReg: ScaledRegister)
4541 .addReg(RegNo: DestReg)
4542 .addImm(Val: ShiftAmount)
4543 .setMIFlag(Flag);
4544 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: RISCV::SUB), DestReg)
4545 .addReg(RegNo: ScaledRegister, flags: RegState::Kill)
4546 .addReg(RegNo: DestReg, flags: RegState::Kill)
4547 .setMIFlag(Flag);
4548 } else if (STI.hasStdExtZmmul()) {
4549 Register N = MRI.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
4550 movImm(MBB, MBBI: II, DL, DstReg: N, Val: Amount, Flag);
4551 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: RISCV::MUL), DestReg)
4552 .addReg(RegNo: DestReg, flags: RegState::Kill)
4553 .addReg(RegNo: N, flags: RegState::Kill)
4554 .setMIFlag(Flag);
4555 } else {
4556 Register Acc;
4557 uint32_t PrevShiftAmount = 0;
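    // Generic shift-and-add expansion: walk the set bits of Amount from LSB to
    // MSB, shifting DestReg up to each set bit and accumulating the partial
    // products in Acc. E.g. Amount = 10 (0b1010) becomes roughly:
    //   slli DestReg, DestReg, 1   ; reach bit 1
    //   <copy Acc, DestReg>
    //   slli DestReg, DestReg, 2   ; reach bit 3
    //   add  DestReg, DestReg, Acc
    // (Illustrative sketch of the loop below.)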
4558 for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
4559 if (Amount & (1U << ShiftAmount)) {
4560 if (ShiftAmount)
4561 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: RISCV::SLLI), DestReg)
4562 .addReg(RegNo: DestReg, flags: RegState::Kill)
4563 .addImm(Val: ShiftAmount - PrevShiftAmount)
4564 .setMIFlag(Flag);
4565 if (Amount >> (ShiftAmount + 1)) {
4566          // If we don't have an accumulator yet, create it and copy DestReg.
4567 if (!Acc) {
4568 Acc = MRI.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
4569 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: TargetOpcode::COPY), DestReg: Acc)
4570 .addReg(RegNo: DestReg)
4571 .setMIFlag(Flag);
4572 } else {
4573 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: RISCV::ADD), DestReg: Acc)
4574 .addReg(RegNo: Acc, flags: RegState::Kill)
4575 .addReg(RegNo: DestReg)
4576 .setMIFlag(Flag);
4577 }
4578 }
4579 PrevShiftAmount = ShiftAmount;
4580 }
4581 }
4582 assert(Acc && "Expected valid accumulator");
4583 BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: get(Opcode: RISCV::ADD), DestReg)
4584 .addReg(RegNo: DestReg, flags: RegState::Kill)
4585 .addReg(RegNo: Acc, flags: RegState::Kill)
4586 .setMIFlag(Flag);
4587 }
4588}
4589
4590ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
4591RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
4592 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
4593 {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
4594 {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
4595 return ArrayRef(TargetFlags);
4596}
4597
4598unsigned RISCVInstrInfo::getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
4599 return OptLevel >= CodeGenOptLevel::Aggressive
4600 ? STI.getTailDupAggressiveThreshold()
4601 : 2;
4602}
4603
4604bool RISCV::isRVVSpill(const MachineInstr &MI) {
4605 // RVV lacks any support for immediate addressing for stack addresses, so be
4606 // conservative.
4607 unsigned Opcode = MI.getOpcode();
4608 if (!RISCVVPseudosTable::getPseudoInfo(Pseudo: Opcode) &&
4609 !getLMULForRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
4610 return false;
4611 return true;
4612}
4613
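// Return {NF, LMUL} for the segment spill/reload pseudos, e.g.
// PseudoVSPILL3_M2 -> {3, 2}: three register groups of LMUL = 2.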
4614std::optional<std::pair<unsigned, unsigned>>
4615RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
4616 switch (Opcode) {
4617 default:
4618 return std::nullopt;
4619 case RISCV::PseudoVSPILL2_M1:
4620 case RISCV::PseudoVRELOAD2_M1:
4621 return std::make_pair(x: 2u, y: 1u);
4622 case RISCV::PseudoVSPILL2_M2:
4623 case RISCV::PseudoVRELOAD2_M2:
4624 return std::make_pair(x: 2u, y: 2u);
4625 case RISCV::PseudoVSPILL2_M4:
4626 case RISCV::PseudoVRELOAD2_M4:
4627 return std::make_pair(x: 2u, y: 4u);
4628 case RISCV::PseudoVSPILL3_M1:
4629 case RISCV::PseudoVRELOAD3_M1:
4630 return std::make_pair(x: 3u, y: 1u);
4631 case RISCV::PseudoVSPILL3_M2:
4632 case RISCV::PseudoVRELOAD3_M2:
4633 return std::make_pair(x: 3u, y: 2u);
4634 case RISCV::PseudoVSPILL4_M1:
4635 case RISCV::PseudoVRELOAD4_M1:
4636 return std::make_pair(x: 4u, y: 1u);
4637 case RISCV::PseudoVSPILL4_M2:
4638 case RISCV::PseudoVRELOAD4_M2:
4639 return std::make_pair(x: 4u, y: 2u);
4640 case RISCV::PseudoVSPILL5_M1:
4641 case RISCV::PseudoVRELOAD5_M1:
4642 return std::make_pair(x: 5u, y: 1u);
4643 case RISCV::PseudoVSPILL6_M1:
4644 case RISCV::PseudoVRELOAD6_M1:
4645 return std::make_pair(x: 6u, y: 1u);
4646 case RISCV::PseudoVSPILL7_M1:
4647 case RISCV::PseudoVRELOAD7_M1:
4648 return std::make_pair(x: 7u, y: 1u);
4649 case RISCV::PseudoVSPILL8_M1:
4650 case RISCV::PseudoVRELOAD8_M1:
4651 return std::make_pair(x: 8u, y: 1u);
4652 }
4653}
4654
4655bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
4656 int16_t MI1FrmOpIdx =
4657 RISCV::getNamedOperandIdx(Opcode: MI1.getOpcode(), Name: RISCV::OpName::frm);
4658 int16_t MI2FrmOpIdx =
4659 RISCV::getNamedOperandIdx(Opcode: MI2.getOpcode(), Name: RISCV::OpName::frm);
4660 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
4661 return false;
4662 MachineOperand FrmOp1 = MI1.getOperand(i: MI1FrmOpIdx);
4663 MachineOperand FrmOp2 = MI2.getOperand(i: MI2FrmOpIdx);
4664 return FrmOp1.getImm() == FrmOp2.getImm();
4665}
4666
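// Return the number of low bits of the scalar (GPR) operand that the given
// vector opcode actually reads, or std::nullopt if all bits may matter.
// E.g. VSLL_VX with Log2SEW = 5 (SEW = 32) only uses the low 5 bits of rs1,
// while VADD_VX with SEW = 32 uses the low 32 bits.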
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW) {
  switch (Opcode) {
  default:
    return std::nullopt;

  // 11.6. Vector Single-Width Shift Instructions
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  // 12.4. Vector Single-Width Scaling Shift Instructions
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
  // Zvbb
  case RISCV::VROL_VX:
  case RISCV::VROR_VX:
    // Only the low lg2(SEW) bits of the shift-amount value are used.
    return Log2SEW;

  // 11.7. Vector Narrowing Integer Right Shift Instructions
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  // 12.5. Vector Narrowing Fixed-Point Clip Instructions
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
  // Zvbb
  case RISCV::VWSLL_VX:
    // Only the low lg2(2*SEW) bits of the shift-amount value are used.
    return Log2SEW + 1;

  // 11.1. Vector Single-Width Integer Add and Subtract
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  // 11.2. Vector Widening Integer Add/Subtract
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  // 11.5. Vector Bitwise Logical Instructions
  case RISCV::VAND_VX:
  case RISCV::VOR_VX:
  case RISCV::VXOR_VX:
  // 11.8. Vector Integer Compare Instructions
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  // 11.9. Vector Integer Min/Max Instructions
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  // 11.10. Vector Single-Width Integer Multiply Instructions
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  // 11.11. Vector Integer Divide Instructions
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  // 11.12. Vector Widening Integer Multiply Instructions
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  // 11.13. Vector Single-Width Integer Multiply-Add Instructions
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  // 11.14. Vector Widening Integer Multiply-Add Instructions
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  // 11.15. Vector Integer Merge Instructions
  case RISCV::VMERGE_VXM:
  // 11.16. Vector Integer Move Instructions
  case RISCV::VMV_V_X:
  // 12.1. Vector Single-Width Saturating Add and Subtract
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  // 12.2. Vector Single-Width Averaging Add and Subtract
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
  case RISCV::VSMUL_VX:
  // 16.1. Integer Scalar Move Instructions
  case RISCV::VMV_S_X:
  // Zvbb
  case RISCV::VANDN_VX:
    return 1U << Log2SEW;
  }
}

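/// Map an RVV pseudo opcode to the MC opcode of the underlying vector
/// instruction, or return 0 if \p RVVPseudoOpcode is not an RVV pseudo.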
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  if (!RVV)
    return 0;
  return RVV->BaseInstr;
}

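/// Return the log2 of the destination EEW encoded in \p Desc's TSFlags, given
/// the log2 of the operation's SEW.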
unsigned RISCV::getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW) {
  unsigned DestEEW =
      (Desc.TSFlags & RISCVII::DestEEWMask) >> RISCVII::DestEEWShift;
  // A DestEEW field of 0 encodes a destination EEW of 1.
  if (DestEEW == 0)
    return 0;
  // Otherwise EEW = SEW * 2^(DestEEW - 1).
  unsigned Scaled = Log2SEW + (DestEEW - 1);
  assert(Scaled >= 3 && Scaled <= 6);
  return Scaled;
}

/// Given two VL operands, do we know that LHS <= RHS? VLMaxSentinel is treated
/// as the largest possible VL.
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  if (RHS.isImm() && RHS.getImm() == RISCV::VLMaxSentinel)
    return true;
  if (LHS.isImm() && LHS.getImm() == RISCV::VLMaxSentinel)
    return false;
  if (!LHS.isImm() || !RHS.isImm())
    return false;
  return LHS.getImm() <= RHS.getImm();
}

namespace {
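// Pipeliner loop info for a single-basic-block loop, constructed by
// analyzeLoopForPipelining below to describe the loop's backedge condition to
// the MachinePipeliner.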
class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
  const MachineInstr *LHS;
  const MachineInstr *RHS;
  SmallVector<MachineOperand, 3> Cond;

public:
  RISCVPipelinerLoopInfo(const MachineInstr *LHS, const MachineInstr *RHS,
                         const SmallVectorImpl<MachineOperand> &Cond)
      : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // Keep the instructions that compute the loop-control condition in
    // stage 0. The predecessors of LHS/RHS are considered by the caller.
    if (LHS && MI == LHS)
      return true;
    if (RHS && MI == RHS)
      return true;
    return false;
  }

  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &CondParam) override {
    // A branch instruction will be inserted as "if (Cond) goto epilogue".
    // Cond is normalized for such use by analyzeLoopForPipelining.
    // The predecessors of the branch are assumed to have already been
    // inserted.
    CondParam = Cond;
    return {};
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {}

  void adjustTripCount(int TripCountAdjust) override {}
};
} // namespace

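// Analyze a single-basic-block loop ending in a conditional branch and, if it
// is a suitable candidate for software pipelining, return a
// RISCVPipelinerLoopInfo describing its backedge condition.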
std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (analyzeBranch(*LoopBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return nullptr;

  // Infinite loops are not supported.
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;

  // Must be a conditional branch.
  if (FBB == nullptr)
    return nullptr;

  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");

  // Normalization for createTripCountGreaterCondition().
  if (TBB == LoopBB)
    reverseBranchCondition(Cond);

  const MachineRegisterInfo &MRI = LoopBB->getParent()->getRegInfo();
  auto FindRegDef = [&MRI](MachineOperand &Op) -> const MachineInstr * {
    if (!Op.isReg())
      return nullptr;
    Register Reg = Op.getReg();
    if (!Reg.isVirtual())
      return nullptr;
    return MRI.getVRegDef(Reg);
  };

  const MachineInstr *LHS = FindRegDef(Cond[1]);
  const MachineInstr *RHS = FindRegDef(Cond[2]);
  if (LHS && LHS->isPHI())
    return nullptr;
  if (RHS && RHS->isPHI())
    return nullptr;

  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}

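// Division, remainder, and square-root instructions (scalar and vector) are
// conservatively treated as high-latency definitions.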
// FIXME: We should remove this if we have a default generic scheduling model.
bool RISCVInstrInfo::isHighLatencyDef(int Opc) const {
  unsigned RVVMCOpcode = RISCV::getRVVMCOpcode(Opc);
  Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
  switch (Opc) {
  default:
    return false;
  // Integer div/rem.
  case RISCV::DIV:
  case RISCV::DIVW:
  case RISCV::DIVU:
  case RISCV::DIVUW:
  case RISCV::REM:
  case RISCV::REMW:
  case RISCV::REMU:
  case RISCV::REMUW:
  // Floating-point div/sqrt.
  case RISCV::FDIV_H:
  case RISCV::FDIV_S:
  case RISCV::FDIV_D:
  case RISCV::FDIV_H_INX:
  case RISCV::FDIV_S_INX:
  case RISCV::FDIV_D_INX:
  case RISCV::FDIV_D_IN32X:
  case RISCV::FSQRT_H:
  case RISCV::FSQRT_S:
  case RISCV::FSQRT_D:
  case RISCV::FSQRT_H_INX:
  case RISCV::FSQRT_S_INX:
  case RISCV::FSQRT_D_INX:
  case RISCV::FSQRT_D_IN32X:
  // Vector integer div/rem.
  case RISCV::VDIV_VV:
  case RISCV::VDIV_VX:
  case RISCV::VDIVU_VV:
  case RISCV::VDIVU_VX:
  case RISCV::VREM_VV:
  case RISCV::VREM_VX:
  case RISCV::VREMU_VV:
  case RISCV::VREMU_VX:
  // Vector floating-point div/sqrt.
  case RISCV::VFDIV_VV:
  case RISCV::VFDIV_VF:
  case RISCV::VFRDIV_VF:
  case RISCV::VFSQRT_V:
  case RISCV::VFRSQRT7_V:
    return true;
  }
}