1//===-- RISCVSubtarget.cpp - RISC-V Subtarget Information -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the RISC-V specific subclass of TargetSubtargetInfo.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVSubtarget.h"
14#include "GISel/RISCVCallLowering.h"
15#include "GISel/RISCVLegalizerInfo.h"
16#include "RISCV.h"
17#include "RISCVFrameLowering.h"
18#include "RISCVSelectionDAGInfo.h"
19#include "RISCVTargetMachine.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/MC/TargetRegistry.h"
22#include "llvm/Support/ErrorHandling.h"
23
24using namespace llvm;
25
26#define DEBUG_TYPE "riscv-subtarget"
27
28#define GET_SUBTARGETINFO_TARGET_DESC
29#define GET_SUBTARGETINFO_CTOR
30#include "RISCVGenSubtargetInfo.inc"
31
32#define GET_RISCV_MACRO_FUSION_PRED_IMPL
33#include "RISCVGenMacroFusion.inc"
34
35namespace llvm::RISCVTuneInfoTable {
36
37#define GET_RISCVTuneInfoTable_IMPL
38#include "RISCVGenSearchableTables.inc"
39} // namespace llvm::RISCVTuneInfoTable
40
41static cl::opt<unsigned> RVVVectorLMULMax(
42 "riscv-v-fixed-length-vector-lmul-max",
43 cl::desc("The maximum LMUL value to use for fixed length vectors. "
44 "Fractional LMUL values are not supported."),
45 cl::init(Val: 8), cl::Hidden);
46
47static cl::opt<bool> RISCVDisableUsingConstantPoolForLargeInts(
48 "riscv-disable-using-constant-pool-for-large-ints",
49 cl::desc("Disable using constant pool for large integers."),
50 cl::init(Val: false), cl::Hidden);
51
52static cl::opt<unsigned> RISCVMaxBuildIntsCost(
53 "riscv-max-build-ints-cost",
54 cl::desc("The maximum cost used for building integers."), cl::init(Val: 0),
55 cl::Hidden);
56
57static cl::opt<bool> UseAA("riscv-use-aa", cl::init(Val: true),
58 cl::desc("Enable the use of AA during codegen."));
59
60static cl::opt<unsigned> RISCVMinimumJumpTableEntries(
61 "riscv-min-jump-table-entries", cl::Hidden,
62 cl::desc("Set minimum number of entries to use a jump table on RISCV"));
63
64static cl::opt<bool> UseMIPSLoadStorePairsOpt(
65 "use-riscv-mips-load-store-pairs",
66 cl::desc("Enable the load/store pair optimization pass"), cl::init(Val: false),
67 cl::Hidden);
68
69static cl::opt<bool> UseMIPSCCMovInsn("use-riscv-mips-ccmov",
70 cl::desc("Use 'mips.ccmov' instruction"),
71 cl::init(Val: true), cl::Hidden);
72
73static cl::opt<bool> EnablePExtSIMDCodeGen(
74 "riscv-enable-p-ext-simd-codegen",
75 cl::desc("Turn on P Extension SIMD codegen(This is a temporary switch "
76 "where only partial codegen is currently supported)"),
77 cl::init(Val: false), cl::Hidden);
78
79void RISCVSubtarget::anchor() {}
80
81RISCVSubtarget &
82RISCVSubtarget::initializeSubtargetDependencies(const Triple &TT, StringRef CPU,
83 StringRef TuneCPU, StringRef FS,
84 StringRef ABIName) {
85 // Determine default and user-specified characteristics
86 bool Is64Bit = TT.isArch64Bit();
87 if (CPU.empty() || CPU == "generic")
88 CPU = Is64Bit ? "generic-rv64" : "generic-rv32";
89
90 if (TuneCPU.empty())
91 TuneCPU = CPU;
92 if (TuneCPU == "generic")
93 TuneCPU = Is64Bit ? "generic-rv64" : "generic-rv32";
94
95 TuneInfo = RISCVTuneInfoTable::getRISCVTuneInfo(Name: TuneCPU);
96 // If there is no TuneInfo for this CPU, we fail back to generic.
97 if (!TuneInfo)
98 TuneInfo = RISCVTuneInfoTable::getRISCVTuneInfo(Name: "generic");
99 assert(TuneInfo && "TuneInfo shouldn't be nullptr!");
100
101 ParseSubtargetFeatures(CPU, TuneCPU, FS);
102
103 RISCV::updateCZceFeatureImplications(STI&: *this);
104
105 // Re-sync the flags.
106 HasStdExtZcd = hasFeature(Feature: RISCV::FeatureStdExtZcd);
107 HasStdExtZcf = hasFeature(Feature: RISCV::FeatureStdExtZcf);
108 HasStdExtC = hasFeature(Feature: RISCV::FeatureStdExtC);
109 HasStdExtZce = hasFeature(Feature: RISCV::FeatureStdExtZce);
110
111 TargetABI = RISCVABI::computeTargetABI(TT, FeatureBits: getFeatureBits(), ABIName);
112 RISCVFeatures::validate(TT, FeatureBits: getFeatureBits());
113 return *this;
114}
115
// Construct the subtarget. NOTE: initializeSubtargetDependencies() runs
// inside the init list (as FrameLowering's argument) so that feature parsing
// and ABI computation happen before FrameLowering, InstrInfo, and TLInfo are
// constructed; member declaration order must preserve this sequencing.
RISCVSubtarget::RISCVSubtarget(const Triple &TT, StringRef CPU,
                               StringRef TuneCPU, StringRef FS,
                               StringRef ABIName, unsigned RVVVectorBitsMin,
                               unsigned RVVVectorBitsMax,
                               const TargetMachine &TM)
    : RISCVGenSubtargetInfo(TT, CPU, TuneCPU, FS),
      IsLittleEndian(TT.isLittleEndian()), RVVVectorBitsMin(RVVVectorBitsMin),
      RVVVectorBitsMax(RVVVectorBitsMax),
      FrameLowering(
          initializeSubtargetDependencies(TT, CPU, TuneCPU, FS, ABIName)),
      InstrInfo(*this), TLInfo(TM, *this) {
  TSInfo = std::make_unique<RISCVSelectionDAGInfo>();
}
129
130RISCVSubtarget::~RISCVSubtarget() = default;
131
132const SelectionDAGTargetInfo *RISCVSubtarget::getSelectionDAGInfo() const {
133 return TSInfo.get();
134}
135
136const CallLowering *RISCVSubtarget::getCallLowering() const {
137 if (!CallLoweringInfo)
138 CallLoweringInfo.reset(p: new RISCVCallLowering(*getTargetLowering()));
139 return CallLoweringInfo.get();
140}
141
142InstructionSelector *RISCVSubtarget::getInstructionSelector() const {
143 if (!InstSelector) {
144 InstSelector.reset(p: createRISCVInstructionSelector(
145 *static_cast<const RISCVTargetMachine *>(&TLInfo.getTargetMachine()),
146 *this, *getRegBankInfo()));
147 }
148 return InstSelector.get();
149}
150
151const LegalizerInfo *RISCVSubtarget::getLegalizerInfo() const {
152 if (!Legalizer)
153 Legalizer.reset(p: new RISCVLegalizerInfo(*this));
154 return Legalizer.get();
155}
156
157const RISCVRegisterBankInfo *RISCVSubtarget::getRegBankInfo() const {
158 if (!RegBankInfo)
159 RegBankInfo.reset(p: new RISCVRegisterBankInfo(getHwMode()));
160 return RegBankInfo.get();
161}
162
163bool RISCVSubtarget::useConstantPoolForLargeInts() const {
164 return !RISCVDisableUsingConstantPoolForLargeInts;
165}
166
167bool RISCVSubtarget::enablePExtSIMDCodeGen() const {
168 return HasStdExtP && EnablePExtSIMDCodeGen;
169}
170
171// Returns true if VT is a P extension packed SIMD type that fits in XLen.
172bool RISCVSubtarget::isPExtPackedType(MVT VT) const {
173 if (!enablePExtSIMDCodeGen())
174 return false;
175
176 if (is64Bit())
177 return VT == MVT::v8i8 || VT == MVT::v4i16 || VT == MVT::v2i32;
178 return VT == MVT::v4i8 || VT == MVT::v2i16;
179}
180
181unsigned RISCVSubtarget::getMaxBuildIntsCost() const {
182 // Loading integer from constant pool needs two instructions (the reason why
183 // the minimum cost is 2): an address calculation instruction and a load
184 // instruction. Usually, address calculation and instructions used for
185 // building integers (addi, slli, etc.) can be done in one cycle, so here we
186 // set the default cost to (LoadLatency + 1) if no threshold is provided.
187 return RISCVMaxBuildIntsCost == 0
188 ? getSchedModel().LoadLatency + 1
189 : std::max<unsigned>(a: 2, b: RISCVMaxBuildIntsCost);
190}
191
192unsigned RISCVSubtarget::getMaxRVVVectorSizeInBits() const {
193 assert(hasVInstructions() &&
194 "Tried to get vector length without Zve or V extension support!");
195
196 // ZvlLen specifies the minimum required vlen. The upper bound provided by
197 // riscv-v-vector-bits-max should be no less than it.
198 if (RVVVectorBitsMax != 0 && RVVVectorBitsMax < ZvlLen)
199 report_fatal_error(reason: "riscv-v-vector-bits-max specified is lower "
200 "than the Zvl*b limitation");
201
202 return RVVVectorBitsMax;
203}
204
205unsigned RISCVSubtarget::getMinRVVVectorSizeInBits() const {
206 assert(hasVInstructions() &&
207 "Tried to get vector length without Zve or V extension support!");
208
209 if (RVVVectorBitsMin == -1U)
210 return ZvlLen;
211
212 // ZvlLen specifies the minimum required vlen. The lower bound provided by
213 // riscv-v-vector-bits-min should be no less than it.
214 if (RVVVectorBitsMin != 0 && RVVVectorBitsMin < ZvlLen)
215 report_fatal_error(reason: "riscv-v-vector-bits-min specified is lower "
216 "than the Zvl*b limitation");
217
218 return RVVVectorBitsMin;
219}
220
221unsigned RISCVSubtarget::getMaxLMULForFixedLengthVectors() const {
222 assert(hasVInstructions() &&
223 "Tried to get vector length without Zve or V extension support!");
224 assert(RVVVectorLMULMax <= 8 &&
225 llvm::has_single_bit<uint32_t>(RVVVectorLMULMax) &&
226 "V extension requires a LMUL to be at most 8 and a power of 2!");
227 return llvm::bit_floor(Value: std::clamp<unsigned>(val: RVVVectorLMULMax, lo: 1, hi: 8));
228}
229
230bool RISCVSubtarget::useRVVForFixedLengthVectors() const {
231 return hasVInstructions() &&
232 getMinRVVVectorSizeInBits() >= RISCV::RVVBitsPerBlock;
233}
234
235bool RISCVSubtarget::enableSubRegLiveness() const { return true; }
236
237bool RISCVSubtarget::enableMachinePipeliner() const {
238 return getSchedModel().hasInstrSchedModel();
239}
240
241void RISCVSubtarget::mirFileLoaded(MachineFunction &MF) const {
242 // We usually compute max call frame size after ISel. Do the computation now
243 // if the .mir file didn't specify it. Note that this will probably give you
244 // bogus values after PEI has eliminated the callframe setup/destroy pseudo
245 // instructions, specify explicitly if you need it to be correct.
246 MachineFrameInfo &MFI = MF.getFrameInfo();
247 if (!MFI.isMaxCallFrameSizeComputed())
248 MFI.computeMaxCallFrameSize(MF);
249}
250
/// Enable use of alias analysis during code generation (during MI
/// scheduling, DAGCombine, etc.). Controlled by -riscv-use-aa (on by
/// default).
bool RISCVSubtarget::useAA() const { return UseAA; }
254
255unsigned RISCVSubtarget::getMinimumJumpTableEntries() const {
256 return RISCVMinimumJumpTableEntries.getNumOccurrences() > 0
257 ? RISCVMinimumJumpTableEntries
258 : TuneInfo->MinimumJumpTableEntries;
259}
260
261void RISCVSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
262 const SchedRegion &Region) const {
263 // Do bidirectional scheduling since it provides a more balanced scheduling
264 // leading to better performance. This will increase compile time.
265 Policy.OnlyTopDown = false;
266 Policy.OnlyBottomUp = false;
267
268 // Disabling the latency heuristic can reduce the number of spills/reloads but
269 // will cause some regressions on some cores.
270 Policy.DisableLatencyHeuristic = DisableLatencySchedHeuristic;
271
272 // Spilling is generally expensive on all RISC-V cores, so always enable
273 // register-pressure tracking. This will increase compile time.
274 Policy.ShouldTrackPressure = true;
275}
276
277void RISCVSubtarget::overridePostRASchedPolicy(
278 MachineSchedPolicy &Policy, const SchedRegion &Region) const {
279 MISched::Direction PostRASchedDirection = getPostRASchedDirection();
280 if (PostRASchedDirection == MISched::TopDown) {
281 Policy.OnlyTopDown = true;
282 Policy.OnlyBottomUp = false;
283 } else if (PostRASchedDirection == MISched::BottomUp) {
284 Policy.OnlyTopDown = false;
285 Policy.OnlyBottomUp = true;
286 } else if (PostRASchedDirection == MISched::Bidirectional) {
287 Policy.OnlyTopDown = false;
288 Policy.OnlyBottomUp = false;
289 }
290}
291
292bool RISCVSubtarget::useMIPSLoadStorePairs() const {
293 return UseMIPSLoadStorePairsOpt && HasVendorXMIPSLSP;
294}
295
296bool RISCVSubtarget::useMIPSCCMovInsn() const {
297 return UseMIPSCCMovInsn && HasVendorXMIPSCMov;
298}
299