//===-- RISCVISelLowering.cpp - RISC-V DAG Lowering Implementation -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISC-V uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVConstantPoolValue.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSelectionDAGInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SDPatternMatch.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<unsigned> ExtensionMaxWebSize(
    DEBUG_TYPE "-ext-max-web-size", cl::Hidden,
    cl::desc("Give the maximum size (in number of nodes) of the web of "
             "instructions that we will consider for VW expansion"),
    cl::init(18));

static cl::opt<bool>
    AllowSplatInVW_W(DEBUG_TYPE "-form-vw-w-with-splat", cl::Hidden,
                     cl::desc("Allow the formation of VW_W operations (e.g., "
                              "VWADD_W) with splat constants"),
                     cl::init(false));

static cl::opt<unsigned> NumRepeatedDivisors(
    DEBUG_TYPE "-fp-repeated-divisors", cl::Hidden,
    cl::desc("Set the minimum number of repetitions of a divisor to allow "
             "transformation to multiplications by the reciprocal"),
    cl::init(2));

static cl::opt<int>
    FPImmCost(DEBUG_TYPE "-fpimm-cost", cl::Hidden,
              cl::desc("Give the maximum number of instructions that we will "
                       "use for creating a floating-point immediate value"),
              cl::init(3));

static cl::opt<bool>
    ReassocShlAddiAdd("reassoc-shl-addi-add", cl::Hidden,
                      cl::desc("Swap add and addi in cases where the add may "
                               "be combined with a shift"),
                      cl::init(true));

// TODO: Support more ops
static const unsigned ZvfbfaVPOps[] = {
    ISD::VP_FNEG, ISD::VP_FABS, ISD::VP_FCOPYSIGN};
static const unsigned ZvfbfaOps[] = {
    ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FADD,
    ISD::FSUB, ISD::FMUL, ISD::FMINNUM, ISD::FMAXNUM,
    ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM, ISD::FMINIMUM, ISD::FMAXIMUM,
    ISD::FMA, ISD::IS_FPCLASS, ISD::STRICT_FADD, ISD::STRICT_FSUB,
    ISD::STRICT_FMUL, ISD::STRICT_FMA, ISD::SETCC};

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM, STI), Subtarget(STI) {

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    reportFatalUsageError("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfhmin())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtZfbfmin() || Subtarget.hasVendorXAndesBFHCvt())
    addRegisterClass(MVT::bf16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
  if (Subtarget.hasStdExtZhinxmin())
    addRegisterClass(MVT::f16, &RISCV::GPRF16RegClass);
  if (Subtarget.hasStdExtZfinx())
    addRegisterClass(MVT::f32, &RISCV::GPRF32RegClass);
  if (Subtarget.hasStdExtZdinx()) {
    if (Subtarget.is64Bit())
      addRegisterClass(MVT::f64, &RISCV::GPRRegClass);
    else
      addRegisterClass(MVT::f64, &RISCV::GPRPairRegClass);
  }

  static const MVT::SimpleValueType BoolVecVTs[] = {
      MVT::nxv1i1, MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
  static const MVT::SimpleValueType IntVecVTs[] = {
      MVT::nxv1i8, MVT::nxv2i8, MVT::nxv4i8, MVT::nxv8i8, MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8, MVT::nxv1i16, MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32, MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};
  static const MVT::SimpleValueType F16VecVTs[] = {
      MVT::nxv1f16, MVT::nxv2f16, MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
  static const MVT::SimpleValueType BF16VecVTs[] = {
      MVT::nxv1bf16, MVT::nxv2bf16, MVT::nxv4bf16,
      MVT::nxv8bf16, MVT::nxv16bf16, MVT::nxv32bf16};
  static const MVT::SimpleValueType F32VecVTs[] = {
      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
  static const MVT::SimpleValueType F64VecVTs[] = {
      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
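  // Tuple types used by segment loads/stores (vlsegN/vssegN). NFIELDS ranges
  // from 2 to 8 and NFIELDS * LMUL may not exceed 8, which is why the larger
  // element-group types below allow fewer fields.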
  static const MVT::SimpleValueType VecTupleVTs[] = {
      MVT::riscv_nxv1i8x2, MVT::riscv_nxv1i8x3, MVT::riscv_nxv1i8x4,
      MVT::riscv_nxv1i8x5, MVT::riscv_nxv1i8x6, MVT::riscv_nxv1i8x7,
      MVT::riscv_nxv1i8x8, MVT::riscv_nxv2i8x2, MVT::riscv_nxv2i8x3,
      MVT::riscv_nxv2i8x4, MVT::riscv_nxv2i8x5, MVT::riscv_nxv2i8x6,
      MVT::riscv_nxv2i8x7, MVT::riscv_nxv2i8x8, MVT::riscv_nxv4i8x2,
      MVT::riscv_nxv4i8x3, MVT::riscv_nxv4i8x4, MVT::riscv_nxv4i8x5,
      MVT::riscv_nxv4i8x6, MVT::riscv_nxv4i8x7, MVT::riscv_nxv4i8x8,
      MVT::riscv_nxv8i8x2, MVT::riscv_nxv8i8x3, MVT::riscv_nxv8i8x4,
      MVT::riscv_nxv8i8x5, MVT::riscv_nxv8i8x6, MVT::riscv_nxv8i8x7,
      MVT::riscv_nxv8i8x8, MVT::riscv_nxv16i8x2, MVT::riscv_nxv16i8x3,
      MVT::riscv_nxv16i8x4, MVT::riscv_nxv32i8x2};

  if (Subtarget.hasVInstructions()) {
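    // Pick the vector register class whose LMUL matches the type's size
    // relative to RVVBitsPerBlock (VR for LMUL <= 1, VRM2/VRM4/VRM8 for
    // LMUL 2/4/8).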
    auto addRegClassForRVV = [this](MVT VT) {
      // Disable the smallest fractional LMUL types if ELEN is less than
      // RVVBitsPerBlock.
      unsigned MinElts = RISCV::RVVBitsPerBlock / Subtarget.getELen();
      if (VT.getVectorMinNumElements() < MinElts)
        return;

      unsigned Size = VT.getSizeInBits().getKnownMinValue();
      const TargetRegisterClass *RC;
      if (Size <= RISCV::RVVBitsPerBlock)
        RC = &RISCV::VRRegClass;
      else if (Size == 2 * RISCV::RVVBitsPerBlock)
        RC = &RISCV::VRM2RegClass;
      else if (Size == 4 * RISCV::RVVBitsPerBlock)
        RC = &RISCV::VRM4RegClass;
      else if (Size == 8 * RISCV::RVVBitsPerBlock)
        RC = &RISCV::VRM8RegClass;
      else
        llvm_unreachable("Unexpected size");

      addRegisterClass(VT, RC);
    };

    for (MVT VT : BoolVecVTs)
      addRegClassForRVV(VT);
    for (MVT VT : IntVecVTs) {
      if (VT.getVectorElementType() == MVT::i64 &&
          !Subtarget.hasVInstructionsI64())
        continue;
      addRegClassForRVV(VT);
    }

    if (Subtarget.hasVInstructionsF16Minimal() ||
        Subtarget.hasVendorXAndesVPackFPH())
      for (MVT VT : F16VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasVInstructionsBF16Minimal() ||
        Subtarget.hasVendorXAndesVBFHCvt())
      for (MVT VT : BF16VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasVInstructionsF32())
      for (MVT VT : F32VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasVInstructionsF64())
      for (MVT VT : F64VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      auto addRegClassForFixedVectors = [this](MVT VT) {
        MVT ContainerVT = getContainerForFixedLengthVector(VT);
        unsigned RCID = getRegClassIDForVecVT(ContainerVT);
        const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
        addRegisterClass(VT, TRI.getRegClass(RCID));
      };
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);
    }

    addRegisterClass(MVT::riscv_nxv1i8x2, &RISCV::VRN2M1RegClass);
    addRegisterClass(MVT::riscv_nxv1i8x3, &RISCV::VRN3M1RegClass);
    addRegisterClass(MVT::riscv_nxv1i8x4, &RISCV::VRN4M1RegClass);
    addRegisterClass(MVT::riscv_nxv1i8x5, &RISCV::VRN5M1RegClass);
    addRegisterClass(MVT::riscv_nxv1i8x6, &RISCV::VRN6M1RegClass);
    addRegisterClass(MVT::riscv_nxv1i8x7, &RISCV::VRN7M1RegClass);
    addRegisterClass(MVT::riscv_nxv1i8x8, &RISCV::VRN8M1RegClass);
    addRegisterClass(MVT::riscv_nxv2i8x2, &RISCV::VRN2M1RegClass);
    addRegisterClass(MVT::riscv_nxv2i8x3, &RISCV::VRN3M1RegClass);
    addRegisterClass(MVT::riscv_nxv2i8x4, &RISCV::VRN4M1RegClass);
    addRegisterClass(MVT::riscv_nxv2i8x5, &RISCV::VRN5M1RegClass);
    addRegisterClass(MVT::riscv_nxv2i8x6, &RISCV::VRN6M1RegClass);
    addRegisterClass(MVT::riscv_nxv2i8x7, &RISCV::VRN7M1RegClass);
    addRegisterClass(MVT::riscv_nxv2i8x8, &RISCV::VRN8M1RegClass);
    addRegisterClass(MVT::riscv_nxv4i8x2, &RISCV::VRN2M1RegClass);
    addRegisterClass(MVT::riscv_nxv4i8x3, &RISCV::VRN3M1RegClass);
    addRegisterClass(MVT::riscv_nxv4i8x4, &RISCV::VRN4M1RegClass);
    addRegisterClass(MVT::riscv_nxv4i8x5, &RISCV::VRN5M1RegClass);
    addRegisterClass(MVT::riscv_nxv4i8x6, &RISCV::VRN6M1RegClass);
    addRegisterClass(MVT::riscv_nxv4i8x7, &RISCV::VRN7M1RegClass);
    addRegisterClass(MVT::riscv_nxv4i8x8, &RISCV::VRN8M1RegClass);
    addRegisterClass(MVT::riscv_nxv8i8x2, &RISCV::VRN2M1RegClass);
    addRegisterClass(MVT::riscv_nxv8i8x3, &RISCV::VRN3M1RegClass);
    addRegisterClass(MVT::riscv_nxv8i8x4, &RISCV::VRN4M1RegClass);
    addRegisterClass(MVT::riscv_nxv8i8x5, &RISCV::VRN5M1RegClass);
    addRegisterClass(MVT::riscv_nxv8i8x6, &RISCV::VRN6M1RegClass);
    addRegisterClass(MVT::riscv_nxv8i8x7, &RISCV::VRN7M1RegClass);
    addRegisterClass(MVT::riscv_nxv8i8x8, &RISCV::VRN8M1RegClass);
    addRegisterClass(MVT::riscv_nxv16i8x2, &RISCV::VRN2M2RegClass);
    addRegisterClass(MVT::riscv_nxv16i8x3, &RISCV::VRN3M2RegClass);
    addRegisterClass(MVT::riscv_nxv16i8x4, &RISCV::VRN4M2RegClass);
    addRegisterClass(MVT::riscv_nxv32i8x2, &RISCV::VRN2M4RegClass);
  }

  // Fixed vectors are stored in GPRs for P extension packed operations.
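  // With P-extension SIMD the packed elements live in X registers, so the
  // legal fixed vector types are exactly those whose total width equals XLEN
  // (RV64: v2i32/v4i16/v8i8; RV32: v2i16/v4i8).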
  if (Subtarget.enablePExtSIMDCodeGen()) {
    if (Subtarget.is64Bit()) {
      addRegisterClass(MVT::v2i32, &RISCV::GPRRegClass);
      addRegisterClass(MVT::v4i16, &RISCV::GPRRegClass);
      addRegisterClass(MVT::v8i8, &RISCV::GPRRegClass);
    } else {
      addRegisterClass(MVT::v2i16, &RISCV::GPRRegClass);
      addRegisterClass(MVT::v4i8, &RISCV::GPRRegClass);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT,
                   MVT::i1, Promote);
  // DAGCombiner can call isLoadExtLegal for types that aren't legal.
  setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i32,
                   MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Custom);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setCondCodeAction(ISD::SETGT, XLenVT, Custom);
  setCondCodeAction(ISD::SETGE, XLenVT, Expand);
  setCondCodeAction(ISD::SETUGT, XLenVT, Custom);
  setCondCodeAction(ISD::SETUGE, XLenVT, Expand);
  if (!(Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
    setCondCodeAction(ISD::SETULE, XLenVT, Expand);
    setCondCodeAction(ISD::SETLE, XLenVT, Expand);
  }

  setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);

  if (!Subtarget.hasVendorXTHeadBb() && !Subtarget.hasVendorXqcibm() &&
      !Subtarget.hasVendorXAndesPerf())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);

  if (!Subtarget.hasStdExtZbb() && !Subtarget.hasVendorXTHeadBb() &&
      !Subtarget.hasVendorXqcibm() && !Subtarget.hasVendorXAndesPerf() &&
      !(Subtarget.hasVendorXCValu() && !Subtarget.is64Bit()))
    setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);

  if (Subtarget.hasStdExtZilsd() && !Subtarget.is64Bit()) {
    setOperationAction(ISD::LOAD, MVT::i64, Custom);
    setOperationAction(ISD::STORE, MVT::i64, Custom);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

    setOperationAction(ISD::LOAD, MVT::i32, Custom);
    setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
                       MVT::i32, Custom);
    setOperationAction({ISD::UADDO, ISD::USUBO}, MVT::i32, Custom);
    setOperationAction({ISD::SADDO, ISD::SSUBO}, MVT::i32, Custom);
  }
  if (!Subtarget.hasStdExtZmmul()) {
    setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU}, XLenVT, Expand);
  } else if (Subtarget.is64Bit()) {
    setOperationAction(ISD::MUL, MVT::i128, Custom);
    setOperationAction(ISD::MUL, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::MUL, MVT::i64, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, XLenVT,
                       Expand);
  } else if (Subtarget.is64Bit()) {
    setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
                       {MVT::i8, MVT::i16, MVT::i32}, Custom);
  }

  setOperationAction(
      {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, XLenVT,
      Expand);

  setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, XLenVT,
                     Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) {
    if (Subtarget.is64Bit())
      setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
  } else if (Subtarget.hasVendorXTHeadBb()) {
    if (Subtarget.is64Bit())
      setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
    setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Custom);
  } else if (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit()) {
    setOperationAction(ISD::ROTL, XLenVT, Expand);
  } else {
    setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtP())
    setOperationAction({ISD::FSHL, ISD::FSHR}, XLenVT, Legal);

  setOperationAction(ISD::BSWAP, XLenVT,
                     Subtarget.hasREV8Like() ? Legal : Expand);

  if ((Subtarget.hasVendorXCVbitmanip() || Subtarget.hasVendorXqcibm()) &&
      !Subtarget.is64Bit()) {
    setOperationAction(ISD::BITREVERSE, XLenVT, Legal);
  } else {
    // Zbkb can use rev8+brev8 to implement bitreverse.
    setOperationAction(ISD::BITREVERSE, XLenVT,
                       Subtarget.hasStdExtZbkb() ? Custom : Expand);
    if (Subtarget.hasStdExtZbkb())
      setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);
  }

  if (Subtarget.hasStdExtZbb() ||
      (Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
    setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
                       Legal);
  }

  if (Subtarget.hasCTZLike()) {
    if (Subtarget.is64Bit())
      setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::CTTZ, XLenVT, Expand);
  }

  if (!Subtarget.hasCPOPLike()) {
    // TODO: These should be set to LibCall, but this currently breaks
    // the Linux kernel build. See #101786. Lacks i128 tests, too.
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i128, Expand);
    else
      setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  if (Subtarget.hasCLZLike()) {
    // We need the custom lowering to make sure that the resulting sequence
    // for the 32bit case is efficient on 64bit targets.
    // Use default promotion for i32 without Zbb.
    if (Subtarget.is64Bit() &&
        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtP()))
      setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::CTLZ, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtP()) {
    setOperationAction(ISD::CTLS, XLenVT, Legal);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTLS, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtP() ||
      (Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
    setOperationAction(ISD::ABS, XLenVT, Legal);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS, MVT::i32, Custom);
  } else if (Subtarget.hasShortForwardBranchIALU()) {
    // We can use PseudoCCSUB to implement ABS.
    setOperationAction(ISD::ABS, XLenVT, Legal);
  } else if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ABS, MVT::i32, Custom);
  }

  if (!Subtarget.useMIPSCCMovInsn() && !Subtarget.hasVendorXTHeadCondMov())
    setOperationAction(ISD::SELECT, XLenVT, Custom);

  if ((Subtarget.hasStdExtP() || Subtarget.hasVendorXqcia()) &&
      !Subtarget.is64Bit()) {
    // FIXME: Support i32 on RV64+P by inserting into a v2i32 vector, doing
    // the vector operation and extracting.
    setOperationAction({ISD::SADDSAT, ISD::SSUBSAT, ISD::UADDSAT, ISD::USUBSAT},
                       MVT::i32, Legal);
  } else if (!Subtarget.hasStdExtZbb() && Subtarget.is64Bit()) {
    setOperationAction({ISD::SADDSAT, ISD::SSUBSAT, ISD::UADDSAT, ISD::USUBSAT},
                       MVT::i32, Custom);
  }

  if (Subtarget.hasVendorXqcia() && !Subtarget.is64Bit()) {
    setOperationAction(ISD::USHLSAT, MVT::i32, Legal);
  }

  if ((Subtarget.hasStdExtP() || Subtarget.hasVendorXqcia()) &&
      !Subtarget.is64Bit()) {
    // FIXME: Support i32 on RV64+P by inserting into a v2i32 vector, doing
    // pssha.w and extracting.
    setOperationAction(ISD::SSHLSAT, MVT::i32, Legal);
  }

  if (Subtarget.hasStdExtZbc() || Subtarget.hasStdExtZbkc())
    setOperationAction({ISD::CLMUL, ISD::CLMULH}, XLenVT, Legal);
  if (Subtarget.hasStdExtZbc())
    setOperationAction(ISD::CLMULR, XLenVT, Legal);

  static const unsigned FPLegalNodeTypes[] = {
      ISD::FMINNUM, ISD::FMAXNUM, ISD::FMINIMUMNUM,
      ISD::FMAXIMUMNUM, ISD::LRINT, ISD::LLRINT,
      ISD::LROUND, ISD::LLROUND, ISD::STRICT_LRINT,
      ISD::STRICT_LLRINT, ISD::STRICT_LROUND, ISD::STRICT_LLROUND,
      ISD::STRICT_FMA, ISD::STRICT_FADD, ISD::STRICT_FSUB,
      ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FSQRT,
      ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, ISD::FCANONICALIZE};

  static const ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE, ISD::SETNE, ISD::SETO, ISD::SETUO};

  static const unsigned FPOpToExpand[] = {ISD::FSIN, ISD::FCOS, ISD::FSINCOS,
                                          ISD::FPOW};
  static const unsigned FPOpToLibCall[] = {ISD::FREM};

  static const unsigned FPRndMode[] = {
      ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FRINT, ISD::FROUND,
      ISD::FROUNDEVEN};

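  // Operations promoted to f32 when only Zfhmin/Zfbfmin are available; those
  // extensions only provide loads/stores, moves and conversions, not f16/bf16
  // arithmetic.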
  static const unsigned ZfhminZfbfminPromoteOps[] = {
      ISD::FMINNUM, ISD::FMAXNUM, ISD::FMAXIMUMNUM,
      ISD::FMINIMUMNUM, ISD::FADD, ISD::FSUB,
      ISD::FMUL, ISD::FMA, ISD::FDIV,
      ISD::FSQRT, ISD::STRICT_FMA, ISD::STRICT_FADD,
      ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV,
      ISD::STRICT_FSQRT, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS,
      ISD::SETCC, ISD::FCEIL, ISD::FFLOOR,
      ISD::FTRUNC, ISD::FRINT, ISD::FROUND,
      ISD::FROUNDEVEN, ISD::FCANONICALIZE};

  if (Subtarget.enablePExtSIMDCodeGen()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
    setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand);
    static const MVT RV32VTs[] = {MVT::v2i16, MVT::v4i8};
    static const MVT RV64VTs[] = {MVT::v2i32, MVT::v4i16, MVT::v8i8};
    ArrayRef<MVT> VTs;
    if (Subtarget.is64Bit()) {
      VTs = RV64VTs;
      setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
      setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
      setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand);
      setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
      setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand);
      // There is no vector shift-amount instruction in the P extension, so we
      // unroll to scalar instructions. 32-bit vector VTs are widened to 64-bit
      // vectors (e.g. v2i16 -> v4i16) before being unrolled, so we need custom
      // widening for the operations that will be unrolled.
      setOperationAction({ISD::SHL, ISD::SRL, ISD::SRA},
                         {MVT::v2i16, MVT::v4i8}, Custom);
    } else {
      VTs = RV32VTs;
    }
    // By default everything must be expanded.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
      setOperationAction(Op, VTs, Expand);
    setOperationAction({ISD::LOAD, ISD::STORE}, VTs, Legal);
    setOperationAction({ISD::ADD, ISD::SUB}, VTs, Legal);
    setOperationAction({ISD::AND, ISD::OR, ISD::XOR}, VTs, Legal);
    setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU}, VTs, Legal);
    setOperationAction(ISD::UADDSAT, VTs, Legal);
    setOperationAction(ISD::SADDSAT, VTs, Legal);
    setOperationAction(ISD::USUBSAT, VTs, Legal);
    setOperationAction(ISD::SSUBSAT, VTs, Legal);
    setOperationAction(ISD::SSHLSAT, VTs, Legal);
    setOperationAction({ISD::AVGFLOORS, ISD::AVGFLOORU}, VTs, Legal);
    setOperationAction({ISD::ABDS, ISD::ABDU}, VTs, Legal);
    setOperationAction(ISD::SPLAT_VECTOR, VTs, Legal);
    setOperationAction(ISD::BUILD_VECTOR, VTs, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VTs, Legal);
    setOperationAction({ISD::SHL, ISD::SRL, ISD::SRA}, VTs, Custom);
    setOperationAction(ISD::BITCAST, VTs, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VTs, Custom);
    setOperationAction({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX}, VTs,
                       Legal);
    setOperationAction(ISD::SELECT, VTs, Custom);
    setOperationAction(ISD::SELECT_CC, VTs, Expand);
    setOperationAction(ISD::VSELECT, VTs, Legal);
    setOperationAction(ISD::SETCC, VTs, Legal);
    setCondCodeAction({ISD::SETNE, ISD::SETGT, ISD::SETGE, ISD::SETUGT,
                       ISD::SETUGE, ISD::SETULE, ISD::SETLE},
                      VTs, Expand);

    if (!Subtarget.is64Bit())
      setOperationAction(ISD::BUILD_VECTOR, MVT::v4i8, Custom);

    // P extension vector comparisons produce all 1s for true, all 0s for false
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (Subtarget.hasStdExtZfbfmin()) {
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::ConstantFP, MVT::bf16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::bf16, Expand);
    setOperationAction(ISD::SELECT, MVT::bf16, Custom);
    setOperationAction(ISD::BR_CC, MVT::bf16, Expand);
    setOperationAction(ZfhminZfbfminPromoteOps, MVT::bf16, Promote);
    setOperationAction(ISD::FREM, MVT::bf16, Promote);
    setOperationAction(ISD::FABS, MVT::bf16, Custom);
    setOperationAction(ISD::FNEG, MVT::bf16, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::bf16, Custom);
    setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT}, XLenVT, Custom);
    setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP}, XLenVT, Custom);
  }

  if (Subtarget.hasStdExtZfhminOrZhinxmin()) {
    if (Subtarget.hasStdExtZfhOrZhinx()) {
      setOperationAction(FPLegalNodeTypes, MVT::f16, Legal);
      setOperationAction(FPRndMode, MVT::f16,
                         Subtarget.hasStdExtZfa() ? Legal : Custom);
      setOperationAction(ISD::IS_FPCLASS, MVT::f16, Custom);
      setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::f16,
                         Subtarget.hasStdExtZfa() ? Legal : Custom);
      if (Subtarget.hasStdExtZfa())
        setOperationAction(ISD::ConstantFP, MVT::f16, Custom);
    } else {
      setOperationAction(ZfhminZfbfminPromoteOps, MVT::f16, Promote);
      setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::f16, Promote);
      for (auto Op : {ISD::LROUND, ISD::LLROUND, ISD::LRINT, ISD::LLRINT,
                      ISD::STRICT_LROUND, ISD::STRICT_LLROUND,
                      ISD::STRICT_LRINT, ISD::STRICT_LLRINT})
        setOperationAction(Op, MVT::f16, Custom);
      setOperationAction(ISD::FABS, MVT::f16, Custom);
      setOperationAction(ISD::FNEG, MVT::f16, Custom);
      setOperationAction(ISD::FCOPYSIGN, MVT::f16, Custom);
      setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT}, XLenVT, Custom);
      setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP}, XLenVT, Custom);
    }

    if (!Subtarget.hasStdExtD()) {
      // FIXME: handle f16 fma when f64 is not legal. Using an f32 fma
      // instruction runs into double rounding issues, so this is wrong.
      // Normally we'd use an f64 fma, but without the D extension the f64 type
      // is not legal. This should probably be a libcall.
      AddPromotedToType(ISD::FMA, MVT::f16, MVT::f32);
      AddPromotedToType(ISD::STRICT_FMA, MVT::f16, MVT::f32);
    }

    setOperationAction(ISD::BITCAST, MVT::i16, Custom);

    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
    setCondCodeAction(FPCCToExpand, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);

    setOperationAction(
        ISD::FNEARBYINT, MVT::f16,
        Subtarget.hasStdExtZfh() && Subtarget.hasStdExtZfa() ? Legal : Promote);
    setOperationAction({ISD::FREM, ISD::FPOW, ISD::FPOWI,
                        ISD::FCOS, ISD::FSIN, ISD::FSINCOS, ISD::FEXP,
                        ISD::FEXP2, ISD::FEXP10, ISD::FLOG, ISD::FLOG2,
                        ISD::FLOG10, ISD::FLDEXP, ISD::FFREXP, ISD::FMODF},
                       MVT::f16, Promote);

    // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
    // complete support for all operations in LegalizeDAG.
    setOperationAction({ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR,
                        ISD::STRICT_FNEARBYINT, ISD::STRICT_FRINT,
                        ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN,
                        ISD::STRICT_FTRUNC, ISD::STRICT_FLDEXP},
                       MVT::f16, Promote);

    // We need to custom promote this.
    if (Subtarget.is64Bit())
      setOperationAction(ISD::FPOWI, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtFOrZfinx()) {
    setOperationAction(FPLegalNodeTypes, MVT::f32, Legal);
    setOperationAction(FPRndMode, MVT::f32,
                       Subtarget.hasStdExtZfa() ? Legal : Custom);
    setCondCodeAction(FPCCToExpand, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    setOperationAction(FPOpToExpand, MVT::f32, Expand);
    setOperationAction(FPOpToLibCall, MVT::f32, LibCall);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::bf16, Expand);
    setTruncStoreAction(MVT::f32, MVT::bf16, Expand);
    setOperationAction(ISD::IS_FPCLASS, MVT::f32, Custom);
    setOperationAction(ISD::BF16_TO_FP, MVT::f32, Custom);
    setOperationAction(ISD::FP_TO_BF16, MVT::f32,
                       Subtarget.isSoftFPABI() ? LibCall : Custom);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_FP16, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP16_TO_FP, MVT::f32, Custom);

    if (Subtarget.hasStdExtZfa()) {
      setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
      setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::f32, Legal);
    } else {
      setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::f32, Custom);
    }
  }

  if (Subtarget.hasStdExtFOrZfinx() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtDOrZdinx()) {
    setOperationAction(FPLegalNodeTypes, MVT::f64, Legal);

    if (!Subtarget.is64Bit())
      setOperationAction(ISD::BITCAST, MVT::i64, Custom);

    if (Subtarget.hasStdExtZdinx() && !Subtarget.hasStdExtZilsd() &&
        !Subtarget.is64Bit()) {
      setOperationAction(ISD::LOAD, MVT::f64, Custom);
      setOperationAction(ISD::STORE, MVT::f64, Custom);
    }

    if (Subtarget.hasStdExtZfa()) {
      setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
      setOperationAction(FPRndMode, MVT::f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
      setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::f64, Legal);
    } else {
      if (Subtarget.is64Bit())
        setOperationAction(FPRndMode, MVT::f64, Custom);

      setOperationAction({ISD::FMAXIMUM, ISD::FMINIMUM}, MVT::f64, Custom);
    }

    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
    setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    setOperationAction(FPOpToExpand, MVT::f64, Expand);
    setOperationAction(FPOpToLibCall, MVT::f64, LibCall);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::bf16, Expand);
    setTruncStoreAction(MVT::f64, MVT::bf16, Expand);
    setOperationAction(ISD::IS_FPCLASS, MVT::f64, Custom);
    setOperationAction(ISD::BF16_TO_FP, MVT::f64, Custom);
    setOperationAction(ISD::FP_TO_BF16, MVT::f64,
                       Subtarget.isSoftFPABI() ? LibCall : Custom);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::STRICT_FP_TO_FP16, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP16_TO_FP, MVT::f64, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT,
                        ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT},
                       MVT::i32, Custom);
    setOperationAction(ISD::LROUND, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtFOrZfinx()) {
    setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT,
                       Custom);

    // f16/bf16 require custom handling.
    setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT}, XLenVT,
                       Custom);
    setOperationAction({ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP}, XLenVT,
                       Custom);

    setOperationAction(ISD::GET_ROUNDING, XLenVT, Custom);
    setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
    setOperationAction(ISD::GET_FPENV, XLenVT, Custom);
    setOperationAction(ISD::SET_FPENV, XLenVT, Custom);
    setOperationAction(ISD::RESET_FPENV, MVT::Other, Custom);
    setOperationAction(ISD::GET_FPMODE, XLenVT, Custom);
    setOperationAction(ISD::SET_FPMODE, XLenVT, Custom);
    setOperationAction(ISD::RESET_FPMODE, MVT::Other, Custom);
  }

  setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, ISD::ConstantPool,
                      ISD::JumpTable},
                     XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  if (Subtarget.is64Bit())
    setOperationAction(ISD::Constant, MVT::i64, Custom);

  // TODO: On M-mode only targets, the cycle[h]/time[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);
  setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
    setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
  }

  setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);

  if (Subtarget.hasVendorXMIPSCBOP())
    setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
  else if (Subtarget.hasStdExtZicbop())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

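  // With Zalrsc we can lower atomics up to XLEN bits via LR/SC sequences.
  // Zabha adds byte/halfword atomic operations and, combined with Zacas,
  // native sub-word compare-and-swap, which is why the minimum cmpxchg size
  // drops to 8 bits in that configuration.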
  if (Subtarget.hasStdExtZalrsc()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    if (Subtarget.hasStdExtZabha() && Subtarget.hasStdExtZacas())
      setMinCmpXchgSizeInBits(8);
    else
      setMinCmpXchgSizeInBits(32);
  } else if (Subtarget.hasForcedAtomics()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  setBooleanContents(ZeroOrOneBooleanContent);

  if (getTargetMachine().getTargetTriple().isOSLinux()) {
    // Custom lowering of llvm.clear_cache.
    setOperationAction(ISD::CLEAR_CACHE, MVT::Other, Custom);
  }

  if (Subtarget.hasVInstructions()) {
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

    setOperationAction(ISD::VSCALE, XLenVT, Custom);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN,
                        ISD::INTRINSIC_VOID},
                       {MVT::i8, MVT::i16}, Custom);
    if (Subtarget.is64Bit())
      setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
                         MVT::i32, Custom);
    else
      setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
                         MVT::i64, Custom);

    setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
                       MVT::Other, Custom);

    static const unsigned IntegerVPOps[] = {
        ISD::VP_ADD, ISD::VP_SUB, ISD::VP_MUL,
        ISD::VP_SDIV, ISD::VP_UDIV, ISD::VP_SREM,
        ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,
        ISD::VP_XOR, ISD::VP_SRA, ISD::VP_SRL,
        ISD::VP_SHL, ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
        ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR, ISD::VP_REDUCE_SMAX,
        ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
        ISD::VP_MERGE, ISD::VP_SELECT, ISD::VP_FP_TO_SINT,
        ISD::VP_FP_TO_UINT, ISD::VP_SETCC, ISD::VP_SIGN_EXTEND,
        ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE, ISD::VP_SMIN,
        ISD::VP_SMAX, ISD::VP_UMIN, ISD::VP_UMAX,
        ISD::VP_ABS, ISD::EXPERIMENTAL_VP_REVERSE, ISD::EXPERIMENTAL_VP_SPLICE,
        ISD::VP_SADDSAT, ISD::VP_UADDSAT, ISD::VP_SSUBSAT,
        ISD::VP_USUBSAT, ISD::VP_CTTZ_ELTS, ISD::VP_CTTZ_ELTS_ZERO_UNDEF};

    static const unsigned FloatingPointVPOps[] = {
        ISD::VP_FADD, ISD::VP_FSUB, ISD::VP_FMUL,
        ISD::VP_FDIV, ISD::VP_FNEG, ISD::VP_FABS,
        ISD::VP_FMA, ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
        ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,
        ISD::VP_SELECT, ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP,
        ISD::VP_SETCC, ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND,
        ISD::VP_SQRT, ISD::VP_FMINNUM, ISD::VP_FMAXNUM,
        ISD::VP_FCEIL, ISD::VP_FFLOOR, ISD::VP_FROUND,
        ISD::VP_FROUNDEVEN, ISD::VP_FCOPYSIGN, ISD::VP_FROUNDTOZERO,
        ISD::VP_FRINT, ISD::VP_FNEARBYINT, ISD::VP_IS_FPCLASS,
        ISD::VP_FMINIMUM, ISD::VP_FMAXIMUM, ISD::VP_LRINT,
        ISD::VP_LLRINT, ISD::VP_REDUCE_FMINIMUM,
        ISD::VP_REDUCE_FMAXIMUM};

    static const unsigned IntegerVecReduceOps[] = {
        ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
        ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
        ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN};

    static const unsigned FloatingPointVecReduceOps[] = {
        ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_FMIN,
        ISD::VECREDUCE_FMAX, ISD::VECREDUCE_FMINIMUM, ISD::VECREDUCE_FMAXIMUM};

    static const unsigned FloatingPointLibCallOps[] = {
        ISD::FREM, ISD::FPOW, ISD::FCOS, ISD::FSIN, ISD::FSINCOS, ISD::FEXP,
        ISD::FEXP2, ISD::FEXP10, ISD::FLOG, ISD::FLOG2, ISD::FLOG10};

    if (!Subtarget.is64Bit()) {
      // We must custom-lower certain vXi64 operations on RV32 due to the
      // vector element type being illegal.
      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
                         MVT::i64, Custom);

      setOperationAction(IntegerVecReduceOps, MVT::i64, Custom);

      setOperationAction({ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
                          ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR,
                          ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
                          ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},
                         MVT::i64, Custom);
    }

    for (MVT VT : BoolVecVTs) {
      if (!isTypeLegal(VT))
        continue;

      setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);

      // Mask VTs are custom-expanded into a series of standard nodes
      setOperationAction({ISD::TRUNCATE, ISD::CONCAT_VECTORS,
                          ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR,
                          ISD::SCALAR_TO_VECTOR},
                         VT, Custom);

      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                         Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction({ISD::SELECT_CC, ISD::VSELECT, ISD::VP_SELECT}, VT,
                         Expand);
      setOperationAction(ISD::VP_MERGE, VT, Custom);

      setOperationAction({ISD::VP_CTTZ_ELTS, ISD::VP_CTTZ_ELTS_ZERO_UNDEF}, VT,
                         Custom);

      setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR}, VT, Custom);

      setOperationAction(
          {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
          Custom);

      setOperationAction(
          {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
          Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
                          ISD::FP_TO_UINT, ISD::STRICT_SINT_TO_FP,
                          ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_TO_SINT,
                          ISD::STRICT_FP_TO_UINT},
                         VT, Custom);
      setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
                         Custom);

      // Expand all extending loads to types larger than this, and truncating
      // stores from types larger than this.
      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(VT, OtherVT, Expand);
        setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT,
                         OtherVT, Expand);
      }

      setOperationAction({ISD::VP_FP_TO_SINT, ISD::VP_FP_TO_UINT,
                          ISD::VP_TRUNCATE, ISD::VP_SETCC},
                         VT, Custom);

      setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
      setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);

      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);

      setOperationAction(ISD::EXPERIMENTAL_VP_SPLICE, VT, Custom);
      setOperationAction(ISD::EXPERIMENTAL_VP_REVERSE, VT, Custom);

      setOperationPromotedToType(
          {ISD::VECTOR_SPLICE_LEFT, ISD::VECTOR_SPLICE_RIGHT}, VT,
          MVT::getVectorVT(MVT::i8, VT.getVectorElementCount()));
    }

    for (MVT VT : IntVecVTs) {
      if (!isTypeLegal(VT))
        continue;

      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);

      // Vectors implement MULHS/MULHU.
      setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);

      // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
      if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())
        setOperationAction({ISD::MULHU, ISD::MULHS}, VT, Expand);

      setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
                         Legal);

      setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);

      // Custom-lower extensions and truncations from/to mask types.
      setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
                         VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
                          ISD::FP_TO_UINT, ISD::STRICT_SINT_TO_FP,
                          ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_TO_SINT,
                          ISD::STRICT_FP_TO_UINT},
                         VT, Custom);
      setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
                         Custom);
      setOperationAction({ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS,
                          ISD::AVGCEILU, ISD::SADDSAT, ISD::UADDSAT,
                          ISD::SSUBSAT, ISD::USUBSAT},
                         VT, Legal);

      // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
      // nodes which truncate by one power of two at a time.
      setOperationAction(
          {ISD::TRUNCATE, ISD::TRUNCATE_SSAT_S, ISD::TRUNCATE_USAT_U}, VT,
          Custom);

      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                         Custom);

      // Custom-lower reduction operations to set up the corresponding custom
      // nodes' operands.
      setOperationAction(IntegerVecReduceOps, VT, Custom);

      setOperationAction(IntegerVPOps, VT, Custom);

      setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);

      setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
                         VT, Custom);

      setOperationAction(
          {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
           ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
          VT, Custom);
      setOperationAction(ISD::VP_LOAD_FF, VT, Custom);

      setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
                          ISD::EXTRACT_SUBVECTOR, ISD::SCALAR_TO_VECTOR},
                         VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction({ISD::STEP_VECTOR, ISD::VECTOR_REVERSE}, VT, Custom);

      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(VT, OtherVT, Expand);
        setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT,
                         OtherVT, Expand);
      }

      setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
      setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);

      setOperationAction({ISD::VECTOR_SPLICE_LEFT, ISD::VECTOR_SPLICE_RIGHT},
                         VT, Custom);

      if (Subtarget.hasStdExtZvkb()) {
        setOperationAction(ISD::BSWAP, VT, Legal);
        setOperationAction(ISD::VP_BSWAP, VT, Custom);
      } else {
        setOperationAction({ISD::BSWAP, ISD::VP_BSWAP}, VT, Expand);
        setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);
      }

      if (Subtarget.hasStdExtZvbb()) {
        setOperationAction(ISD::BITREVERSE, VT, Legal);
        setOperationAction(ISD::VP_BITREVERSE, VT, Custom);
        setOperationAction({ISD::VP_CTLZ, ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ,
                            ISD::VP_CTTZ_ZERO_UNDEF, ISD::VP_CTPOP},
                           VT, Custom);
      } else {
        setOperationAction({ISD::BITREVERSE, ISD::VP_BITREVERSE}, VT, Expand);
        setOperationAction({ISD::CTLZ, ISD::CTTZ, ISD::CTPOP}, VT, Expand);
        setOperationAction({ISD::VP_CTLZ, ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ,
                            ISD::VP_CTTZ_ZERO_UNDEF, ISD::VP_CTPOP},
                           VT, Expand);

        // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if the element type of VT
        // is in the range of f32.
        EVT FloatVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
        if (isTypeLegal(FloatVT)) {
          setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF,
                              ISD::CTTZ_ZERO_UNDEF, ISD::VP_CTLZ,
                              ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ_ZERO_UNDEF},
                             VT, Custom);
        }
      }

      if (Subtarget.hasStdExtZvbc() && VT.getVectorElementType() == MVT::i64)
        setOperationAction({ISD::CLMUL, ISD::CLMULH}, VT, Legal);

      setOperationAction(ISD::VECTOR_COMPRESS, VT, Custom);
    }

    for (MVT VT : VecTupleVTs) {
      if (!isTypeLegal(VT))
        continue;

      setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
    }

    // Expand various CCs to best match the RVV ISA, which natively supports UNE
    // but no other unordered comparisons, and supports all ordered comparisons
    // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
    // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
    // and we pattern-match those back to the "original", swapping operands once
    // more. This way we catch both operations and both "vf" and "fv" forms with
    // fewer patterns.
    static const ISD::CondCode VFPCCToExpand[] = {
        ISD::SETO, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
        ISD::SETGT, ISD::SETOGT, ISD::SETGE, ISD::SETOGE,
    };

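    // With only Zvfhmin/Zvfbfmin the vector unit can convert f16/bf16 to and
    // from f32 but has no arithmetic on those element types, so the operations
    // below are promoted to the corresponding f32 vector type.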
1126 // TODO: support more ops.
1127 static const unsigned ZvfhminZvfbfminPromoteOps[] = {
1128 ISD::FMINNUM,
1129 ISD::FMAXNUM,
1130 ISD::FMINIMUMNUM,
1131 ISD::FMAXIMUMNUM,
1132 ISD::FADD,
1133 ISD::FSUB,
1134 ISD::FMUL,
1135 ISD::FMA,
1136 ISD::FDIV,
1137 ISD::FSQRT,
1138 ISD::FCEIL,
1139 ISD::FTRUNC,
1140 ISD::FFLOOR,
1141 ISD::FROUND,
1142 ISD::FROUNDEVEN,
1143 ISD::FRINT,
1144 ISD::FNEARBYINT,
1145 ISD::IS_FPCLASS,
1146 ISD::SETCC,
1147 ISD::FMAXIMUM,
1148 ISD::FMINIMUM,
1149 ISD::STRICT_FADD,
1150 ISD::STRICT_FSUB,
1151 ISD::STRICT_FMUL,
1152 ISD::STRICT_FDIV,
1153 ISD::STRICT_FSQRT,
1154 ISD::STRICT_FMA,
1155 ISD::VECREDUCE_FMIN,
1156 ISD::VECREDUCE_FMAX,
1157 ISD::VECREDUCE_FMINIMUM,
1158 ISD::VECREDUCE_FMAXIMUM};
1159
1160 // TODO: Make more of these ops legal.
1161 static const unsigned ZvfbfaPromoteOps[] = {ISD::FDIV,
1162 ISD::FSQRT,
1163 ISD::FCEIL,
1164 ISD::FTRUNC,
1165 ISD::FFLOOR,
1166 ISD::FROUND,
1167 ISD::FROUNDEVEN,
1168 ISD::FRINT,
1169 ISD::FNEARBYINT,
1170 ISD::STRICT_FDIV,
1171 ISD::STRICT_FSQRT,
1172 ISD::VECREDUCE_FMIN,
1173 ISD::VECREDUCE_FMAX,
1174 ISD::VECREDUCE_FMINIMUM,
1175 ISD::VECREDUCE_FMAXIMUM};
1176
1177 // TODO: support more vp ops.
1178 static const unsigned ZvfhminZvfbfminPromoteVPOps[] = {
1179 ISD::VP_FADD,
1180 ISD::VP_FSUB,
1181 ISD::VP_FMUL,
1182 ISD::VP_FDIV,
1183 ISD::VP_FMA,
1184 ISD::VP_REDUCE_FMIN,
1185 ISD::VP_REDUCE_FMAX,
1186 ISD::VP_SQRT,
1187 ISD::VP_FMINNUM,
1188 ISD::VP_FMAXNUM,
1189 ISD::VP_FCEIL,
1190 ISD::VP_FFLOOR,
1191 ISD::VP_FROUND,
1192 ISD::VP_FROUNDEVEN,
1193 ISD::VP_FROUNDTOZERO,
1194 ISD::VP_FRINT,
1195 ISD::VP_FNEARBYINT,
1196 ISD::VP_SETCC,
1197 ISD::VP_FMINIMUM,
1198 ISD::VP_FMAXIMUM,
1199 ISD::VP_REDUCE_FMINIMUM,
1200 ISD::VP_REDUCE_FMAXIMUM};
1201
1202 // Sets common operation actions on RVV floating-point vector types.
1203 const auto SetCommonVFPActions = [&](MVT VT) {
1204 setOperationAction(Op: ISD::SPLAT_VECTOR, VT, Action: Legal);
1205 // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
1206 // sizes are within one power-of-two of each other. Therefore conversions
1207 // between vXf16 and vXf64 must be lowered as sequences which convert via
1208 // vXf32.
1209 setOperationAction(Ops: {ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Action: Custom);
1210 setOperationAction(Ops: {ISD::LRINT, ISD::LLRINT}, VT, Action: Custom);
1211 setOperationAction(Ops: {ISD::LROUND, ISD::LLROUND}, VT, Action: Custom);
1212 // Custom-lower insert/extract operations to simplify patterns.
1213 setOperationAction(Ops: {ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
1214 Action: Custom);
1215 // Expand various condition codes (explained above).
1216 setCondCodeAction(CCs: VFPCCToExpand, VT, Action: Expand);
1217
1218 setOperationAction(
1219 Ops: {ISD::FMINNUM, ISD::FMAXNUM, ISD::FMAXIMUMNUM, ISD::FMINIMUMNUM}, VT,
1220 Action: Legal);
1221 setOperationAction(Ops: {ISD::FMAXIMUM, ISD::FMINIMUM}, VT, Action: Custom);
1222
1223 setOperationAction(Ops: {ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND,
1224 ISD::FROUNDEVEN, ISD::FRINT, ISD::FNEARBYINT,
1225 ISD::IS_FPCLASS},
1226 VT, Action: Custom);
1227
1228 setOperationAction(Ops: FloatingPointVecReduceOps, VT, Action: Custom);
1229
1230 // Expand FP operations that need libcalls.
1231 setOperationAction(Ops: FloatingPointLibCallOps, VT, Action: Expand);
1232
1233 setOperationAction(Op: ISD::FCOPYSIGN, VT, Action: Legal);
1234
1235 setOperationAction(Ops: {ISD::LOAD, ISD::STORE}, VT, Action: Custom);
1236
1237 setOperationAction(Ops: {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
1238 VT, Action: Custom);
1239
1240 setOperationAction(
1241 Ops: {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
1242 ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
1243 VT, Action: Custom);
1244 setOperationAction(Op: ISD::VP_LOAD_FF, VT, Action: Custom);
1245
1246 setOperationAction(Op: ISD::SELECT, VT, Action: Custom);
1247 setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand);
1248
1249 setOperationAction(Ops: {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
1250 ISD::EXTRACT_SUBVECTOR, ISD::SCALAR_TO_VECTOR},
1251 VT, Action: Custom);
1252
1253 setOperationAction(Op: ISD::VECTOR_DEINTERLEAVE, VT, Action: Custom);
1254 setOperationAction(Op: ISD::VECTOR_INTERLEAVE, VT, Action: Custom);
1255
1256 setOperationAction(Ops: {ISD::VECTOR_REVERSE, ISD::VECTOR_SPLICE_LEFT,
1257 ISD::VECTOR_SPLICE_RIGHT},
1258 VT, Action: Custom);
1259 setOperationAction(Op: ISD::EXPERIMENTAL_VP_SPLICE, VT, Action: Custom);
1260 setOperationAction(Op: ISD::EXPERIMENTAL_VP_REVERSE, VT, Action: Custom);
1261
1262 setOperationAction(Ops: FloatingPointVPOps, VT, Action: Custom);
1263
1264 setOperationAction(Ops: {ISD::STRICT_FP_EXTEND, ISD::STRICT_FP_ROUND}, VT,
1265 Action: Custom);
1266 setOperationAction(Ops: {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
1267 ISD::STRICT_FDIV, ISD::STRICT_FSQRT, ISD::STRICT_FMA},
1268 VT, Action: Legal);
1269 setOperationAction(Ops: {ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS,
1270 ISD::STRICT_FTRUNC, ISD::STRICT_FCEIL,
1271 ISD::STRICT_FFLOOR, ISD::STRICT_FROUND,
1272 ISD::STRICT_FROUNDEVEN, ISD::STRICT_FNEARBYINT},
1273 VT, Action: Custom);
1274
1275 setOperationAction(Op: ISD::VECTOR_COMPRESS, VT, Action: Custom);
1276 };
1277
1278 // Sets common extload/truncstore actions on RVV floating-point vector
1279 // types.
1280 const auto SetCommonVFPExtLoadTruncStoreActions =
1281 [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
1282 for (auto SmallVT : SmallerVTs) {
1283 setTruncStoreAction(ValVT: VT, MemVT: SmallVT, Action: Expand);
1284 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: SmallVT, Action: Expand);
1285 }
1286 };
1287
1288 // Sets common actions for f16 and bf16 when only zvfhmin/zvfbfmin is
1289 // available and we need to promote to f32 for most operations.
1290 const auto SetCommonPromoteToF32Actions = [&](MVT VT) {
1291 setOperationAction(Ops: {ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Action: Custom);
1292 setOperationAction(Ops: {ISD::STRICT_FP_ROUND, ISD::STRICT_FP_EXTEND}, VT,
1293 Action: Custom);
1294 setOperationAction(Ops: {ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Action: Custom);
1295 setOperationAction(Ops: {ISD::LRINT, ISD::LLRINT}, VT, Action: Custom);
1296 setOperationAction(Ops: {ISD::LROUND, ISD::LLROUND}, VT, Action: Custom);
1297 setOperationAction(Ops: {ISD::VP_MERGE, ISD::VP_SELECT, ISD::SELECT}, VT,
1298 Action: Custom);
1299 setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand);
1300 setOperationAction(Ops: {ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP}, VT, Action: Custom);
1301 setOperationAction(Ops: {ISD::INSERT_VECTOR_ELT, ISD::CONCAT_VECTORS,
1302 ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR,
1303 ISD::VECTOR_DEINTERLEAVE, ISD::VECTOR_INTERLEAVE,
1304 ISD::VECTOR_REVERSE, ISD::VECTOR_SPLICE_LEFT,
1305 ISD::VECTOR_SPLICE_RIGHT, ISD::VECTOR_COMPRESS},
1306 VT, Action: Custom);
1307 setOperationAction(Op: ISD::EXPERIMENTAL_VP_SPLICE, VT, Action: Custom);
1308 setOperationAction(Op: ISD::EXPERIMENTAL_VP_REVERSE, VT, Action: Custom);
1309 MVT EltVT = VT.getVectorElementType();
1310 if (isTypeLegal(VT: EltVT))
1311 setOperationAction(Ops: {ISD::SPLAT_VECTOR, ISD::EXTRACT_VECTOR_ELT},
1312 VT, Action: Custom);
1313 else
1314 setOperationAction(Op: ISD::SPLAT_VECTOR, VT: EltVT, Action: Custom);
1315 setOperationAction(Ops: {ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
1316 ISD::MGATHER, ISD::MSCATTER, ISD::VP_LOAD,
1317 ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
1318 ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
1319 ISD::VP_SCATTER},
1320 VT, Action: Custom);
1321 setOperationAction(Op: ISD::VP_LOAD_FF, VT, Action: Custom);
1322
1323 setOperationAction(Op: ISD::FNEG, VT, Action: Expand);
1324 setOperationAction(Op: ISD::FABS, VT, Action: Expand);
1325 setOperationAction(Op: ISD::FCOPYSIGN, VT, Action: Expand);
1326
1327 // Expand FP operations that need libcalls.
1328 setOperationAction(Ops: FloatingPointLibCallOps, VT, Action: Expand);
1329
1330 // Custom split nxv32[b]f16 since nxv32f32 is not legal.
1331 if (getLMUL(VT) == RISCVVType::LMUL_8) {
1332 setOperationAction(Ops: ZvfhminZvfbfminPromoteOps, VT, Action: Custom);
1333 setOperationAction(Ops: ZvfhminZvfbfminPromoteVPOps, VT, Action: Custom);
1334 } else {
1335 MVT F32VecVT = MVT::getVectorVT(VT: MVT::f32, EC: VT.getVectorElementCount());
1336 setOperationPromotedToType(Ops: ZvfhminZvfbfminPromoteOps, OrigVT: VT, DestVT: F32VecVT);
1337 setOperationPromotedToType(Ops: ZvfhminZvfbfminPromoteVPOps, OrigVT: VT, DestVT: F32VecVT);
1338 }
1339 };
1340
1341 // Sets common actions for zvfbfa; some instructions are supported
1342 // natively, so we don't need to promote them.
1343 const auto SetZvfbfaActions = [&](MVT VT) {
1344 setOperationAction(Ops: {ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Action: Custom);
1345 setOperationAction(Ops: {ISD::STRICT_FP_ROUND, ISD::STRICT_FP_EXTEND}, VT,
1346 Action: Custom);
1347 setOperationAction(Ops: {ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Action: Custom);
1348 setOperationAction(Ops: {ISD::LRINT, ISD::LLRINT}, VT, Action: Custom);
1349 setOperationAction(Ops: {ISD::LROUND, ISD::LLROUND}, VT, Action: Custom);
1350 setOperationAction(Ops: {ISD::VP_MERGE, ISD::VP_SELECT, ISD::SELECT}, VT,
1351 Action: Custom);
1352 setOperationAction(Op: ISD::SELECT_CC, VT, Action: Expand);
1353 setOperationAction(Ops: {ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP}, VT, Action: Custom);
1354 setOperationAction(Ops: {ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
1355 ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
1356 ISD::EXTRACT_SUBVECTOR, ISD::VECTOR_DEINTERLEAVE,
1357 ISD::VECTOR_INTERLEAVE, ISD::VECTOR_REVERSE,
1358 ISD::VECTOR_SPLICE_LEFT, ISD::VECTOR_SPLICE_RIGHT,
1359 ISD::VECTOR_COMPRESS},
1360 VT, Action: Custom);
1361 setOperationAction(
1362 Ops: {ISD::FMINNUM, ISD::FMAXNUM, ISD::FMAXIMUMNUM, ISD::FMINIMUMNUM}, VT,
1363 Action: Legal);
1364 setOperationAction(Ops: {ISD::FMAXIMUM, ISD::FMINIMUM}, VT, Action: Custom);
1365 setOperationAction(Op: ISD::IS_FPCLASS, VT, Action: Custom);
1366 setOperationAction(Op: ISD::EXPERIMENTAL_VP_SPLICE, VT, Action: Custom);
1367 setOperationAction(Op: ISD::EXPERIMENTAL_VP_REVERSE, VT, Action: Custom);
1368
1369 setOperationAction(Op: ISD::FCOPYSIGN, VT, Action: Legal);
1370 setOperationAction(Op: ISD::SPLAT_VECTOR, VT, Action: Legal);
1371 setOperationAction(Ops: {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
1372 ISD::STRICT_FMA},
1373 VT, Action: Legal);
1374 setOperationAction(Ops: ZvfbfaVPOps, VT, Action: Custom);
1375 setCondCodeAction(CCs: VFPCCToExpand, VT, Action: Expand);
1376
1377 setOperationAction(Ops: {ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
1378 ISD::MGATHER, ISD::MSCATTER, ISD::VP_LOAD,
1379 ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
1380 ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
1381 ISD::VP_SCATTER},
1382 VT, Action: Custom);
1383 setOperationAction(Op: ISD::VP_LOAD_FF, VT, Action: Custom);
1384
1385 // Expand FP operations that need libcalls.
1386 setOperationAction(Ops: FloatingPointLibCallOps, VT, Action: Expand);
1387
1388 // Custom split nxv32[b]f16 since nxv32f32 is not legal.
1389 if (getLMUL(VT) == RISCVVType::LMUL_8) {
1390 setOperationAction(Ops: ZvfbfaPromoteOps, VT, Action: Custom);
1391 setOperationAction(Ops: ZvfhminZvfbfminPromoteVPOps, VT, Action: Custom);
1392 } else {
1393 MVT F32VecVT = MVT::getVectorVT(VT: MVT::f32, EC: VT.getVectorElementCount());
1394 setOperationPromotedToType(Ops: ZvfbfaPromoteOps, OrigVT: VT, DestVT: F32VecVT);
1395 setOperationPromotedToType(Ops: ZvfhminZvfbfminPromoteVPOps, OrigVT: VT, DestVT: F32VecVT);
1396 }
1397 };
1398
1399 if (Subtarget.hasVInstructionsF16()) {
1400 for (MVT VT : F16VecVTs) {
1401 if (!isTypeLegal(VT))
1402 continue;
1403 SetCommonVFPActions(VT);
1404 }
1405 } else if (Subtarget.hasVInstructionsF16Minimal()) {
1406 for (MVT VT : F16VecVTs) {
1407 if (!isTypeLegal(VT))
1408 continue;
1409 SetCommonPromoteToF32Actions(VT);
1410 }
1411 }
1412
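  // BF16 vector types: Zvfbfa provides native lowering for a subset of
  // operations, while a zvfbfmin-only configuration promotes most operations
  // to f32.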
1413 if (Subtarget.hasVInstructionsBF16()) {
1414 for (MVT VT : BF16VecVTs) {
1415 if (!isTypeLegal(VT))
1416 continue;
1417 SetZvfbfaActions(VT);
1418 }
1419 } else if (Subtarget.hasVInstructionsBF16Minimal()) {
1420 for (MVT VT : BF16VecVTs) {
1421 if (!isTypeLegal(VT))
1422 continue;
1423 SetCommonPromoteToF32Actions(VT);
1424 }
1425 }
1426
1427 if (Subtarget.hasVInstructionsF32()) {
1428 for (MVT VT : F32VecVTs) {
1429 if (!isTypeLegal(VT))
1430 continue;
1431 SetCommonVFPActions(VT);
1432 SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
1433 SetCommonVFPExtLoadTruncStoreActions(VT, BF16VecVTs);
1434 }
1435 }
1436
1437 if (Subtarget.hasVInstructionsF64()) {
1438 for (MVT VT : F64VecVTs) {
1439 if (!isTypeLegal(VT))
1440 continue;
1441 SetCommonVFPActions(VT);
1442 SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
1443 SetCommonVFPExtLoadTruncStoreActions(VT, BF16VecVTs);
1444 SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
1445 }
1446 }
1447
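  // Fixed-length vector types are not native to RVV. When enabled, mark most
  // of their operations Custom so they can be lowered by reusing the
  // scalable-vector (container) lowering.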
1448 if (Subtarget.useRVVForFixedLengthVectors()) {
1449 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
1450 if (!useRVVForFixedLengthVectorVT(VT))
1451 continue;
1452
1453 // By default everything must be expanded.
1454 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
1455 setOperationAction(Op, VT, Action: Expand);
1456 for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
1457 setTruncStoreAction(ValVT: VT, MemVT: OtherVT, Action: Expand);
1458 setLoadExtAction(ExtTypes: {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, ValVT: VT,
1459 MemVT: OtherVT, Action: Expand);
1460 }
1461
1462 // Custom lower fixed vector undefs to scalable vector undefs to avoid
1463 // expansion to a build_vector of 0s.
1464 setOperationAction(Op: ISD::UNDEF, VT, Action: Custom);
1465
1466 // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
1467 setOperationAction(Ops: {ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
1468 Action: Custom);
1469
1470 setOperationAction(
1471 Ops: {ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS, ISD::VECTOR_REVERSE}, VT,
1472 Action: Custom);
1473
1474 setOperationAction(Ops: {ISD::VECTOR_INTERLEAVE, ISD::VECTOR_DEINTERLEAVE},
1475 VT, Action: Custom);
1476
1477 setOperationAction(Ops: {ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
1478 VT, Action: Custom);
1479
1480 setOperationAction(Op: ISD::SCALAR_TO_VECTOR, VT, Action: Custom);
1481
1482 setOperationAction(Ops: {ISD::LOAD, ISD::STORE}, VT, Action: Custom);
1483
1484 setOperationAction(Op: ISD::SETCC, VT, Action: Custom);
1485
1486 setOperationAction(Op: ISD::SELECT, VT, Action: Custom);
1487
1488 setOperationAction(
1489 Ops: {ISD::TRUNCATE, ISD::TRUNCATE_SSAT_S, ISD::TRUNCATE_USAT_U}, VT,
1490 Action: Custom);
1491
1492 setOperationAction(Op: ISD::BITCAST, VT, Action: Custom);
1493
1494 setOperationAction(
1495 Ops: {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
1496 Action: Custom);
1497
1498 setOperationAction(
1499 Ops: {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
1500 Action: Custom);
1501
1502 setOperationAction(
1503 Ops: {
1504 ISD::SINT_TO_FP,
1505 ISD::UINT_TO_FP,
1506 ISD::FP_TO_SINT,
1507 ISD::FP_TO_UINT,
1508 ISD::STRICT_SINT_TO_FP,
1509 ISD::STRICT_UINT_TO_FP,
1510 ISD::STRICT_FP_TO_SINT,
1511 ISD::STRICT_FP_TO_UINT,
1512 },
1513 VT, Action: Custom);
1514 setOperationAction(Ops: {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
1515 Action: Custom);
1516
1517 setOperationAction(Op: ISD::VECTOR_SHUFFLE, VT, Action: Custom);
1518
1519 // Operations below are different between masks and other vectors.
1520 if (VT.getVectorElementType() == MVT::i1) {
1521 setOperationAction(Ops: {ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::AND,
1522 ISD::OR, ISD::XOR},
1523 VT, Action: Custom);
1524
1525 setOperationAction(Ops: {ISD::VP_FP_TO_SINT, ISD::VP_FP_TO_UINT,
1526 ISD::VP_SETCC, ISD::VP_TRUNCATE},
1527 VT, Action: Custom);
1528
1529 setOperationAction(Op: ISD::VP_MERGE, VT, Action: Custom);
1530
1531 setOperationAction(Op: ISD::EXPERIMENTAL_VP_SPLICE, VT, Action: Custom);
1532 setOperationAction(Op: ISD::EXPERIMENTAL_VP_REVERSE, VT, Action: Custom);
1533 continue;
1534 }
1535
1536 // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
1537 // it before type legalization for i64 vectors on RV32. It will then be
1538 // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
1539 // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
1540 // improvements first.
1541 if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
1542 setOperationAction(Op: ISD::SPLAT_VECTOR, VT, Action: Legal);
1543 setOperationAction(Op: ISD::SPLAT_VECTOR_PARTS, VT, Action: Custom);
1544
1545 // Lower BUILD_VECTOR with i64 type to VID on RV32 if possible.
1546 setOperationAction(Op: ISD::BUILD_VECTOR, VT: MVT::i64, Action: Custom);
1547 }
1548
1549 setOperationAction(
1550 Ops: {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Action: Custom);
1551
1552 setOperationAction(Ops: {ISD::VP_LOAD, ISD::VP_STORE,
1553 ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
1554 ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
1555 ISD::VP_SCATTER},
1556 VT, Action: Custom);
1557 setOperationAction(Op: ISD::VP_LOAD_FF, VT, Action: Custom);
1558
1559 setOperationAction(Ops: {ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
1560 ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
1561 ISD::UREM, ISD::SHL, ISD::SRA, ISD::SRL},
1562 VT, Action: Custom);
1563
1564 setOperationAction(
1565 Ops: {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Action: Custom);
1566
1567 setOperationAction(Ops: {ISD::ABDS, ISD::ABDU}, VT, Action: Custom);
1568
1569 // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
1570 if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
1571 setOperationAction(Ops: {ISD::MULHS, ISD::MULHU}, VT, Action: Custom);
1572
1573 setOperationAction(Ops: {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS,
1574 ISD::AVGCEILU, ISD::SADDSAT, ISD::UADDSAT,
1575 ISD::SSUBSAT, ISD::USUBSAT},
1576 VT, Action: Custom);
1577
1578 setOperationAction(Op: ISD::VSELECT, VT, Action: Custom);
1579
1580 setOperationAction(
1581 Ops: {ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, VT, Action: Custom);
1582
1583 // Custom-lower reduction operations to set up the corresponding custom
1584 // nodes' operands.
1585 setOperationAction(Ops: {ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX,
1586 ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX,
1587 ISD::VECREDUCE_UMIN},
1588 VT, Action: Custom);
1589
1590 setOperationAction(Ops: IntegerVPOps, VT, Action: Custom);
1591
1592 if (Subtarget.hasStdExtZvkb())
1593 setOperationAction(Ops: {ISD::BSWAP, ISD::ROTL, ISD::ROTR}, VT, Action: Custom);
1594
1595 if (Subtarget.hasStdExtZvbb()) {
1596 setOperationAction(Ops: {ISD::BITREVERSE, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF,
1597 ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTPOP},
1598 VT, Action: Custom);
1599 } else {
1600 // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if the element type of VT
1601 // is in the range of f32.
1602 EVT FloatVT = MVT::getVectorVT(VT: MVT::f32, EC: VT.getVectorElementCount());
1603 if (isTypeLegal(VT: FloatVT))
1604 setOperationAction(
1605 Ops: {ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
1606 Action: Custom);
1607 }
1608
1609 setOperationAction(Op: ISD::VECTOR_COMPRESS, VT, Action: Custom);
1610 }
1611
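    // Floating-point fixed-length vector types are handled similarly: default
    // everything to Expand, then selectively mark operations Custom below.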
1612 for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
1613 // There are no extending loads or truncating stores.
1614 for (MVT InnerVT : MVT::fp_fixedlen_vector_valuetypes()) {
1615 setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: InnerVT, Action: Expand);
1616 setTruncStoreAction(ValVT: VT, MemVT: InnerVT, Action: Expand);
1617 }
1618
1619 if (!useRVVForFixedLengthVectorVT(VT))
1620 continue;
1621
1622 // By default everything must be expanded.
1623 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
1624 setOperationAction(Op, VT, Action: Expand);
1625
1626 // Custom lower fixed vector undefs to scalable vector undefs to avoid
1627 // expansion to a build_vector of 0s.
1628 setOperationAction(Op: ISD::UNDEF, VT, Action: Custom);
1629
1630 setOperationAction(Ops: {ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
1631 ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
1632 ISD::EXTRACT_SUBVECTOR, ISD::VECTOR_REVERSE,
1633 ISD::VECTOR_SHUFFLE, ISD::VECTOR_COMPRESS},
1634 VT, Action: Custom);
1635 setOperationAction(Op: ISD::EXPERIMENTAL_VP_SPLICE, VT, Action: Custom);
1636 setOperationAction(Op: ISD::EXPERIMENTAL_VP_REVERSE, VT, Action: Custom);
1637
1638 setOperationAction(Ops: {ISD::VECTOR_INTERLEAVE, ISD::VECTOR_DEINTERLEAVE},
1639 VT, Action: Custom);
1640
1641 setOperationAction(Ops: {ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
1642 ISD::MGATHER, ISD::MSCATTER},
1643 VT, Action: Custom);
1644 setOperationAction(Ops: {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER,
1645 ISD::VP_SCATTER, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
1646 ISD::EXPERIMENTAL_VP_STRIDED_STORE},
1647 VT, Action: Custom);
1648 setOperationAction(Op: ISD::VP_LOAD_FF, VT, Action: Custom);
1649
1650 setOperationAction(Ops: {ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Action: Custom);
1651 setOperationAction(Ops: {ISD::STRICT_FP_ROUND, ISD::STRICT_FP_EXTEND}, VT,
1652 Action: Custom);
1653
1654 setOperationAction(Op: ISD::BITCAST, VT, Action: Custom);
1655
1656 if (VT.getVectorElementType() == MVT::f16 &&
1657 !Subtarget.hasVInstructionsF16()) {
1658 setOperationAction(Ops: {ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Action: Custom);
1659 setOperationAction(
1660 Ops: {ISD::VP_MERGE, ISD::VP_SELECT, ISD::VSELECT, ISD::SELECT}, VT,
1661 Action: Custom);
1662 setOperationAction(Ops: {ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP}, VT,
1663 Action: Custom);
1664 setOperationAction(Ops: {ISD::LRINT, ISD::LLRINT}, VT, Action: Custom);
1665 setOperationAction(Ops: {ISD::LROUND, ISD::LLROUND}, VT, Action: Custom);
1666 if (Subtarget.hasStdExtZfhmin()) {
1667 setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom);
1668 } else {
1669 // We need to custom legalize f16 build vectors if Zfhmin isn't
1670 // available.
1671 setOperationAction(Op: ISD::BUILD_VECTOR, VT: MVT::f16, Action: Custom);
1672 }
1673 setOperationAction(Op: ISD::FNEG, VT, Action: Expand);
1674 setOperationAction(Op: ISD::FABS, VT, Action: Expand);
1675 setOperationAction(Op: ISD::FCOPYSIGN, VT, Action: Expand);
1676 MVT F32VecVT = MVT::getVectorVT(VT: MVT::f32, EC: VT.getVectorElementCount());
1677 // Don't promote f16 vector operations to f32 if the f32 vector type is
1678 // not legal.
1679 // TODO: could split the f16 vector into two vectors and do promotion.
1680 if (!isTypeLegal(VT: F32VecVT))
1681 continue;
1682 setOperationPromotedToType(Ops: ZvfhminZvfbfminPromoteOps, OrigVT: VT, DestVT: F32VecVT);
1683 setOperationPromotedToType(Ops: ZvfhminZvfbfminPromoteVPOps, OrigVT: VT, DestVT: F32VecVT);
1684 continue;
1685 }
1686
1687 if (VT.getVectorElementType() == MVT::bf16) {
1688 setOperationAction(Ops: {ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Action: Custom);
1689 setOperationAction(Ops: {ISD::LRINT, ISD::LLRINT}, VT, Action: Custom);
1690 setOperationAction(Ops: {ISD::LROUND, ISD::LLROUND}, VT, Action: Custom);
1691 if (Subtarget.hasStdExtZfbfmin()) {
1692 setOperationAction(Op: ISD::BUILD_VECTOR, VT, Action: Custom);
1693 } else {
1694 // We need to custom legalize bf16 build vectors if Zfbfmin isn't
1695 // available.
1696 setOperationAction(Op: ISD::BUILD_VECTOR, VT: MVT::bf16, Action: Custom);
1697 }
1698 if (Subtarget.hasStdExtZvfbfa()) {
1699 setOperationAction(Ops: ZvfbfaOps, VT, Action: Custom);
1700 setOperationAction(Ops: ZvfbfaVPOps, VT, Action: Custom);
1701 setCondCodeAction(CCs: VFPCCToExpand, VT, Action: Expand);
1702 }
1703 setOperationAction(
1704 Ops: {ISD::VP_MERGE, ISD::VP_SELECT, ISD::VSELECT, ISD::SELECT}, VT,
1705 Action: Custom);
1706 MVT F32VecVT = MVT::getVectorVT(VT: MVT::f32, EC: VT.getVectorElementCount());
1707 // Don't promote bf16 vector operations to f32 if the f32 vector type is
1708 // not legal.
1709 // TODO: could split the bf16 vector into two vectors and do promotion.
1710 if (!isTypeLegal(VT: F32VecVT))
1711 continue;
1712
1713 if (Subtarget.hasStdExtZvfbfa())
1714 setOperationPromotedToType(Ops: ZvfbfaPromoteOps, OrigVT: VT, DestVT: F32VecVT);
1715 else
1716 setOperationPromotedToType(Ops: ZvfhminZvfbfminPromoteOps, OrigVT: VT, DestVT: F32VecVT);
1717 setOperationPromotedToType(Ops: ZvfhminZvfbfminPromoteVPOps, OrigVT: VT, DestVT: F32VecVT);
1718 continue;
1719 }
1720
1721 setOperationAction(Ops: {ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR}, VT,
1722 Action: Custom);
1723
1724 setOperationAction(Ops: {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
1725 ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
1726 ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM,
1727 ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM, ISD::IS_FPCLASS,
1728 ISD::FMAXIMUM, ISD::FMINIMUM},
1729 VT, Action: Custom);
1730
1731 setOperationAction(Ops: {ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND,
1732 ISD::FROUNDEVEN, ISD::FRINT, ISD::LRINT,
1733 ISD::LLRINT, ISD::LROUND, ISD::LLROUND,
1734 ISD::FNEARBYINT},
1735 VT, Action: Custom);
1736
1737 setCondCodeAction(CCs: VFPCCToExpand, VT, Action: Expand);
1738
1739 setOperationAction(Op: ISD::SETCC, VT, Action: Custom);
1740 setOperationAction(Ops: {ISD::VSELECT, ISD::SELECT}, VT, Action: Custom);
1741
1742 setOperationAction(Ops: FloatingPointVecReduceOps, VT, Action: Custom);
1743
1744 setOperationAction(Ops: FloatingPointVPOps, VT, Action: Custom);
1745
1746 setOperationAction(
1747 Ops: {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
1748 ISD::STRICT_FDIV, ISD::STRICT_FSQRT, ISD::STRICT_FMA,
1749 ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, ISD::STRICT_FTRUNC,
1750 ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR, ISD::STRICT_FROUND,
1751 ISD::STRICT_FROUNDEVEN, ISD::STRICT_FNEARBYINT},
1752 VT, Action: Custom);
1753 }
1754
1755 // Custom-legalize bitcasts from fixed-length vectors to scalar types.
1756 setOperationAction(Ops: ISD::BITCAST, VTs: {MVT::i8, MVT::i16, MVT::i32}, Action: Custom);
1757 if (Subtarget.is64Bit())
1758 setOperationAction(Op: ISD::BITCAST, VT: MVT::i64, Action: Custom);
1759 if (Subtarget.hasStdExtZfhminOrZhinxmin())
1760 setOperationAction(Op: ISD::BITCAST, VT: MVT::f16, Action: Custom);
1761 if (Subtarget.hasStdExtZfbfmin())
1762 setOperationAction(Op: ISD::BITCAST, VT: MVT::bf16, Action: Custom);
1763 if (Subtarget.hasStdExtFOrZfinx())
1764 setOperationAction(Op: ISD::BITCAST, VT: MVT::f32, Action: Custom);
1765 if (Subtarget.hasStdExtDOrZdinx())
1766 setOperationAction(Op: ISD::BITCAST, VT: MVT::f64, Action: Custom);
1767 }
1768 }
1769
1770 if (Subtarget.hasStdExtZaamo())
1771 setOperationAction(Op: ISD::ATOMIC_LOAD_SUB, VT: XLenVT, Action: Expand);
1772
1773 if (Subtarget.hasForcedAtomics()) {
1774 // Force __sync libcalls to be emitted for atomic rmw/cas operations.
1775 setOperationAction(
1776 Ops: {ISD::ATOMIC_CMP_SWAP, ISD::ATOMIC_SWAP, ISD::ATOMIC_LOAD_ADD,
1777 ISD::ATOMIC_LOAD_SUB, ISD::ATOMIC_LOAD_AND, ISD::ATOMIC_LOAD_OR,
1778 ISD::ATOMIC_LOAD_XOR, ISD::ATOMIC_LOAD_NAND, ISD::ATOMIC_LOAD_MIN,
1779 ISD::ATOMIC_LOAD_MAX, ISD::ATOMIC_LOAD_UMIN, ISD::ATOMIC_LOAD_UMAX},
1780 VT: XLenVT, Action: LibCall);
1781 }
1782
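  // The T-Head memory-indexing extension provides pre/post-increment
  // addressing for scalar integer loads and stores.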
1783 if (Subtarget.hasVendorXTHeadMemIdx()) {
1784 for (unsigned im : {ISD::PRE_INC, ISD::POST_INC}) {
1785 setIndexedLoadAction(IdxModes: im, VT: MVT::i8, Action: Legal);
1786 setIndexedStoreAction(IdxModes: im, VT: MVT::i8, Action: Legal);
1787 setIndexedLoadAction(IdxModes: im, VT: MVT::i16, Action: Legal);
1788 setIndexedStoreAction(IdxModes: im, VT: MVT::i16, Action: Legal);
1789 setIndexedLoadAction(IdxModes: im, VT: MVT::i32, Action: Legal);
1790 setIndexedStoreAction(IdxModes: im, VT: MVT::i32, Action: Legal);
1791
1792 if (Subtarget.is64Bit()) {
1793 setIndexedLoadAction(IdxModes: im, VT: MVT::i64, Action: Legal);
1794 setIndexedStoreAction(IdxModes: im, VT: MVT::i64, Action: Legal);
1795 }
1796 }
1797 }
1798
1799 if (Subtarget.hasVendorXCVmem() && !Subtarget.is64Bit()) {
1800 setIndexedLoadAction(IdxModes: ISD::POST_INC, VT: MVT::i8, Action: Legal);
1801 setIndexedLoadAction(IdxModes: ISD::POST_INC, VT: MVT::i16, Action: Legal);
1802 setIndexedLoadAction(IdxModes: ISD::POST_INC, VT: MVT::i32, Action: Legal);
1803
1804 setIndexedStoreAction(IdxModes: ISD::POST_INC, VT: MVT::i8, Action: Legal);
1805 setIndexedStoreAction(IdxModes: ISD::POST_INC, VT: MVT::i16, Action: Legal);
1806 setIndexedStoreAction(IdxModes: ISD::POST_INC, VT: MVT::i32, Action: Legal);
1807 }
1808
1809 // zve32x is broken for partial_reduce_umla, but let's not make it worse.
1810 if (Subtarget.hasStdExtZvqdotq() && Subtarget.getELen() >= 64) {
1811 static const unsigned MLAOps[] = {ISD::PARTIAL_REDUCE_SMLA,
1812 ISD::PARTIAL_REDUCE_UMLA,
1813 ISD::PARTIAL_REDUCE_SUMLA};
1814 setPartialReduceMLAAction(Opcodes: MLAOps, AccVT: MVT::nxv1i32, InputVT: MVT::nxv4i8, Action: Custom);
1815 setPartialReduceMLAAction(Opcodes: MLAOps, AccVT: MVT::nxv2i32, InputVT: MVT::nxv8i8, Action: Custom);
1816 setPartialReduceMLAAction(Opcodes: MLAOps, AccVT: MVT::nxv4i32, InputVT: MVT::nxv16i8, Action: Custom);
1817 setPartialReduceMLAAction(Opcodes: MLAOps, AccVT: MVT::nxv8i32, InputVT: MVT::nxv32i8, Action: Custom);
1818 setPartialReduceMLAAction(Opcodes: MLAOps, AccVT: MVT::nxv16i32, InputVT: MVT::nxv64i8, Action: Custom);
1819
1820 if (Subtarget.useRVVForFixedLengthVectors()) {
1821 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
1822 if (VT.getVectorElementType() != MVT::i32 ||
1823 !useRVVForFixedLengthVectorVT(VT))
1824 continue;
1825 ElementCount EC = VT.getVectorElementCount();
1826 MVT ArgVT = MVT::getVectorVT(VT: MVT::i8, EC: EC.multiplyCoefficientBy(RHS: 4));
1827 setPartialReduceMLAAction(Opcodes: MLAOps, AccVT: VT, InputVT: ArgVT, Action: Custom);
1828 }
1829 }
1830 }
1831
1832 // Custom-lower load and store operations for bf16 if Zfh isn't enabled.
1833 if (Subtarget.hasVendorXAndesBFHCvt() && !Subtarget.hasStdExtZfh()) {
1834 setOperationAction(Op: ISD::LOAD, VT: MVT::bf16, Action: Custom);
1835 setOperationAction(Op: ISD::STORE, VT: MVT::bf16, Action: Custom);
1836 }
1837
1838 // Function alignments.
1839 const Align FunctionAlignment(Subtarget.hasStdExtZca() ? 2 : 4);
1840 setMinFunctionAlignment(FunctionAlignment);
1841 // Set preferred alignments.
1842 setPrefFunctionAlignment(Subtarget.getPrefFunctionAlignment());
1843 setPrefLoopAlignment(Subtarget.getPrefLoopAlignment());
1844
1845 setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN,
1846 ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::MUL,
1847 ISD::AND, ISD::OR, ISD::XOR, ISD::SETCC, ISD::SELECT});
1848 setTargetDAGCombine(ISD::SRA);
1849 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1850
1851 if (Subtarget.hasStdExtFOrZfinx())
1852 setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM, ISD::FMUL});
1853
1854 if (Subtarget.hasStdExtZbb())
1855 setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});
1856
1857 if ((Subtarget.hasStdExtZbs() && Subtarget.is64Bit()) ||
1858 Subtarget.hasVInstructions())
1859 setTargetDAGCombine(ISD::TRUNCATE);
1860
1861 if (Subtarget.hasStdExtZbkb())
1862 setTargetDAGCombine(ISD::BITREVERSE);
1863
1864 if (Subtarget.hasStdExtFOrZfinx())
1865 setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1866 ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
1867 if (Subtarget.hasVInstructions())
1868 setTargetDAGCombine(
1869 {ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
1870 ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA,
1871 ISD::SRL, ISD::SHL, ISD::STORE,
1872 ISD::SPLAT_VECTOR, ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
1873 ISD::VP_STORE, ISD::VP_TRUNCATE, ISD::EXPERIMENTAL_VP_REVERSE,
1874 ISD::MUL, ISD::SDIV, ISD::UDIV,
1875 ISD::SREM, ISD::UREM, ISD::INSERT_VECTOR_ELT,
1876 ISD::ABS, ISD::CTPOP, ISD::VECTOR_SHUFFLE,
1877 ISD::FMA, ISD::VSELECT, ISD::VECREDUCE_ADD});
1878
1879 if (Subtarget.hasVendorXTHeadMemPair())
1880 setTargetDAGCombine({ISD::LOAD, ISD::STORE});
1881 if (Subtarget.useRVVForFixedLengthVectors())
1882 setTargetDAGCombine(ISD::BITCAST);
1883
1884 setMaxDivRemBitWidthSupported(Subtarget.is64Bit() ? 128 : 64);
1885
1886 // Disable strict node mutation.
1887 IsStrictFPEnabled = true;
1888 EnableExtLdPromotion = true;
1889
1890 // Let the subtarget decide if a predictable select is more expensive than the
1891 // corresponding branch. This information is used in CGP/SelectOpt to decide
1892 // when to convert selects into branches.
1893 PredictableSelectIsExpensive = Subtarget.predictableSelectIsExpensive();
1894
1895 MaxStoresPerMemsetOptSize = Subtarget.getMaxStoresPerMemset(/*OptSize=*/true);
1896 MaxStoresPerMemset = Subtarget.getMaxStoresPerMemset(/*OptSize=*/false);
1897
1898 MaxGluedStoresPerMemcpy = Subtarget.getMaxGluedStoresPerMemcpy();
1899 MaxStoresPerMemcpyOptSize = Subtarget.getMaxStoresPerMemcpy(/*OptSize=*/true);
1900 MaxStoresPerMemcpy = Subtarget.getMaxStoresPerMemcpy(/*OptSize=*/false);
1901
1902 MaxStoresPerMemmoveOptSize =
1903 Subtarget.getMaxStoresPerMemmove(/*OptSize=*/true);
1904 MaxStoresPerMemmove = Subtarget.getMaxStoresPerMemmove(/*OptSize=*/false);
1905
1906 MaxLoadsPerMemcmpOptSize = Subtarget.getMaxLoadsPerMemcmp(/*OptSize=*/true);
1907 MaxLoadsPerMemcmp = Subtarget.getMaxLoadsPerMemcmp(/*OptSize=*/false);
1908}
1909
1910TargetLoweringBase::LegalizeTypeAction
1911RISCVTargetLowering::getPreferredVectorAction(MVT VT) const {
1912 if (Subtarget.is64Bit() && Subtarget.enablePExtSIMDCodeGen())
1913 if (VT == MVT::v2i16 || VT == MVT::v4i8)
1914 return TypeWidenVector;
1915
1916 return TargetLoweringBase::getPreferredVectorAction(VT);
1917}
1918
1919EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
1920 LLVMContext &Context,
1921 EVT VT) const {
1922 if (!VT.isVector())
1923 return getPointerTy(DL);
1924 if (Subtarget.hasVInstructions() &&
1925 (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1926 return EVT::getVectorVT(Context, VT: MVT::i1, EC: VT.getVectorElementCount());
1927 return VT.changeVectorElementTypeToInteger();
1928}
1929
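// The explicit vector length (EVL) operand of VP operations uses XLenVT,
// matching the GPR-sized AVL operand consumed by vsetvli.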
1930MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1931 return Subtarget.getXLenVT();
1932}
1933
1934// Return false if we can lower get_vector_length to a vsetvli intrinsic.
1935bool RISCVTargetLowering::shouldExpandGetVectorLength(EVT TripCountVT,
1936 unsigned VF,
1937 bool IsScalable) const {
1938 if (!Subtarget.hasVInstructions())
1939 return true;
1940
1941 if (!IsScalable)
1942 return true;
1943
1944 if (TripCountVT != MVT::i32 && TripCountVT != Subtarget.getXLenVT())
1945 return true;
1946
1947 // Don't allow VF=1 if those types aren't legal.
1948 if (VF < RISCV::RVVBitsPerBlock / Subtarget.getELen())
1949 return true;
1950
1951 // VLEN=32 support is incomplete.
1952 if (Subtarget.getRealMinVLen() < RISCV::RVVBitsPerBlock)
1953 return true;
1954
1955 // The maximum VF is for the smallest element width with LMUL=8.
1956 // VF must be a power of 2.
1957 unsigned MaxVF = RISCV::RVVBytesPerBlock * 8;
1958 return VF > MaxVF || !isPowerOf2_32(Value: VF);
1959}
1960
1961bool RISCVTargetLowering::shouldExpandCttzElements(EVT VT) const {
1962 return !Subtarget.hasVInstructions() ||
1963 VT.getVectorElementType() != MVT::i1 || !isTypeLegal(VT);
1964}
1965
1966void RISCVTargetLowering::getTgtMemIntrinsic(
1967 SmallVectorImpl<IntrinsicInfo> &Infos, const CallBase &I,
1968 MachineFunction &MF, unsigned Intrinsic) const {
1969 IntrinsicInfo Info;
1970 auto &DL = I.getDataLayout();
1971
1972 auto SetRVVLoadStoreInfo = [&](unsigned PtrOp, bool IsStore,
1973 bool IsUnitStrided, bool UsePtrVal = false) {
1974 Info.opc = IsStore ? ISD::INTRINSIC_VOID : ISD::INTRINSIC_W_CHAIN;
1975 // We can't use ptrVal if the intrinsic can access memory before the
1976 // pointer. This means we can't use it for strided or indexed intrinsics.
1977 if (UsePtrVal)
1978 Info.ptrVal = I.getArgOperand(i: PtrOp);
1979 else
1980 Info.fallbackAddressSpace =
1981 I.getArgOperand(i: PtrOp)->getType()->getPointerAddressSpace();
1982 Type *MemTy;
1983 if (IsStore) {
1984 // Store value is the first operand.
1985 MemTy = I.getArgOperand(i: 0)->getType();
1986 } else {
1987 // Use the return type. If it's a segment load, the return type is a struct.
1988 MemTy = I.getType();
1989 if (MemTy->isStructTy())
1990 MemTy = MemTy->getStructElementType(N: 0);
1991 }
1992 if (!IsUnitStrided)
1993 MemTy = MemTy->getScalarType();
1994
1995 Info.memVT = getValueType(DL, Ty: MemTy);
1996 if (MemTy->isTargetExtTy()) {
1997 // RISC-V vector tuple type's alignment type should be its element type.
1998 if (cast<TargetExtType>(Val: MemTy)->getName() == "riscv.vector.tuple")
1999 MemTy = Type::getIntNTy(
2000 C&: MemTy->getContext(),
2001 N: 1 << cast<ConstantInt>(Val: I.getArgOperand(i: I.arg_size() - 1))
2002 ->getZExtValue());
2003 Info.align = DL.getABITypeAlign(Ty: MemTy);
2004 } else {
2005 Info.align = Align(DL.getTypeStoreSize(Ty: MemTy->getScalarType()));
2006 }
2007 Info.size = MemoryLocation::UnknownSize;
2008 Info.flags |=
2009 IsStore ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
2010 Infos.push_back(Elt: Info);
2011 };
2012
2013 if (I.hasMetadata(KindID: LLVMContext::MD_nontemporal))
2014 Info.flags |= MachineMemOperand::MONonTemporal;
2015
2016 Info.flags |= RISCVTargetLowering::getTargetMMOFlags(I);
2017 switch (Intrinsic) {
2018 default:
2019 return;
2020 case Intrinsic::riscv_masked_atomicrmw_xchg:
2021 case Intrinsic::riscv_masked_atomicrmw_add:
2022 case Intrinsic::riscv_masked_atomicrmw_sub:
2023 case Intrinsic::riscv_masked_atomicrmw_nand:
2024 case Intrinsic::riscv_masked_atomicrmw_max:
2025 case Intrinsic::riscv_masked_atomicrmw_min:
2026 case Intrinsic::riscv_masked_atomicrmw_umax:
2027 case Intrinsic::riscv_masked_atomicrmw_umin:
2028 case Intrinsic::riscv_masked_cmpxchg:
2029 // riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
2030 // narrow atomic operation. These will be expanded to an LR/SC loop that
2031 // reads/writes to/from an aligned 4 byte location. And, or, shift, etc.
2032 // will be used to modify the appropriate part of the 4 byte data and
2033 // preserve the rest.
2034 Info.opc = ISD::INTRINSIC_W_CHAIN;
2035 Info.memVT = MVT::i32;
2036 Info.ptrVal = I.getArgOperand(i: 0);
2037 Info.offset = 0;
2038 Info.align = Align(4);
2039 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
2040 MachineMemOperand::MOVolatile;
2041 Infos.push_back(Elt: Info);
2042 return;
2043 case Intrinsic::riscv_seg2_load_mask:
2044 case Intrinsic::riscv_seg3_load_mask:
2045 case Intrinsic::riscv_seg4_load_mask:
2046 case Intrinsic::riscv_seg5_load_mask:
2047 case Intrinsic::riscv_seg6_load_mask:
2048 case Intrinsic::riscv_seg7_load_mask:
2049 case Intrinsic::riscv_seg8_load_mask:
2050 case Intrinsic::riscv_sseg2_load_mask:
2051 case Intrinsic::riscv_sseg3_load_mask:
2052 case Intrinsic::riscv_sseg4_load_mask:
2053 case Intrinsic::riscv_sseg5_load_mask:
2054 case Intrinsic::riscv_sseg6_load_mask:
2055 case Intrinsic::riscv_sseg7_load_mask:
2056 case Intrinsic::riscv_sseg8_load_mask:
2057 SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
2058 /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
2059 return;
2060 case Intrinsic::riscv_seg2_store_mask:
2061 case Intrinsic::riscv_seg3_store_mask:
2062 case Intrinsic::riscv_seg4_store_mask:
2063 case Intrinsic::riscv_seg5_store_mask:
2064 case Intrinsic::riscv_seg6_store_mask:
2065 case Intrinsic::riscv_seg7_store_mask:
2066 case Intrinsic::riscv_seg8_store_mask:
2067 // Operands are (vec, ..., vec, ptr, mask, vl)
2068 SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3,
2069 /*IsStore*/ true,
2070 /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
2071 return;
2072 case Intrinsic::riscv_sseg2_store_mask:
2073 case Intrinsic::riscv_sseg3_store_mask:
2074 case Intrinsic::riscv_sseg4_store_mask:
2075 case Intrinsic::riscv_sseg5_store_mask:
2076 case Intrinsic::riscv_sseg6_store_mask:
2077 case Intrinsic::riscv_sseg7_store_mask:
2078 case Intrinsic::riscv_sseg8_store_mask:
2079 // Operands are (vec, ..., vec, ptr, offset, mask, vl)
2080 SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4,
2081 /*IsStore*/ true,
2082 /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
2083 return;
2084 case Intrinsic::riscv_vlm:
2085 SetRVVLoadStoreInfo(/*PtrOp*/ 0,
2086 /*IsStore*/ false,
2087 /*IsUnitStrided*/ true,
2088 /*UsePtrVal*/ true);
2089 return;
2090 case Intrinsic::riscv_vle:
2091 case Intrinsic::riscv_vle_mask:
2092 case Intrinsic::riscv_vleff:
2093 case Intrinsic::riscv_vleff_mask:
2094 SetRVVLoadStoreInfo(/*PtrOp*/ 1,
2095 /*IsStore*/ false,
2096 /*IsUnitStrided*/ true,
2097 /*UsePtrVal*/ true);
2098 return;
2099 case Intrinsic::riscv_vsm:
2100 case Intrinsic::riscv_vse:
2101 case Intrinsic::riscv_vse_mask:
2102 SetRVVLoadStoreInfo(/*PtrOp*/ 1,
2103 /*IsStore*/ true,
2104 /*IsUnitStrided*/ true,
2105 /*UsePtrVal*/ true);
2106 return;
2107 case Intrinsic::riscv_vlse:
2108 case Intrinsic::riscv_vlse_mask:
2109 case Intrinsic::riscv_vloxei:
2110 case Intrinsic::riscv_vloxei_mask:
2111 case Intrinsic::riscv_vluxei:
2112 case Intrinsic::riscv_vluxei_mask:
2113 SetRVVLoadStoreInfo(/*PtrOp*/ 1,
2114 /*IsStore*/ false,
2115 /*IsUnitStrided*/ false);
2116 return;
2117 case Intrinsic::riscv_vsse:
2118 case Intrinsic::riscv_vsse_mask:
2119 case Intrinsic::riscv_vsoxei:
2120 case Intrinsic::riscv_vsoxei_mask:
2121 case Intrinsic::riscv_vsuxei:
2122 case Intrinsic::riscv_vsuxei_mask:
2123 SetRVVLoadStoreInfo(/*PtrOp*/ 1,
2124 /*IsStore*/ true,
2125 /*IsUnitStrided*/ false);
2126 return;
2127 case Intrinsic::riscv_vlseg2:
2128 case Intrinsic::riscv_vlseg3:
2129 case Intrinsic::riscv_vlseg4:
2130 case Intrinsic::riscv_vlseg5:
2131 case Intrinsic::riscv_vlseg6:
2132 case Intrinsic::riscv_vlseg7:
2133 case Intrinsic::riscv_vlseg8:
2134 case Intrinsic::riscv_vlseg2ff:
2135 case Intrinsic::riscv_vlseg3ff:
2136 case Intrinsic::riscv_vlseg4ff:
2137 case Intrinsic::riscv_vlseg5ff:
2138 case Intrinsic::riscv_vlseg6ff:
2139 case Intrinsic::riscv_vlseg7ff:
2140 case Intrinsic::riscv_vlseg8ff:
2141 SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3,
2142 /*IsStore*/ false,
2143 /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
2144 return;
2145 case Intrinsic::riscv_vlseg2_mask:
2146 case Intrinsic::riscv_vlseg3_mask:
2147 case Intrinsic::riscv_vlseg4_mask:
2148 case Intrinsic::riscv_vlseg5_mask:
2149 case Intrinsic::riscv_vlseg6_mask:
2150 case Intrinsic::riscv_vlseg7_mask:
2151 case Intrinsic::riscv_vlseg8_mask:
2152 case Intrinsic::riscv_vlseg2ff_mask:
2153 case Intrinsic::riscv_vlseg3ff_mask:
2154 case Intrinsic::riscv_vlseg4ff_mask:
2155 case Intrinsic::riscv_vlseg5ff_mask:
2156 case Intrinsic::riscv_vlseg6ff_mask:
2157 case Intrinsic::riscv_vlseg7ff_mask:
2158 case Intrinsic::riscv_vlseg8ff_mask:
2159 SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 5,
2160 /*IsStore*/ false,
2161 /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
2162 return;
2163 case Intrinsic::riscv_vlsseg2:
2164 case Intrinsic::riscv_vlsseg3:
2165 case Intrinsic::riscv_vlsseg4:
2166 case Intrinsic::riscv_vlsseg5:
2167 case Intrinsic::riscv_vlsseg6:
2168 case Intrinsic::riscv_vlsseg7:
2169 case Intrinsic::riscv_vlsseg8:
2170 case Intrinsic::riscv_vloxseg2:
2171 case Intrinsic::riscv_vloxseg3:
2172 case Intrinsic::riscv_vloxseg4:
2173 case Intrinsic::riscv_vloxseg5:
2174 case Intrinsic::riscv_vloxseg6:
2175 case Intrinsic::riscv_vloxseg7:
2176 case Intrinsic::riscv_vloxseg8:
2177 case Intrinsic::riscv_vluxseg2:
2178 case Intrinsic::riscv_vluxseg3:
2179 case Intrinsic::riscv_vluxseg4:
2180 case Intrinsic::riscv_vluxseg5:
2181 case Intrinsic::riscv_vluxseg6:
2182 case Intrinsic::riscv_vluxseg7:
2183 case Intrinsic::riscv_vluxseg8:
2184 SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4,
2185 /*IsStore*/ false,
2186 /*IsUnitStrided*/ false);
2187 return;
2188 case Intrinsic::riscv_vlsseg2_mask:
2189 case Intrinsic::riscv_vlsseg3_mask:
2190 case Intrinsic::riscv_vlsseg4_mask:
2191 case Intrinsic::riscv_vlsseg5_mask:
2192 case Intrinsic::riscv_vlsseg6_mask:
2193 case Intrinsic::riscv_vlsseg7_mask:
2194 case Intrinsic::riscv_vlsseg8_mask:
2195 case Intrinsic::riscv_vloxseg2_mask:
2196 case Intrinsic::riscv_vloxseg3_mask:
2197 case Intrinsic::riscv_vloxseg4_mask:
2198 case Intrinsic::riscv_vloxseg5_mask:
2199 case Intrinsic::riscv_vloxseg6_mask:
2200 case Intrinsic::riscv_vloxseg7_mask:
2201 case Intrinsic::riscv_vloxseg8_mask:
2202 case Intrinsic::riscv_vluxseg2_mask:
2203 case Intrinsic::riscv_vluxseg3_mask:
2204 case Intrinsic::riscv_vluxseg4_mask:
2205 case Intrinsic::riscv_vluxseg5_mask:
2206 case Intrinsic::riscv_vluxseg6_mask:
2207 case Intrinsic::riscv_vluxseg7_mask:
2208 case Intrinsic::riscv_vluxseg8_mask:
2209 SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 6,
2210 /*IsStore*/ false,
2211 /*IsUnitStrided*/ false);
2212 return;
2213 case Intrinsic::riscv_vsseg2:
2214 case Intrinsic::riscv_vsseg3:
2215 case Intrinsic::riscv_vsseg4:
2216 case Intrinsic::riscv_vsseg5:
2217 case Intrinsic::riscv_vsseg6:
2218 case Intrinsic::riscv_vsseg7:
2219 case Intrinsic::riscv_vsseg8:
2220 SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3,
2221 /*IsStore*/ true,
2222 /*IsUnitStrided*/ false);
2223 return;
2224 case Intrinsic::riscv_vsseg2_mask:
2225 case Intrinsic::riscv_vsseg3_mask:
2226 case Intrinsic::riscv_vsseg4_mask:
2227 case Intrinsic::riscv_vsseg5_mask:
2228 case Intrinsic::riscv_vsseg6_mask:
2229 case Intrinsic::riscv_vsseg7_mask:
2230 case Intrinsic::riscv_vsseg8_mask:
2231 SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4,
2232 /*IsStore*/ true,
2233 /*IsUnitStrided*/ false);
2234 return;
2235 case Intrinsic::riscv_vssseg2:
2236 case Intrinsic::riscv_vssseg3:
2237 case Intrinsic::riscv_vssseg4:
2238 case Intrinsic::riscv_vssseg5:
2239 case Intrinsic::riscv_vssseg6:
2240 case Intrinsic::riscv_vssseg7:
2241 case Intrinsic::riscv_vssseg8:
2242 case Intrinsic::riscv_vsoxseg2:
2243 case Intrinsic::riscv_vsoxseg3:
2244 case Intrinsic::riscv_vsoxseg4:
2245 case Intrinsic::riscv_vsoxseg5:
2246 case Intrinsic::riscv_vsoxseg6:
2247 case Intrinsic::riscv_vsoxseg7:
2248 case Intrinsic::riscv_vsoxseg8:
2249 case Intrinsic::riscv_vsuxseg2:
2250 case Intrinsic::riscv_vsuxseg3:
2251 case Intrinsic::riscv_vsuxseg4:
2252 case Intrinsic::riscv_vsuxseg5:
2253 case Intrinsic::riscv_vsuxseg6:
2254 case Intrinsic::riscv_vsuxseg7:
2255 case Intrinsic::riscv_vsuxseg8:
2256 SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4,
2257 /*IsStore*/ true,
2258 /*IsUnitStrided*/ false);
2259 return;
2260 case Intrinsic::riscv_vssseg2_mask:
2261 case Intrinsic::riscv_vssseg3_mask:
2262 case Intrinsic::riscv_vssseg4_mask:
2263 case Intrinsic::riscv_vssseg5_mask:
2264 case Intrinsic::riscv_vssseg6_mask:
2265 case Intrinsic::riscv_vssseg7_mask:
2266 case Intrinsic::riscv_vssseg8_mask:
2267 case Intrinsic::riscv_vsoxseg2_mask:
2268 case Intrinsic::riscv_vsoxseg3_mask:
2269 case Intrinsic::riscv_vsoxseg4_mask:
2270 case Intrinsic::riscv_vsoxseg5_mask:
2271 case Intrinsic::riscv_vsoxseg6_mask:
2272 case Intrinsic::riscv_vsoxseg7_mask:
2273 case Intrinsic::riscv_vsoxseg8_mask:
2274 case Intrinsic::riscv_vsuxseg2_mask:
2275 case Intrinsic::riscv_vsuxseg3_mask:
2276 case Intrinsic::riscv_vsuxseg4_mask:
2277 case Intrinsic::riscv_vsuxseg5_mask:
2278 case Intrinsic::riscv_vsuxseg6_mask:
2279 case Intrinsic::riscv_vsuxseg7_mask:
2280 case Intrinsic::riscv_vsuxseg8_mask:
2281 SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 5,
2282 /*IsStore*/ true,
2283 /*IsUnitStrided*/ false);
2284 return;
2285 case Intrinsic::riscv_sf_vlte8:
2286 case Intrinsic::riscv_sf_vlte16:
2287 case Intrinsic::riscv_sf_vlte32:
2288 case Intrinsic::riscv_sf_vlte64:
2289 Info.opc = ISD::INTRINSIC_VOID;
2290 Info.ptrVal = I.getArgOperand(i: 1);
2291 switch (Intrinsic) {
2292 case Intrinsic::riscv_sf_vlte8:
2293 Info.memVT = MVT::i8;
2294 Info.align = Align(1);
2295 break;
2296 case Intrinsic::riscv_sf_vlte16:
2297 Info.memVT = MVT::i16;
2298 Info.align = Align(2);
2299 break;
2300 case Intrinsic::riscv_sf_vlte32:
2301 Info.memVT = MVT::i32;
2302 Info.align = Align(4);
2303 break;
2304 case Intrinsic::riscv_sf_vlte64:
2305 Info.memVT = MVT::i64;
2306 Info.align = Align(8);
2307 break;
2308 }
2309 Info.size = MemoryLocation::UnknownSize;
2310 Info.flags |= MachineMemOperand::MOLoad;
2311 Infos.push_back(Elt: Info);
2312 return;
2313 case Intrinsic::riscv_sf_vste8:
2314 case Intrinsic::riscv_sf_vste16:
2315 case Intrinsic::riscv_sf_vste32:
2316 case Intrinsic::riscv_sf_vste64:
2317 Info.opc = ISD::INTRINSIC_VOID;
2318 Info.ptrVal = I.getArgOperand(i: 1);
2319 switch (Intrinsic) {
2320 case Intrinsic::riscv_sf_vste8:
2321 Info.memVT = MVT::i8;
2322 Info.align = Align(1);
2323 break;
2324 case Intrinsic::riscv_sf_vste16:
2325 Info.memVT = MVT::i16;
2326 Info.align = Align(2);
2327 break;
2328 case Intrinsic::riscv_sf_vste32:
2329 Info.memVT = MVT::i32;
2330 Info.align = Align(4);
2331 break;
2332 case Intrinsic::riscv_sf_vste64:
2333 Info.memVT = MVT::i64;
2334 Info.align = Align(8);
2335 break;
2336 }
2337 Info.size = MemoryLocation::UnknownSize;
2338 Info.flags |= MachineMemOperand::MOStore;
2339 Infos.push_back(Elt: Info);
2340 return;
2341 }
2342}
2343
2344bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
2345 const AddrMode &AM, Type *Ty,
2346 unsigned AS,
2347 Instruction *I) const {
2348 // No global is ever allowed as a base.
2349 if (AM.BaseGV)
2350 return false;
2351
2352 // None of our addressing modes allows a scalable offset
2353 if (AM.ScalableOffset)
2354 return false;
2355
2356 // RVV instructions only support register addressing.
2357 if (Subtarget.hasVInstructions() && isa<VectorType>(Val: Ty))
2358 return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;
2359
2360 // Require a 12-bit signed offset.
2361 if (!isInt<12>(x: AM.BaseOffs))
2362 return false;
2363
2364 switch (AM.Scale) {
2365 case 0: // "r+i" or just "i", depending on HasBaseReg.
2366 break;
2367 case 1:
2368 if (!AM.HasBaseReg) // allow "r+i".
2369 break;
2370 return false; // disallow "r+r" or "r+r+i".
2371 default:
2372 return false;
2373 }
2374
2375 return true;
2376}
2377
2378bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
2379 return isInt<12>(x: Imm);
2380}
2381
2382bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
2383 return isInt<12>(x: Imm);
2384}
2385
2386// On RV32, 64-bit integers are split into their high and low parts and held
2387// in two different registers, so the trunc is free since the low register can
2388// just be used.
2389// FIXME: Should we consider i64->i32 free on RV64 to match the EVT version of
2390// isTruncateFree?
2391bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
2392 if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
2393 return false;
2394 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
2395 unsigned DestBits = DstTy->getPrimitiveSizeInBits();
2396 return (SrcBits == 64 && DestBits == 32);
2397}
2398
2399bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
2400 // We consider i64->i32 free on RV64 since we have good selection of W
2401 // instructions that make promoting operations back to i64 free in many cases.
2402 if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() ||
2403 !DstVT.isInteger())
2404 return false;
2405 unsigned SrcBits = SrcVT.getSizeInBits();
2406 unsigned DestBits = DstVT.getSizeInBits();
2407 return (SrcBits == 64 && DestBits == 32);
2408}
2409
2410bool RISCVTargetLowering::isTruncateFree(SDValue Val, EVT VT2) const {
2411 EVT SrcVT = Val.getValueType();
2412 // Truncates are free if they can be folded into a vnsrl or vnsra.
2413 if (Subtarget.hasVInstructions() &&
2414 (Val.getOpcode() == ISD::SRL || Val.getOpcode() == ISD::SRA) &&
2415 SrcVT.isVector() && VT2.isVector()) {
2416 unsigned SrcBits = SrcVT.getVectorElementType().getSizeInBits();
2417 unsigned DestBits = VT2.getVectorElementType().getSizeInBits();
2418 if (SrcBits == DestBits * 2) {
2419 return true;
2420 }
2421 }
2422 return TargetLowering::isTruncateFree(Val, VT2);
2423}
2424
2425bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
2426 // Zexts are free if they can be combined with a load.
2427 // Don't advertise i32->i64 zextload as being free for RV64. It interacts
2428 // poorly with type legalization of compares preferring sext.
2429 if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
2430 EVT MemVT = LD->getMemoryVT();
2431 if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
2432 (LD->getExtensionType() == ISD::NON_EXTLOAD ||
2433 LD->getExtensionType() == ISD::ZEXTLOAD))
2434 return true;
2435 }
2436
2437 return TargetLowering::isZExtFree(Val, VT2);
2438}
2439
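// RV64 arithmetic implicitly sign-extends 32-bit results, so sign extending
// i32 to i64 is effectively free.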
2440bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
2441 return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
2442}
2443
2444bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
2445 return Subtarget.is64Bit() && CI->getType()->isIntegerTy(Bitwidth: 32);
2446}
2447
2448bool RISCVTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
2449 return Subtarget.hasCTZLike();
2450}
2451
2452bool RISCVTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
2453 return Subtarget.hasCLZLike();
2454}
2455
2456bool RISCVTargetLowering::isMaskAndCmp0FoldingBeneficial(
2457 const Instruction &AndI) const {
2458 // We expect to be able to match a bit extraction instruction if the Zbs
2459 // extension is supported and the mask is a power of two. However, we
2460 // conservatively return false if the mask would fit in an ANDI instruction,
2461 // on the basis that it's possible the sinking+duplication of the AND in
2462 // CodeGenPrepare triggered by this hook wouldn't decrease the instruction
2463 // count and would increase code size (e.g. ANDI+BNEZ => BEXTI+BNEZ).
2464 if (!Subtarget.hasBEXTILike())
2465 return false;
2466 ConstantInt *Mask = dyn_cast<ConstantInt>(Val: AndI.getOperand(i: 1));
2467 if (!Mask)
2468 return false;
2469 return !Mask->getValue().isSignedIntN(N: 12) && Mask->getValue().isPowerOf2();
2470}
2471
2472bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
2473 EVT VT = Y.getValueType();
2474
2475 if (VT.isVector())
2476 return false;
2477
2478 return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) &&
2479 (!isa<ConstantSDNode>(Val: Y) || cast<ConstantSDNode>(Val&: Y)->isOpaque());
2480}
2481
2482bool RISCVTargetLowering::hasAndNot(SDValue Y) const {
2483 EVT VT = Y.getValueType();
2484
2485 if (!VT.isVector())
2486 return hasAndNotCompare(Y);
2487
2488 return Subtarget.hasStdExtZvkb();
2489}
2490
2491bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
2492 // Zbs provides BEXT[_I], which can be used with SEQZ/SNEZ as a bit test.
2493 if (Subtarget.hasStdExtZbs())
2494 return X.getValueType().isScalarInteger();
2495 auto *C = dyn_cast<ConstantSDNode>(Val&: Y);
2496 // XTheadBs provides th.tst (similar to bexti) if Y is a constant.
2497 if (Subtarget.hasVendorXTHeadBs())
2498 return C != nullptr;
2499 // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
2500 return C && C->getAPIntValue().ule(RHS: 10);
2501}
2502
2503bool RISCVTargetLowering::shouldFoldSelectWithIdentityConstant(
2504 unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X,
2505 SDValue Y) const {
2506 if (SelectOpcode != ISD::VSELECT)
2507 return false;
2508
2509 // Only enable for rvv.
2510 if (!VT.isVector() || !Subtarget.hasVInstructions())
2511 return false;
2512
2513 if (VT.isFixedLengthVector() && !isTypeLegal(VT))
2514 return false;
2515
2516 return true;
2517}
2518
2519bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
2520 Type *Ty) const {
2521 assert(Ty->isIntegerTy());
2522
2523 unsigned BitSize = Ty->getIntegerBitWidth();
2524 if (BitSize > Subtarget.getXLen())
2525 return false;
2526
2527 // Fast path, assume 32-bit immediates are cheap.
2528 int64_t Val = Imm.getSExtValue();
2529 if (isInt<32>(x: Val))
2530 return true;
2531
2532 // A constant pool entry may be more aligned than the load we're trying to
2533 // replace. If we don't support unaligned scalar mem, prefer the constant
2534 // pool.
2535 // TODO: Can the caller pass down the alignment?
2536 if (!Subtarget.enableUnalignedScalarMem())
2537 return true;
2538
2539 // Prefer to keep the load if it would require many instructions.
2540 // This uses the same threshold we use for constant pools but doesn't
2541 // check useConstantPoolForLargeInts.
2542 // TODO: Should we keep the load only when we're definitely going to emit a
2543 // constant pool?
2544
2545 RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI: Subtarget);
2546 return Seq.size() <= Subtarget.getMaxBuildIntsCost();
2547}
2548
2549bool RISCVTargetLowering::
2550 shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
2551 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
2552 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
2553 SelectionDAG &DAG) const {
2554 // One interesting pattern that we'd want to form is 'bit extract':
2555 // ((1 >> Y) & 1) ==/!= 0
2556 // But we also need to be careful not to try to reverse that fold.
2557
2558 // Is this '((1 >> Y) & 1)'?
2559 if (XC && OldShiftOpcode == ISD::SRL && XC->isOne())
2560 return false; // Keep the 'bit extract' pattern.
2561
2562 // Will this be '((1 >> Y) & 1)' after the transform?
2563 if (NewShiftOpcode == ISD::SRL && CC->isOne())
2564 return true; // Do form the 'bit extract' pattern.
2565
2566 // If 'X' is a constant, and we transform, then we will immediately
2567 // try to undo the fold, thus causing endless combine loop.
2568 // So only do the transform if X is not a constant. This matches the default
2569 // implementation of this function.
2570 return !XC;
2571}
2572
2573bool RISCVTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
2574 unsigned Opc = VecOp.getOpcode();
2575
2576 // Assume target opcodes can't be scalarized.
2577 // TODO - do we have any exceptions?
2578 if (Opc >= ISD::BUILTIN_OP_END || !isBinOp(Opcode: Opc))
2579 return false;
2580
2581 // If the vector op is not supported, try to convert to scalar.
2582 EVT VecVT = VecOp.getValueType();
2583 if (!isOperationLegalOrCustomOrPromote(Op: Opc, VT: VecVT))
2584 return true;
2585
2586 // If the vector op is supported, but the scalar op is not, the transform may
2587 // not be worthwhile.
2588 // Permit converting a vector binary operation to a scalar binary
2589 // operation that is custom lowered with an illegal type.
2590 EVT ScalarVT = VecVT.getScalarType();
2591 return isOperationLegalOrCustomOrPromote(Op: Opc, VT: ScalarVT) ||
2592 isOperationCustom(Op: Opc, VT: ScalarVT);
2593}
2594
2595bool RISCVTargetLowering::isOffsetFoldingLegal(
2596 const GlobalAddressSDNode *GA) const {
2597 // In order to maximise the opportunity for common subexpression elimination,
2598 // keep a separate ADD node for the global address offset instead of folding
2599 // it in the global address node. Later peephole optimisations may choose to
2600 // fold it back in when profitable.
2601 return false;
2602}
2603
2604// Returns 0-31 if the fli instruction is available for the type and this is a
2605// legal FP immediate for the type. Returns -1 otherwise.
2606int RISCVTargetLowering::getLegalZfaFPImm(const APFloat &Imm, EVT VT) const {
2607 if (!Subtarget.hasStdExtZfa())
2608 return -1;
2609
2610 bool IsSupportedVT = false;
2611 if (VT == MVT::f16) {
2612 IsSupportedVT = Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZvfh();
2613 } else if (VT == MVT::f32) {
2614 IsSupportedVT = true;
2615 } else if (VT == MVT::f64) {
2616 assert(Subtarget.hasStdExtD() && "Expect D extension");
2617 IsSupportedVT = true;
2618 }
2619
2620 if (!IsSupportedVT)
2621 return -1;
2622
2623 return RISCVLoadFPImm::getLoadFPImm(FPImm: Imm);
2624}
2625
2626bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
2627 bool ForCodeSize) const {
2628 bool IsLegalVT = false;
2629 if (VT == MVT::f16)
2630 IsLegalVT = Subtarget.hasStdExtZfhminOrZhinxmin();
2631 else if (VT == MVT::f32)
2632 IsLegalVT = Subtarget.hasStdExtFOrZfinx();
2633 else if (VT == MVT::f64)
2634 IsLegalVT = Subtarget.hasStdExtDOrZdinx();
2635 else if (VT == MVT::bf16)
2636 IsLegalVT = Subtarget.hasStdExtZfbfmin();
2637
2638 if (!IsLegalVT)
2639 return false;
2640
2641 if (getLegalZfaFPImm(Imm, VT) >= 0)
2642 return true;
2643
2644 // Some constants can be produced by fli+fneg.
2645 if (Imm.isNegative() && getLegalZfaFPImm(Imm: -Imm, VT) >= 0)
2646 return true;
2647
2648 // Cannot create a 64 bit floating-point immediate value for rv32.
2649 if (Subtarget.getXLen() < VT.getScalarSizeInBits()) {
2650 // td can handle +0.0 or -0.0 already.
2651 // -0.0 can be created by fmv + fneg.
2652 return Imm.isZero();
2653 }
2654
2655 // Special case: fmv + fneg
2656 if (Imm.isNegZero())
2657 return true;
2658
2659 // Building an integer and then converting requires a fmv at the end of
2660 // the integer sequence. The fmv is not required for Zfinx.
2661 const int FmvCost = Subtarget.hasStdExtZfinx() ? 0 : 1;
2662 const int Cost =
2663 FmvCost + RISCVMatInt::getIntMatCost(Val: Imm.bitcastToAPInt(),
2664 Size: Subtarget.getXLen(), STI: Subtarget);
2665 return Cost <= FPImmCost;
2666}
2667
2668// TODO: This is very conservative.
2669bool RISCVTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
2670 unsigned Index) const {
2671 if (!isOperationLegalOrCustom(Op: ISD::EXTRACT_SUBVECTOR, VT: ResVT))
2672 return false;
2673
2674 // Extracts from index 0 are just subreg extracts.
2675 if (Index == 0)
2676 return true;
2677
2678 // Only support extracting a fixed-length vector from a fixed-length vector for now.
2679 if (ResVT.isScalableVector() || SrcVT.isScalableVector())
2680 return false;
2681
2682 EVT EltVT = ResVT.getVectorElementType();
2683 assert(EltVT == SrcVT.getVectorElementType() && "Should hold for node");
2684
2685 // The smallest type we can slide is i8.
2686 // TODO: We can extract index 0 from a mask vector without a slide.
2687 if (EltVT == MVT::i1)
2688 return false;
2689
2690 unsigned ResElts = ResVT.getVectorNumElements();
2691 unsigned SrcElts = SrcVT.getVectorNumElements();
2692
2693 unsigned MinVLen = Subtarget.getRealMinVLen();
2694 unsigned MinVLMAX = MinVLen / EltVT.getSizeInBits();
2695
2696 // If we're extracting only data from the first VLEN bits of the source
2697 // then we can always do this with an m1 vslidedown.vx. Restricting the
2698 // Index ensures we can use a vslidedown.vi.
2699 // TODO: We can generalize this when the exact VLEN is known.
2700 if (Index + ResElts <= MinVLMAX && Index < 31)
2701 return true;
2702
2703 // Conservatively only handle extracting half of a vector.
2704 // TODO: We can do arbitrary slidedowns, but for now only support extracting
2705 // the upper half of a vector until we have more test coverage.
2706 // TODO: For sizes which aren't multiples of VLEN sizes, this may not be
2707 // a cheap extract. However, this case is important in practice for
2708 // shuffled extracts of longer vectors. How should we resolve this?
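// For example, extracting the high <4 x i32> half of a <8 x i32> source
// (Index == 4) is accepted by the check below.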
2709 return (ResElts * 2) == SrcElts && Index == ResElts;
2710}
2711
2712MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
2713 CallingConv::ID CC,
2714 EVT VT) const {
2715 // Use f32 to pass f16 if it is legal and Zfh/Zfhmin is not enabled.
2716 // We might still end up using a GPR but that will be decided based on ABI.
2717 if (VT == MVT::f16 && Subtarget.hasStdExtFOrZfinx() &&
2718 !Subtarget.hasStdExtZfhminOrZhinxmin())
2719 return MVT::f32;
2720
2721 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
2722}
2723
2724unsigned
2725RISCVTargetLowering::getNumRegisters(LLVMContext &Context, EVT VT,
2726 std::optional<MVT> RegisterVT) const {
2727 // Pair inline assembly operand
2728 if (VT == (Subtarget.is64Bit() ? MVT::i128 : MVT::i64) && RegisterVT &&
2729 *RegisterVT == MVT::Untyped)
2730 return 1;
2731
2732 return TargetLowering::getNumRegisters(Context, VT, RegisterVT);
2733}
2734
2735unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
2736 CallingConv::ID CC,
2737 EVT VT) const {
2738 // Use f32 to pass f16 if it is legal and Zfh/Zfhmin is not enabled.
2739 // We might still end up using a GPR but that will be decided based on ABI.
2740 if (VT == MVT::f16 && Subtarget.hasStdExtFOrZfinx() &&
2741 !Subtarget.hasStdExtZfhminOrZhinxmin())
2742 return 1;
2743
2744 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
2745}
2746
2747// Changes the condition code and swaps operands if necessary, so the SetCC
2748// operation matches one of the comparisons supported directly by branches
2749// in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
2750// with 1/-1.
2751static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
2752 ISD::CondCode &CC, SelectionDAG &DAG,
2753 const RISCVSubtarget &Subtarget) {
2754 // If this is a single bit test that can't be handled by ANDI, shift the
2755 // bit to be tested to the MSB and perform a signed compare with 0.
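// For example, on RV64 (X & 0x1000) == 0 becomes (X << 51) >= 0, moving bit
// 12 into the sign bit.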
2756 if (isIntEqualitySetCC(Code: CC) && isNullConstant(V: RHS) &&
2757 LHS.getOpcode() == ISD::AND && LHS.hasOneUse() &&
2758 isa<ConstantSDNode>(Val: LHS.getOperand(i: 1)) &&
2759 // XAndesPerf supports branch on test bit.
2760 !Subtarget.hasVendorXAndesPerf()) {
2761 uint64_t Mask = LHS.getConstantOperandVal(i: 1);
2762 if ((isPowerOf2_64(Value: Mask) || isMask_64(Value: Mask)) && !isInt<12>(x: Mask)) {
2763 unsigned ShAmt = 0;
2764 if (isPowerOf2_64(Value: Mask)) {
2765 CC = CC == ISD::SETEQ ? ISD::SETGE : ISD::SETLT;
2766 ShAmt = LHS.getValueSizeInBits() - 1 - Log2_64(Value: Mask);
2767 } else {
2768 ShAmt = LHS.getValueSizeInBits() - llvm::bit_width(Value: Mask);
2769 }
2770
2771 LHS = LHS.getOperand(i: 0);
2772 if (ShAmt != 0)
2773 LHS = DAG.getNode(Opcode: ISD::SHL, DL, VT: LHS.getValueType(), N1: LHS,
2774 N2: DAG.getConstant(Val: ShAmt, DL, VT: LHS.getValueType()));
2775 return;
2776 }
2777 }
2778
2779 if (auto *RHSC = dyn_cast<ConstantSDNode>(Val&: RHS)) {
2780 int64_t C = RHSC->getSExtValue();
2781 switch (CC) {
2782 default: break;
2783 case ISD::SETGT:
2784 // Convert X > -1 to X >= 0.
2785 if (C == -1) {
2786 RHS = DAG.getConstant(Val: 0, DL, VT: RHS.getValueType());
2787 CC = ISD::SETGE;
2788 return;
2789 }
2790 if ((Subtarget.hasVendorXqcicm() || Subtarget.hasVendorXqcicli()) &&
2791 C != INT64_MAX && isInt<5>(x: C + 1)) {
2792 // We have a conditional move instruction for SETGE but not SETGT.
2793 // Convert X > C to X >= C + 1, if (C + 1) is a 5-bit signed immediate.
2794 RHS = DAG.getSignedConstant(Val: C + 1, DL, VT: RHS.getValueType());
2795 CC = ISD::SETGE;
2796 return;
2797 }
2798 if (Subtarget.hasVendorXqcibi() && C != INT64_MAX && isInt<16>(x: C + 1)) {
2799 // We have a branch immediate instruction for SETGE but not SETGT.
2800 // Convert X > C to X >= C + 1, if (C + 1) is a 16-bit signed immediate.
2801 RHS = DAG.getSignedConstant(Val: C + 1, DL, VT: RHS.getValueType());
2802 CC = ISD::SETGE;
2803 return;
2804 }
2805 break;
2806 case ISD::SETLT:
2807 // Convert X < 1 to 0 >= X.
2808 if (C == 1) {
2809 RHS = LHS;
2810 LHS = DAG.getConstant(Val: 0, DL, VT: RHS.getValueType());
2811 CC = ISD::SETGE;
2812 return;
2813 }
2814 break;
2815 case ISD::SETUGT:
2816 if ((Subtarget.hasVendorXqcicm() || Subtarget.hasVendorXqcicli()) &&
2817 C != INT64_MAX && isUInt<5>(x: C + 1)) {
2818 // We have a conditional move instruction for SETUGE but not SETUGT.
2819 // Convert X > C to X >= C + 1, if (C + 1) is a 5-bit unsigned immediate.
2820 RHS = DAG.getConstant(Val: C + 1, DL, VT: RHS.getValueType());
2821 CC = ISD::SETUGE;
2822 return;
2823 }
2824 if (Subtarget.hasVendorXqcibi() && C != INT64_MAX && isUInt<16>(x: C + 1)) {
2825 // We have a branch immediate instruction for SETUGE but not SETUGT.
2826 // Convert X > C to X >= C + 1, if (C + 1) is a 16-bit unsigned
2827 // immediate.
2828 RHS = DAG.getConstant(Val: C + 1, DL, VT: RHS.getValueType());
2829 CC = ISD::SETUGE;
2830 return;
2831 }
2832 break;
2833 }
2834 }
2835
2836 switch (CC) {
2837 default:
2838 break;
2839 case ISD::SETGT:
2840 case ISD::SETLE:
2841 case ISD::SETUGT:
2842 case ISD::SETULE:
2843 CC = ISD::getSetCCSwappedOperands(Operation: CC);
2844 std::swap(a&: LHS, b&: RHS);
2845 break;
2846 }
2847}
2848
2849RISCVVType::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
2850 if (VT.isRISCVVectorTuple()) {
2851 if (VT.SimpleTy >= MVT::riscv_nxv1i8x2 &&
2852 VT.SimpleTy <= MVT::riscv_nxv1i8x8)
2853 return RISCVVType::LMUL_F8;
2854 if (VT.SimpleTy >= MVT::riscv_nxv2i8x2 &&
2855 VT.SimpleTy <= MVT::riscv_nxv2i8x8)
2856 return RISCVVType::LMUL_F4;
2857 if (VT.SimpleTy >= MVT::riscv_nxv4i8x2 &&
2858 VT.SimpleTy <= MVT::riscv_nxv4i8x8)
2859 return RISCVVType::LMUL_F2;
2860 if (VT.SimpleTy >= MVT::riscv_nxv8i8x2 &&
2861 VT.SimpleTy <= MVT::riscv_nxv8i8x8)
2862 return RISCVVType::LMUL_1;
2863 if (VT.SimpleTy >= MVT::riscv_nxv16i8x2 &&
2864 VT.SimpleTy <= MVT::riscv_nxv16i8x4)
2865 return RISCVVType::LMUL_2;
2866 if (VT.SimpleTy == MVT::riscv_nxv32i8x2)
2867 return RISCVVType::LMUL_4;
2868 llvm_unreachable("Invalid vector tuple type LMUL.");
2869 }
2870
2871 assert(VT.isScalableVector() && "Expecting a scalable vector type");
2872 unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
2873 if (VT.getVectorElementType() == MVT::i1)
2874 KnownSize *= 8;
2875
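// For example, nxv1i8 has a known minimum size of 8 bits (LMUL_F8), nxv8i16
// is 128 bits (LMUL_2), and nxv8i1 counts as 64 bits (LMUL_1) after the i1
// scaling above.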
2876 switch (KnownSize) {
2877 default:
2878 llvm_unreachable("Invalid LMUL.");
2879 case 8:
2880 return RISCVVType::LMUL_F8;
2881 case 16:
2882 return RISCVVType::LMUL_F4;
2883 case 32:
2884 return RISCVVType::LMUL_F2;
2885 case 64:
2886 return RISCVVType::LMUL_1;
2887 case 128:
2888 return RISCVVType::LMUL_2;
2889 case 256:
2890 return RISCVVType::LMUL_4;
2891 case 512:
2892 return RISCVVType::LMUL_8;
2893 }
2894}
2895
2896unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVType::VLMUL LMul) {
2897 switch (LMul) {
2898 default:
2899 llvm_unreachable("Invalid LMUL.");
2900 case RISCVVType::LMUL_F8:
2901 case RISCVVType::LMUL_F4:
2902 case RISCVVType::LMUL_F2:
2903 case RISCVVType::LMUL_1:
2904 return RISCV::VRRegClassID;
2905 case RISCVVType::LMUL_2:
2906 return RISCV::VRM2RegClassID;
2907 case RISCVVType::LMUL_4:
2908 return RISCV::VRM4RegClassID;
2909 case RISCVVType::LMUL_8:
2910 return RISCV::VRM8RegClassID;
2911 }
2912}
2913
2914unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
2915 RISCVVType::VLMUL LMUL = getLMUL(VT);
2916 if (LMUL == RISCVVType::LMUL_F8 || LMUL == RISCVVType::LMUL_F4 ||
2917 LMUL == RISCVVType::LMUL_F2 || LMUL == RISCVVType::LMUL_1) {
2918 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
2919 "Unexpected subreg numbering");
2920 return RISCV::sub_vrm1_0 + Index;
2921 }
2922 if (LMUL == RISCVVType::LMUL_2) {
2923 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
2924 "Unexpected subreg numbering");
2925 return RISCV::sub_vrm2_0 + Index;
2926 }
2927 if (LMUL == RISCVVType::LMUL_4) {
2928 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
2929 "Unexpected subreg numbering");
2930 return RISCV::sub_vrm4_0 + Index;
2931 }
2932 llvm_unreachable("Invalid vector type.");
2933}
2934
2935unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
2936 if (VT.isRISCVVectorTuple()) {
2937 unsigned NF = VT.getRISCVVectorTupleNumFields();
2938 unsigned RegsPerField =
2939 std::max(a: 1U, b: (unsigned)VT.getSizeInBits().getKnownMinValue() /
2940 (NF * RISCV::RVVBitsPerBlock));
2941 switch (RegsPerField) {
2942 case 1:
2943 if (NF == 2)
2944 return RISCV::VRN2M1RegClassID;
2945 if (NF == 3)
2946 return RISCV::VRN3M1RegClassID;
2947 if (NF == 4)
2948 return RISCV::VRN4M1RegClassID;
2949 if (NF == 5)
2950 return RISCV::VRN5M1RegClassID;
2951 if (NF == 6)
2952 return RISCV::VRN6M1RegClassID;
2953 if (NF == 7)
2954 return RISCV::VRN7M1RegClassID;
2955 if (NF == 8)
2956 return RISCV::VRN8M1RegClassID;
2957 break;
2958 case 2:
2959 if (NF == 2)
2960 return RISCV::VRN2M2RegClassID;
2961 if (NF == 3)
2962 return RISCV::VRN3M2RegClassID;
2963 if (NF == 4)
2964 return RISCV::VRN4M2RegClassID;
2965 break;
2966 case 4:
2967 assert(NF == 2);
2968 return RISCV::VRN2M4RegClassID;
2969 default:
2970 break;
2971 }
2972 llvm_unreachable("Invalid vector tuple type RegClass.");
2973 }
2974
2975 if (VT.getVectorElementType() == MVT::i1)
2976 return RISCV::VRRegClassID;
2977 return getRegClassIDForLMUL(LMul: getLMUL(VT));
2978}
2979
2980// Attempt to decompose a subvector insert/extract between VecVT and
2981// SubVecVT via subregister indices. Returns the subregister index that
2982// can perform the subvector insert/extract with the given element index, as
2983// well as the index corresponding to any leftover subvectors that must be
2984// further inserted/extracted within the register class for SubVecVT.
2985std::pair<unsigned, unsigned>
2986RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
2987 MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
2988 const RISCVRegisterInfo *TRI) {
2989 static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
2990 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
2991 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
2992 "Register classes not ordered");
2993 unsigned VecRegClassID = getRegClassIDForVecVT(VT: VecVT);
2994 unsigned SubRegClassID = getRegClassIDForVecVT(VT: SubVecVT);
2995
2996 // If VecVT is a vector tuple type, either it has the same RegClass as
2997 // SubVecVT or SubVecVT is actually a subvector of VecVT.
2998 if (VecVT.isRISCVVectorTuple()) {
2999 if (VecRegClassID == SubRegClassID)
3000 return {RISCV::NoSubRegister, 0};
3001
3002 assert(SubVecVT.isScalableVector() &&
3003 "Only allow scalable vector subvector.");
3004 assert(getLMUL(VecVT) == getLMUL(SubVecVT) &&
3005 "Invalid vector tuple insert/extract for vector and subvector with "
3006 "different LMUL.");
3007 return {getSubregIndexByMVT(VT: VecVT, Index: InsertExtractIdx), 0};
3008 }
3009
3010 // Try to compose a subregister index that takes us from the incoming
3011 // LMUL>1 register class down to the outgoing one. At each step we halve
3012 // the LMUL:
3013 // nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
3014 // Note that this is not guaranteed to find a subregister index, such as
3015 // when we are extracting from one VR type to another.
3016 unsigned SubRegIdx = RISCV::NoSubRegister;
3017 for (const unsigned RCID :
3018 {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
3019 if (VecRegClassID > RCID && SubRegClassID <= RCID) {
3020 VecVT = VecVT.getHalfNumVectorElementsVT();
3021 bool IsHi =
3022 InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
3023 SubRegIdx = TRI->composeSubRegIndices(a: SubRegIdx,
3024 b: getSubregIndexByMVT(VT: VecVT, Index: IsHi));
3025 if (IsHi)
3026 InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
3027 }
3028 return {SubRegIdx, InsertExtractIdx};
3029}
3030
3031// Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
3032// stores for those types.
3033bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
3034 return !Subtarget.useRVVForFixedLengthVectors() ||
3035 (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
3036}
3037
3038bool RISCVTargetLowering::isLegalElementTypeForRVV(EVT ScalarTy) const {
3039 if (!ScalarTy.isSimple())
3040 return false;
3041 switch (ScalarTy.getSimpleVT().SimpleTy) {
3042 case MVT::iPTR:
3043 return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
3044 case MVT::i8:
3045 case MVT::i16:
3046 case MVT::i32:
3047 return Subtarget.hasVInstructions();
3048 case MVT::i64:
3049 return Subtarget.hasVInstructionsI64();
3050 case MVT::f16:
3051 return Subtarget.hasVInstructionsF16Minimal();
3052 case MVT::bf16:
3053 return Subtarget.hasVInstructionsBF16Minimal();
3054 case MVT::f32:
3055 return Subtarget.hasVInstructionsF32();
3056 case MVT::f64:
3057 return Subtarget.hasVInstructionsF64();
3058 default:
3059 return false;
3060 }
3061}
3062
3064unsigned RISCVTargetLowering::combineRepeatedFPDivisors() const {
3065 return NumRepeatedDivisors;
3066}
3067
3068static SDValue getVLOperand(SDValue Op) {
3069 assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3070 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3071 "Unexpected opcode");
3072 bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3073 unsigned IntNo = Op.getConstantOperandVal(i: HasChain ? 1 : 0);
3074 const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3075 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID: IntNo);
3076 if (!II)
3077 return SDValue();
3078 return Op.getOperand(i: II->VLOperand + 1 + HasChain);
3079}
3080
3081static bool useRVVForFixedLengthVectorVT(MVT VT,
3082 const RISCVSubtarget &Subtarget) {
3083 assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
3084 if (!Subtarget.useRVVForFixedLengthVectors())
3085 return false;
3086
3087 // We only support a set of vector types with a consistent maximum fixed size
3088 // across all supported vector element types to avoid legalization issues.
3089 // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
3090 // fixed-length vector type we support is 1024 bytes.
3091 if (VT.getVectorNumElements() > 1024 || VT.getFixedSizeInBits() > 1024 * 8)
3092 return false;
3093
3094 unsigned MinVLen = Subtarget.getRealMinVLen();
3095
3096 MVT EltVT = VT.getVectorElementType();
3097
3098 // Don't use RVV for vectors we cannot scalarize if required.
3099 switch (EltVT.SimpleTy) {
3100 // i1 is supported but has different rules.
3101 default:
3102 return false;
3103 case MVT::i1:
3104 // Masks can only use a single register.
3105 if (VT.getVectorNumElements() > MinVLen)
3106 return false;
3107 MinVLen /= 8;
3108 break;
3109 case MVT::i8:
3110 case MVT::i16:
3111 case MVT::i32:
3112 break;
3113 case MVT::i64:
3114 if (!Subtarget.hasVInstructionsI64())
3115 return false;
3116 break;
3117 case MVT::f16:
3118 if (!Subtarget.hasVInstructionsF16Minimal())
3119 return false;
3120 break;
3121 case MVT::bf16:
3122 if (!Subtarget.hasVInstructionsBF16Minimal())
3123 return false;
3124 break;
3125 case MVT::f32:
3126 if (!Subtarget.hasVInstructionsF32())
3127 return false;
3128 break;
3129 case MVT::f64:
3130 if (!Subtarget.hasVInstructionsF64())
3131 return false;
3132 break;
3133 }
3134
3135 // Reject elements larger than ELEN.
3136 if (EltVT.getSizeInBits() > Subtarget.getELen())
3137 return false;
3138
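// For example, with MinVLen == 128 a 1024-bit v32i32 computes LMul == 8
// below, which is only usable if the subtarget permits LMUL 8 for
// fixed-length vectors.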
3139 unsigned LMul = divideCeil(Numerator: VT.getSizeInBits(), Denominator: MinVLen);
3140 // Don't use RVV for types that don't fit.
3141 if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
3142 return false;
3143
3144 // TODO: Perhaps an artificial restriction, but worth having whilst getting
3145 // the base fixed length RVV support in place.
3146 if (!VT.isPow2VectorType())
3147 return false;
3148
3149 return true;
3150}
3151
3152bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
3153 return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
3154}
3155
3156// Return the smallest scalable container type guaranteed to hold a
// fixed-length vector of type VT, given the subtarget's minimum VLEN.
3157static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
3158 const RISCVSubtarget &Subtarget) {
3159 // This may be called before legal types are setup.
3160 assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
3161 useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
3162 "Expected legal fixed length vector!");
3163
3164 unsigned MinVLen = Subtarget.getRealMinVLen();
3165 unsigned MaxELen = Subtarget.getELen();
3166
3167 MVT EltVT = VT.getVectorElementType();
3168 switch (EltVT.SimpleTy) {
3169 default:
3170 llvm_unreachable("unexpected element type for RVV container");
3171 case MVT::i1:
3172 case MVT::i8:
3173 case MVT::i16:
3174 case MVT::i32:
3175 case MVT::i64:
3176 case MVT::bf16:
3177 case MVT::f16:
3178 case MVT::f32:
3179 case MVT::f64: {
3180 // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
3181 // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
3182 // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
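// For example, with MinVLen == 128, v8i32 maps to nxv4i32 (an M2 container)
// and v4i8 maps to nxv2i8 (an MF4 container).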
3183 unsigned NumElts =
3184 (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
3185 NumElts = std::max(a: NumElts, b: RISCV::RVVBitsPerBlock / MaxELen);
3186 assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
3187 return MVT::getScalableVectorVT(VT: EltVT, NumElements: NumElts);
3188 }
3189 }
3190}
3191
3192static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
3193 const RISCVSubtarget &Subtarget) {
3194 return getContainerForFixedLengthVector(TLI: DAG.getTargetLoweringInfo(), VT,
3195 Subtarget);
3196}
3197
3198MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
3199 return ::getContainerForFixedLengthVector(TLI: *this, VT, Subtarget: getSubtarget());
3200}
3201
3202// Grow V to consume an entire RVV register.
3203static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
3204 const RISCVSubtarget &Subtarget) {
3205 assert(VT.isScalableVector() &&
3206 "Expected to convert into a scalable vector!");
3207 assert(V.getValueType().isFixedLengthVector() &&
3208 "Expected a fixed length vector operand!");
3209 SDLoc DL(V);
3210 return DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT), SubVec: V, Idx: 0);
3211}
3212
3213// Shrink V so it's just big enough to maintain a VT's worth of data.
3214static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
3215 const RISCVSubtarget &Subtarget) {
3216 assert(VT.isFixedLengthVector() &&
3217 "Expected to convert into a fixed length vector!");
3218 assert(V.getValueType().isScalableVector() &&
3219 "Expected a scalable vector operand!");
3220 SDLoc DL(V);
3221 return DAG.getExtractSubvector(DL, VT, Vec: V, Idx: 0);
3222}
3223
3224/// Return the mask type suitable for masking the provided vector type.
3225/// This is simply an i1 element type vector of the same (possibly
3226/// scalable) length.
3227static MVT getMaskTypeFor(MVT VecVT) {
3228 assert(VecVT.isVector());
3229 ElementCount EC = VecVT.getVectorElementCount();
3230 return MVT::getVectorVT(VT: MVT::i1, EC);
3231}
3232
3233/// Creates an all-ones mask suitable for masking a vector of type VecVT with
3234/// vector length VL.
3235static SDValue getAllOnesMask(MVT VecVT, SDValue VL, const SDLoc &DL,
3236 SelectionDAG &DAG) {
3237 MVT MaskVT = getMaskTypeFor(VecVT);
3238 return DAG.getNode(Opcode: RISCVISD::VMSET_VL, DL, VT: MaskVT, Operand: VL);
3239}
3240
3241static std::pair<SDValue, SDValue>
3242getDefaultScalableVLOps(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG,
3243 const RISCVSubtarget &Subtarget) {
3244 assert(VecVT.isScalableVector() && "Expecting a scalable vector");
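// Using X0 as the AVL operand encodes VLMAX.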
3245 SDValue VL = DAG.getRegister(Reg: RISCV::X0, VT: Subtarget.getXLenVT());
3246 SDValue Mask = getAllOnesMask(VecVT, VL, DL, DAG);
3247 return {Mask, VL};
3248}
3249
3250static std::pair<SDValue, SDValue>
3251getDefaultVLOps(uint64_t NumElts, MVT ContainerVT, const SDLoc &DL,
3252 SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
3253 assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
3254 SDValue VL = DAG.getConstant(Val: NumElts, DL, VT: Subtarget.getXLenVT());
3255 SDValue Mask = getAllOnesMask(VecVT: ContainerVT, VL, DL, DAG);
3256 return {Mask, VL};
3257}
3258
3259// Gets the two common "VL" operands: an all-ones mask and the vector length.
3260// VecVT is a vector type, either fixed-length or scalable. If VecVT is
3261// fixed-length, ContainerVT is the scalable vector type it is contained in;
3262// otherwise ContainerVT should be the same as VecVT.
3263static std::pair<SDValue, SDValue>
3264getDefaultVLOps(MVT VecVT, MVT ContainerVT, const SDLoc &DL, SelectionDAG &DAG,
3265 const RISCVSubtarget &Subtarget) {
3266 if (VecVT.isFixedLengthVector())
3267 return getDefaultVLOps(NumElts: VecVT.getVectorNumElements(), ContainerVT, DL, DAG,
3268 Subtarget);
3269 assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
3270 return getDefaultScalableVLOps(VecVT: ContainerVT, DL, DAG, Subtarget);
3271}
3272
3273SDValue RISCVTargetLowering::computeVLMax(MVT VecVT, const SDLoc &DL,
3274 SelectionDAG &DAG) const {
3275 assert(VecVT.isScalableVector() && "Expected scalable vector");
3276 return DAG.getElementCount(DL, VT: Subtarget.getXLenVT(),
3277 EC: VecVT.getVectorElementCount());
3278}
3279
3280std::pair<unsigned, unsigned>
3281RISCVTargetLowering::computeVLMAXBounds(MVT VecVT,
3282 const RISCVSubtarget &Subtarget) {
3283 assert(VecVT.isScalableVector() && "Expected scalable vector");
3284
3285 unsigned EltSize = VecVT.getScalarSizeInBits();
3286 unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
3287
3288 unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
3289 unsigned MaxVLMAX =
3290 RISCVTargetLowering::computeVLMAX(VectorBits: VectorBitsMax, EltSize, MinSize);
3291
3292 unsigned VectorBitsMin = Subtarget.getRealMinVLen();
3293 unsigned MinVLMAX =
3294 RISCVTargetLowering::computeVLMAX(VectorBits: VectorBitsMin, EltSize, MinSize);
3295
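// For example, for nxv4i32 with VLEN known to lie in [128, 512], this
// returns {8, 32}.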
3296 return std::make_pair(x&: MinVLMAX, y&: MaxVLMAX);
3297}
3298
3299// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
3300// of either is (currently) supported. This can get us into an infinite loop
3301// where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
3302// as a ..., etc.
3303// Until either (or both) of these can reliably lower any node, reporting that
3304// we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
3305// the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
3306// which is not desirable.
3307bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
3308 EVT VT, unsigned DefinedValues) const {
3309 return false;
3310}
3311
3312InstructionCost RISCVTargetLowering::getLMULCost(MVT VT) const {
3313 // TODO: Here we assume the reciprocal throughput is 1 for LMUL_1; it is
3314 // implementation-defined.
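// For example, with DLenFactor == 2 an LMUL_4 type costs 8, an LMUL_1 type
// costs 2, and an LMUL_F2 type costs 1.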
3315 if (!VT.isVector())
3316 return InstructionCost::getInvalid();
3317 unsigned DLenFactor = Subtarget.getDLenFactor();
3318 unsigned Cost;
3319 if (VT.isScalableVector()) {
3320 unsigned LMul;
3321 bool Fractional;
3322 std::tie(args&: LMul, args&: Fractional) =
3323 RISCVVType::decodeVLMUL(VLMul: RISCVTargetLowering::getLMUL(VT));
3324 if (Fractional)
3325 Cost = LMul <= DLenFactor ? (DLenFactor / LMul) : 1;
3326 else
3327 Cost = (LMul * DLenFactor);
3328 } else {
3329 Cost = divideCeil(Numerator: VT.getSizeInBits(), Denominator: Subtarget.getRealMinVLen() / DLenFactor);
3330 }
3331 return Cost;
3332}
3333
3335/// Return the cost of a vrgather.vv instruction for the type VT. vrgather.vv
3336/// may be quadratic in the number of vregs implied by LMUL, and is assumed to
3337/// be by default. VRGatherCostModel reflects the available options. Note that
3338/// the operands (index and possibly mask) are handled separately.
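/// For example, if getLMULCost(VT) is 4, the default quadratic model returns
/// 16 while the NLog2N model returns 4 * log2(4) = 8.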
3339InstructionCost RISCVTargetLowering::getVRGatherVVCost(MVT VT) const {
3340 auto LMULCost = getLMULCost(VT);
3341 bool Log2CostModel =
3342 Subtarget.getVRGatherCostModel() == llvm::RISCVSubtarget::NLog2N;
3343 if (Log2CostModel && LMULCost.isValid()) {
3344 unsigned Log = Log2_64(Value: LMULCost.getValue());
3345 if (Log > 0)
3346 return LMULCost * Log;
3347 }
3348 return LMULCost * LMULCost;
3349}
3350
3351/// Return the cost of a vrgather.vi (or vx) instruction for the type VT.
3352/// vrgather.vi/vx may be linear in the number of vregs implied by LMUL,
3353/// or may track the vrgather.vv cost. It is implementation-dependent.
3354InstructionCost RISCVTargetLowering::getVRGatherVICost(MVT VT) const {
3355 return getLMULCost(VT);
3356}
3357
3358/// Return the cost of a vslidedown.vx or vslideup.vx instruction
3359/// for the type VT. (This does not cover the vslide1up or vslide1down
3360/// variants.) Slides may be linear in the number of vregs implied by LMUL,
3361/// or may track the vrgather.vv cost. It is implementation-dependent.
3362InstructionCost RISCVTargetLowering::getVSlideVXCost(MVT VT) const {
3363 return getLMULCost(VT);
3364}
3365
3366/// Return the cost of a vslidedown.vi or vslideup.vi instruction
3367/// for the type VT. (This does not cover the vslide1up or vslide1down
3368/// variants.) Slides may be linear in the number of vregs implied by LMUL,
3369/// or may track the vrgather.vv cost. It is implementation-dependent.
3370InstructionCost RISCVTargetLowering::getVSlideVICost(MVT VT) const {
3371 return getLMULCost(VT);
3372}
3373
3374static SDValue lowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
3375 const RISCVSubtarget &Subtarget) {
3376 // f16 conversions are promoted to f32 when Zfh/Zhinx are not supported.
3377 // bf16 conversions are always promoted to f32.
3378 if ((Op.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) ||
3379 Op.getValueType() == MVT::bf16) {
3380 bool IsStrict = Op->isStrictFPOpcode();
3381
3382 SDLoc DL(Op);
3383 if (IsStrict) {
3384 SDValue Val = DAG.getNode(Opcode: Op.getOpcode(), DL, ResultTys: {MVT::f32, MVT::Other},
3385 Ops: {Op.getOperand(i: 0), Op.getOperand(i: 1)});
3386 return DAG.getNode(Opcode: ISD::STRICT_FP_ROUND, DL,
3387 ResultTys: {Op.getValueType(), MVT::Other},
3388 Ops: {Val.getValue(R: 1), Val.getValue(R: 0),
3389 DAG.getIntPtrConstant(Val: 0, DL, /*isTarget=*/true)});
3390 }
3391 return DAG.getNode(
3392 Opcode: ISD::FP_ROUND, DL, VT: Op.getValueType(),
3393 N1: DAG.getNode(Opcode: Op.getOpcode(), DL, VT: MVT::f32, Operand: Op.getOperand(i: 0)),
3394 N2: DAG.getIntPtrConstant(Val: 0, DL, /*isTarget=*/true));
3395 }
3396
3397 // Other operations are legal.
3398 return Op;
3399}
3400
3401static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
3402 const RISCVSubtarget &Subtarget) {
3403 // RISC-V FP-to-int conversions saturate to the destination register size, but
3404 // don't produce 0 for nan. We can use a conversion instruction and fix the
3405 // nan case with a compare and a select.
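// For scalar types this becomes an FCVT node using RTZ rounding plus a SETUO
// compare of Src with itself that selects 0 for NaN inputs.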
3406 SDValue Src = Op.getOperand(i: 0);
3407
3408 MVT DstVT = Op.getSimpleValueType();
3409 EVT SatVT = cast<VTSDNode>(Val: Op.getOperand(i: 1))->getVT();
3410
3411 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
3412
3413 if (!DstVT.isVector()) {
3414 // For bf16 or for f16 in absence of Zfh, promote to f32, then saturate
3415 // the result.
3416 if ((Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) ||
3417 Src.getValueType() == MVT::bf16) {
3418 Src = DAG.getNode(Opcode: ISD::FP_EXTEND, DL: SDLoc(Op), VT: MVT::f32, Operand: Src);
3419 }
3420
3421 unsigned Opc;
3422 if (SatVT == DstVT)
3423 Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
3424 else if (DstVT == MVT::i64 && SatVT == MVT::i32)
3425 Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
3426 else
3427 return SDValue();
3428 // FIXME: Support other SatVTs by clamping before or after the conversion.
3429
3430 SDLoc DL(Op);
3431 SDValue FpToInt = DAG.getNode(
3432 Opcode: Opc, DL, VT: DstVT, N1: Src,
3433 N2: DAG.getTargetConstant(Val: RISCVFPRndMode::RTZ, DL, VT: Subtarget.getXLenVT()));
3434
3435 if (Opc == RISCVISD::FCVT_WU_RV64)
3436 FpToInt = DAG.getZeroExtendInReg(Op: FpToInt, DL, VT: MVT::i32);
3437
3438 SDValue ZeroInt = DAG.getConstant(Val: 0, DL, VT: DstVT);
3439 return DAG.getSelectCC(DL, LHS: Src, RHS: Src, True: ZeroInt, False: FpToInt,
3440 Cond: ISD::CondCode::SETUO);
3441 }
3442
3443 // Vectors.
3444
3445 MVT DstEltVT = DstVT.getVectorElementType();
3446 MVT SrcVT = Src.getSimpleValueType();
3447 MVT SrcEltVT = SrcVT.getVectorElementType();
3448 unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3449 unsigned DstEltSize = DstEltVT.getSizeInBits();
3450
3451 // Only handle saturating to the destination type.
3452 if (SatVT != DstEltVT)
3453 return SDValue();
3454
3455 MVT DstContainerVT = DstVT;
3456 MVT SrcContainerVT = SrcVT;
3457 if (DstVT.isFixedLengthVector()) {
3458 DstContainerVT = getContainerForFixedLengthVector(DAG, VT: DstVT, Subtarget);
3459 SrcContainerVT = getContainerForFixedLengthVector(DAG, VT: SrcVT, Subtarget);
3460 assert(DstContainerVT.getVectorElementCount() ==
3461 SrcContainerVT.getVectorElementCount() &&
3462 "Expected same element count");
3463 Src = convertToScalableVector(VT: SrcContainerVT, V: Src, DAG, Subtarget);
3464 }
3465
3466 SDLoc DL(Op);
3467
3468 auto [Mask, VL] = getDefaultVLOps(VecVT: DstVT, ContainerVT: DstContainerVT, DL, DAG, Subtarget);
3469
3470 SDValue IsNan = DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: Mask.getValueType(),
3471 Ops: {Src, Src, DAG.getCondCode(Cond: ISD::SETNE),
3472 DAG.getUNDEF(VT: Mask.getValueType()), Mask, VL});
3473
3474 // Need to widen by more than 1 step, promote the FP type, then do a widening
3475 // convert.
3476 if (DstEltSize > (2 * SrcEltSize)) {
3477 assert(SrcContainerVT.getVectorElementType() == MVT::f16 && "Unexpected VT!");
3478 MVT InterVT = SrcContainerVT.changeVectorElementType(EltVT: MVT::f32);
3479 Src = DAG.getNode(Opcode: RISCVISD::FP_EXTEND_VL, DL, VT: InterVT, N1: Src, N2: Mask, N3: VL);
3480 }
3481
3482 MVT CvtContainerVT = DstContainerVT;
3483 MVT CvtEltVT = DstEltVT;
3484 if (SrcEltSize > (2 * DstEltSize)) {
3485 CvtEltVT = MVT::getIntegerVT(BitWidth: SrcEltVT.getSizeInBits() / 2);
3486 CvtContainerVT = CvtContainerVT.changeVectorElementType(EltVT: CvtEltVT);
3487 }
3488
3489 unsigned RVVOpc =
3490 IsSigned ? RISCVISD::VFCVT_RTZ_X_F_VL : RISCVISD::VFCVT_RTZ_XU_F_VL;
3491 SDValue Res = DAG.getNode(Opcode: RVVOpc, DL, VT: CvtContainerVT, N1: Src, N2: Mask, N3: VL);
3492
3493 while (CvtContainerVT != DstContainerVT) {
3494 CvtEltVT = MVT::getIntegerVT(BitWidth: CvtEltVT.getSizeInBits() / 2);
3495 CvtContainerVT = CvtContainerVT.changeVectorElementType(EltVT: CvtEltVT);
3496 // Rounding mode here is arbitrary since we aren't shifting out any bits.
3497 unsigned ClipOpc = IsSigned ? RISCVISD::TRUNCATE_VECTOR_VL_SSAT
3498 : RISCVISD::TRUNCATE_VECTOR_VL_USAT;
3499 Res = DAG.getNode(Opcode: ClipOpc, DL, VT: CvtContainerVT, N1: Res, N2: Mask, N3: VL);
3500 }
3501
3502 SDValue SplatZero = DAG.getNode(
3503 Opcode: RISCVISD::VMV_V_X_VL, DL, VT: DstContainerVT, N1: DAG.getUNDEF(VT: DstContainerVT),
3504 N2: DAG.getConstant(Val: 0, DL, VT: Subtarget.getXLenVT()), N3: VL);
3505 Res = DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: DstContainerVT, N1: IsNan, N2: SplatZero,
3506 N3: Res, N4: DAG.getUNDEF(VT: DstContainerVT), N5: VL);
3507
3508 if (DstVT.isFixedLengthVector())
3509 Res = convertFromScalableVector(VT: DstVT, V: Res, DAG, Subtarget);
3510
3511 return Res;
3512}
3513
3514static SDValue lowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
3515 const RISCVSubtarget &Subtarget) {
3516 bool IsStrict = Op->isStrictFPOpcode();
3517 SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0);
3518
3519 // f16 conversions are promoted to f32 when Zfh/Zhinx is not enabled.
3520 // bf16 conversions are always promoted to f32.
3521 if ((SrcVal.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) ||
3522 SrcVal.getValueType() == MVT::bf16) {
3523 SDLoc DL(Op);
3524 if (IsStrict) {
3525 SDValue Ext =
3526 DAG.getNode(Opcode: ISD::STRICT_FP_EXTEND, DL, ResultTys: {MVT::f32, MVT::Other},
3527 Ops: {Op.getOperand(i: 0), SrcVal});
3528 return DAG.getNode(Opcode: Op.getOpcode(), DL, ResultTys: {Op.getValueType(), MVT::Other},
3529 Ops: {Ext.getValue(R: 1), Ext.getValue(R: 0)});
3530 }
3531 return DAG.getNode(Opcode: Op.getOpcode(), DL, VT: Op.getValueType(),
3532 Operand: DAG.getNode(Opcode: ISD::FP_EXTEND, DL, VT: MVT::f32, Operand: SrcVal));
3533 }
3534
3535 // Other operations are legal.
3536 return Op;
3537}
3538
3539static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc) {
3540 switch (Opc) {
3541 case ISD::FROUNDEVEN:
3542 case ISD::STRICT_FROUNDEVEN:
3543 case ISD::VP_FROUNDEVEN:
3544 return RISCVFPRndMode::RNE;
3545 case ISD::FTRUNC:
3546 case ISD::STRICT_FTRUNC:
3547 case ISD::VP_FROUNDTOZERO:
3548 return RISCVFPRndMode::RTZ;
3549 case ISD::FFLOOR:
3550 case ISD::STRICT_FFLOOR:
3551 case ISD::VP_FFLOOR:
3552 return RISCVFPRndMode::RDN;
3553 case ISD::FCEIL:
3554 case ISD::STRICT_FCEIL:
3555 case ISD::VP_FCEIL:
3556 return RISCVFPRndMode::RUP;
3557 case ISD::FROUND:
3558 case ISD::LROUND:
3559 case ISD::LLROUND:
3560 case ISD::STRICT_FROUND:
3561 case ISD::STRICT_LROUND:
3562 case ISD::STRICT_LLROUND:
3563 case ISD::VP_FROUND:
3564 return RISCVFPRndMode::RMM;
3565 case ISD::FRINT:
3566 case ISD::LRINT:
3567 case ISD::LLRINT:
3568 case ISD::STRICT_FRINT:
3569 case ISD::STRICT_LRINT:
3570 case ISD::STRICT_LLRINT:
3571 case ISD::VP_FRINT:
3572 case ISD::VP_LRINT:
3573 case ISD::VP_LLRINT:
3574 return RISCVFPRndMode::DYN;
3575 }
3576
3577 return RISCVFPRndMode::Invalid;
3578}
3579
3580// Expand vector FTRUNC, FCEIL, FFLOOR, FROUND, VP_FCEIL, VP_FFLOOR, VP_FROUND,
3581// VP_FROUNDEVEN, VP_FROUNDTOZERO, VP_FRINT and VP_FNEARBYINT by converting to
3582// the integer domain and back, taking care to avoid converting values that are
3583// NaN or already correct.
3584static SDValue
3585lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
3586 const RISCVSubtarget &Subtarget) {
3587 MVT VT = Op.getSimpleValueType();
3588 assert(VT.isVector() && "Unexpected type");
3589
3590 SDLoc DL(Op);
3591
3592 SDValue Src = Op.getOperand(i: 0);
3593
3594 // Freeze the source since we are increasing the number of uses.
3595 Src = DAG.getFreeze(V: Src);
3596
3597 MVT ContainerVT = VT;
3598 if (VT.isFixedLengthVector()) {
3599 ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
3600 Src = convertToScalableVector(VT: ContainerVT, V: Src, DAG, Subtarget);
3601 }
3602
3603 SDValue Mask, VL;
3604 if (Op->isVPOpcode()) {
3605 Mask = Op.getOperand(i: 1);
3606 if (VT.isFixedLengthVector())
3607 Mask = convertToScalableVector(VT: getMaskTypeFor(VecVT: ContainerVT), V: Mask, DAG,
3608 Subtarget);
3609 VL = Op.getOperand(i: 2);
3610 } else {
3611 std::tie(args&: Mask, args&: VL) = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
3612 }
3613
3614 // We do the conversion on the absolute value and fix the sign at the end.
3615 SDValue Abs = DAG.getNode(Opcode: RISCVISD::FABS_VL, DL, VT: ContainerVT, N1: Src, N2: Mask, N3: VL);
3616
3617 // Determine the largest integer that can be represented exactly. This and
3618 // values larger than it don't have any fractional bits so don't need to
3619 // be converted.
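// For f32 this threshold is 2^23 (8388608.0); for f64 it is 2^52.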
3620 const fltSemantics &FltSem = ContainerVT.getFltSemantics();
3621 unsigned Precision = APFloat::semanticsPrecision(FltSem);
3622 APFloat MaxVal = APFloat(FltSem);
3623 MaxVal.convertFromAPInt(Input: APInt::getOneBitSet(numBits: Precision, BitNo: Precision - 1),
3624 /*IsSigned*/ false, RM: APFloat::rmNearestTiesToEven);
3625 SDValue MaxValNode =
3626 DAG.getConstantFP(Val: MaxVal, DL, VT: ContainerVT.getVectorElementType());
3627 SDValue MaxValSplat = DAG.getNode(Opcode: RISCVISD::VFMV_V_F_VL, DL, VT: ContainerVT,
3628 N1: DAG.getUNDEF(VT: ContainerVT), N2: MaxValNode, N3: VL);
3629
3630 // If abs(Src) was larger than MaxVal or nan, keep it.
3631 MVT SetccVT = MVT::getVectorVT(VT: MVT::i1, EC: ContainerVT.getVectorElementCount());
3632 Mask =
3633 DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: SetccVT,
3634 Ops: {Abs, MaxValSplat, DAG.getCondCode(Cond: ISD::SETOLT),
3635 Mask, Mask, VL});
3636
3637 // Truncate to integer and convert back to FP.
3638 MVT IntVT = ContainerVT.changeVectorElementTypeToInteger();
3639 MVT XLenVT = Subtarget.getXLenVT();
3640 SDValue Truncated;
3641
3642 switch (Op.getOpcode()) {
3643 default:
3644 llvm_unreachable("Unexpected opcode");
3645 case ISD::FRINT:
3646 case ISD::VP_FRINT:
3647 case ISD::FCEIL:
3648 case ISD::VP_FCEIL:
3649 case ISD::FFLOOR:
3650 case ISD::VP_FFLOOR:
3651 case ISD::FROUND:
3652 case ISD::FROUNDEVEN:
3653 case ISD::VP_FROUND:
3654 case ISD::VP_FROUNDEVEN:
3655 case ISD::VP_FROUNDTOZERO: {
3656 RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Opc: Op.getOpcode());
3657 assert(FRM != RISCVFPRndMode::Invalid);
3658 Truncated = DAG.getNode(Opcode: RISCVISD::VFCVT_RM_X_F_VL, DL, VT: IntVT, N1: Src, N2: Mask,
3659 N3: DAG.getTargetConstant(Val: FRM, DL, VT: XLenVT), N4: VL);
3660 break;
3661 }
3662 case ISD::FTRUNC:
3663 Truncated = DAG.getNode(Opcode: RISCVISD::VFCVT_RTZ_X_F_VL, DL, VT: IntVT, N1: Src,
3664 N2: Mask, N3: VL);
3665 break;
3666 case ISD::FNEARBYINT:
3667 case ISD::VP_FNEARBYINT:
3668 Truncated = DAG.getNode(Opcode: RISCVISD::VFROUND_NOEXCEPT_VL, DL, VT: ContainerVT, N1: Src,
3669 N2: Mask, N3: VL);
3670 break;
3671 }
3672
3673 // VFROUND_NOEXCEPT_VL includes SINT_TO_FP_VL.
3674 if (Truncated.getOpcode() != RISCVISD::VFROUND_NOEXCEPT_VL)
3675 Truncated = DAG.getNode(Opcode: RISCVISD::SINT_TO_FP_VL, DL, VT: ContainerVT, N1: Truncated,
3676 N2: Mask, N3: VL);
3677
3678 // Restore the original sign so that -0.0 is preserved.
3679 Truncated = DAG.getNode(Opcode: RISCVISD::FCOPYSIGN_VL, DL, VT: ContainerVT, N1: Truncated,
3680 N2: Src, N3: Src, N4: Mask, N5: VL);
3681
3682 if (!VT.isFixedLengthVector())
3683 return Truncated;
3684
3685 return convertFromScalableVector(VT, V: Truncated, DAG, Subtarget);
3686}
3687
3688// Expand vector STRICT_FTRUNC, STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND,
3689// STRICT_FROUNDEVEN and STRICT_FNEARBYINT by converting any sNaN of the source
3690// to a qNaN and converting the new source to integer and back to FP.
3691static SDValue
3692lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
3693 const RISCVSubtarget &Subtarget) {
3694 SDLoc DL(Op);
3695 MVT VT = Op.getSimpleValueType();
3696 SDValue Chain = Op.getOperand(i: 0);
3697 SDValue Src = Op.getOperand(i: 1);
3698
3699 MVT ContainerVT = VT;
3700 if (VT.isFixedLengthVector()) {
3701 ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
3702 Src = convertToScalableVector(VT: ContainerVT, V: Src, DAG, Subtarget);
3703 }
3704
3705 auto [Mask, VL] = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
3706
3707 // Freeze the source since we are increasing the number of uses.
3708 Src = DAG.getFreeze(V: Src);
3709
3710 // Convert sNaN to qNaN by computing x + x for every unordered element x in Src.
3711 MVT MaskVT = Mask.getSimpleValueType();
3712 SDValue Unorder = DAG.getNode(Opcode: RISCVISD::STRICT_FSETCC_VL, DL,
3713 VTList: DAG.getVTList(VT1: MaskVT, VT2: MVT::Other),
3714 Ops: {Chain, Src, Src, DAG.getCondCode(Cond: ISD::SETUNE),
3715 DAG.getUNDEF(VT: MaskVT), Mask, VL});
3716 Chain = Unorder.getValue(R: 1);
3717 Src = DAG.getNode(Opcode: RISCVISD::STRICT_FADD_VL, DL,
3718 VTList: DAG.getVTList(VT1: ContainerVT, VT2: MVT::Other),
3719 Ops: {Chain, Src, Src, Src, Unorder, VL});
3720 Chain = Src.getValue(R: 1);
3721
3722 // We do the conversion on the absolute value and fix the sign at the end.
3723 SDValue Abs = DAG.getNode(Opcode: RISCVISD::FABS_VL, DL, VT: ContainerVT, N1: Src, N2: Mask, N3: VL);
3724
3725 // Determine the largest integer that can be represented exactly. This and
3726 // values larger than it don't have any fractional bits so don't need to
3727 // be converted.
3728 const fltSemantics &FltSem = ContainerVT.getFltSemantics();
3729 unsigned Precision = APFloat::semanticsPrecision(FltSem);
3730 APFloat MaxVal = APFloat(FltSem);
3731 MaxVal.convertFromAPInt(Input: APInt::getOneBitSet(numBits: Precision, BitNo: Precision - 1),
3732 /*IsSigned*/ false, RM: APFloat::rmNearestTiesToEven);
3733 SDValue MaxValNode =
3734 DAG.getConstantFP(Val: MaxVal, DL, VT: ContainerVT.getVectorElementType());
3735 SDValue MaxValSplat = DAG.getNode(Opcode: RISCVISD::VFMV_V_F_VL, DL, VT: ContainerVT,
3736 N1: DAG.getUNDEF(VT: ContainerVT), N2: MaxValNode, N3: VL);
3737
3738 // If abs(Src) was larger than MaxVal or nan, keep it.
3739 Mask = DAG.getNode(
3740 Opcode: RISCVISD::SETCC_VL, DL, VT: MaskVT,
3741 Ops: {Abs, MaxValSplat, DAG.getCondCode(Cond: ISD::SETOLT), Mask, Mask, VL});
3742
3743 // Truncate to integer and convert back to FP.
3744 MVT IntVT = ContainerVT.changeVectorElementTypeToInteger();
3745 MVT XLenVT = Subtarget.getXLenVT();
3746 SDValue Truncated;
3747
3748 switch (Op.getOpcode()) {
3749 default:
3750 llvm_unreachable("Unexpected opcode");
3751 case ISD::STRICT_FCEIL:
3752 case ISD::STRICT_FFLOOR:
3753 case ISD::STRICT_FROUND:
3754 case ISD::STRICT_FROUNDEVEN: {
3755 RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Opc: Op.getOpcode());
3756 assert(FRM != RISCVFPRndMode::Invalid);
3757 Truncated = DAG.getNode(
3758 Opcode: RISCVISD::STRICT_VFCVT_RM_X_F_VL, DL, VTList: DAG.getVTList(VT1: IntVT, VT2: MVT::Other),
3759 Ops: {Chain, Src, Mask, DAG.getTargetConstant(Val: FRM, DL, VT: XLenVT), VL});
3760 break;
3761 }
3762 case ISD::STRICT_FTRUNC:
3763 Truncated =
3764 DAG.getNode(Opcode: RISCVISD::STRICT_VFCVT_RTZ_X_F_VL, DL,
3765 VTList: DAG.getVTList(VT1: IntVT, VT2: MVT::Other), N1: Chain, N2: Src, N3: Mask, N4: VL);
3766 break;
3767 case ISD::STRICT_FNEARBYINT:
3768 Truncated = DAG.getNode(Opcode: RISCVISD::STRICT_VFROUND_NOEXCEPT_VL, DL,
3769 VTList: DAG.getVTList(VT1: ContainerVT, VT2: MVT::Other), N1: Chain, N2: Src,
3770 N3: Mask, N4: VL);
3771 break;
3772 }
3773 Chain = Truncated.getValue(R: 1);
3774
3775 // VFROUND_NOEXCEPT_VL includes SINT_TO_FP_VL.
3776 if (Op.getOpcode() != ISD::STRICT_FNEARBYINT) {
3777 Truncated = DAG.getNode(Opcode: RISCVISD::STRICT_SINT_TO_FP_VL, DL,
3778 VTList: DAG.getVTList(VT1: ContainerVT, VT2: MVT::Other), N1: Chain,
3779 N2: Truncated, N3: Mask, N4: VL);
3780 Chain = Truncated.getValue(R: 1);
3781 }
3782
3783 // Restore the original sign so that -0.0 is preserved.
3784 Truncated = DAG.getNode(Opcode: RISCVISD::FCOPYSIGN_VL, DL, VT: ContainerVT, N1: Truncated,
3785 N2: Src, N3: Src, N4: Mask, N5: VL);
3786
3787 if (VT.isFixedLengthVector())
3788 Truncated = convertFromScalableVector(VT, V: Truncated, DAG, Subtarget);
3789 return DAG.getMergeValues(Ops: {Truncated, Chain}, dl: DL);
3790}
3791
3792static SDValue
3793lowerFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
3794 const RISCVSubtarget &Subtarget) {
3795 MVT VT = Op.getSimpleValueType();
3796 if (VT.isVector())
3797 return lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
3798
3799 if (DAG.shouldOptForSize())
3800 return SDValue();
3801
3802 SDLoc DL(Op);
3803 SDValue Src = Op.getOperand(i: 0);
3804
3805 // Create an integer the size of the mantissa with the MSB set. This and all
3806 // values larger than it don't have any fractional bits so don't need to be
3807 // converted.
3808 const fltSemantics &FltSem = VT.getFltSemantics();
3809 unsigned Precision = APFloat::semanticsPrecision(FltSem);
3810 APFloat MaxVal = APFloat(FltSem);
3811 MaxVal.convertFromAPInt(Input: APInt::getOneBitSet(numBits: Precision, BitNo: Precision - 1),
3812 /*IsSigned*/ false, RM: APFloat::rmNearestTiesToEven);
3813 SDValue MaxValNode = DAG.getConstantFP(Val: MaxVal, DL, VT);
3814
3815 RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Opc: Op.getOpcode());
3816 return DAG.getNode(Opcode: RISCVISD::FROUND, DL, VT, N1: Src, N2: MaxValNode,
3817 N3: DAG.getTargetConstant(Val: FRM, DL, VT: Subtarget.getXLenVT()));
3818}
3819
3820// Expand vector [L]LRINT and [L]LROUND by converting to the integer domain.
3821static SDValue lowerVectorXRINT_XROUND(SDValue Op, SelectionDAG &DAG,
3822 const RISCVSubtarget &Subtarget) {
3823 SDLoc DL(Op);
3824 MVT DstVT = Op.getSimpleValueType();
3825 SDValue Src = Op.getOperand(i: 0);
3826 MVT SrcVT = Src.getSimpleValueType();
3827 assert(SrcVT.isVector() && DstVT.isVector() &&
3828 !(SrcVT.isFixedLengthVector() ^ DstVT.isFixedLengthVector()) &&
3829 "Unexpected type");
3830
3831 MVT DstContainerVT = DstVT;
3832 MVT SrcContainerVT = SrcVT;
3833
3834 if (DstVT.isFixedLengthVector()) {
3835 DstContainerVT = getContainerForFixedLengthVector(DAG, VT: DstVT, Subtarget);
3836 SrcContainerVT = getContainerForFixedLengthVector(DAG, VT: SrcVT, Subtarget);
3837 Src = convertToScalableVector(VT: SrcContainerVT, V: Src, DAG, Subtarget);
3838 }
3839
3840 auto [Mask, VL] = getDefaultVLOps(VecVT: SrcVT, ContainerVT: SrcContainerVT, DL, DAG, Subtarget);
3841
3842 // [b]f16 -> f32
3843 MVT SrcElemType = SrcVT.getVectorElementType();
3844 if (SrcElemType == MVT::f16 || SrcElemType == MVT::bf16) {
3845 MVT F32VT = SrcContainerVT.changeVectorElementType(EltVT: MVT::f32);
3846 Src = DAG.getNode(Opcode: RISCVISD::FP_EXTEND_VL, DL, VT: F32VT, N1: Src, N2: Mask, N3: VL);
3847 }
3848
3849 SDValue Res =
3850 DAG.getNode(Opcode: RISCVISD::VFCVT_RM_X_F_VL, DL, VT: DstContainerVT, N1: Src, N2: Mask,
3851 N3: DAG.getTargetConstant(Val: matchRoundingOp(Opc: Op.getOpcode()), DL,
3852 VT: Subtarget.getXLenVT()),
3853 N4: VL);
3854
3855 if (!DstVT.isFixedLengthVector())
3856 return Res;
3857
3858 return convertFromScalableVector(VT: DstVT, V: Res, DAG, Subtarget);
3859}
3860
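// Helpers to emit RISCVISD::VSLIDEDOWN_VL / VSLIDEUP_VL nodes. When the
// passthru is undef, the tail/mask policy is relaxed to agnostic since no
// lanes of the passthru need to be preserved.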
3861static SDValue
3862getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
3863 const SDLoc &DL, EVT VT, SDValue Passthru, SDValue Op,
3864 SDValue Offset, SDValue Mask, SDValue VL,
3865 unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
3866 if (Passthru.isUndef())
3867 Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
3868 SDValue PolicyOp = DAG.getTargetConstant(Val: Policy, DL, VT: Subtarget.getXLenVT());
3869 SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
3870 return DAG.getNode(Opcode: RISCVISD::VSLIDEDOWN_VL, DL, VT, Ops);
3871}
3872
3873static SDValue
3874getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL,
3875 EVT VT, SDValue Passthru, SDValue Op, SDValue Offset, SDValue Mask,
3876 SDValue VL,
3877 unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
3878 if (Passthru.isUndef())
3879 Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
3880 SDValue PolicyOp = DAG.getTargetConstant(Val: Policy, DL, VT: Subtarget.getXLenVT());
3881 SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
3882 return DAG.getNode(Opcode: RISCVISD::VSLIDEUP_VL, DL, VT, Ops);
3883}
3884
3885struct VIDSequence {
3886 int64_t StepNumerator;
3887 unsigned StepDenominator;
3888 int64_t Addend;
3889};
3890
3891static std::optional<APInt> getExactInteger(const APFloat &APF,
3892 uint32_t BitWidth) {
3893 // We will use a SINT_TO_FP to materialize this constant so we should use a
3894 // signed APSInt here.
3895 APSInt ValInt(BitWidth, /*IsUnsigned*/ false);
3896 // We use an arbitrary rounding mode here. If a floating-point is an exact
3897 // integer (e.g., 1.0), the rounding mode does not affect the output value. If
3898 // the rounding mode changes the output value, then it is not an exact
3899 // integer.
3900 RoundingMode ArbitraryRM = RoundingMode::TowardZero;
3901 bool IsExact;
3902 // If it is out of signed integer range, it will return an invalid operation.
3903 // If it is not an exact integer, IsExact is false.
3904 if ((APF.convertToInteger(Result&: ValInt, RM: ArbitraryRM, IsExact: &IsExact) ==
3905 APFloatBase::opInvalidOp) ||
3906 !IsExact)
3907 return std::nullopt;
3908 return ValInt.extractBits(numBits: BitWidth, bitPosition: 0);
3909}
3910
3911// Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
3912// to the (non-zero) step S and start value X. This can be then lowered as the
3913// RVV sequence (VID * S) + X, for example.
3914// The step S is represented as an integer numerator divided by a positive
3915// denominator. Note that the implementation currently only identifies
3916// sequences in which either the numerator is +/- 1 or the denominator is 1. It
3917// cannot detect 2/3, for example.
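// For example, <0, 2, 4, 6> is matched as numerator 2, denominator 1, addend
// 0, and <1, 1, 2, 2> as numerator 1, denominator 2, addend 1.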
3918// Note that this method will also match potentially unappealing index
3919// sequences, like <i32 0, i32 50939494>; however, it is left to the caller to
3920// determine whether this is worth generating code for.
3921//
3922// EltSizeInBits is the size of the type that the sequence will be calculated
3923// in, i.e. SEW for build_vectors or XLEN for address calculations.
3924static std::optional<VIDSequence> isSimpleVIDSequence(SDValue Op,
3925 unsigned EltSizeInBits) {
3926 assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
3927 if (!cast<BuildVectorSDNode>(Val&: Op)->isConstant())
3928 return std::nullopt;
3929 bool IsInteger = Op.getValueType().isInteger();
3930
3931 std::optional<unsigned> SeqStepDenom;
3932 std::optional<APInt> SeqStepNum;
3933 std::optional<APInt> SeqAddend;
3934 std::optional<std::pair<APInt, unsigned>> PrevElt;
3935 assert(EltSizeInBits >= Op.getValueType().getScalarSizeInBits());
3936
3937 // First extract the ops into a list of constant integer values. This may not
3938 // be possible for floats if they're not all representable as integers.
3939 SmallVector<std::optional<APInt>> Elts(Op.getNumOperands());
3940 const unsigned OpSize = Op.getScalarValueSizeInBits();
3941 for (auto [Idx, Elt] : enumerate(First: Op->op_values())) {
3942 if (Elt.isUndef()) {
3943 Elts[Idx] = std::nullopt;
3944 continue;
3945 }
3946 if (IsInteger) {
3947 Elts[Idx] = Elt->getAsAPIntVal().trunc(width: OpSize).zext(width: EltSizeInBits);
3948 } else {
3949 auto ExactInteger =
3950 getExactInteger(APF: cast<ConstantFPSDNode>(Val: Elt)->getValueAPF(), BitWidth: OpSize);
3951 if (!ExactInteger)
3952 return std::nullopt;
3953 Elts[Idx] = *ExactInteger;
3954 }
3955 }
3956
3957 for (auto [Idx, Elt] : enumerate(First&: Elts)) {
3958 // Assume undef elements match the sequence; we just have to be careful
3959 // when interpolating across them.
3960 if (!Elt)
3961 continue;
3962
3963 if (PrevElt) {
3964 // Calculate the step since the last non-undef element, and ensure
3965 // it's consistent across the entire sequence.
3966 unsigned IdxDiff = Idx - PrevElt->second;
3967 APInt ValDiff = *Elt - PrevElt->first;
3968
3969 // A zero value difference means that we're somewhere in the middle
3970 // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
3971 // step change before evaluating the sequence.
3972 if (ValDiff == 0)
3973 continue;
3974
3975 int64_t Remainder = ValDiff.srem(RHS: IdxDiff);
3976 // Normalize the step if it's greater than 1.
3977 if (Remainder != ValDiff.getSExtValue()) {
3978 // The difference must cleanly divide the element span.
3979 if (Remainder != 0)
3980 return std::nullopt;
3981 ValDiff = ValDiff.sdiv(RHS: IdxDiff);
3982 IdxDiff = 1;
3983 }
3984
3985 if (!SeqStepNum)
3986 SeqStepNum = ValDiff;
3987 else if (ValDiff != SeqStepNum)
3988 return std::nullopt;
3989
3990 if (!SeqStepDenom)
3991 SeqStepDenom = IdxDiff;
3992 else if (IdxDiff != *SeqStepDenom)
3993 return std::nullopt;
3994 }
3995
3996 // Record this non-undef element for later.
3997 if (!PrevElt || PrevElt->first != *Elt)
3998 PrevElt = std::make_pair(x&: *Elt, y&: Idx);
3999 }
4000
4001 // We need to have logged a step for this to count as a legal index sequence.
4002 if (!SeqStepNum || !SeqStepDenom)
4003 return std::nullopt;
4004
4005 // Loop back through the sequence and validate elements we might have skipped
4006 // while waiting for a valid step. While doing this, log any sequence addend.
4007 for (auto [Idx, Elt] : enumerate(First&: Elts)) {
4008 if (!Elt)
4009 continue;
4010 APInt ExpectedVal =
4011 (APInt(EltSizeInBits, Idx, /*isSigned=*/false, /*implicitTrunc=*/true) *
4012 *SeqStepNum)
4013 .sdiv(RHS: *SeqStepDenom);
4014
4015 APInt Addend = *Elt - ExpectedVal;
4016 if (!SeqAddend)
4017 SeqAddend = Addend;
4018 else if (Addend != SeqAddend)
4019 return std::nullopt;
4020 }
4021
4022 assert(SeqAddend && "Must have an addend if we have a step");
4023
4024 return VIDSequence{.StepNumerator: SeqStepNum->getSExtValue(), .StepDenominator: *SeqStepDenom,
4025 .Addend: SeqAddend->getSExtValue()};
4026}
4027
4028// Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
4029// and lower it as a VRGATHER_VX_VL from the source vector.
4030static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
4031 SelectionDAG &DAG,
4032 const RISCVSubtarget &Subtarget) {
4033 if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
4034 return SDValue();
4035 SDValue Src = SplatVal.getOperand(i: 0);
4036 // Don't perform this optimization for i1 vectors, or if the element types
4037 // are different.
4038 // FIXME: Support i1 vectors, maybe by promoting to i8?
4039 MVT EltTy = VT.getVectorElementType();
4040 if (EltTy == MVT::i1 ||
4041 !DAG.getTargetLoweringInfo().isTypeLegal(VT: Src.getValueType()))
4042 return SDValue();
4043 MVT SrcVT = Src.getSimpleValueType();
4044 if (EltTy != SrcVT.getVectorElementType())
4045 return SDValue();
4046 SDValue Idx = SplatVal.getOperand(i: 1);
4047 // The index must be a legal type.
4048 if (Idx.getValueType() != Subtarget.getXLenVT())
4049 return SDValue();
4050
4051 // Check that we know Idx lies within VT
4052 if (!TypeSize::isKnownLE(LHS: SrcVT.getSizeInBits(), RHS: VT.getSizeInBits())) {
4053 auto *CIdx = dyn_cast<ConstantSDNode>(Val&: Idx);
4054 if (!CIdx || CIdx->getZExtValue() >= VT.getVectorMinNumElements())
4055 return SDValue();
4056 }
4057
4058 // Convert fixed length vectors to scalable
4059 MVT ContainerVT = VT;
4060 if (VT.isFixedLengthVector())
4061 ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
4062
4063 MVT SrcContainerVT = SrcVT;
4064 if (SrcVT.isFixedLengthVector()) {
4065 SrcContainerVT = getContainerForFixedLengthVector(DAG, VT: SrcVT, Subtarget);
4066 Src = convertToScalableVector(VT: SrcContainerVT, V: Src, DAG, Subtarget);
4067 }
4068
4069 // Put Vec in a VT sized vector
4070 if (SrcContainerVT.getVectorMinNumElements() <
4071 ContainerVT.getVectorMinNumElements())
4072 Src = DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT: ContainerVT), SubVec: Src, Idx: 0);
4073 else
4074 Src = DAG.getExtractSubvector(DL, VT: ContainerVT, Vec: Src, Idx: 0);
4075
4076 // We checked that Idx fits inside VT earlier
4077 auto [Mask, VL] = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
4078 SDValue Gather = DAG.getNode(Opcode: RISCVISD::VRGATHER_VX_VL, DL, VT: ContainerVT, N1: Src,
4079 N2: Idx, N3: DAG.getUNDEF(VT: ContainerVT), N4: Mask, N5: VL);
4080 if (VT.isFixedLengthVector())
4081 Gather = convertFromScalableVector(VT, V: Gather, DAG, Subtarget);
4082 return Gather;
4083}
4084
4085static SDValue lowerBuildVectorViaVID(SDValue Op, SelectionDAG &DAG,
4086 const RISCVSubtarget &Subtarget) {
4087 MVT VT = Op.getSimpleValueType();
4088 assert(VT.isFixedLengthVector() && "Unexpected vector!");
4089
4090 MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
4091
4092 SDLoc DL(Op);
4093 auto [Mask, VL] = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
4094
4095 if (auto SimpleVID = isSimpleVIDSequence(Op, EltSizeInBits: Op.getScalarValueSizeInBits())) {
4096 int64_t StepNumerator = SimpleVID->StepNumerator;
4097 unsigned StepDenominator = SimpleVID->StepDenominator;
4098 int64_t Addend = SimpleVID->Addend;
4099
4100 assert(StepNumerator != 0 && "Invalid step");
4101 bool Negate = false;
4102 int64_t SplatStepVal = StepNumerator;
4103 unsigned StepOpcode = ISD::MUL;
4104 // Exclude INT64_MIN to avoid passing it to std::abs. We won't optimize it
4105 // anyway as the shift of 63 won't fit in uimm5.
4106 if (StepNumerator != 1 && StepNumerator != INT64_MIN &&
4107 isPowerOf2_64(Value: std::abs(i: StepNumerator))) {
4108 Negate = StepNumerator < 0;
4109 StepOpcode = ISD::SHL;
4110 SplatStepVal = Log2_64(Value: std::abs(i: StepNumerator));
4111 }
4112
4113 // Only emit VIDs with suitably-small steps. We use imm5 as a threshold
4114 // since it's the immediate value many RVV instructions accept. There is
4115    // no vmul.vi instruction, so ensure the multiply constant can fit in a
4116    // single addi instruction. For the addend, we allow up to 32 bits.
4117 if (((StepOpcode == ISD::MUL && isInt<12>(x: SplatStepVal)) ||
4118 (StepOpcode == ISD::SHL && isUInt<5>(x: SplatStepVal))) &&
4119 isPowerOf2_32(Value: StepDenominator) &&
4120 (SplatStepVal >= 0 || StepDenominator == 1) && isInt<32>(x: Addend)) {
4121 MVT VIDVT =
4122 VT.isFloatingPoint() ? VT.changeVectorElementTypeToInteger() : VT;
4123 MVT VIDContainerVT =
4124 getContainerForFixedLengthVector(DAG, VT: VIDVT, Subtarget);
4125 SDValue VID = DAG.getNode(Opcode: RISCVISD::VID_VL, DL, VT: VIDContainerVT, N1: Mask, N2: VL);
4126 // Convert right out of the scalable type so we can use standard ISD
4127 // nodes for the rest of the computation. If we used scalable types with
4128 // these, we'd lose the fixed-length vector info and generate worse
4129 // vsetvli code.
4130 VID = convertFromScalableVector(VT: VIDVT, V: VID, DAG, Subtarget);
4131 if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
4132 (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
4133 SDValue SplatStep = DAG.getSignedConstant(Val: SplatStepVal, DL, VT: VIDVT);
4134 VID = DAG.getNode(Opcode: StepOpcode, DL, VT: VIDVT, N1: VID, N2: SplatStep);
4135 }
4136 if (StepDenominator != 1) {
4137 SDValue SplatStep =
4138 DAG.getConstant(Val: Log2_64(Value: StepDenominator), DL, VT: VIDVT);
4139 VID = DAG.getNode(Opcode: ISD::SRL, DL, VT: VIDVT, N1: VID, N2: SplatStep);
4140 }
4141 if (Addend != 0 || Negate) {
4142 SDValue SplatAddend = DAG.getSignedConstant(Val: Addend, DL, VT: VIDVT);
4143 VID = DAG.getNode(Opcode: Negate ? ISD::SUB : ISD::ADD, DL, VT: VIDVT, N1: SplatAddend,
4144 N2: VID);
4145 }
4146 if (VT.isFloatingPoint()) {
4147 // TODO: Use vfwcvt to reduce register pressure.
4148 VID = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL, VT, Operand: VID);
4149 }
4150 return VID;
4151 }
4152 }
4153
4154 return SDValue();
4155}
4156
4157/// Try and optimize BUILD_VECTORs with "dominant values" - these are values
4158/// which constitute a large proportion of the elements. In such cases we can
4159/// splat a vector with the dominant element and make up the shortfall with
4160/// INSERT_VECTOR_ELTs. Returns an empty SDValue if not profitable.
4161/// Note that this includes vectors of 2 elements by association. The
4162/// upper-most element is the "dominant" one, allowing us to use a splat to
4163/// "insert" the upper element, and an insert of the lower element at position
4164/// 0, which improves codegen.
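/// As an illustrative example (not necessarily the exact output), a v4i32
/// build_vector <a, a, b, a> would become a splat of 'a' followed by a
/// single insert of 'b' at index 2.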
4165static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG,
4166 const RISCVSubtarget &Subtarget) {
4167 MVT VT = Op.getSimpleValueType();
4168 assert(VT.isFixedLengthVector() && "Unexpected vector!");
4169
4170 MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
4171
4172 SDLoc DL(Op);
4173 auto [Mask, VL] = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
4174
4175 MVT XLenVT = Subtarget.getXLenVT();
4176 unsigned NumElts = Op.getNumOperands();
4177
4178 SDValue DominantValue;
4179 unsigned MostCommonCount = 0;
4180 DenseMap<SDValue, unsigned> ValueCounts;
4181 unsigned NumUndefElts =
4182 count_if(Range: Op->op_values(), P: [](const SDValue &V) { return V.isUndef(); });
4183
4184 // Track the number of scalar loads we know we'd be inserting, estimated as
4185 // any non-zero floating-point constant. Other kinds of element are either
4186 // already in registers or are materialized on demand. The threshold at which
4187  // a vector load is more desirable than several scalar materialization and
4188 // vector-insertion instructions is not known.
4189 unsigned NumScalarLoads = 0;
4190
4191 for (SDValue V : Op->op_values()) {
4192 if (V.isUndef())
4193 continue;
4194
4195 unsigned &Count = ValueCounts[V];
4196 if (0 == Count)
4197 if (auto *CFP = dyn_cast<ConstantFPSDNode>(Val&: V))
4198 NumScalarLoads += !CFP->isExactlyValue(V: +0.0);
4199
4200 // Is this value dominant? In case of a tie, prefer the highest element as
4201 // it's cheaper to insert near the beginning of a vector than it is at the
4202 // end.
4203 if (++Count >= MostCommonCount) {
4204 DominantValue = V;
4205 MostCommonCount = Count;
4206 }
4207 }
4208
4209 assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
4210 unsigned NumDefElts = NumElts - NumUndefElts;
4211 unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
4212
4213 // Don't perform this optimization when optimizing for size, since
4214 // materializing elements and inserting them tends to cause code bloat.
4215 if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
4216 (NumElts != 2 || ISD::isBuildVectorOfConstantSDNodes(N: Op.getNode())) &&
4217 ((MostCommonCount > DominantValueCountThreshold) ||
4218 (ValueCounts.size() <= Log2_32(Value: NumDefElts)))) {
4219 // Start by splatting the most common element.
4220 SDValue Vec = DAG.getSplatBuildVector(VT, DL, Op: DominantValue);
4221
4222 DenseSet<SDValue> Processed{DominantValue};
4223
4224 // We can handle an insert into the last element (of a splat) via
4225 // v(f)slide1down. This is slightly better than the vslideup insert
4226 // lowering as it avoids the need for a vector group temporary. It
4227 // is also better than using vmerge.vx as it avoids the need to
4228 // materialize the mask in a vector register.
4229 if (SDValue LastOp = Op->getOperand(Num: Op->getNumOperands() - 1);
4230 !LastOp.isUndef() && ValueCounts[LastOp] == 1 &&
4231 LastOp != DominantValue) {
4232 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
4233      auto OpCode = VT.isFloatingPoint() ? RISCVISD::VFSLIDE1DOWN_VL
4234                                         : RISCVISD::VSLIDE1DOWN_VL;
4235 if (!VT.isFloatingPoint())
4236 LastOp = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: XLenVT, Operand: LastOp);
4237 Vec = DAG.getNode(Opcode: OpCode, DL, VT: ContainerVT, N1: DAG.getUNDEF(VT: ContainerVT), N2: Vec,
4238 N3: LastOp, N4: Mask, N5: VL);
4239 Vec = convertFromScalableVector(VT, V: Vec, DAG, Subtarget);
4240 Processed.insert(V: LastOp);
4241 }
4242
4243 MVT SelMaskTy = VT.changeVectorElementType(EltVT: MVT::i1);
4244 for (const auto &OpIdx : enumerate(First: Op->ops())) {
4245 const SDValue &V = OpIdx.value();
4246 if (V.isUndef() || !Processed.insert(V).second)
4247 continue;
4248 if (ValueCounts[V] == 1) {
4249 Vec = DAG.getInsertVectorElt(DL, Vec, Elt: V, Idx: OpIdx.index());
4250 } else {
4251 // Blend in all instances of this value using a VSELECT, using a
4252 // mask where each bit signals whether that element is the one
4253 // we're after.
4254 SmallVector<SDValue> Ops;
4255 transform(Range: Op->op_values(), d_first: std::back_inserter(x&: Ops), F: [&](SDValue V1) {
4256 return DAG.getConstant(Val: V == V1, DL, VT: XLenVT);
4257 });
4258 Vec = DAG.getNode(Opcode: ISD::VSELECT, DL, VT,
4259 N1: DAG.getBuildVector(VT: SelMaskTy, DL, Ops),
4260 N2: DAG.getSplatBuildVector(VT, DL, Op: V), N3: Vec);
4261 }
4262 }
4263
4264 return Vec;
4265 }
4266
4267 return SDValue();
4268}
4269
4270static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
4271 const RISCVSubtarget &Subtarget) {
4272 MVT VT = Op.getSimpleValueType();
4273 assert(VT.isFixedLengthVector() && "Unexpected vector!");
4274
4275 MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
4276
4277 SDLoc DL(Op);
4278 auto [Mask, VL] = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
4279
4280 MVT XLenVT = Subtarget.getXLenVT();
4281 unsigned NumElts = Op.getNumOperands();
4282
4283 if (VT.getVectorElementType() == MVT::i1) {
4284 if (ISD::isBuildVectorAllZeros(N: Op.getNode())) {
4285 SDValue VMClr = DAG.getNode(Opcode: RISCVISD::VMCLR_VL, DL, VT: ContainerVT, Operand: VL);
4286 return convertFromScalableVector(VT, V: VMClr, DAG, Subtarget);
4287 }
4288
4289 if (ISD::isBuildVectorAllOnes(N: Op.getNode())) {
4290 SDValue VMSet = DAG.getNode(Opcode: RISCVISD::VMSET_VL, DL, VT: ContainerVT, Operand: VL);
4291 return convertFromScalableVector(VT, V: VMSet, DAG, Subtarget);
4292 }
4293
4294 // Lower constant mask BUILD_VECTORs via an integer vector type, in
4295 // scalar integer chunks whose bit-width depends on the number of mask
4296 // bits and XLEN.
4297 // First, determine the most appropriate scalar integer type to use. This
4298 // is at most XLenVT, but may be shrunk to a smaller vector element type
4299 // according to the size of the final vector - use i8 chunks rather than
4300 // XLenVT if we're producing a v8i1. This results in more consistent
4301 // codegen across RV32 and RV64.
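    // For example (illustrative): a constant v8i1 <1,0,1,1,0,0,1,0> is packed
    // into the single i8 chunk 0b01001101, materialized as a v1i8 build_vector
    // and bitcast back to v8i1.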
4302 unsigned NumViaIntegerBits = std::clamp(val: NumElts, lo: 8u, hi: Subtarget.getXLen());
4303 NumViaIntegerBits = std::min(a: NumViaIntegerBits, b: Subtarget.getELen());
4304 // If we have to use more than one INSERT_VECTOR_ELT then this
4305 // optimization is likely to increase code size; avoid performing it in
4306 // such a case. We can use a load from a constant pool in this case.
4307 if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
4308 return SDValue();
4309 // Now we can create our integer vector type. Note that it may be larger
4310 // than the resulting mask type: v4i1 would use v1i8 as its integer type.
4311 unsigned IntegerViaVecElts = divideCeil(Numerator: NumElts, Denominator: NumViaIntegerBits);
4312 MVT IntegerViaVecVT =
4313 MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: NumViaIntegerBits),
4314 NumElements: IntegerViaVecElts);
4315
4316 uint64_t Bits = 0;
4317 unsigned BitPos = 0, IntegerEltIdx = 0;
4318 SmallVector<SDValue, 8> Elts(IntegerViaVecElts);
4319
4320 for (unsigned I = 0; I < NumElts;) {
4321 SDValue V = Op.getOperand(i: I);
4322 bool BitValue = !V.isUndef() && V->getAsZExtVal();
4323 Bits |= ((uint64_t)BitValue << BitPos);
4324 ++BitPos;
4325 ++I;
4326
4327 // Once we accumulate enough bits to fill our scalar type or process the
4328 // last element, insert into our vector and clear our accumulated data.
4329 if (I % NumViaIntegerBits == 0 || I == NumElts) {
4330 if (NumViaIntegerBits <= 32)
4331 Bits = SignExtend64<32>(x: Bits);
4332 SDValue Elt = DAG.getSignedConstant(Val: Bits, DL, VT: XLenVT);
4333 Elts[IntegerEltIdx] = Elt;
4334 Bits = 0;
4335 BitPos = 0;
4336 IntegerEltIdx++;
4337 }
4338 }
4339
4340 SDValue Vec = DAG.getBuildVector(VT: IntegerViaVecVT, DL, Ops: Elts);
4341
4342 if (NumElts < NumViaIntegerBits) {
4343 // If we're producing a smaller vector than our minimum legal integer
4344 // type, bitcast to the equivalent (known-legal) mask type, and extract
4345 // our final mask.
4346 assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
4347 Vec = DAG.getBitcast(VT: MVT::v8i1, V: Vec);
4348 Vec = DAG.getExtractSubvector(DL, VT, Vec, Idx: 0);
4349 } else {
4350 // Else we must have produced an integer type with the same size as the
4351 // mask type; bitcast for the final result.
4352 assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
4353 Vec = DAG.getBitcast(VT, V: Vec);
4354 }
4355
4356 return Vec;
4357 }
4358
4359 if (SDValue Splat = cast<BuildVectorSDNode>(Val&: Op)->getSplatValue()) {
4360 unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
4361 : RISCVISD::VMV_V_X_VL;
4362 if (!VT.isFloatingPoint())
4363 Splat = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: XLenVT, Operand: Splat);
4364 Splat =
4365 DAG.getNode(Opcode: Opc, DL, VT: ContainerVT, N1: DAG.getUNDEF(VT: ContainerVT), N2: Splat, N3: VL);
4366 return convertFromScalableVector(VT, V: Splat, DAG, Subtarget);
4367 }
4368
4369 // Try and match index sequences, which we can lower to the vid instruction
4370 // with optional modifications. An all-undef vector is matched by
4371 // getSplatValue, above.
4372 if (SDValue Res = lowerBuildVectorViaVID(Op, DAG, Subtarget))
4373 return Res;
4374
4375 // For very small build_vectors, use a single scalar insert of a constant.
4376 // TODO: Base this on constant rematerialization cost, not size.
4377 const unsigned EltBitSize = VT.getScalarSizeInBits();
4378 if (VT.getSizeInBits() <= 32 &&
4379 ISD::isBuildVectorOfConstantSDNodes(N: Op.getNode())) {
4380 MVT ViaIntVT = MVT::getIntegerVT(BitWidth: VT.getSizeInBits());
4381 assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32) &&
4382 "Unexpected sequence type");
4383 // If we can use the original VL with the modified element type, this
4384 // means we only have a VTYPE toggle, not a VL toggle. TODO: Should this
4385 // be moved into InsertVSETVLI?
4386 unsigned ViaVecLen =
4387 (Subtarget.getRealMinVLen() >= VT.getSizeInBits() * NumElts) ? NumElts : 1;
4388 MVT ViaVecVT = MVT::getVectorVT(VT: ViaIntVT, NumElements: ViaVecLen);
4389
4390 uint64_t EltMask = maskTrailingOnes<uint64_t>(N: EltBitSize);
4391 uint64_t SplatValue = 0;
4392 // Construct the amalgamated value at this larger vector type.
4393 for (const auto &OpIdx : enumerate(First: Op->op_values())) {
4394 const auto &SeqV = OpIdx.value();
4395 if (!SeqV.isUndef())
4396 SplatValue |=
4397 ((SeqV->getAsZExtVal() & EltMask) << (OpIdx.index() * EltBitSize));
4398 }
4399
4400 // On RV64, sign-extend from 32 to 64 bits where possible in order to
4401    // achieve better constant materialization.
4402 // On RV32, we need to sign-extend to use getSignedConstant.
4403 if (ViaIntVT == MVT::i32)
4404 SplatValue = SignExtend64<32>(x: SplatValue);
4405
4406 SDValue Vec = DAG.getInsertVectorElt(
4407 DL, Vec: DAG.getUNDEF(VT: ViaVecVT),
4408 Elt: DAG.getSignedConstant(Val: SplatValue, DL, VT: XLenVT), Idx: 0);
4409 if (ViaVecLen != 1)
4410 Vec = DAG.getExtractSubvector(DL, VT: MVT::getVectorVT(VT: ViaIntVT, NumElements: 1), Vec, Idx: 0);
4411 return DAG.getBitcast(VT, V: Vec);
4412 }
4413
4415 // Attempt to detect "hidden" splats, which only reveal themselves as splats
4416 // when re-interpreted as a vector with a larger element type. For example,
4417 // v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
4418 // could be instead splat as
4419 // v2i32 = build_vector i32 0x00010000, i32 0x00010000
4420 // TODO: This optimization could also work on non-constant splats, but it
4421 // would require bit-manipulation instructions to construct the splat value.
4422 SmallVector<SDValue> Sequence;
4423 const auto *BV = cast<BuildVectorSDNode>(Val&: Op);
4424 if (VT.isInteger() && EltBitSize < Subtarget.getELen() &&
4425 ISD::isBuildVectorOfConstantSDNodes(N: Op.getNode()) &&
4426 BV->getRepeatedSequence(Sequence) &&
4427 (Sequence.size() * EltBitSize) <= Subtarget.getELen()) {
4428 unsigned SeqLen = Sequence.size();
4429 MVT ViaIntVT = MVT::getIntegerVT(BitWidth: EltBitSize * SeqLen);
4430 assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
4431 ViaIntVT == MVT::i64) &&
4432 "Unexpected sequence type");
4433
4434 // If we can use the original VL with the modified element type, this
4435 // means we only have a VTYPE toggle, not a VL toggle. TODO: Should this
4436 // be moved into InsertVSETVLI?
4437 const unsigned RequiredVL = NumElts / SeqLen;
4438 const unsigned ViaVecLen =
4439 (Subtarget.getRealMinVLen() >= ViaIntVT.getSizeInBits() * NumElts) ?
4440 NumElts : RequiredVL;
4441 MVT ViaVecVT = MVT::getVectorVT(VT: ViaIntVT, NumElements: ViaVecLen);
4442
4443 unsigned EltIdx = 0;
4444 uint64_t EltMask = maskTrailingOnes<uint64_t>(N: EltBitSize);
4445 uint64_t SplatValue = 0;
4446 // Construct the amalgamated value which can be splatted as this larger
4447 // vector type.
4448 for (const auto &SeqV : Sequence) {
4449 if (!SeqV.isUndef())
4450 SplatValue |=
4451 ((SeqV->getAsZExtVal() & EltMask) << (EltIdx * EltBitSize));
4452 EltIdx++;
4453 }
4454
4455 // On RV64, sign-extend from 32 to 64 bits where possible in order to
4456    // achieve better constant materialization.
4457 // On RV32, we need to sign-extend to use getSignedConstant.
4458 if (ViaIntVT == MVT::i32)
4459 SplatValue = SignExtend64<32>(x: SplatValue);
4460
4461 // Since we can't introduce illegal i64 types at this stage, we can only
4462 // perform an i64 splat on RV32 if it is its own sign-extended value. That
4463 // way we can use RVV instructions to splat.
4464 assert((ViaIntVT.bitsLE(XLenVT) ||
4465 (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
4466 "Unexpected bitcast sequence");
4467 if (ViaIntVT.bitsLE(VT: XLenVT) || isInt<32>(x: SplatValue)) {
4468 SDValue ViaVL =
4469 DAG.getConstant(Val: ViaVecVT.getVectorNumElements(), DL, VT: XLenVT);
4470 MVT ViaContainerVT =
4471 getContainerForFixedLengthVector(DAG, VT: ViaVecVT, Subtarget);
4472 SDValue Splat =
4473 DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ViaContainerVT,
4474 N1: DAG.getUNDEF(VT: ViaContainerVT),
4475 N2: DAG.getSignedConstant(Val: SplatValue, DL, VT: XLenVT), N3: ViaVL);
4476 Splat = convertFromScalableVector(VT: ViaVecVT, V: Splat, DAG, Subtarget);
4477 if (ViaVecLen != RequiredVL)
4478 Splat = DAG.getExtractSubvector(
4479 DL, VT: MVT::getVectorVT(VT: ViaIntVT, NumElements: RequiredVL), Vec: Splat, Idx: 0);
4480 return DAG.getBitcast(VT, V: Splat);
4481 }
4482 }
4483
4484 // If the number of signbits allows, see if we can lower as a <N x i8>.
4485 // Our main goal here is to reduce LMUL (and thus work) required to
4486 // build the constant, but we will also narrow if the resulting
4487 // narrow vector is known to materialize cheaply.
4488 // TODO: We really should be costing the smaller vector. There are
4489 // profitable cases this misses.
4490 if (EltBitSize > 8 && VT.isInteger() &&
4491 (NumElts <= 4 || VT.getSizeInBits() > Subtarget.getRealMinVLen()) &&
4492 DAG.ComputeMaxSignificantBits(Op) <= 8) {
4493 SDValue Source = DAG.getBuildVector(VT: VT.changeVectorElementType(EltVT: MVT::i8),
4494 DL, Ops: Op->ops());
4495 Source = convertToScalableVector(VT: ContainerVT.changeVectorElementType(EltVT: MVT::i8),
4496 V: Source, DAG, Subtarget);
4497 SDValue Res = DAG.getNode(Opcode: RISCVISD::VSEXT_VL, DL, VT: ContainerVT, N1: Source, N2: Mask, N3: VL);
4498 return convertFromScalableVector(VT, V: Res, DAG, Subtarget);
4499 }
4500
4501 if (SDValue Res = lowerBuildVectorViaDominantValues(Op, DAG, Subtarget))
4502 return Res;
4503
4504 // For constant vectors, use generic constant pool lowering. Otherwise,
4505 // we'd have to materialize constants in GPRs just to move them into the
4506 // vector.
4507 return SDValue();
4508}
4509
4510static unsigned getPACKOpcode(unsigned DestBW,
4511 const RISCVSubtarget &Subtarget) {
4512 switch (DestBW) {
4513 default:
4514 llvm_unreachable("Unsupported pack size");
4515 case 16:
4516 return RISCV::PACKH;
4517 case 32:
4518 return Subtarget.is64Bit() ? RISCV::PACKW : RISCV::PACK;
4519 case 64:
4520 assert(Subtarget.is64Bit());
4521 return RISCV::PACK;
4522 }
4523}
4524
4525/// Double the element size of the build vector to reduce the number
4526/// of vslide1down in the build vector chain. In the worst case, this
4527/// trades three scalar operations for one vector operation. Scalar
4528/// operations are generally lower latency, and for out-of-order cores
4529/// we also benefit from additional parallelism.
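/// For example (an illustrative sketch, assuming the Zbb/Zba requirement
/// below is met and the element type is narrow enough): a v4i16
/// build_vector {a, b, c, d} becomes the v2i32 build_vector
/// {pack(a, b), pack(c, d)} followed by a bitcast back to v4i16, halving
/// the length of the vslide1down chain.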
4530static SDValue lowerBuildVectorViaPacking(SDValue Op, SelectionDAG &DAG,
4531 const RISCVSubtarget &Subtarget) {
4532 SDLoc DL(Op);
4533 MVT VT = Op.getSimpleValueType();
4534 assert(VT.isFixedLengthVector() && "Unexpected vector!");
4535 MVT ElemVT = VT.getVectorElementType();
4536 if (!ElemVT.isInteger())
4537 return SDValue();
4538
4539 // TODO: Relax these architectural restrictions, possibly with costing
4540 // of the actual instructions required.
4541 if (!Subtarget.hasStdExtZbb() || !Subtarget.hasStdExtZba())
4542 return SDValue();
4543
4544 unsigned NumElts = VT.getVectorNumElements();
4545 unsigned ElemSizeInBits = ElemVT.getSizeInBits();
4546 if (ElemSizeInBits >= std::min(a: Subtarget.getELen(), b: Subtarget.getXLen()) ||
4547 NumElts % 2 != 0)
4548 return SDValue();
4549
4550 // Produce [B,A] packed into a type twice as wide. Note that all
4551 // scalars are XLenVT, possibly masked (see below).
4552 MVT XLenVT = Subtarget.getXLenVT();
4553 SDValue Mask = DAG.getConstant(
4554 Val: APInt::getLowBitsSet(numBits: XLenVT.getSizeInBits(), loBitsSet: ElemSizeInBits), DL, VT: XLenVT);
4555 auto pack = [&](SDValue A, SDValue B) {
4556 // Bias the scheduling of the inserted operations to near the
4557 // definition of the element - this tends to reduce register
4558 // pressure overall.
4559 SDLoc ElemDL(B);
4560 if (Subtarget.hasStdExtZbkb())
4561 // Note that we're relying on the high bits of the result being
4562 // don't care. For PACKW, the result is *sign* extended.
4563 return SDValue(
4564 DAG.getMachineNode(Opcode: getPACKOpcode(DestBW: ElemSizeInBits * 2, Subtarget),
4565 dl: ElemDL, VT: XLenVT, Op1: A, Op2: B),
4566 0);
4567
4568 A = DAG.getNode(Opcode: ISD::AND, DL: SDLoc(A), VT: XLenVT, N1: A, N2: Mask);
4569 B = DAG.getNode(Opcode: ISD::AND, DL: SDLoc(B), VT: XLenVT, N1: B, N2: Mask);
4570 SDValue ShtAmt = DAG.getConstant(Val: ElemSizeInBits, DL: ElemDL, VT: XLenVT);
4571 return DAG.getNode(Opcode: ISD::OR, DL: ElemDL, VT: XLenVT, N1: A,
4572 N2: DAG.getNode(Opcode: ISD::SHL, DL: ElemDL, VT: XLenVT, N1: B, N2: ShtAmt),
4573 Flags: SDNodeFlags::Disjoint);
4574 };
4575
4576 SmallVector<SDValue> NewOperands;
4577 NewOperands.reserve(N: NumElts / 2);
4578 for (unsigned i = 0; i < VT.getVectorNumElements(); i += 2)
4579 NewOperands.push_back(Elt: pack(Op.getOperand(i), Op.getOperand(i: i + 1)));
4580 assert(NumElts == NewOperands.size() * 2);
4581 MVT WideVT = MVT::getIntegerVT(BitWidth: ElemSizeInBits * 2);
4582 MVT WideVecVT = MVT::getVectorVT(VT: WideVT, NumElements: NumElts / 2);
4583 return DAG.getNode(Opcode: ISD::BITCAST, DL, VT,
4584 Operand: DAG.getBuildVector(VT: WideVecVT, DL, Ops: NewOperands));
4585}
4586
4587static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
4588 const RISCVSubtarget &Subtarget) {
4589 MVT VT = Op.getSimpleValueType();
4590 assert(VT.isFixedLengthVector() && "Unexpected vector!");
4591
4592 MVT EltVT = VT.getVectorElementType();
4593 MVT XLenVT = Subtarget.getXLenVT();
4594
4595 SDLoc DL(Op);
4596
4597 if (Subtarget.isRV32() && Subtarget.enablePExtSIMDCodeGen()) {
4598 if (VT != MVT::v4i8)
4599 return SDValue();
4600
4601 // <4 x i8> BUILD_VECTOR a, b, c, d -> PACK(PPACK.DH pair(a, b), pair(c, d))
4602 SDValue Val0 =
4603 DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL, VT: MVT::v4i8, Operand: Op->getOperand(Num: 0));
4604 SDValue Val1 =
4605 DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL, VT: MVT::v4i8, Operand: Op->getOperand(Num: 1));
4606 SDValue Val2 =
4607 DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL, VT: MVT::v4i8, Operand: Op->getOperand(Num: 2));
4608 SDValue Val3 =
4609 DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL, VT: MVT::v4i8, Operand: Op->getOperand(Num: 3));
4610 SDValue PackDH =
4611 DAG.getNode(Opcode: RISCVISD::PPACK_DH, DL, ResultTys: {MVT::v2i16, MVT::v2i16},
4612 Ops: {Val0, Val1, Val2, Val3});
4613
4614 return DAG.getNode(
4615 Opcode: ISD::BITCAST, DL, VT: MVT::v4i8,
4616 Operand: SDValue(
4617 DAG.getMachineNode(
4618 Opcode: RISCV::PACK, dl: DL, VT: MVT::i32,
4619 Ops: {DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::i32, Operand: PackDH.getValue(R: 0)),
4620 DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::i32, Operand: PackDH.getValue(R: 1))}),
4621 0));
4622 }
4623
4624 // Proper support for f16 requires Zvfh. bf16 always requires special
4625 // handling. We need to cast the scalar to integer and create an integer
4626 // build_vector.
4627 if ((EltVT == MVT::f16 && !Subtarget.hasStdExtZvfh()) ||
4628 (EltVT == MVT::bf16 && !Subtarget.hasVInstructionsBF16())) {
4629 MVT IVT = VT.changeVectorElementType(EltVT: MVT::i16);
4630 SmallVector<SDValue, 16> NewOps(Op.getNumOperands());
4631 for (const auto &[I, U] : enumerate(First: Op->ops())) {
4632 SDValue Elem = U.get();
4633 if ((EltVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin()) ||
4634 (EltVT == MVT::f16 && Subtarget.hasStdExtZfhmin())) {
4635 // Called by LegalizeDAG, we need to use XLenVT operations since we
4636 // can't create illegal types.
4637 if (auto *C = dyn_cast<ConstantFPSDNode>(Val&: Elem)) {
4638 // Manually constant fold so the integer build_vector can be lowered
4639 // better. Waiting for DAGCombine will be too late.
4640 APInt V =
4641 C->getValueAPF().bitcastToAPInt().sext(width: XLenVT.getSizeInBits());
4642 NewOps[I] = DAG.getConstant(Val: V, DL, VT: XLenVT);
4643 } else {
4644 NewOps[I] = DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTH, DL, VT: XLenVT, Operand: Elem);
4645 }
4646 } else {
4647 // Called by scalar type legalizer, we can use i16.
4648 NewOps[I] = DAG.getBitcast(VT: MVT::i16, V: Op.getOperand(i: I));
4649 }
4650 }
4651 SDValue Res = DAG.getNode(Opcode: ISD::BUILD_VECTOR, DL, VT: IVT, Ops: NewOps);
4652 return DAG.getBitcast(VT, V: Res);
4653 }
4654
4655 if (ISD::isBuildVectorOfConstantSDNodes(N: Op.getNode()) ||
4656 ISD::isBuildVectorOfConstantFPSDNodes(N: Op.getNode()))
4657 return lowerBuildVectorOfConstants(Op, DAG, Subtarget);
4658
4659 MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
4660
4661 auto [Mask, VL] = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
4662
4663 if (VT.getVectorElementType() == MVT::i1) {
4664 // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
4665 // vector type, we have a legal equivalently-sized i8 type, so we can use
4666 // that.
4667 MVT WideVecVT = VT.changeVectorElementType(EltVT: MVT::i8);
4668 SDValue VecZero = DAG.getConstant(Val: 0, DL, VT: WideVecVT);
4669
4670 SDValue WideVec;
4671 if (SDValue Splat = cast<BuildVectorSDNode>(Val&: Op)->getSplatValue()) {
4672 // For a splat, perform a scalar truncate before creating the wider
4673 // vector.
4674 Splat = DAG.getNode(Opcode: ISD::AND, DL, VT: Splat.getValueType(), N1: Splat,
4675 N2: DAG.getConstant(Val: 1, DL, VT: Splat.getValueType()));
4676 WideVec = DAG.getSplatBuildVector(VT: WideVecVT, DL, Op: Splat);
4677 } else {
4678 SmallVector<SDValue, 8> Ops(Op->op_values());
4679 WideVec = DAG.getBuildVector(VT: WideVecVT, DL, Ops);
4680 SDValue VecOne = DAG.getConstant(Val: 1, DL, VT: WideVecVT);
4681 WideVec = DAG.getNode(Opcode: ISD::AND, DL, VT: WideVecVT, N1: WideVec, N2: VecOne);
4682 }
4683
4684 return DAG.getSetCC(DL, VT, LHS: WideVec, RHS: VecZero, Cond: ISD::SETNE);
4685 }
4686
4687 if (SDValue Splat = cast<BuildVectorSDNode>(Val&: Op)->getSplatValue()) {
4688 if (auto Gather = matchSplatAsGather(SplatVal: Splat, VT, DL, DAG, Subtarget))
4689 return Gather;
4690
4691 // Prefer vmv.s.x/vfmv.s.f if legal to reduce work and register
4692 // pressure at high LMUL.
4693 if (all_of(Range: Op->ops().drop_front(),
4694 P: [](const SDUse &U) { return U.get().isUndef(); })) {
4695 unsigned Opc =
4696 VT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4697 if (!VT.isFloatingPoint())
4698 Splat = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: XLenVT, Operand: Splat);
4699 Splat = DAG.getNode(Opcode: Opc, DL, VT: ContainerVT, N1: DAG.getUNDEF(VT: ContainerVT),
4700 N2: Splat, N3: VL);
4701 return convertFromScalableVector(VT, V: Splat, DAG, Subtarget);
4702 }
4703
4704 unsigned Opc =
4705 VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
4706 if (!VT.isFloatingPoint())
4707 Splat = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: XLenVT, Operand: Splat);
4708 Splat =
4709 DAG.getNode(Opcode: Opc, DL, VT: ContainerVT, N1: DAG.getUNDEF(VT: ContainerVT), N2: Splat, N3: VL);
4710 return convertFromScalableVector(VT, V: Splat, DAG, Subtarget);
4711 }
4712
4713 if (SDValue Res = lowerBuildVectorViaDominantValues(Op, DAG, Subtarget))
4714 return Res;
4715
4716 // If we're compiling for an exact VLEN value, we can split our work per
4717 // register in the register group.
4718 if (const auto VLen = Subtarget.getRealVLen();
4719 VLen && VT.getSizeInBits().getKnownMinValue() > *VLen) {
4720 MVT ElemVT = VT.getVectorElementType();
4721 unsigned ElemsPerVReg = *VLen / ElemVT.getFixedSizeInBits();
4722 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
4723 MVT OneRegVT = MVT::getVectorVT(VT: ElemVT, NumElements: ElemsPerVReg);
4724 MVT M1VT = getContainerForFixedLengthVector(DAG, VT: OneRegVT, Subtarget);
4725 assert(M1VT == RISCVTargetLowering::getM1VT(M1VT));
4726
4727 // The following semantically builds up a fixed length concat_vector
4728 // of the component build_vectors. We eagerly lower to scalable and
4729 // insert_subvector here to avoid DAG combining it back to a large
4730 // build_vector.
4731 SmallVector<SDValue> BuildVectorOps(Op->ops());
4732 unsigned NumOpElts = M1VT.getVectorMinNumElements();
4733 SDValue Vec = DAG.getUNDEF(VT: ContainerVT);
4734 for (unsigned i = 0; i < VT.getVectorNumElements(); i += ElemsPerVReg) {
4735 auto OneVRegOfOps = ArrayRef(BuildVectorOps).slice(N: i, M: ElemsPerVReg);
4736 SDValue SubBV =
4737 DAG.getNode(Opcode: ISD::BUILD_VECTOR, DL, VT: OneRegVT, Ops: OneVRegOfOps);
4738 SubBV = convertToScalableVector(VT: M1VT, V: SubBV, DAG, Subtarget);
4739 unsigned InsertIdx = (i / ElemsPerVReg) * NumOpElts;
4740 Vec = DAG.getInsertSubvector(DL, Vec, SubVec: SubBV, Idx: InsertIdx);
4741 }
4742 return convertFromScalableVector(VT, V: Vec, DAG, Subtarget);
4743 }
4744
4745 // If we're about to resort to vslide1down (or stack usage), pack our
4746 // elements into the widest scalar type we can. This will force a VL/VTYPE
4747 // toggle, but reduces the critical path, the number of vslide1down ops
4748 // required, and possibly enables scalar folds of the values.
4749 if (SDValue Res = lowerBuildVectorViaPacking(Op, DAG, Subtarget))
4750 return Res;
4751
4752 // For m1 vectors, if we have non-undef values in both halves of our vector,
4753 // split the vector into low and high halves, build them separately, then
4754 // use a vselect to combine them. For long vectors, this cuts the critical
4755 // path of the vslide1down sequence in half, and gives us an opportunity
4756 // to special case each half independently. Note that we don't change the
4757 // length of the sub-vectors here, so if both fallback to the generic
4758 // vslide1down path, we should be able to fold the vselect into the final
4759 // vslidedown (for the undef tail) for the first half w/ masking.
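  // For example (illustrative): a fully-defined v8i32 build_vector would be
  // built as <a,b,c,d,undef,undef,undef,undef> and
  // <undef,undef,undef,undef,e,f,g,h>, then recombined with a vselect on the
  // mask <1,1,1,1,0,0,0,0>.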
4760 unsigned NumElts = VT.getVectorNumElements();
4761 unsigned NumUndefElts =
4762 count_if(Range: Op->op_values(), P: [](const SDValue &V) { return V.isUndef(); });
4763 unsigned NumDefElts = NumElts - NumUndefElts;
4764 if (NumDefElts >= 8 && NumDefElts > NumElts / 2 &&
4765 ContainerVT.bitsLE(VT: RISCVTargetLowering::getM1VT(VT: ContainerVT))) {
4766 SmallVector<SDValue> SubVecAOps, SubVecBOps;
4767 SmallVector<SDValue> MaskVals;
4768 SDValue UndefElem = DAG.getUNDEF(VT: Op->getOperand(Num: 0)->getValueType(ResNo: 0));
4769 SubVecAOps.reserve(N: NumElts);
4770 SubVecBOps.reserve(N: NumElts);
4771 for (const auto &[Idx, U] : enumerate(First: Op->ops())) {
4772 SDValue Elem = U.get();
4773 if (Idx < NumElts / 2) {
4774 SubVecAOps.push_back(Elt: Elem);
4775 SubVecBOps.push_back(Elt: UndefElem);
4776 } else {
4777 SubVecAOps.push_back(Elt: UndefElem);
4778 SubVecBOps.push_back(Elt: Elem);
4779 }
4780 bool SelectMaskVal = (Idx < NumElts / 2);
4781 MaskVals.push_back(Elt: DAG.getConstant(Val: SelectMaskVal, DL, VT: XLenVT));
4782 }
4783 assert(SubVecAOps.size() == NumElts && SubVecBOps.size() == NumElts &&
4784 MaskVals.size() == NumElts);
4785
4786 SDValue SubVecA = DAG.getBuildVector(VT, DL, Ops: SubVecAOps);
4787 SDValue SubVecB = DAG.getBuildVector(VT, DL, Ops: SubVecBOps);
4788 MVT MaskVT = MVT::getVectorVT(VT: MVT::i1, NumElements: NumElts);
4789 SDValue SelectMask = DAG.getBuildVector(VT: MaskVT, DL, Ops: MaskVals);
4790 return DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: SelectMask, N2: SubVecA, N3: SubVecB);
4791 }
4792
4793 // Cap the cost at a value linear to the number of elements in the vector.
4794 // The default lowering is to use the stack. The vector store + scalar loads
4795 // is linear in VL. However, at high lmuls vslide1down and vslidedown end up
4796 // being (at least) linear in LMUL. As a result, using the vslidedown
4797  // lowering for every element ends up being VL*LMUL.
4798 // TODO: Should we be directly costing the stack alternative? Doing so might
4799 // give us a more accurate upper bound.
4800 InstructionCost LinearBudget = VT.getVectorNumElements() * 2;
4801
4802 // TODO: unify with TTI getSlideCost.
4803 InstructionCost PerSlideCost = 1;
4804 switch (RISCVTargetLowering::getLMUL(VT: ContainerVT)) {
4805 default: break;
4806 case RISCVVType::LMUL_2:
4807 PerSlideCost = 2;
4808 break;
4809 case RISCVVType::LMUL_4:
4810 PerSlideCost = 4;
4811 break;
4812 case RISCVVType::LMUL_8:
4813 PerSlideCost = 8;
4814 break;
4815 }
4816
4817 // TODO: Should we be using the build instseq then cost + evaluate scheme
4818 // we use for integer constants here?
4819 unsigned UndefCount = 0;
4820 for (const SDValue &V : Op->ops()) {
4821 if (V.isUndef()) {
4822 UndefCount++;
4823 continue;
4824 }
4825 if (UndefCount) {
4826 LinearBudget -= PerSlideCost;
4827 UndefCount = 0;
4828 }
4829 LinearBudget -= PerSlideCost;
4830 }
4831 if (UndefCount) {
4832 LinearBudget -= PerSlideCost;
4833 }
4834
4835 if (LinearBudget < 0)
4836 return SDValue();
4837
4838 assert((!VT.isFloatingPoint() ||
4839 VT.getVectorElementType().getSizeInBits() <= Subtarget.getFLen()) &&
4840 "Illegal type which will result in reserved encoding");
4841
4842 const unsigned Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
4843
4844 // General case: splat the first operand and slide other operands down one
4845 // by one to form a vector. Alternatively, if every operand is an
4846 // extraction from element 0 of a vector, we use that vector from the last
4847  // extraction as the start value and slide up instead of down, so that (1)
4848  // we avoid the initial splat and (2) we can turn those vslide1up ops into a
4849  // vslideup of 1 later and eliminate the vector-to-scalar movement, which is
4850 // something we cannot do with vslide1down/vslidedown.
4851 // Of course, using vslide1up/vslideup might increase the register pressure,
4852 // and that's why we conservatively limit to cases where every operand is an
4853 // extraction from the first element.
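  // For example (illustrative): for
  //   <(extractelt %a, 0), (extractelt %b, 0), (extractelt %c, 0)>
  // we start from %c (whose lane 0 already holds the last element) and
  // vslide1up the scalars extracted from %b and %a in turn, forming the
  // result without an initial splat.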
4854 SmallVector<SDValue> Operands(Op->op_begin(), Op->op_end());
4855 SDValue EVec;
4856 bool SlideUp = false;
4857 auto getVSlide = [&](EVT ContainerVT, SDValue Passthru, SDValue Vec,
4858 SDValue Offset, SDValue Mask, SDValue VL) -> SDValue {
4859 if (SlideUp)
4860 return getVSlideup(DAG, Subtarget, DL, VT: ContainerVT, Passthru, Op: Vec, Offset,
4861 Mask, VL, Policy);
4862 return getVSlidedown(DAG, Subtarget, DL, VT: ContainerVT, Passthru, Op: Vec, Offset,
4863 Mask, VL, Policy);
4864 };
4865
4866  // The reason we don't use all_of here is that we're also capturing EVec
4867 // from the last non-undef operand. If the std::execution_policy of the
4868 // underlying std::all_of is anything but std::sequenced_policy we might
4869 // capture the wrong EVec.
4870 for (SDValue V : Operands) {
4871 using namespace SDPatternMatch;
4872 SlideUp = V.isUndef() || sd_match(N: V, P: m_ExtractElt(Vec: m_Value(N&: EVec), Idx: m_Zero()));
4873 if (!SlideUp)
4874 break;
4875 }
4876
4877 // Do not slideup if the element type of EVec is different.
4878 if (SlideUp) {
4879 MVT EVecEltVT = EVec.getSimpleValueType().getVectorElementType();
4880 MVT ContainerEltVT = ContainerVT.getVectorElementType();
4881 if (EVecEltVT != ContainerEltVT)
4882 SlideUp = false;
4883 }
4884
4885 if (SlideUp) {
4886 MVT EVecContainerVT = EVec.getSimpleValueType();
4887 // Make sure the original vector has scalable vector type.
4888 if (EVecContainerVT.isFixedLengthVector()) {
4889 EVecContainerVT =
4890 getContainerForFixedLengthVector(DAG, VT: EVecContainerVT, Subtarget);
4891 EVec = convertToScalableVector(VT: EVecContainerVT, V: EVec, DAG, Subtarget);
4892 }
4893
4894 // Adapt EVec's type into ContainerVT.
4895 if (EVecContainerVT.getVectorMinNumElements() <
4896 ContainerVT.getVectorMinNumElements())
4897 EVec = DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT: ContainerVT), SubVec: EVec, Idx: 0);
4898 else
4899 EVec = DAG.getExtractSubvector(DL, VT: ContainerVT, Vec: EVec, Idx: 0);
4900
4901 // Reverse the elements as we're going to slide up from the last element.
4902 std::reverse(first: Operands.begin(), last: Operands.end());
4903 }
4904
4905 SDValue Vec;
4906 UndefCount = 0;
4907 for (SDValue V : Operands) {
4908 if (V.isUndef()) {
4909 UndefCount++;
4910 continue;
4911 }
4912
4913 // Start our sequence with either a TA splat or extract source in the
4914 // hopes that hardware is able to recognize there's no dependency on the
4915 // prior value of our temporary register.
4916 if (!Vec) {
4917 if (SlideUp) {
4918 Vec = EVec;
4919 } else {
4920 Vec = DAG.getSplatVector(VT, DL, Op: V);
4921 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
4922 }
4923
4924 UndefCount = 0;
4925 continue;
4926 }
4927
4928 if (UndefCount) {
4929 const SDValue Offset = DAG.getConstant(Val: UndefCount, DL, VT: Subtarget.getXLenVT());
4930 Vec = getVSlide(ContainerVT, DAG.getUNDEF(VT: ContainerVT), Vec, Offset, Mask,
4931 VL);
4932 UndefCount = 0;
4933 }
4934
4935 unsigned Opcode;
4936 if (VT.isFloatingPoint())
4937 Opcode = SlideUp ? RISCVISD::VFSLIDE1UP_VL : RISCVISD::VFSLIDE1DOWN_VL;
4938 else
4939 Opcode = SlideUp ? RISCVISD::VSLIDE1UP_VL : RISCVISD::VSLIDE1DOWN_VL;
4940
4941 if (!VT.isFloatingPoint())
4942 V = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: Subtarget.getXLenVT(), Operand: V);
4943 Vec = DAG.getNode(Opcode, DL, VT: ContainerVT, N1: DAG.getUNDEF(VT: ContainerVT), N2: Vec,
4944 N3: V, N4: Mask, N5: VL);
4945 }
4946 if (UndefCount) {
4947 const SDValue Offset = DAG.getConstant(Val: UndefCount, DL, VT: Subtarget.getXLenVT());
4948 Vec = getVSlide(ContainerVT, DAG.getUNDEF(VT: ContainerVT), Vec, Offset, Mask,
4949 VL);
4950 }
4951 return convertFromScalableVector(VT, V: Vec, DAG, Subtarget);
4952}
4953
4954static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
4955 SDValue Lo, SDValue Hi, SDValue VL,
4956 SelectionDAG &DAG) {
4957 if (!Passthru)
4958 Passthru = DAG.getUNDEF(VT);
4959 if (isa<ConstantSDNode>(Val: Lo) && isa<ConstantSDNode>(Val: Hi)) {
4960 int32_t LoC = cast<ConstantSDNode>(Val&: Lo)->getSExtValue();
4961 int32_t HiC = cast<ConstantSDNode>(Val&: Hi)->getSExtValue();
4962 // If Hi constant is all the same sign bit as Lo, lower this as a custom
4963 // node in order to try and match RVV vector/scalar instructions.
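    // For example (illustrative): Lo = -2, Hi = -1 satisfies (LoC >> 31) ==
    // HiC, so the i64 splat of -2 can be emitted as a single vmv.v.x of the
    // 32-bit value -2, relying on its sign extension.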
4964 if ((LoC >> 31) == HiC)
4965 return DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT, N1: Passthru, N2: Lo, N3: VL);
4966
4967 // Use vmv.v.x with EEW=32. Use either a vsetivli or vsetvli to change
4968    // VL. This can temporarily increase VL if VL is less than VLMAX.
4969 if (LoC == HiC) {
4970 SDValue NewVL;
4971 if (isa<ConstantSDNode>(Val: VL) && isUInt<4>(x: VL->getAsZExtVal()))
4972 NewVL = DAG.getNode(Opcode: ISD::ADD, DL, VT: VL.getValueType(), N1: VL, N2: VL);
4973 else
4974 NewVL = DAG.getRegister(Reg: RISCV::X0, VT: MVT::i32);
4975 MVT InterVT =
4976 MVT::getVectorVT(VT: MVT::i32, EC: VT.getVectorElementCount() * 2);
4977 auto InterVec = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: InterVT,
4978 N1: DAG.getUNDEF(VT: InterVT), N2: Lo, N3: NewVL);
4979 return DAG.getNode(Opcode: ISD::BITCAST, DL, VT, Operand: InterVec);
4980 }
4981 }
4982
4983 // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4984 if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(i: 0) == Lo &&
4985 isa<ConstantSDNode>(Val: Hi.getOperand(i: 1)) &&
4986 Hi.getConstantOperandVal(i: 1) == 31)
4987 return DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT, N1: Passthru, N2: Lo, N3: VL);
4988
4989 // If the hi bits of the splat are undefined, then it's fine to just splat Lo
4990 // even if it might be sign extended.
4991 if (Hi.isUndef())
4992 return DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT, N1: Passthru, N2: Lo, N3: VL);
4993
4994 // Fall back to a stack store and stride x0 vector load.
4995 return DAG.getNode(Opcode: RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, N1: Passthru, N2: Lo,
4996 N3: Hi, N4: VL);
4997}
4998
4999// Called by type legalization to handle splat of i64 on RV32.
5000// FIXME: We can optimize this when the type has sign or zero bits in one
5001// of the halves.
5002static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
5003 SDValue Scalar, SDValue VL,
5004 SelectionDAG &DAG) {
5005 assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
5006 SDValue Lo, Hi;
5007 std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Scalar, DL, LoVT: MVT::i32, HiVT: MVT::i32);
5008 return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
5009}
5010
5011// This function lowers a splat of a scalar operand Scalar with the vector
5012// length VL. It ensures the final sequence is type legal, which is useful when
5013// lowering a splat after type legalization.
5014static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
5015 MVT VT, const SDLoc &DL, SelectionDAG &DAG,
5016 const RISCVSubtarget &Subtarget) {
5017 bool HasPassthru = Passthru && !Passthru.isUndef();
5018 if (!HasPassthru && !Passthru)
5019 Passthru = DAG.getUNDEF(VT);
5020
5021 MVT EltVT = VT.getVectorElementType();
5022 MVT XLenVT = Subtarget.getXLenVT();
5023
5024 if (VT.isFloatingPoint()) {
5025 if ((EltVT == MVT::f16 && !Subtarget.hasStdExtZvfh()) ||
5026 (EltVT == MVT::bf16 && !Subtarget.hasVInstructionsBF16())) {
5027 if ((EltVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin()) ||
5028 (EltVT == MVT::f16 && Subtarget.hasStdExtZfhmin()))
5029 Scalar = DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTH, DL, VT: XLenVT, Operand: Scalar);
5030 else
5031 Scalar = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::i16, Operand: Scalar);
5032 MVT IVT = VT.changeVectorElementType(EltVT: MVT::i16);
5033 Passthru = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: IVT, Operand: Passthru);
5034 SDValue Splat =
5035 lowerScalarSplat(Passthru, Scalar, VL, VT: IVT, DL, DAG, Subtarget);
5036 return DAG.getNode(Opcode: ISD::BITCAST, DL, VT, Operand: Splat);
5037 }
5038 return DAG.getNode(Opcode: RISCVISD::VFMV_V_F_VL, DL, VT, N1: Passthru, N2: Scalar, N3: VL);
5039 }
5040
5041 // Simplest case is that the operand needs to be promoted to XLenVT.
5042 if (Scalar.getValueType().bitsLE(VT: XLenVT)) {
5043 // If the operand is a constant, sign extend to increase our chances
5044 // of being able to use a .vi instruction. ANY_EXTEND would become a
5045    // zero extend and the simm5 check in isel would fail.
5046 // FIXME: Should we ignore the upper bits in isel instead?
5047 unsigned ExtOpc =
5048 isa<ConstantSDNode>(Val: Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
5049 Scalar = DAG.getNode(Opcode: ExtOpc, DL, VT: XLenVT, Operand: Scalar);
5050 return DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT, N1: Passthru, N2: Scalar, N3: VL);
5051 }
5052
5053 assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
5054 "Unexpected scalar for splat lowering!");
5055
5056 if (isOneConstant(V: VL) && isNullConstant(V: Scalar))
5057 return DAG.getNode(Opcode: RISCVISD::VMV_S_X_VL, DL, VT, N1: Passthru,
5058 N2: DAG.getConstant(Val: 0, DL, VT: XLenVT), N3: VL);
5059
5060 // Otherwise use the more complicated splatting algorithm.
5061 return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
5062}
5063
5064// This function lowers an insert of a scalar operand Scalar into lane
5065// 0 of the vector regardless of the value of VL. The contents of the
5066// remaining lanes of the result vector are unspecified. VL is assumed
5067// to be non-zero.
5068static SDValue lowerScalarInsert(SDValue Scalar, SDValue VL, MVT VT,
5069 const SDLoc &DL, SelectionDAG &DAG,
5070 const RISCVSubtarget &Subtarget) {
5071 assert(VT.isScalableVector() && "Expect VT is scalable vector type.");
5072
5073 const MVT XLenVT = Subtarget.getXLenVT();
5074 SDValue Passthru = DAG.getUNDEF(VT);
5075
5076 if (Scalar.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5077 isNullConstant(V: Scalar.getOperand(i: 1))) {
5078 SDValue ExtractedVal = Scalar.getOperand(i: 0);
5079 // The element types must be the same.
5080 if (ExtractedVal.getValueType().getVectorElementType() ==
5081 VT.getVectorElementType()) {
5082 MVT ExtractedVT = ExtractedVal.getSimpleValueType();
5083 MVT ExtractedContainerVT = ExtractedVT;
5084 if (ExtractedContainerVT.isFixedLengthVector()) {
5085 ExtractedContainerVT = getContainerForFixedLengthVector(
5086 DAG, VT: ExtractedContainerVT, Subtarget);
5087 ExtractedVal = convertToScalableVector(VT: ExtractedContainerVT,
5088 V: ExtractedVal, DAG, Subtarget);
5089 }
5090 if (ExtractedContainerVT.bitsLE(VT))
5091 return DAG.getInsertSubvector(DL, Vec: Passthru, SubVec: ExtractedVal, Idx: 0);
5092 return DAG.getExtractSubvector(DL, VT, Vec: ExtractedVal, Idx: 0);
5093 }
5094 }
5095
5096 if (VT.isFloatingPoint())
5097 return DAG.getNode(Opcode: RISCVISD::VFMV_S_F_VL, DL, VT, N1: DAG.getUNDEF(VT), N2: Scalar,
5098 N3: VL);
5099
5100 // Avoid the tricky legalization cases by falling back to using the
5101 // splat code which already handles it gracefully.
5102 if (!Scalar.getValueType().bitsLE(VT: XLenVT))
5103 return lowerScalarSplat(Passthru: DAG.getUNDEF(VT), Scalar,
5104 VL: DAG.getConstant(Val: 1, DL, VT: XLenVT),
5105 VT, DL, DAG, Subtarget);
5106
5107 // If the operand is a constant, sign extend to increase our chances
5108 // of being able to use a .vi instruction. ANY_EXTEND would become a
5109  // zero extend and the simm5 check in isel would fail.
5110 // FIXME: Should we ignore the upper bits in isel instead?
5111 unsigned ExtOpc =
5112 isa<ConstantSDNode>(Val: Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
5113 Scalar = DAG.getNode(Opcode: ExtOpc, DL, VT: XLenVT, Operand: Scalar);
5114 return DAG.getNode(Opcode: RISCVISD::VMV_S_X_VL, DL, VT, N1: DAG.getUNDEF(VT), N2: Scalar,
5115 N3: VL);
5116}
5117
5118/// If concat_vector(V1,V2) could be folded away to some existing
5119/// vector source, return it. Note that the source may be larger
5120/// than the requested concat_vector (i.e., an extract_subvector
5121/// might be required).
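/// For example (illustrative): concat_vectors (extract_subvector %src, 0),
/// (extract_subvector %src, 4) with v4i32 operands folds back to %src, which
/// may itself be v8i32 or wider.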
5122static SDValue foldConcatVector(SDValue V1, SDValue V2) {
5123 EVT VT = V1.getValueType();
5124 assert(VT == V2.getValueType() && "argument types must match");
5125  // Both inputs must be extracts.
5126 if (V1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
5127 V2.getOpcode() != ISD::EXTRACT_SUBVECTOR)
5128 return SDValue();
5129
5130 // Extracting from the same source.
5131 SDValue Src = V1.getOperand(i: 0);
5132 if (Src != V2.getOperand(i: 0) ||
5133 VT.isScalableVector() != Src.getValueType().isScalableVector())
5134 return SDValue();
5135
5136 // The extracts must extract the two halves of the source.
5137 if (V1.getConstantOperandVal(i: 1) != 0 ||
5138 V2.getConstantOperandVal(i: 1) != VT.getVectorMinNumElements())
5139 return SDValue();
5140
5141 return Src;
5142}
5143
5144// Can this shuffle be performed on exactly one (possibly larger) input?
5145static SDValue getSingleShuffleSrc(MVT VT, SDValue V1, SDValue V2) {
5146
5147 if (V2.isUndef())
5148 return V1;
5149
5150 unsigned NumElts = VT.getVectorNumElements();
5151 // Src needs to have twice the number of elements.
5152 // TODO: Update shuffle lowering to add the extract subvector
5153 if (SDValue Src = foldConcatVector(V1, V2);
5154 Src && Src.getValueType().getVectorNumElements() == (NumElts * 2))
5155 return Src;
5156
5157 return SDValue();
5158}
5159
5160/// Is this shuffle interleaving contiguous elements from one vector into the
5161/// even elements and contiguous elements from another vector into the odd
5162/// elements. \p EvenSrc will contain the element that should be in the first
5163/// even element. \p OddSrc will contain the element that should be in the first
5164/// odd element. These can be the first element in a source or the element half
5165/// way through the source.
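/// For example (illustrative): with two v8i8 sources, the mask
/// <0, 8, 1, 9, 2, 10, 3, 11> interleaves the low halves of the two sources
/// and yields EvenSrc = 0 and OddSrc = 8.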
5166static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, int &EvenSrc,
5167 int &OddSrc, const RISCVSubtarget &Subtarget) {
5168 // We need to be able to widen elements to the next larger integer type or
5169 // use the zip2a instruction at e64.
5170 if (VT.getScalarSizeInBits() >= Subtarget.getELen() &&
5171 !Subtarget.hasVendorXRivosVizip())
5172 return false;
5173
5174 int Size = Mask.size();
5175 int NumElts = VT.getVectorNumElements();
5176 assert(Size == (int)NumElts && "Unexpected mask size");
5177
5178 SmallVector<unsigned, 2> StartIndexes;
5179 if (!ShuffleVectorInst::isInterleaveMask(Mask, Factor: 2, NumInputElts: Size * 2, StartIndexes))
5180 return false;
5181
5182 EvenSrc = StartIndexes[0];
5183 OddSrc = StartIndexes[1];
5184
5185 // One source should be low half of first vector.
5186 if (EvenSrc != 0 && OddSrc != 0)
5187 return false;
5188
5189  // Subvectors will be extracted either at the start of the two input
5190  // vectors, or at the start and middle of the first vector if it's a unary
5191 // interleave.
5192 // In both cases, HalfNumElts will be extracted.
5193 // We need to ensure that the extract indices are 0 or HalfNumElts otherwise
5194 // we'll create an illegal extract_subvector.
5195 // FIXME: We could support other values using a slidedown first.
5196 int HalfNumElts = NumElts / 2;
5197 return ((EvenSrc % HalfNumElts) == 0) && ((OddSrc % HalfNumElts) == 0);
5198}
5199
5200/// Is this mask representing a masked combination of two slides?
5201static bool isMaskedSlidePair(ArrayRef<int> Mask,
5202 std::array<std::pair<int, int>, 2> &SrcInfo) {
5203 if (!llvm::isMaskedSlidePair(Mask, NumElts: Mask.size(), SrcInfo))
5204 return false;
5205
5206 // Avoid matching vselect idioms
5207 if (SrcInfo[0].second == 0 && SrcInfo[1].second == 0)
5208 return false;
5209 // Prefer vslideup as the second instruction, and identity
5210 // only as the initial instruction.
5211 if ((SrcInfo[0].second > 0 && SrcInfo[1].second < 0) ||
5212 SrcInfo[1].second == 0)
5213 std::swap(x&: SrcInfo[0], y&: SrcInfo[1]);
5214 assert(SrcInfo[0].first != -1 && "Must find one slide");
5215 return true;
5216}
5217
5218// Exactly matches the semantics of a previously existing custom matcher
5219// to allow migration to new matcher without changing output.
5220static bool isElementRotate(const std::array<std::pair<int, int>, 2> &SrcInfo,
5221 unsigned NumElts) {
5222 if (SrcInfo[1].first == -1)
5223 return true;
5224 return SrcInfo[0].second < 0 && SrcInfo[1].second > 0 &&
5225 SrcInfo[1].second - SrcInfo[0].second == (int)NumElts;
5226}
5227
5228static bool isAlternating(const std::array<std::pair<int, int>, 2> &SrcInfo,
5229 ArrayRef<int> Mask, unsigned Factor,
5230 bool RequiredPolarity) {
5231 int NumElts = Mask.size();
5232 for (const auto &[Idx, M] : enumerate(First&: Mask)) {
5233 if (M < 0)
5234 continue;
5235 int Src = M >= NumElts;
5236 int Diff = (int)Idx - (M % NumElts);
5237 bool C = Src == SrcInfo[1].first && Diff == SrcInfo[1].second;
5238 assert(C != (Src == SrcInfo[0].first && Diff == SrcInfo[0].second) &&
5239 "Must match exactly one of the two slides");
5240 if (RequiredPolarity != (C == (Idx / Factor) % 2))
5241 return false;
5242 }
5243 return true;
5244}
5245
5246/// Given a shuffle which can be represented as a pair of two slides,
5247/// see if it is a zipeven idiom. Zipeven is:
5248/// vs2: a0 a1 a2 a3
5249/// vs1: b0 b1 b2 b3
5250/// vd: a0 b0 a2 b2
5251static bool isZipEven(const std::array<std::pair<int, int>, 2> &SrcInfo,
5252 ArrayRef<int> Mask, unsigned &Factor) {
5253 Factor = SrcInfo[1].second;
5254 return SrcInfo[0].second == 0 && isPowerOf2_32(Value: Factor) &&
5255 Mask.size() % Factor == 0 &&
5256 isAlternating(SrcInfo, Mask, Factor, RequiredPolarity: true);
5257}
5258
5259/// Given a shuffle which can be represented as a pair of two slides,
5260/// see if it is a zipodd idiom. Zipodd is:
5261/// vs2: a0 a1 a2 a3
5262/// vs1: b0 b1 b2 b3
5263/// vd: a1 b1 a3 b3
5264/// Note that the operand order is swapped due to the way we canonicalize
5265/// the slides, so SrcInfo[0] is vs1, and SrcInfo[1] is vs2.
5266static bool isZipOdd(const std::array<std::pair<int, int>, 2> &SrcInfo,
5267 ArrayRef<int> Mask, unsigned &Factor) {
5268 Factor = -SrcInfo[1].second;
5269 return SrcInfo[0].second == 0 && isPowerOf2_32(Value: Factor) &&
5270 Mask.size() % Factor == 0 &&
5271 isAlternating(SrcInfo, Mask, Factor, RequiredPolarity: false);
5272}
5273
5274// Lower a deinterleave shuffle to SRL and TRUNC. Factor must be
5275// 2, 4, 8 and the integer type Factor-times larger than VT's
5276// 2, 4, or 8, and the integer type Factor-times larger than VT's
5277// [a, p, b, q, c, r, d, s] -> [a, b, c, d] (Factor=2, Index=0)
5278// -> [p, q, r, s] (Factor=2, Index=1)
5279static SDValue getDeinterleaveShiftAndTrunc(const SDLoc &DL, MVT VT,
5280 SDValue Src, unsigned Factor,
5281 unsigned Index, SelectionDAG &DAG) {
5282 unsigned EltBits = VT.getScalarSizeInBits();
5283 ElementCount SrcEC = Src.getValueType().getVectorElementCount();
5284 MVT WideSrcVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: EltBits * Factor),
5285 EC: SrcEC.divideCoefficientBy(RHS: Factor));
5286 MVT ResVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: EltBits),
5287 EC: SrcEC.divideCoefficientBy(RHS: Factor));
5288 Src = DAG.getBitcast(VT: WideSrcVT, V: Src);
5289
5290 unsigned Shift = Index * EltBits;
5291 SDValue Res = DAG.getNode(Opcode: ISD::SRL, DL, VT: WideSrcVT, N1: Src,
5292 N2: DAG.getConstant(Val: Shift, DL, VT: WideSrcVT));
5293 Res = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: ResVT, Operand: Res);
5294 MVT CastVT = ResVT.changeVectorElementType(EltVT: VT.getVectorElementType());
5295 Res = DAG.getBitcast(VT: CastVT, V: Res);
5296 return DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT), SubVec: Res, Idx: 0);
5297}
5298
5299/// Match a single source shuffle which is an identity except that some
5300/// particular element is repeated. This can be lowered as a masked
5301/// vrgather.vi/vx. Note that the two source form of this is handled
5302/// by the recursive splitting logic and doesn't need special handling.
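/// For example (illustrative): the single-source mask <0, 1, 2, 2> is the
/// identity except that lane 3 repeats element 2, so it can be lowered as a
/// splat of element 2 (vrgather.vi) vselect'ed into the original vector.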
5303static SDValue lowerVECTOR_SHUFFLEAsVRGatherVX(ShuffleVectorSDNode *SVN,
5304 const RISCVSubtarget &Subtarget,
5305 SelectionDAG &DAG) {
5306
5307 SDLoc DL(SVN);
5308 MVT VT = SVN->getSimpleValueType(ResNo: 0);
5309 SDValue V1 = SVN->getOperand(Num: 0);
5310 assert(SVN->getOperand(1).isUndef());
5311 ArrayRef<int> Mask = SVN->getMask();
5312 const unsigned NumElts = VT.getVectorNumElements();
5313 MVT XLenVT = Subtarget.getXLenVT();
5314
5315 std::optional<int> SplatIdx;
5316 for (auto [I, M] : enumerate(First&: Mask)) {
5317 if (M == -1 || I == (unsigned)M)
5318 continue;
5319 if (SplatIdx && *SplatIdx != M)
5320 return SDValue();
5321 SplatIdx = M;
5322 }
5323
5324 if (!SplatIdx)
5325 return SDValue();
5326
5327 SmallVector<SDValue> MaskVals;
5328 for (int MaskIndex : Mask) {
5329 bool SelectMaskVal = MaskIndex == *SplatIdx;
5330 MaskVals.push_back(Elt: DAG.getConstant(Val: SelectMaskVal, DL, VT: XLenVT));
5331 }
5332 assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
5333 MVT MaskVT = MVT::getVectorVT(VT: MVT::i1, NumElements: NumElts);
5334 SDValue SelectMask = DAG.getBuildVector(VT: MaskVT, DL, Ops: MaskVals);
5335 SDValue Splat = DAG.getVectorShuffle(VT, dl: DL, N1: V1, N2: DAG.getUNDEF(VT),
5336 Mask: SmallVector<int>(NumElts, *SplatIdx));
5337 return DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: SelectMask, N2: Splat, N3: V1);
5338}
5339
5340// Lower the following shuffle to vslidedown.
5341// a)
5342// t49: v8i8 = extract_subvector t13, Constant:i64<0>
5343// t109: v8i8 = extract_subvector t13, Constant:i64<8>
5344// t108: v8i8 = vector_shuffle<1,2,3,4,5,6,7,8> t49, t106
5345// b)
5346// t69: v16i16 = extract_subvector t68, Constant:i64<0>
5347// t23: v8i16 = extract_subvector t69, Constant:i64<0>
5348// t29: v4i16 = extract_subvector t23, Constant:i64<4>
5349// t26: v8i16 = extract_subvector t69, Constant:i64<8>
5350// t30: v4i16 = extract_subvector t26, Constant:i64<0>
5351// t54: v4i16 = vector_shuffle<1,2,3,4> t29, t30
5352static SDValue lowerVECTOR_SHUFFLEAsVSlidedown(const SDLoc &DL, MVT VT,
5353 SDValue V1, SDValue V2,
5354 ArrayRef<int> Mask,
5355 const RISCVSubtarget &Subtarget,
5356 SelectionDAG &DAG) {
5357 auto findNonEXTRACT_SUBVECTORParent =
5358 [](SDValue Parent) -> std::pair<SDValue, uint64_t> {
5359 uint64_t Offset = 0;
5360 while (Parent.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5361 // EXTRACT_SUBVECTOR can be used to extract a fixed-width vector from
5362 // a scalable vector. But we don't want to match the case.
5363 Parent.getOperand(i: 0).getSimpleValueType().isFixedLengthVector()) {
5364 Offset += Parent.getConstantOperandVal(i: 1);
5365 Parent = Parent.getOperand(i: 0);
5366 }
5367 return std::make_pair(x&: Parent, y&: Offset);
5368 };
5369
5370 auto [V1Src, V1IndexOffset] = findNonEXTRACT_SUBVECTORParent(V1);
5371 auto [V2Src, V2IndexOffset] = findNonEXTRACT_SUBVECTORParent(V2);
5372
5373 // Extracting from the same source.
5374 SDValue Src = V1Src;
5375 if (Src != V2Src)
5376 return SDValue();
5377
5378 // Rebuild mask because Src may be from multiple EXTRACT_SUBVECTORs.
5379 SmallVector<int, 16> NewMask(Mask);
5380 for (size_t i = 0; i != NewMask.size(); ++i) {
5381 if (NewMask[i] == -1)
5382 continue;
5383
5384 if (static_cast<size_t>(NewMask[i]) < NewMask.size()) {
5385 NewMask[i] = NewMask[i] + V1IndexOffset;
5386 } else {
5387 // Minus NewMask.size() is needed. Otherwise, the b case would be
5388 // <5,6,7,12> instead of <5,6,7,8>.
5389 NewMask[i] = NewMask[i] - NewMask.size() + V2IndexOffset;
5390 }
5391 }
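  // E.g. for case b) above: Mask is <1,2,3,4>, V1IndexOffset is 4 and
  // V2IndexOffset is 8, so the rebuilt NewMask relative to Src is <5,6,7,8>.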
5392
5393 // First index must be known and non-zero. It will be used as the slidedown
5394 // amount.
5395 if (NewMask[0] <= 0)
5396 return SDValue();
5397
5398  // NewMask must also be contiguous (consecutive indices).
5399 for (unsigned i = 1; i != NewMask.size(); ++i)
5400 if (NewMask[i - 1] + 1 != NewMask[i])
5401 return SDValue();
5402
5403 MVT XLenVT = Subtarget.getXLenVT();
5404 MVT SrcVT = Src.getSimpleValueType();
5405 MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT: SrcVT, Subtarget);
5406 auto [TrueMask, VL] = getDefaultVLOps(VecVT: SrcVT, ContainerVT, DL, DAG, Subtarget);
5407 SDValue Slidedown =
5408 getVSlidedown(DAG, Subtarget, DL, VT: ContainerVT, Passthru: DAG.getUNDEF(VT: ContainerVT),
5409 Op: convertToScalableVector(VT: ContainerVT, V: Src, DAG, Subtarget),
5410 Offset: DAG.getConstant(Val: NewMask[0], DL, VT: XLenVT), Mask: TrueMask, VL);
5411 return DAG.getExtractSubvector(
5412 DL, VT, Vec: convertFromScalableVector(VT: SrcVT, V: Slidedown, DAG, Subtarget), Idx: 0);
5413}
5414
5415// Because vslideup leaves the destination elements at the start intact, we can
5416// use it to perform shuffles that insert subvectors:
5417//
5418// vector_shuffle v8:v8i8, v9:v8i8, <0, 1, 2, 3, 8, 9, 10, 11>
5419// ->
5420// vsetvli zero, 8, e8, mf2, ta, ma
5421// vslideup.vi v8, v9, 4
5422//
5423// vector_shuffle v8:v8i8, v9:v8i8 <0, 1, 8, 9, 10, 5, 6, 7>
5424// ->
5425// vsetvli zero, 5, e8, mf2, tu, ma
5426//     vslideup.vi v8, v9, 2
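//
// In the second example the subvector <8, 9, 10> is inserted at Index=2 with
// NumSubElts=3, so we slide up by 2 and set VL to Index + NumSubElts = 5; the
// tail-undisturbed policy keeps elements 5-7 of v8 intact.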
5427static SDValue lowerVECTOR_SHUFFLEAsVSlideup(const SDLoc &DL, MVT VT,
5428 SDValue V1, SDValue V2,
5429 ArrayRef<int> Mask,
5430 const RISCVSubtarget &Subtarget,
5431 SelectionDAG &DAG) {
5432 unsigned NumElts = VT.getVectorNumElements();
5433 int NumSubElts, Index;
5434 if (!ShuffleVectorInst::isInsertSubvectorMask(Mask, NumSrcElts: NumElts, NumSubElts,
5435 Index))
5436 return SDValue();
5437
5438 bool OpsSwapped = Mask[Index] < (int)NumElts;
5439 SDValue InPlace = OpsSwapped ? V2 : V1;
5440 SDValue ToInsert = OpsSwapped ? V1 : V2;
5441
5442 MVT XLenVT = Subtarget.getXLenVT();
5443 MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
5444 auto TrueMask = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget).first;
5445 // We slide up by the index that the subvector is being inserted at, and set
5446 // VL to the index + the number of elements being inserted.
5447 unsigned Policy =
5448 RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED | RISCVVType::MASK_AGNOSTIC;
5449  // If we're adding a suffix to the in-place vector, i.e. inserting right
5450 // up to the very end of it, then we don't actually care about the tail.
5451 if (NumSubElts + Index >= (int)NumElts)
5452 Policy |= RISCVVType::TAIL_AGNOSTIC;
5453
5454 InPlace = convertToScalableVector(VT: ContainerVT, V: InPlace, DAG, Subtarget);
5455 ToInsert = convertToScalableVector(VT: ContainerVT, V: ToInsert, DAG, Subtarget);
5456 SDValue VL = DAG.getConstant(Val: NumSubElts + Index, DL, VT: XLenVT);
5457
5458 SDValue Res;
5459 // If we're inserting into the lowest elements, use a tail undisturbed
5460 // vmv.v.v.
5461 if (Index == 0)
5462 Res = DAG.getNode(Opcode: RISCVISD::VMV_V_V_VL, DL, VT: ContainerVT, N1: InPlace, N2: ToInsert,
5463 N3: VL);
5464 else
5465 Res = getVSlideup(DAG, Subtarget, DL, VT: ContainerVT, Passthru: InPlace, Op: ToInsert,
5466 Offset: DAG.getConstant(Val: Index, DL, VT: XLenVT), Mask: TrueMask, VL, Policy);
5467 return convertFromScalableVector(VT, V: Res, DAG, Subtarget);
5468}
5469
5470/// Match v(f)slide1up/down idioms. These operations involve sliding
5471/// N-1 elements to make room for an inserted scalar at one end.
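/// E.g. with NumElts=4, a splat source s as operand 0, and <a, b, c, d> as
/// operand 1, the mask <5, 6, 7, 0> lowers to a vslide1down producing
/// <b, c, d, s>, and the mask <0, 4, 5, 6> lowers to a vslide1up producing
/// <s, a, b, c>.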
5472static SDValue lowerVECTOR_SHUFFLEAsVSlide1(const SDLoc &DL, MVT VT,
5473 SDValue V1, SDValue V2,
5474 ArrayRef<int> Mask,
5475 const RISCVSubtarget &Subtarget,
5476 SelectionDAG &DAG) {
5477 bool OpsSwapped = false;
5478 if (!isa<BuildVectorSDNode>(Val: V1)) {
5479 if (!isa<BuildVectorSDNode>(Val: V2))
5480 return SDValue();
5481 std::swap(a&: V1, b&: V2);
5482 OpsSwapped = true;
5483 }
5484 SDValue Splat = cast<BuildVectorSDNode>(Val&: V1)->getSplatValue();
5485 if (!Splat)
5486 return SDValue();
5487
5488 // Return true if the mask could describe a slide of Mask.size() - 1
5489 // elements from concat_vector(V1, V2)[Base:] to [Offset:].
5490 auto isSlideMask = [](ArrayRef<int> Mask, unsigned Base, int Offset) {
5491 const unsigned S = (Offset > 0) ? 0 : -Offset;
5492 const unsigned E = Mask.size() - ((Offset > 0) ? Offset : 0);
5493 for (unsigned i = S; i != E; ++i)
5494 if (Mask[i] >= 0 && (unsigned)Mask[i] != Base + i + Offset)
5495 return false;
5496 return true;
5497 };
5498
5499 const unsigned NumElts = VT.getVectorNumElements();
5500 bool IsVSlidedown = isSlideMask(Mask, OpsSwapped ? 0 : NumElts, 1);
5501 if (!IsVSlidedown && !isSlideMask(Mask, OpsSwapped ? 0 : NumElts, -1))
5502 return SDValue();
5503
5504 const int InsertIdx = Mask[IsVSlidedown ? (NumElts - 1) : 0];
5505  // The inserted lane must come from the splat; an undef scalar is legal but not profitable.
5506 if (InsertIdx < 0 || InsertIdx / NumElts != (unsigned)OpsSwapped)
5507 return SDValue();
5508
5509 MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
5510 auto [TrueMask, VL] = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
5511
5512 // zvfhmin and zvfbfmin don't have vfslide1{down,up}.vf so use fmv.x.h +
5513 // vslide1{down,up}.vx instead.
5514 if (VT.getVectorElementType() == MVT::bf16 ||
5515 (VT.getVectorElementType() == MVT::f16 &&
5516 !Subtarget.hasVInstructionsF16())) {
5517 MVT IntVT = ContainerVT.changeVectorElementTypeToInteger();
5518 Splat =
5519 DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTH, DL, VT: Subtarget.getXLenVT(), Operand: Splat);
5520 V2 = DAG.getBitcast(
5521 VT: IntVT, V: convertToScalableVector(VT: ContainerVT, V: V2, DAG, Subtarget));
5522 SDValue Vec = DAG.getNode(
5523 Opcode: IsVSlidedown ? RISCVISD::VSLIDE1DOWN_VL : RISCVISD::VSLIDE1UP_VL, DL,
5524 VT: IntVT, N1: DAG.getUNDEF(VT: IntVT), N2: V2, N3: Splat, N4: TrueMask, N5: VL);
5525 Vec = DAG.getBitcast(VT: ContainerVT, V: Vec);
5526 return convertFromScalableVector(VT, V: Vec, DAG, Subtarget);
5527 }
5528
5529 auto OpCode = IsVSlidedown ?
5530 (VT.isFloatingPoint() ? RISCVISD::VFSLIDE1DOWN_VL : RISCVISD::VSLIDE1DOWN_VL) :
5531 (VT.isFloatingPoint() ? RISCVISD::VFSLIDE1UP_VL : RISCVISD::VSLIDE1UP_VL);
5532 if (!VT.isFloatingPoint())
5533 Splat = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: Subtarget.getXLenVT(), Operand: Splat);
5534 auto Vec = DAG.getNode(Opcode: OpCode, DL, VT: ContainerVT,
5535 N1: DAG.getUNDEF(VT: ContainerVT),
5536 N2: convertToScalableVector(VT: ContainerVT, V: V2, DAG, Subtarget),
5537 N3: Splat, N4: TrueMask, N5: VL);
5538 return convertFromScalableVector(VT, V: Vec, DAG, Subtarget);
5539}
5540
5541/// Match a mask which "spreads" the leading elements of a vector evenly
5542/// across the result. Factor is the spread amount, and Index is the
5543/// offset applied (on success, Index < Factor). This is the inverse
5544/// of a deinterleave with the same Factor and Index. This is analogous
5545/// to an interleave, except that all but one lane is undef.
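/// For example, with Factor=2 the mask <-1, 0, -1, 1, -1, 2, -1, 3> is a
/// spread with Index=1: the leading elements 0..3 land in every second lane,
/// offset by one.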
5546bool RISCVTargetLowering::isSpreadMask(ArrayRef<int> Mask, unsigned Factor,
5547 unsigned &Index) {
5548 SmallVector<bool> LaneIsUndef(Factor, true);
5549 for (unsigned i = 0; i < Mask.size(); i++)
5550 LaneIsUndef[i % Factor] &= (Mask[i] == -1);
5551
5552 bool Found = false;
5553 for (unsigned i = 0; i < Factor; i++) {
5554 if (LaneIsUndef[i])
5555 continue;
5556 if (Found)
5557 return false;
5558 Index = i;
5559 Found = true;
5560 }
5561 if (!Found)
5562 return false;
5563
5564 for (unsigned i = 0; i < Mask.size() / Factor; i++) {
5565 unsigned j = i * Factor + Index;
5566 if (Mask[j] != -1 && (unsigned)Mask[j] != i)
5567 return false;
5568 }
5569 return true;
5570}
5571
5572static SDValue lowerVZIP(unsigned Opc, SDValue Op0, SDValue Op1,
5573 const SDLoc &DL, SelectionDAG &DAG,
5574 const RISCVSubtarget &Subtarget) {
5575 assert(RISCVISD::RI_VZIPEVEN_VL == Opc || RISCVISD::RI_VZIPODD_VL == Opc ||
5576 RISCVISD::RI_VZIP2A_VL == Opc || RISCVISD::RI_VZIP2B_VL == Opc ||
5577 RISCVISD::RI_VUNZIP2A_VL == Opc || RISCVISD::RI_VUNZIP2B_VL == Opc);
5578 assert(Op0.getSimpleValueType() == Op1.getSimpleValueType());
5579
5580 MVT VT = Op0.getSimpleValueType();
5581 MVT IntVT = VT.changeVectorElementTypeToInteger();
5582 Op0 = DAG.getBitcast(VT: IntVT, V: Op0);
5583 Op1 = DAG.getBitcast(VT: IntVT, V: Op1);
5584
5585 MVT ContainerVT = IntVT;
5586 if (VT.isFixedLengthVector()) {
5587 ContainerVT = getContainerForFixedLengthVector(DAG, VT: IntVT, Subtarget);
5588 Op0 = convertToScalableVector(VT: ContainerVT, V: Op0, DAG, Subtarget);
5589 Op1 = convertToScalableVector(VT: ContainerVT, V: Op1, DAG, Subtarget);
5590 }
5591
5592 MVT InnerVT = ContainerVT;
5593 auto [Mask, VL] = getDefaultVLOps(VecVT: IntVT, ContainerVT: InnerVT, DL, DAG, Subtarget);
5594 if (Op1.isUndef() &&
5595 ContainerVT.bitsGT(VT: RISCVTargetLowering::getM1VT(VT: ContainerVT)) &&
5596 (RISCVISD::RI_VUNZIP2A_VL == Opc || RISCVISD::RI_VUNZIP2B_VL == Opc)) {
5597 InnerVT = ContainerVT.getHalfNumVectorElementsVT();
5598 VL = DAG.getConstant(Val: VT.getVectorNumElements() / 2, DL,
5599 VT: Subtarget.getXLenVT());
5600 Mask = getAllOnesMask(VecVT: InnerVT, VL, DL, DAG);
5601 unsigned HighIdx = InnerVT.getVectorElementCount().getKnownMinValue();
5602 Op1 = DAG.getExtractSubvector(DL, VT: InnerVT, Vec: Op0, Idx: HighIdx);
5603 Op0 = DAG.getExtractSubvector(DL, VT: InnerVT, Vec: Op0, Idx: 0);
5604 }
5605
5606 SDValue Passthru = DAG.getUNDEF(VT: InnerVT);
5607 SDValue Res = DAG.getNode(Opcode: Opc, DL, VT: InnerVT, N1: Op0, N2: Op1, N3: Passthru, N4: Mask, N5: VL);
5608 if (InnerVT.bitsLT(VT: ContainerVT))
5609 Res = DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT: ContainerVT), SubVec: Res, Idx: 0);
5610 if (IntVT.isFixedLengthVector())
5611 Res = convertFromScalableVector(VT: IntVT, V: Res, DAG, Subtarget);
5612 Res = DAG.getBitcast(VT, V: Res);
5613 return Res;
5614}
5615
5616// Given a vector a, b, c, d return a vector Factor times longer
5617// with Factor-1 undefs between elements. Ex:
5618// a, undef, b, undef, c, undef, d, undef (Factor=2, Index=0)
5619// undef, a, undef, b, undef, c, undef, d (Factor=2, Index=1)
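// This is done by zero-extending to an integer type Factor times wider and,
// for Index != 0, shifting left by EltBits * Index; e.g. for Factor=2, Index=1
// on v4i8 the value is zero-extended to v4i16, shifted left by 8, and bitcast
// back to v8i8. The gap lanes are therefore materialized as zeros, which is
// fine since the caller only needs them to be undef.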
5620static SDValue getWideningSpread(SDValue V, unsigned Factor, unsigned Index,
5621 const SDLoc &DL, SelectionDAG &DAG) {
5622
5623 MVT VT = V.getSimpleValueType();
5624 unsigned EltBits = VT.getScalarSizeInBits();
5625 ElementCount EC = VT.getVectorElementCount();
5626 V = DAG.getBitcast(VT: VT.changeTypeToInteger(), V);
5627
5628 MVT WideVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: EltBits * Factor), EC);
5629
5630 SDValue Result = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: WideVT, Operand: V);
5631 // TODO: On rv32, the constant becomes a splat_vector_parts which does not
5632 // allow the SHL to fold away if Index is 0.
5633 if (Index != 0)
5634 Result = DAG.getNode(Opcode: ISD::SHL, DL, VT: WideVT, N1: Result,
5635 N2: DAG.getConstant(Val: EltBits * Index, DL, VT: WideVT));
5636 // Make sure to use original element type
5637 MVT ResultVT = MVT::getVectorVT(VT: VT.getVectorElementType(),
5638 EC: EC.multiplyCoefficientBy(RHS: Factor));
5639 return DAG.getBitcast(VT: ResultVT, V: Result);
5640}
5641
5642// Given two input vectors of <[vscale x ]n x ty>, use vwaddu.vv and vwmaccu.vx
5643// to create an interleaved vector of <[vscale x] n*2 x ty>.
5644// This requires that the size of ty is less than the subtarget's maximum ELEN.
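// E.g. for SEW=8 with EvenV = <a, c> and OddV = <b, d>, each widened i16 lane
// is (Odd << 8) + Even, which bitcasts back to the interleaved <a, b, c, d>.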
5645static SDValue getWideningInterleave(SDValue EvenV, SDValue OddV,
5646 const SDLoc &DL, SelectionDAG &DAG,
5647 const RISCVSubtarget &Subtarget) {
5648
5649 // FIXME: Not only does this optimize the code, it fixes some correctness
5650 // issues because MIR does not have freeze.
5651 if (EvenV.isUndef())
5652 return getWideningSpread(V: OddV, Factor: 2, Index: 1, DL, DAG);
5653 if (OddV.isUndef())
5654 return getWideningSpread(V: EvenV, Factor: 2, Index: 0, DL, DAG);
5655
5656 MVT VecVT = EvenV.getSimpleValueType();
5657 MVT VecContainerVT = VecVT; // <vscale x n x ty>
5658 // Convert fixed vectors to scalable if needed
5659 if (VecContainerVT.isFixedLengthVector()) {
5660 VecContainerVT = getContainerForFixedLengthVector(DAG, VT: VecVT, Subtarget);
5661 EvenV = convertToScalableVector(VT: VecContainerVT, V: EvenV, DAG, Subtarget);
5662 OddV = convertToScalableVector(VT: VecContainerVT, V: OddV, DAG, Subtarget);
5663 }
5664
5665 assert(VecVT.getScalarSizeInBits() < Subtarget.getELen());
5666
5667 // We're working with a vector of the same size as the resulting
5668 // interleaved vector, but with half the number of elements and
5669 // twice the SEW (Hence the restriction on not using the maximum
5670 // ELEN)
5671 MVT WideVT =
5672 MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: VecVT.getScalarSizeInBits() * 2),
5673 EC: VecVT.getVectorElementCount());
5674 MVT WideContainerVT = WideVT; // <vscale x n x ty*2>
5675 if (WideContainerVT.isFixedLengthVector())
5676 WideContainerVT = getContainerForFixedLengthVector(DAG, VT: WideVT, Subtarget);
5677
5678 // Bitcast the input vectors to integers in case they are FP
5679 VecContainerVT = VecContainerVT.changeTypeToInteger();
5680 EvenV = DAG.getBitcast(VT: VecContainerVT, V: EvenV);
5681 OddV = DAG.getBitcast(VT: VecContainerVT, V: OddV);
5682
5683 auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT: VecContainerVT, DL, DAG, Subtarget);
5684 SDValue Passthru = DAG.getUNDEF(VT: WideContainerVT);
5685
5686 SDValue Interleaved;
5687 if (Subtarget.hasStdExtZvbb()) {
5688 // Interleaved = (OddV << VecVT.getScalarSizeInBits()) + EvenV.
5689 SDValue OffsetVec =
5690 DAG.getConstant(Val: VecVT.getScalarSizeInBits(), DL, VT: VecContainerVT);
5691 Interleaved = DAG.getNode(Opcode: RISCVISD::VWSLL_VL, DL, VT: WideContainerVT, N1: OddV,
5692 N2: OffsetVec, N3: Passthru, N4: Mask, N5: VL);
5693 Interleaved = DAG.getNode(Opcode: RISCVISD::VWADDU_W_VL, DL, VT: WideContainerVT,
5694 N1: Interleaved, N2: EvenV, N3: Passthru, N4: Mask, N5: VL);
5695 } else {
5696 // FIXME: We should freeze the odd vector here. We already handled the case
5697 // of provably undef/poison above.
5698
5699 // Widen EvenV and OddV with 0s and add one copy of OddV to EvenV with
5700 // vwaddu.vv
5701 Interleaved = DAG.getNode(Opcode: RISCVISD::VWADDU_VL, DL, VT: WideContainerVT, N1: EvenV,
5702 N2: OddV, N3: Passthru, N4: Mask, N5: VL);
5703
5704    // Then compute OddV * (2^VecVT.getScalarSizeInBits() - 1)
5705 SDValue AllOnesVec = DAG.getSplatVector(
5706 VT: VecContainerVT, DL, Op: DAG.getAllOnesConstant(DL, VT: Subtarget.getXLenVT()));
5707 SDValue OddsMul = DAG.getNode(Opcode: RISCVISD::VWMULU_VL, DL, VT: WideContainerVT,
5708 N1: OddV, N2: AllOnesVec, N3: Passthru, N4: Mask, N5: VL);
5709
5710 // Add the two together so we get
5711 // (OddV * 0xff...ff) + (OddV + EvenV)
5712 // = (OddV * 0x100...00) + EvenV
5713 // = (OddV << VecVT.getScalarSizeInBits()) + EvenV
5714    // Note the ADD_VL and VWMULU_VL should get selected as vwmaccu.vx
5715 Interleaved = DAG.getNode(Opcode: RISCVISD::ADD_VL, DL, VT: WideContainerVT,
5716 N1: Interleaved, N2: OddsMul, N3: Passthru, N4: Mask, N5: VL);
5717 }
5718
5719 // Bitcast from <vscale x n * ty*2> to <vscale x 2*n x ty>
5720 MVT ResultContainerVT = MVT::getVectorVT(
5721 VT: VecVT.getVectorElementType(), // Make sure to use original type
5722 EC: VecContainerVT.getVectorElementCount().multiplyCoefficientBy(RHS: 2));
5723 Interleaved = DAG.getBitcast(VT: ResultContainerVT, V: Interleaved);
5724
5725 // Convert back to a fixed vector if needed
5726 MVT ResultVT =
5727 MVT::getVectorVT(VT: VecVT.getVectorElementType(),
5728 EC: VecVT.getVectorElementCount().multiplyCoefficientBy(RHS: 2));
5729 if (ResultVT.isFixedLengthVector())
5730 Interleaved =
5731 convertFromScalableVector(VT: ResultVT, V: Interleaved, DAG, Subtarget);
5732
5733 return Interleaved;
5734}
5735
5736// If we have a vector of bits that we want to reverse, we can use a vbrev on a
5737// larger element type, e.g. v32i1 can be reversed with a v1i32 bitreverse.
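// If the mask doesn't fill the wider element exactly (e.g. v4i1 reversed via
// an i8 bitreverse), the reversed bits land in the upper part of the wider
// element and are shifted back into place afterwards.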
5738static SDValue lowerBitreverseShuffle(ShuffleVectorSDNode *SVN,
5739 SelectionDAG &DAG,
5740 const RISCVSubtarget &Subtarget) {
5741 SDLoc DL(SVN);
5742 MVT VT = SVN->getSimpleValueType(ResNo: 0);
5743 SDValue V = SVN->getOperand(Num: 0);
5744 unsigned NumElts = VT.getVectorNumElements();
5745
5746 assert(VT.getVectorElementType() == MVT::i1);
5747
5748 if (!ShuffleVectorInst::isReverseMask(Mask: SVN->getMask(),
5749 NumSrcElts: SVN->getMask().size()) ||
5750 !SVN->getOperand(Num: 1).isUndef())
5751 return SDValue();
5752
5753 unsigned ViaEltSize = std::max(a: (uint64_t)8, b: PowerOf2Ceil(A: NumElts));
5754 EVT ViaVT = EVT::getVectorVT(
5755 Context&: *DAG.getContext(), VT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: ViaEltSize), NumElements: 1);
5756 EVT ViaBitVT =
5757 EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i1, NumElements: ViaVT.getScalarSizeInBits());
5758
5759  // If we don't have Zvbb, or the larger element type is wider than ELEN, the
5760  // operation will be illegal.
5761 if (!Subtarget.getTargetLowering()->isOperationLegalOrCustom(Op: ISD::BITREVERSE,
5762 VT: ViaVT) ||
5763 !Subtarget.getTargetLowering()->isTypeLegal(VT: ViaBitVT))
5764 return SDValue();
5765
5766 // If the bit vector doesn't fit exactly into the larger element type, we need
5767 // to insert it into the larger vector and then shift up the reversed bits
5768 // afterwards to get rid of the gap introduced.
5769 if (ViaEltSize > NumElts)
5770 V = DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT: ViaBitVT), SubVec: V, Idx: 0);
5771
5772 SDValue Res =
5773 DAG.getNode(Opcode: ISD::BITREVERSE, DL, VT: ViaVT, Operand: DAG.getBitcast(VT: ViaVT, V));
5774
5775 // Shift up the reversed bits if the vector didn't exactly fit into the larger
5776 // element type.
5777 if (ViaEltSize > NumElts)
5778 Res = DAG.getNode(Opcode: ISD::SRL, DL, VT: ViaVT, N1: Res,
5779 N2: DAG.getConstant(Val: ViaEltSize - NumElts, DL, VT: ViaVT));
5780
5781 Res = DAG.getBitcast(VT: ViaBitVT, V: Res);
5782
5783 if (ViaEltSize > NumElts)
5784 Res = DAG.getExtractSubvector(DL, VT, Vec: Res, Idx: 0);
5785 return Res;
5786}
5787
5788static bool isLegalBitRotate(ArrayRef<int> Mask, EVT VT,
5789 const RISCVSubtarget &Subtarget,
5790 MVT &RotateVT, unsigned &RotateAmt) {
5791 unsigned NumElts = VT.getVectorNumElements();
5792 unsigned EltSizeInBits = VT.getScalarSizeInBits();
5793 unsigned NumSubElts;
5794 if (!ShuffleVectorInst::isBitRotateMask(Mask, EltSizeInBits, MinSubElts: 2,
5795 MaxSubElts: NumElts, NumSubElts, RotateAmt))
5796 return false;
5797 RotateVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: EltSizeInBits * NumSubElts),
5798 NumElements: NumElts / NumSubElts);
5799
5800 // We might have a RotateVT that isn't legal, e.g. v4i64 on zve32x.
5801 return Subtarget.getTargetLowering()->isTypeLegal(VT: RotateVT);
5802}
5803
5804// Given a shuffle mask like <3, 0, 1, 2, 7, 4, 5, 6> for v8i8, we can
5805// reinterpret it as a v2i32 and rotate it right by 8 instead. We can lower this
5806// as a vror.vi if we have Zvkb, or otherwise as a vsll, vsrl and vor.
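// A rotate by half the element width on i16 is a byte swap, e.g.
// <1, 0, 3, 2, 5, 4, 7, 6> on v8i8 is a v4i16 rotate by 8 and is emitted as
// vrev8 (BSWAP) below.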
5807static SDValue lowerVECTOR_SHUFFLEAsRotate(ShuffleVectorSDNode *SVN,
5808 SelectionDAG &DAG,
5809 const RISCVSubtarget &Subtarget) {
5810 SDLoc DL(SVN);
5811
5812 EVT VT = SVN->getValueType(ResNo: 0);
5813 unsigned RotateAmt;
5814 MVT RotateVT;
5815 if (!isLegalBitRotate(Mask: SVN->getMask(), VT, Subtarget, RotateVT, RotateAmt))
5816 return SDValue();
5817
5818 SDValue Op = DAG.getBitcast(VT: RotateVT, V: SVN->getOperand(Num: 0));
5819
5820 SDValue Rotate;
5821 // A rotate of an i16 by 8 bits either direction is equivalent to a byteswap,
5822 // so canonicalize to vrev8.
5823 if (RotateVT.getScalarType() == MVT::i16 && RotateAmt == 8)
5824 Rotate = DAG.getNode(Opcode: ISD::BSWAP, DL, VT: RotateVT, Operand: Op);
5825 else
5826 Rotate = DAG.getNode(Opcode: ISD::ROTL, DL, VT: RotateVT, N1: Op,
5827 N2: DAG.getConstant(Val: RotateAmt, DL, VT: RotateVT));
5828
5829 return DAG.getBitcast(VT, V: Rotate);
5830}
5831
5832// If compiling with an exactly known VLEN, see if we can split a
5833// shuffle on m2 or larger into a small number of m1 sized shuffles
5834// which write each destination register exactly once.
5835static SDValue lowerShuffleViaVRegSplitting(ShuffleVectorSDNode *SVN,
5836 SelectionDAG &DAG,
5837 const RISCVSubtarget &Subtarget) {
5838 SDLoc DL(SVN);
5839 MVT VT = SVN->getSimpleValueType(ResNo: 0);
5840 SDValue V1 = SVN->getOperand(Num: 0);
5841 SDValue V2 = SVN->getOperand(Num: 1);
5842 ArrayRef<int> Mask = SVN->getMask();
5843
5844 // If we don't know exact data layout, not much we can do. If this
5845 // is already m1 or smaller, no point in splitting further.
5846 const auto VLen = Subtarget.getRealVLen();
5847 if (!VLen || VT.getSizeInBits().getFixedValue() <= *VLen)
5848 return SDValue();
5849
5850 // Avoid picking up bitrotate patterns which we have a linear-in-lmul
5851 // expansion for.
5852 unsigned RotateAmt;
5853 MVT RotateVT;
5854 if (isLegalBitRotate(Mask, VT, Subtarget, RotateVT, RotateAmt))
5855 return SDValue();
5856
5857 MVT ElemVT = VT.getVectorElementType();
5858 unsigned ElemsPerVReg = *VLen / ElemVT.getFixedSizeInBits();
5859
5860 EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
5861 MVT OneRegVT = MVT::getVectorVT(VT: ElemVT, NumElements: ElemsPerVReg);
5862 MVT M1VT = getContainerForFixedLengthVector(DAG, VT: OneRegVT, Subtarget);
5863 assert(M1VT == RISCVTargetLowering::getM1VT(M1VT));
5864 unsigned NumOpElts = M1VT.getVectorMinNumElements();
5865 unsigned NumElts = ContainerVT.getVectorMinNumElements();
5866 unsigned NumOfSrcRegs = NumElts / NumOpElts;
5867 unsigned NumOfDestRegs = NumElts / NumOpElts;
5868 // The following semantically builds up a fixed length concat_vector
5869 // of the component shuffle_vectors. We eagerly lower to scalable here
5870 // to avoid DAG combining it back to a large shuffle_vector again.
5871 V1 = convertToScalableVector(VT: ContainerVT, V: V1, DAG, Subtarget);
5872 V2 = convertToScalableVector(VT: ContainerVT, V: V2, DAG, Subtarget);
5873 SmallVector<SmallVector<std::tuple<unsigned, unsigned, SmallVector<int>>>>
5874 Operands;
5875 processShuffleMasks(
5876 Mask, NumOfSrcRegs, NumOfDestRegs, NumOfUsedRegs: NumOfDestRegs,
5877 NoInputAction: [&]() { Operands.emplace_back(); },
5878 SingleInputAction: [&](ArrayRef<int> SrcSubMask, unsigned SrcVecIdx, unsigned DstVecIdx) {
5879 Operands.emplace_back().emplace_back(Args&: SrcVecIdx, UINT_MAX,
5880 Args: SmallVector<int>(SrcSubMask));
5881 },
5882 ManyInputsAction: [&](ArrayRef<int> SrcSubMask, unsigned Idx1, unsigned Idx2, bool NewReg) {
5883 if (NewReg)
5884 Operands.emplace_back();
5885 Operands.back().emplace_back(Args&: Idx1, Args&: Idx2, Args: SmallVector<int>(SrcSubMask));
5886 });
5887 assert(Operands.size() == NumOfDestRegs && "Whole vector must be processed");
5888 // Note: check that we do not emit too many shuffles here to prevent code
5889 // size explosion.
5890// TODO: investigate whether this can be improved by extra analysis of the
5891// masks to check if the code is more profitable.
5892 unsigned NumShuffles = std::accumulate(
5893 first: Operands.begin(), last: Operands.end(), init: 0u,
5894 binary_op: [&](unsigned N,
5895 ArrayRef<std::tuple<unsigned, unsigned, SmallVector<int>>> Data) {
5896 if (Data.empty())
5897 return N;
5898 N += Data.size();
5899 for (const auto &P : Data) {
5900 unsigned Idx2 = std::get<1>(t: P);
5901 ArrayRef<int> Mask = std::get<2>(t: P);
5902 if (Idx2 != UINT_MAX)
5903 ++N;
5904 else if (ShuffleVectorInst::isIdentityMask(Mask, NumSrcElts: Mask.size()))
5905 --N;
5906 }
5907 return N;
5908 });
5909 if ((NumOfDestRegs > 2 && NumShuffles > NumOfDestRegs) ||
5910 (NumOfDestRegs <= 2 && NumShuffles >= 4))
5911 return SDValue();
5912 auto ExtractValue = [&, &DAG = DAG](SDValue SrcVec, unsigned ExtractIdx) {
5913 SDValue SubVec = DAG.getExtractSubvector(DL, VT: M1VT, Vec: SrcVec, Idx: ExtractIdx);
5914 SubVec = convertFromScalableVector(VT: OneRegVT, V: SubVec, DAG, Subtarget);
5915 return SubVec;
5916 };
5917 auto PerformShuffle = [&, &DAG = DAG](SDValue SubVec1, SDValue SubVec2,
5918 ArrayRef<int> Mask) {
5919 SDValue SubVec = DAG.getVectorShuffle(VT: OneRegVT, dl: DL, N1: SubVec1, N2: SubVec2, Mask);
5920 return SubVec;
5921 };
5922 SDValue Vec = DAG.getUNDEF(VT: ContainerVT);
5923 for (auto [I, Data] : enumerate(First&: Operands)) {
5924 if (Data.empty())
5925 continue;
5926 SmallDenseMap<unsigned, SDValue, 4> Values;
5927 for (unsigned I : seq<unsigned>(Size: Data.size())) {
5928 const auto &[Idx1, Idx2, _] = Data[I];
5929      // If the shuffle contains a permutation of an odd number of elements,
5930 // Idx1 might be used already in the first iteration.
5931 //
5932 // Idx1 = shuffle Idx1, Idx2
5933 // Idx1 = shuffle Idx1, Idx3
5934 SDValue &V = Values.try_emplace(Key: Idx1).first->getSecond();
5935 if (!V)
5936 V = ExtractValue(Idx1 >= NumOfSrcRegs ? V2 : V1,
5937 (Idx1 % NumOfSrcRegs) * NumOpElts);
5938 if (Idx2 != UINT_MAX) {
5939 SDValue &V = Values.try_emplace(Key: Idx2).first->getSecond();
5940 if (!V)
5941 V = ExtractValue(Idx2 >= NumOfSrcRegs ? V2 : V1,
5942 (Idx2 % NumOfSrcRegs) * NumOpElts);
5943 }
5944 }
5945 SDValue V;
5946 for (const auto &[Idx1, Idx2, Mask] : Data) {
5947 SDValue V1 = Values.at(Val: Idx1);
5948 SDValue V2 = Idx2 == UINT_MAX ? V1 : Values.at(Val: Idx2);
5949 V = PerformShuffle(V1, V2, Mask);
5950 Values[Idx1] = V;
5951 }
5952
5953 unsigned InsertIdx = I * NumOpElts;
5954 V = convertToScalableVector(VT: M1VT, V, DAG, Subtarget);
5955 Vec = DAG.getInsertSubvector(DL, Vec, SubVec: V, Idx: InsertIdx);
5956 }
5957 return convertFromScalableVector(VT, V: Vec, DAG, Subtarget);
5958}
5959
5960// Matches a subset of compress masks with a contiguous prefix of output
5961// elements. This could be extended to allow gaps by deciding which
5962// source elements to spuriously demand.
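// E.g. <0, 2, 3, -1> is a compress mask (strictly increasing, each source
// index at or beyond its output position, undefs only at the end), while
// <1, 0, 2, 3> is not.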
5963static bool isCompressMask(ArrayRef<int> Mask) {
5964 int Last = -1;
5965 bool SawUndef = false;
5966 for (const auto &[Idx, M] : enumerate(First&: Mask)) {
5967 if (M == -1) {
5968 SawUndef = true;
5969 continue;
5970 }
5971 if (SawUndef)
5972 return false;
5973 if (Idx > (unsigned)M)
5974 return false;
5975 if (M <= Last)
5976 return false;
5977 Last = M;
5978 }
5979 return true;
5980}
5981
5982/// Given a shuffle where the indices are disjoint between the two sources,
5983/// e.g.:
5984///
5985/// t2:v4i8 = vector_shuffle t0:v4i8, t1:v4i8, <2, 7, 1, 4>
5986///
5987/// Merge the two sources into one and do a single source shuffle:
5988///
5989/// t2:v4i8 = vselect t1:v4i8, t0:v4i8, <0, 1, 0, 1>
5990/// t3:v4i8 = vector_shuffle t2:v4i8, undef, <2, 3, 1, 0>
5991///
5992/// A vselect will either be merged into a masked instruction or be lowered as a
5993/// vmerge.vvm, which is cheaper than a vrgather.vv.
5994static SDValue lowerDisjointIndicesShuffle(ShuffleVectorSDNode *SVN,
5995 SelectionDAG &DAG,
5996 const RISCVSubtarget &Subtarget) {
5997 MVT VT = SVN->getSimpleValueType(ResNo: 0);
5998 MVT XLenVT = Subtarget.getXLenVT();
5999 SDLoc DL(SVN);
6000
6001 const ArrayRef<int> Mask = SVN->getMask();
6002
6003 // Work out which source each lane will come from.
6004 SmallVector<int, 16> Srcs(Mask.size(), -1);
6005
6006 for (int Idx : Mask) {
6007 if (Idx == -1)
6008 continue;
6009 unsigned SrcIdx = Idx % Mask.size();
6010 int Src = (uint32_t)Idx < Mask.size() ? 0 : 1;
6011 if (Srcs[SrcIdx] == -1)
6012 // Mark this source as using this lane.
6013 Srcs[SrcIdx] = Src;
6014 else if (Srcs[SrcIdx] != Src)
6015 // The other source is using this lane: not disjoint.
6016 return SDValue();
6017 }
6018
6019 SmallVector<SDValue> SelectMaskVals;
6020 for (int Lane : Srcs) {
6021 if (Lane == -1)
6022 SelectMaskVals.push_back(Elt: DAG.getUNDEF(VT: XLenVT));
6023 else
6024 SelectMaskVals.push_back(Elt: DAG.getConstant(Val: Lane ? 0 : 1, DL, VT: XLenVT));
6025 }
6026 MVT MaskVT = VT.changeVectorElementType(EltVT: MVT::i1);
6027 SDValue SelectMask = DAG.getBuildVector(VT: MaskVT, DL, Ops: SelectMaskVals);
6028 SDValue Select = DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: SelectMask,
6029 N2: SVN->getOperand(Num: 0), N3: SVN->getOperand(Num: 1));
6030
6031 // Move all indices relative to the first source.
6032 SmallVector<int> NewMask(Mask.size());
6033 for (unsigned I = 0; I < Mask.size(); I++) {
6034 if (Mask[I] == -1)
6035 NewMask[I] = -1;
6036 else
6037 NewMask[I] = Mask[I] % Mask.size();
6038 }
6039
6040 return DAG.getVectorShuffle(VT, dl: DL, N1: Select, N2: DAG.getUNDEF(VT), Mask: NewMask);
6041}
6042
6043/// Is this mask local (i.e. elements only move within their local span), and
6044/// repeating (that is, the same rearrangement is being done within each span)?
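/// E.g. with Span=4, the mask <2, 3, 0, 1, 6, 7, 4, 5> is local and repeating:
/// every element stays within its own 4-element span and each span applies the
/// same <2, 3, 0, 1> rearrangement.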
6045static bool isLocalRepeatingShuffle(ArrayRef<int> Mask, int Span) {
6046 // Require a prefix from the original mask until the consumer code
6047 // is adjusted to rewrite the mask instead of just taking a prefix.
6048 for (auto [I, M] : enumerate(First&: Mask)) {
6049 if (M == -1)
6050 continue;
6051 if ((M / Span) != (int)(I / Span))
6052 return false;
6053 int SpanIdx = I % Span;
6054 int Expected = M % Span;
6055 if (Mask[SpanIdx] != Expected)
6056 return false;
6057 }
6058 return true;
6059}
6060
6061/// Is this mask only using elements from the first span of the input?
6062static bool isLowSourceShuffle(ArrayRef<int> Mask, int Span) {
6063 return all_of(Range&: Mask, P: [&](const auto &Idx) { return Idx == -1 || Idx < Span; });
6064}
6065
6066/// Return true for a mask which performs an arbitrary shuffle within the first
6067/// span, and then repeats that same result across all remaining spans. Note
6068/// that this doesn't check if all the inputs come from a single span!
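/// E.g. with Span=4, the mask <1, 3, 0, 2, 1, 3, 0, 2> repeats the first
/// span's result across every span.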
6069static bool isSpanSplatShuffle(ArrayRef<int> Mask, int Span) {
6070 // Require a prefix from the original mask until the consumer code
6071 // is adjusted to rewrite the mask instead of just taking a prefix.
6072 for (auto [I, M] : enumerate(First&: Mask)) {
6073 if (M == -1)
6074 continue;
6075 int SpanIdx = I % Span;
6076 if (Mask[SpanIdx] != M)
6077 return false;
6078 }
6079 return true;
6080}
6081
6082/// Try to widen element type to get a new mask value for a better permutation
6083/// sequence. This doesn't try to inspect the widened mask for profitability;
6084/// we speculate the widened form is equal or better. This has the effect of
6085/// reducing mask constant sizes - allowing cheaper materialization sequences
6086/// - and index sequence sizes - reducing register pressure and materialization
6087/// cost, at the cost of (possibly) an extra VTYPE toggle.
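/// E.g. a v8i8 shuffle with mask <2, 3, 0, 1, 6, 7, 4, 5> can instead be
/// performed as a v4i16 shuffle with mask <1, 0, 3, 2>.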
6088static SDValue tryWidenMaskForShuffle(SDValue Op, SelectionDAG &DAG) {
6089 SDLoc DL(Op);
6090 MVT VT = Op.getSimpleValueType();
6091 MVT ScalarVT = VT.getVectorElementType();
6092 unsigned ElementSize = ScalarVT.getFixedSizeInBits();
6093 SDValue V0 = Op.getOperand(i: 0);
6094 SDValue V1 = Op.getOperand(i: 1);
6095 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Val&: Op)->getMask();
6096
6097 // Avoid wasted work leading to isTypeLegal check failing below
6098 if (ElementSize > 32)
6099 return SDValue();
6100
6101 SmallVector<int, 8> NewMask;
6102 if (!widenShuffleMaskElts(M: Mask, NewMask))
6103 return SDValue();
6104
6105 MVT NewEltVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(BitWidth: ElementSize * 2)
6106 : MVT::getIntegerVT(BitWidth: ElementSize * 2);
6107 MVT NewVT = MVT::getVectorVT(VT: NewEltVT, NumElements: VT.getVectorNumElements() / 2);
6108 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT: NewVT))
6109 return SDValue();
6110 V0 = DAG.getBitcast(VT: NewVT, V: V0);
6111 V1 = DAG.getBitcast(VT: NewVT, V: V1);
6112 return DAG.getBitcast(VT, V: DAG.getVectorShuffle(VT: NewVT, dl: DL, N1: V0, N2: V1, Mask: NewMask));
6113}
6114
6115static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
6116 const RISCVSubtarget &Subtarget) {
6117 SDValue V1 = Op.getOperand(i: 0);
6118 SDValue V2 = Op.getOperand(i: 1);
6119 SDLoc DL(Op);
6120 MVT XLenVT = Subtarget.getXLenVT();
6121 MVT VT = Op.getSimpleValueType();
6122 unsigned NumElts = VT.getVectorNumElements();
6123 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: Op.getNode());
6124
6125 if (VT.getVectorElementType() == MVT::i1) {
6126 // Lower to a vror.vi of a larger element type if possible before we promote
6127 // i1s to i8s.
6128 if (SDValue V = lowerVECTOR_SHUFFLEAsRotate(SVN, DAG, Subtarget))
6129 return V;
6130 if (SDValue V = lowerBitreverseShuffle(SVN, DAG, Subtarget))
6131 return V;
6132
6133 // Promote i1 shuffle to i8 shuffle.
6134 MVT WidenVT = MVT::getVectorVT(VT: MVT::i8, EC: VT.getVectorElementCount());
6135 V1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: WidenVT, Operand: V1);
6136 V2 = V2.isUndef() ? DAG.getUNDEF(VT: WidenVT)
6137 : DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: WidenVT, Operand: V2);
6138 SDValue Shuffled = DAG.getVectorShuffle(VT: WidenVT, dl: DL, N1: V1, N2: V2, Mask: SVN->getMask());
6139 return DAG.getSetCC(DL, VT, LHS: Shuffled, RHS: DAG.getConstant(Val: 0, DL, VT: WidenVT),
6140 Cond: ISD::SETNE);
6141 }
6142
6143 MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
6144
6145 // Store the return value in a single variable instead of structured bindings
6146 // so that we can pass it to GetSlide below, which cannot capture structured
6147 // bindings until C++20.
6148 auto TrueMaskVL = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
6149 auto [TrueMask, VL] = TrueMaskVL;
6150
6151 if (SVN->isSplat()) {
6152 const int Lane = SVN->getSplatIndex();
6153 if (Lane >= 0) {
6154 MVT SVT = VT.getVectorElementType();
6155
6156 // Turn splatted vector load into a strided load with an X0 stride.
6157 SDValue V = V1;
6158 // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
6159 // with undef.
6160 // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
6161 int Offset = Lane;
6162 if (V.getOpcode() == ISD::CONCAT_VECTORS) {
6163 int OpElements =
6164 V.getOperand(i: 0).getSimpleValueType().getVectorNumElements();
6165 V = V.getOperand(i: Offset / OpElements);
6166 Offset %= OpElements;
6167 }
6168
6169 // We need to ensure the load isn't atomic or volatile.
6170 if (ISD::isNormalLoad(N: V.getNode()) && cast<LoadSDNode>(Val&: V)->isSimple()) {
6171 auto *Ld = cast<LoadSDNode>(Val&: V);
6172 Offset *= SVT.getStoreSize();
6173 SDValue NewAddr = DAG.getMemBasePlusOffset(
6174 Base: Ld->getBasePtr(), Offset: TypeSize::getFixed(ExactSize: Offset), DL);
6175
6176 // If this is SEW=64 on RV32, use a strided load with a stride of x0.
6177 if (SVT.isInteger() && SVT.bitsGT(VT: XLenVT)) {
6178 SDVTList VTs = DAG.getVTList(VTs: {ContainerVT, MVT::Other});
6179 SDValue IntID =
6180 DAG.getTargetConstant(Val: Intrinsic::riscv_vlse, DL, VT: XLenVT);
6181 SDValue Ops[] = {Ld->getChain(),
6182 IntID,
6183 DAG.getUNDEF(VT: ContainerVT),
6184 NewAddr,
6185 DAG.getRegister(Reg: RISCV::X0, VT: XLenVT),
6186 VL};
6187 SDValue NewLoad = DAG.getMemIntrinsicNode(
6188 Opcode: ISD::INTRINSIC_W_CHAIN, dl: DL, VTList: VTs, Ops, MemVT: SVT,
6189 MMO: DAG.getMachineFunction().getMachineMemOperand(
6190 MMO: Ld->getMemOperand(), Offset, Size: SVT.getStoreSize()));
6191 DAG.makeEquivalentMemoryOrdering(OldLoad: Ld, NewMemOp: NewLoad);
6192 return convertFromScalableVector(VT, V: NewLoad, DAG, Subtarget);
6193 }
6194
6195 MVT SplatVT = ContainerVT;
6196
6197 // f16 with zvfhmin and bf16 need to use an integer scalar load.
6198 if (SVT == MVT::bf16 ||
6199 (SVT == MVT::f16 && !Subtarget.hasStdExtZfh())) {
6200 SVT = MVT::i16;
6201 SplatVT = ContainerVT.changeVectorElementType(EltVT: SVT);
6202 }
6203
6204 // Otherwise use a scalar load and splat. This will give the best
6205 // opportunity to fold a splat into the operation. ISel can turn it into
6206 // the x0 strided load if we aren't able to fold away the select.
6207 if (SVT.isFloatingPoint())
6208 V = DAG.getLoad(VT: SVT, dl: DL, Chain: Ld->getChain(), Ptr: NewAddr,
6209 PtrInfo: Ld->getPointerInfo().getWithOffset(O: Offset),
6210 Alignment: Ld->getBaseAlign(), MMOFlags: Ld->getMemOperand()->getFlags());
6211 else
6212 V = DAG.getExtLoad(ExtType: ISD::EXTLOAD, dl: DL, VT: XLenVT, Chain: Ld->getChain(), Ptr: NewAddr,
6213 PtrInfo: Ld->getPointerInfo().getWithOffset(O: Offset), MemVT: SVT,
6214 Alignment: Ld->getBaseAlign(),
6215 MMOFlags: Ld->getMemOperand()->getFlags());
6216 DAG.makeEquivalentMemoryOrdering(OldLoad: Ld, NewMemOp: V);
6217
6218 unsigned Opc = SplatVT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
6219 : RISCVISD::VMV_V_X_VL;
6220 SDValue Splat =
6221 DAG.getNode(Opcode: Opc, DL, VT: SplatVT, N1: DAG.getUNDEF(VT: ContainerVT), N2: V, N3: VL);
6222 Splat = DAG.getBitcast(VT: ContainerVT, V: Splat);
6223 return convertFromScalableVector(VT, V: Splat, DAG, Subtarget);
6224 }
6225
6226 V1 = convertToScalableVector(VT: ContainerVT, V: V1, DAG, Subtarget);
6227 assert(Lane < (int)NumElts && "Unexpected lane!");
6228 SDValue Gather = DAG.getNode(Opcode: RISCVISD::VRGATHER_VX_VL, DL, VT: ContainerVT,
6229 N1: V1, N2: DAG.getConstant(Val: Lane, DL, VT: XLenVT),
6230 N3: DAG.getUNDEF(VT: ContainerVT), N4: TrueMask, N5: VL);
6231 return convertFromScalableVector(VT, V: Gather, DAG, Subtarget);
6232 }
6233 }
6234
6235 // For exact VLEN m2 or greater, try to split to m1 operations if we
6236 // can split cleanly.
6237 if (SDValue V = lowerShuffleViaVRegSplitting(SVN, DAG, Subtarget))
6238 return V;
6239
6240 ArrayRef<int> Mask = SVN->getMask();
6241
6242 if (SDValue V =
6243 lowerVECTOR_SHUFFLEAsVSlide1(DL, VT, V1, V2, Mask, Subtarget, DAG))
6244 return V;
6245
6246 if (SDValue V =
6247 lowerVECTOR_SHUFFLEAsVSlidedown(DL, VT, V1, V2, Mask, Subtarget, DAG))
6248 return V;
6249
6250 // A bitrotate will be one instruction on Zvkb, so try to lower to it first if
6251 // available.
6252 if (Subtarget.hasStdExtZvkb())
6253 if (SDValue V = lowerVECTOR_SHUFFLEAsRotate(SVN, DAG, Subtarget))
6254 return V;
6255
6256 if (ShuffleVectorInst::isReverseMask(Mask, NumSrcElts: NumElts) && V2.isUndef() &&
6257 NumElts != 2)
6258 return DAG.getNode(Opcode: ISD::VECTOR_REVERSE, DL, VT, Operand: V1);
6259
6260 // If this is a deinterleave(2,4,8) and we can widen the vector, then we can
6261 // use shift and truncate to perform the shuffle.
6262 // TODO: For Factor=6, we can perform the first step of the deinterleave via
6263 // shift-and-trunc reducing total cost for everything except an mf8 result.
6264 // TODO: For Factor=4,8, we can do the same when the ratio isn't high enough
6265 // to do the entire operation.
6266 if (VT.getScalarSizeInBits() < Subtarget.getELen()) {
6267 const unsigned MaxFactor = Subtarget.getELen() / VT.getScalarSizeInBits();
6268 assert(MaxFactor == 2 || MaxFactor == 4 || MaxFactor == 8);
6269 for (unsigned Factor = 2; Factor <= MaxFactor; Factor <<= 1) {
6270 unsigned Index = 0;
6271 if (ShuffleVectorInst::isDeInterleaveMaskOfFactor(Mask, Factor, Index) &&
6272 1 < count_if(Range&: Mask, P: [](int Idx) { return Idx != -1; })) {
6273 if (SDValue Src = getSingleShuffleSrc(VT, V1, V2))
6274 return getDeinterleaveShiftAndTrunc(DL, VT, Src, Factor, Index, DAG);
6275 if (1 < count_if(Range&: Mask,
6276 P: [&Mask](int Idx) { return Idx < (int)Mask.size(); }) &&
6277 1 < count_if(Range&: Mask, P: [&Mask](int Idx) {
6278 return Idx >= (int)Mask.size();
6279 })) {
6280 // Narrow each source and concatenate them.
6281 // FIXME: For small LMUL it is better to concatenate first.
6282 MVT EltVT = VT.getVectorElementType();
6283 auto EltCnt = VT.getVectorElementCount();
6284 MVT SubVT =
6285 MVT::getVectorVT(VT: EltVT, EC: EltCnt.divideCoefficientBy(RHS: Factor));
6286
6287 SDValue Lo =
6288 getDeinterleaveShiftAndTrunc(DL, VT: SubVT, Src: V1, Factor, Index, DAG);
6289 SDValue Hi =
6290 getDeinterleaveShiftAndTrunc(DL, VT: SubVT, Src: V2, Factor, Index, DAG);
6291
6292 SDValue Concat =
6293 DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL,
6294 VT: SubVT.getDoubleNumVectorElementsVT(), N1: Lo, N2: Hi);
6295 if (Factor == 2)
6296 return Concat;
6297
6298 SDValue Vec = DAG.getUNDEF(VT);
6299 return DAG.getInsertSubvector(DL, Vec, SubVec: Concat, Idx: 0);
6300 }
6301 }
6302 }
6303 }
6304
6305 // If this is a deinterleave(2), try using vunzip{a,b}. This mostly catches
6306 // e64 which can't match above.
6307 unsigned Index = 0;
6308 if (Subtarget.hasVendorXRivosVizip() &&
6309 ShuffleVectorInst::isDeInterleaveMaskOfFactor(Mask, Factor: 2, Index) &&
6310 1 < count_if(Range&: Mask, P: [](int Idx) { return Idx != -1; })) {
6311 unsigned Opc =
6312 Index == 0 ? RISCVISD::RI_VUNZIP2A_VL : RISCVISD::RI_VUNZIP2B_VL;
6313 if (V2.isUndef())
6314 return lowerVZIP(Opc, Op0: V1, Op1: V2, DL, DAG, Subtarget);
6315 if (auto VLEN = Subtarget.getRealVLen();
6316 VLEN && VT.getSizeInBits().getKnownMinValue() % *VLEN == 0)
6317 return lowerVZIP(Opc, Op0: V1, Op1: V2, DL, DAG, Subtarget);
6318 if (SDValue Src = foldConcatVector(V1, V2)) {
6319 EVT NewVT = VT.getDoubleNumVectorElementsVT();
6320 Src = DAG.getExtractSubvector(DL, VT: NewVT, Vec: Src, Idx: 0);
6321 SDValue Res =
6322 lowerVZIP(Opc, Op0: Src, Op1: DAG.getUNDEF(VT: NewVT), DL, DAG, Subtarget);
6323 return DAG.getExtractSubvector(DL, VT, Vec: Res, Idx: 0);
6324 }
6325 // Deinterleave each source and concatenate them, or concat first, then
6326 // deinterleave.
6327 if (1 < count_if(Range&: Mask,
6328 P: [&Mask](int Idx) { return Idx < (int)Mask.size(); }) &&
6329 1 < count_if(Range&: Mask,
6330 P: [&Mask](int Idx) { return Idx >= (int)Mask.size(); })) {
6331
6332 const unsigned EltSize = VT.getScalarSizeInBits();
6333 const unsigned MinVLMAX = Subtarget.getRealMinVLen() / EltSize;
6334 if (NumElts < MinVLMAX) {
6335 MVT ConcatVT = VT.getDoubleNumVectorElementsVT();
6336 SDValue Concat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ConcatVT, N1: V1, N2: V2);
6337 SDValue Res =
6338 lowerVZIP(Opc, Op0: Concat, Op1: DAG.getUNDEF(VT: ConcatVT), DL, DAG, Subtarget);
6339 return DAG.getExtractSubvector(DL, VT, Vec: Res, Idx: 0);
6340 }
6341
6342 SDValue Lo = lowerVZIP(Opc, Op0: V1, Op1: DAG.getUNDEF(VT), DL, DAG, Subtarget);
6343 SDValue Hi = lowerVZIP(Opc, Op0: V2, Op1: DAG.getUNDEF(VT), DL, DAG, Subtarget);
6344
6345 MVT SubVT = VT.getHalfNumVectorElementsVT();
6346 return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT,
6347 N1: DAG.getExtractSubvector(DL, VT: SubVT, Vec: Lo, Idx: 0),
6348 N2: DAG.getExtractSubvector(DL, VT: SubVT, Vec: Hi, Idx: 0));
6349 }
6350 }
6351
6352 if (SDValue V =
6353 lowerVECTOR_SHUFFLEAsVSlideup(DL, VT, V1, V2, Mask, Subtarget, DAG))
6354 return V;
6355
6356 // Detect an interleave shuffle and lower to
6357  // (vwmaccu.vx (vwaddu.vv lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
6358 int EvenSrc, OddSrc;
6359 if (isInterleaveShuffle(Mask, VT, EvenSrc, OddSrc, Subtarget) &&
6360 !(NumElts == 2 &&
6361 ShuffleVectorInst::isSingleSourceMask(Mask, NumSrcElts: Mask.size()))) {
6362 // Extract the halves of the vectors.
6363 MVT HalfVT = VT.getHalfNumVectorElementsVT();
6364
6365 // Recognize if one half is actually undef; the matching above will
6366 // otherwise reuse the even stream for the undef one. This improves
6367 // spread(2) shuffles.
6368 bool LaneIsUndef[2] = { true, true};
6369 for (const auto &[Idx, M] : enumerate(First&: Mask))
6370 LaneIsUndef[Idx % 2] &= (M == -1);
6371
6372 int Size = Mask.size();
6373 SDValue EvenV, OddV;
6374 if (LaneIsUndef[0]) {
6375 EvenV = DAG.getUNDEF(VT: HalfVT);
6376 } else {
6377 assert(EvenSrc >= 0 && "Undef source?");
6378 EvenV = (EvenSrc / Size) == 0 ? V1 : V2;
6379 EvenV = DAG.getExtractSubvector(DL, VT: HalfVT, Vec: EvenV, Idx: EvenSrc % Size);
6380 }
6381
6382 if (LaneIsUndef[1]) {
6383 OddV = DAG.getUNDEF(VT: HalfVT);
6384 } else {
6385 assert(OddSrc >= 0 && "Undef source?");
6386 OddV = (OddSrc / Size) == 0 ? V1 : V2;
6387 OddV = DAG.getExtractSubvector(DL, VT: HalfVT, Vec: OddV, Idx: OddSrc % Size);
6388 }
6389
6390 // Prefer vzip2a if available.
6391 // TODO: Extend to matching zip2b if EvenSrc and OddSrc allow.
6392 if (Subtarget.hasVendorXRivosVizip()) {
6393 EvenV = DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT), SubVec: EvenV, Idx: 0);
6394 OddV = DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT), SubVec: OddV, Idx: 0);
6395 return lowerVZIP(Opc: RISCVISD::RI_VZIP2A_VL, Op0: EvenV, Op1: OddV, DL, DAG, Subtarget);
6396 }
6397 return getWideningInterleave(EvenV, OddV, DL, DAG, Subtarget);
6398 }
6399
6400  // Recognize a pattern which can be handled via a pair of vslideup/vslidedown
6401 // instructions (in any combination) with masking on the second instruction.
6402 // Also handles masked slides into an identity source, and single slides
6403 // without masking. Avoid matching bit rotates (which are not also element
6404 // rotates) as slide pairs. This is a performance heuristic, not a
6405 // functional check.
6406 std::array<std::pair<int, int>, 2> SrcInfo;
6407 unsigned RotateAmt;
6408 MVT RotateVT;
6409 if (::isMaskedSlidePair(Mask, SrcInfo) &&
6410 (isElementRotate(SrcInfo, NumElts) ||
6411 !isLegalBitRotate(Mask, VT, Subtarget, RotateVT, RotateAmt))) {
6412 SDValue Sources[2];
6413 auto GetSourceFor = [&](const std::pair<int, int> &Info) {
6414 int SrcIdx = Info.first;
6415 assert(SrcIdx == 0 || SrcIdx == 1);
6416 SDValue &Src = Sources[SrcIdx];
6417 if (!Src) {
6418 SDValue SrcV = SrcIdx == 0 ? V1 : V2;
6419 Src = convertToScalableVector(VT: ContainerVT, V: SrcV, DAG, Subtarget);
6420 }
6421 return Src;
6422 };
6423 auto GetSlide = [&](const std::pair<int, int> &Src, SDValue Mask,
6424 SDValue Passthru) {
6425 auto [TrueMask, VL] = TrueMaskVL;
6426 SDValue SrcV = GetSourceFor(Src);
6427 int SlideAmt = Src.second;
6428 if (SlideAmt == 0) {
6429 // Should never be second operation
6430 assert(Mask == TrueMask);
6431 return SrcV;
6432 }
6433 if (SlideAmt < 0)
6434 return getVSlidedown(DAG, Subtarget, DL, VT: ContainerVT, Passthru, Op: SrcV,
6435 Offset: DAG.getConstant(Val: -SlideAmt, DL, VT: XLenVT), Mask, VL,
6436 Policy: RISCVVType::TAIL_AGNOSTIC);
6437 return getVSlideup(DAG, Subtarget, DL, VT: ContainerVT, Passthru, Op: SrcV,
6438 Offset: DAG.getConstant(Val: SlideAmt, DL, VT: XLenVT), Mask, VL,
6439 Policy: RISCVVType::TAIL_AGNOSTIC);
6440 };
6441
6442 if (SrcInfo[1].first == -1) {
6443 SDValue Res = DAG.getUNDEF(VT: ContainerVT);
6444 Res = GetSlide(SrcInfo[0], TrueMask, Res);
6445 return convertFromScalableVector(VT, V: Res, DAG, Subtarget);
6446 }
6447
6448 if (Subtarget.hasVendorXRivosVizip()) {
6449 bool TryWiden = false;
6450 unsigned Factor;
6451 if (isZipEven(SrcInfo, Mask, Factor)) {
6452 if (Factor == 1) {
6453 SDValue Src1 = SrcInfo[0].first == 0 ? V1 : V2;
6454 SDValue Src2 = SrcInfo[1].first == 0 ? V1 : V2;
6455 return lowerVZIP(Opc: RISCVISD::RI_VZIPEVEN_VL, Op0: Src1, Op1: Src2, DL, DAG,
6456 Subtarget);
6457 }
6458 TryWiden = true;
6459 }
6460 if (isZipOdd(SrcInfo, Mask, Factor)) {
6461 if (Factor == 1) {
6462 SDValue Src1 = SrcInfo[1].first == 0 ? V1 : V2;
6463 SDValue Src2 = SrcInfo[0].first == 0 ? V1 : V2;
6464 return lowerVZIP(Opc: RISCVISD::RI_VZIPODD_VL, Op0: Src1, Op1: Src2, DL, DAG,
6465 Subtarget);
6466 }
6467 TryWiden = true;
6468 }
6469      // If we found a widening opportunity which would let us form a
6470 // zipeven or zipodd, use the generic code to widen the shuffle
6471 // and recurse through this logic.
6472 if (TryWiden)
6473 if (SDValue V = tryWidenMaskForShuffle(Op, DAG))
6474 return V;
6475 }
6476
6477 // Build the mask. Note that vslideup unconditionally preserves elements
6478 // below the slide amount in the destination, and thus those elements are
6479 // undefined in the mask. If the mask ends up all true (or undef), it
6480 // will be folded away by general logic.
6481 SmallVector<SDValue> MaskVals;
6482 for (const auto &[Idx, M] : enumerate(First&: Mask)) {
6483 if (M < 0 ||
6484 (SrcInfo[1].second > 0 && Idx < (unsigned)SrcInfo[1].second)) {
6485 MaskVals.push_back(Elt: DAG.getUNDEF(VT: XLenVT));
6486 continue;
6487 }
6488 int Src = M >= (int)NumElts;
6489 int Diff = (int)Idx - (M % NumElts);
6490 bool C = Src == SrcInfo[1].first && Diff == SrcInfo[1].second;
6491 assert(C ^ (Src == SrcInfo[0].first && Diff == SrcInfo[0].second) &&
6492 "Must match exactly one of the two slides");
6493 MaskVals.push_back(Elt: DAG.getConstant(Val: C, DL, VT: XLenVT));
6494 }
6495 assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
6496 MVT MaskVT = MVT::getVectorVT(VT: MVT::i1, NumElements: NumElts);
6497 SDValue SelectMask = convertToScalableVector(
6498 VT: ContainerVT.changeVectorElementType(EltVT: MVT::i1),
6499 V: DAG.getBuildVector(VT: MaskVT, DL, Ops: MaskVals), DAG, Subtarget);
6500
6501 SDValue Res = DAG.getUNDEF(VT: ContainerVT);
6502 Res = GetSlide(SrcInfo[0], TrueMask, Res);
6503 Res = GetSlide(SrcInfo[1], SelectMask, Res);
6504 return convertFromScalableVector(VT, V: Res, DAG, Subtarget);
6505 }
6506
6507 // Handle any remaining single source shuffles
6508 assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
6509 if (V2.isUndef()) {
6510 // We might be able to express the shuffle as a bitrotate. But even if we
6511 // don't have Zvkb and have to expand, the expanded sequence of approx. 2
6512 // shifts and a vor will have a higher throughput than a vrgather.
6513 if (SDValue V = lowerVECTOR_SHUFFLEAsRotate(SVN, DAG, Subtarget))
6514 return V;
6515
6516 if (SDValue V = lowerVECTOR_SHUFFLEAsVRGatherVX(SVN, Subtarget, DAG))
6517 return V;
6518
6519 // Match a spread(4,8) which can be done via extend and shift. Spread(2)
6520 // is fully covered in interleave(2) above, so it is ignored here.
6521 if (VT.getScalarSizeInBits() < Subtarget.getELen()) {
6522 unsigned MaxFactor = Subtarget.getELen() / VT.getScalarSizeInBits();
6523 assert(MaxFactor == 2 || MaxFactor == 4 || MaxFactor == 8);
6524 for (unsigned Factor = 4; Factor <= MaxFactor; Factor <<= 1) {
6525 unsigned Index;
6526 if (RISCVTargetLowering::isSpreadMask(Mask, Factor, Index)) {
6527 MVT NarrowVT =
6528 MVT::getVectorVT(VT: VT.getVectorElementType(), NumElements: NumElts / Factor);
6529 SDValue Src = DAG.getExtractSubvector(DL, VT: NarrowVT, Vec: V1, Idx: 0);
6530 return getWideningSpread(V: Src, Factor, Index, DL, DAG);
6531 }
6532 }
6533 }
6534
6535 // If only a prefix of the source elements influence a prefix of the
6536 // destination elements, try to see if we can reduce the required LMUL
6537 unsigned MinVLen = Subtarget.getRealMinVLen();
6538 unsigned MinVLMAX = MinVLen / VT.getScalarSizeInBits();
6539 if (NumElts > MinVLMAX) {
6540 unsigned MaxIdx = 0;
6541 for (auto [I, M] : enumerate(First&: Mask)) {
6542 if (M == -1)
6543 continue;
6544 MaxIdx = std::max(a: std::max(a: (unsigned)I, b: (unsigned)M), b: MaxIdx);
6545 }
6546 unsigned NewNumElts =
6547 std::max(a: (uint64_t)MinVLMAX, b: PowerOf2Ceil(A: MaxIdx + 1));
6548 if (NewNumElts != NumElts) {
6549 MVT NewVT = MVT::getVectorVT(VT: VT.getVectorElementType(), NumElements: NewNumElts);
6550 V1 = DAG.getExtractSubvector(DL, VT: NewVT, Vec: V1, Idx: 0);
6551 SDValue Res = DAG.getVectorShuffle(VT: NewVT, dl: DL, N1: V1, N2: DAG.getUNDEF(VT: NewVT),
6552 Mask: Mask.take_front(N: NewNumElts));
6553 return DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT), SubVec: Res, Idx: 0);
6554 }
6555 }
6556
6557 // Before hitting generic lowering fallbacks, try to widen the mask
6558 // to a wider SEW.
6559 if (SDValue V = tryWidenMaskForShuffle(Op, DAG))
6560 return V;
6561
6562 // Can we generate a vcompress instead of a vrgather? These scale better
6563 // at high LMUL, at the cost of not being able to fold a following select
6564 // into them. The mask constants are also smaller than the index vector
6565 // constants, and thus easier to materialize.
6566 if (isCompressMask(Mask)) {
6567 SmallVector<SDValue> MaskVals(NumElts,
6568 DAG.getConstant(Val: false, DL, VT: XLenVT));
6569 for (auto Idx : Mask) {
6570 if (Idx == -1)
6571 break;
6572 assert(Idx >= 0 && (unsigned)Idx < NumElts);
6573 MaskVals[Idx] = DAG.getConstant(Val: true, DL, VT: XLenVT);
6574 }
6575 MVT MaskVT = MVT::getVectorVT(VT: MVT::i1, NumElements: NumElts);
6576 SDValue CompressMask = DAG.getBuildVector(VT: MaskVT, DL, Ops: MaskVals);
6577 return DAG.getNode(Opcode: ISD::VECTOR_COMPRESS, DL, VT, N1: V1, N2: CompressMask,
6578 N3: DAG.getUNDEF(VT));
6579 }
6580
6581 if (VT.getScalarSizeInBits() == 8 &&
6582 any_of(Range&: Mask, P: [&](const auto &Idx) { return Idx > 255; })) {
6583 // On such a vector we're unable to use i8 as the index type.
6584 // FIXME: We could promote the index to i16 and use vrgatherei16, but that
6585 // may involve vector splitting if we're already at LMUL=8, or our
6586 // user-supplied maximum fixed-length LMUL.
6587 return SDValue();
6588 }
6589
6590 // Base case for the two operand recursion below - handle the worst case
6591 // single source shuffle.
6592 unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
6593 MVT IndexVT = VT.changeTypeToInteger();
6594 // Since we can't introduce illegal index types at this stage, use i16 and
6595 // vrgatherei16 if the corresponding index type for plain vrgather is greater
6596 // than XLenVT.
6597 if (IndexVT.getScalarType().bitsGT(VT: XLenVT)) {
6598 GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
6599 IndexVT = IndexVT.changeVectorElementType(EltVT: MVT::i16);
6600 }
6601
6602 // If the mask allows, we can do all the index computation in 16 bits. This
6603 // requires less work and less register pressure at high LMUL, and creates
6604 // smaller constants which may be cheaper to materialize.
6605 if (IndexVT.getScalarType().bitsGT(VT: MVT::i16) && isUInt<16>(x: NumElts - 1) &&
6606 (IndexVT.getSizeInBits() / Subtarget.getRealMinVLen()) > 1) {
6607 GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
6608 IndexVT = IndexVT.changeVectorElementType(EltVT: MVT::i16);
6609 }
6610
6611 MVT IndexContainerVT =
6612 ContainerVT.changeVectorElementType(EltVT: IndexVT.getScalarType());
6613
6614 V1 = convertToScalableVector(VT: ContainerVT, V: V1, DAG, Subtarget);
6615 SmallVector<SDValue> GatherIndicesLHS;
6616 for (int MaskIndex : Mask) {
6617 bool IsLHSIndex = MaskIndex < (int)NumElts && MaskIndex >= 0;
6618 GatherIndicesLHS.push_back(Elt: IsLHSIndex
6619 ? DAG.getConstant(Val: MaskIndex, DL, VT: XLenVT)
6620 : DAG.getUNDEF(VT: XLenVT));
6621 }
6622 SDValue LHSIndices = DAG.getBuildVector(VT: IndexVT, DL, Ops: GatherIndicesLHS);
6623 LHSIndices =
6624 convertToScalableVector(VT: IndexContainerVT, V: LHSIndices, DAG, Subtarget);
6625 // At m1 and less, there's no point trying any of the high LMUL splitting
6626 // techniques. TODO: Should we reconsider this for DLEN < VLEN?
6627 if (NumElts <= MinVLMAX) {
6628 SDValue Gather = DAG.getNode(Opcode: GatherVVOpc, DL, VT: ContainerVT, N1: V1, N2: LHSIndices,
6629 N3: DAG.getUNDEF(VT: ContainerVT), N4: TrueMask, N5: VL);
6630 return convertFromScalableVector(VT, V: Gather, DAG, Subtarget);
6631 }
6632
6633 const MVT M1VT = RISCVTargetLowering::getM1VT(VT: ContainerVT);
6634 EVT SubIndexVT = M1VT.changeVectorElementType(EltVT: IndexVT.getScalarType());
6635 auto [InnerTrueMask, InnerVL] =
6636 getDefaultScalableVLOps(VecVT: M1VT, DL, DAG, Subtarget);
6637 int N =
6638 ContainerVT.getVectorMinNumElements() / M1VT.getVectorMinNumElements();
6639 assert(isPowerOf2_32(N) && N <= 8);
6640
6641 // If we have a locally repeating mask, then we can reuse the first
6642 // register in the index register group for all registers within the
    // source register group. TODO: This generalizes to m2 and m4.
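    // E.g., with a span of 4, the mask <1,0,3,2, 5,4,7,6> applies the same
    // within-span permutation to both spans, so the single m1 index vector
    // <1,0,3,2> can be reused for every register of the source group.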
6644 if (isLocalRepeatingShuffle(Mask, Span: MinVLMAX)) {
6645 SDValue SubIndex = DAG.getExtractSubvector(DL, VT: SubIndexVT, Vec: LHSIndices, Idx: 0);
6646 SDValue Gather = DAG.getUNDEF(VT: ContainerVT);
6647 for (int i = 0; i < N; i++) {
6648 unsigned SubIdx = M1VT.getVectorMinNumElements() * i;
6649 SDValue SubV1 = DAG.getExtractSubvector(DL, VT: M1VT, Vec: V1, Idx: SubIdx);
6650 SDValue SubVec =
6651 DAG.getNode(Opcode: GatherVVOpc, DL, VT: M1VT, N1: SubV1, N2: SubIndex,
6652 N3: DAG.getUNDEF(VT: M1VT), N4: InnerTrueMask, N5: InnerVL);
6653 Gather = DAG.getInsertSubvector(DL, Vec: Gather, SubVec, Idx: SubIdx);
6654 }
6655 return convertFromScalableVector(VT, V: Gather, DAG, Subtarget);
6656 }
6657
6658 // If we have a shuffle which only uses the first register in our source
6659 // register group, and repeats the same index across all spans, we can
6660 // use a single vrgather (and possibly some register moves).
6661 // TODO: This can be generalized for m2 or m4, or for any shuffle for
6662 // which we can do a linear number of shuffles to form an m1 which
6663 // contains all the output elements.
6664 if (isLowSourceShuffle(Mask, Span: MinVLMAX) &&
6665 isSpanSplatShuffle(Mask, Span: MinVLMAX)) {
6666 SDValue SubV1 = DAG.getExtractSubvector(DL, VT: M1VT, Vec: V1, Idx: 0);
6667 SDValue SubIndex = DAG.getExtractSubvector(DL, VT: SubIndexVT, Vec: LHSIndices, Idx: 0);
6668 SDValue SubVec = DAG.getNode(Opcode: GatherVVOpc, DL, VT: M1VT, N1: SubV1, N2: SubIndex,
6669 N3: DAG.getUNDEF(VT: M1VT), N4: InnerTrueMask, N5: InnerVL);
6670 SDValue Gather = DAG.getUNDEF(VT: ContainerVT);
6671 for (int i = 0; i < N; i++)
6672 Gather = DAG.getInsertSubvector(DL, Vec: Gather, SubVec,
6673 Idx: M1VT.getVectorMinNumElements() * i);
6674 return convertFromScalableVector(VT, V: Gather, DAG, Subtarget);
6675 }
6676
6677 // If we have a shuffle which only uses the first register in our
6678 // source register group, we can do a linear number of m1 vrgathers
    // reusing the same source register (but with different indices).
6680 // TODO: This can be generalized for m2 or m4, or for any shuffle
6681 // for which we can do a vslidedown followed by this expansion.
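    // Each iteration slides the index vector down by one m1 register's worth
    // of elements, so iteration i gathers with the indices that originally
    // occupied the i-th register of the index group, while always reading from
    // the same source register SubV1.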
6682 if (isLowSourceShuffle(Mask, Span: MinVLMAX)) {
6683 SDValue SlideAmt =
6684 DAG.getElementCount(DL, VT: XLenVT, EC: M1VT.getVectorElementCount());
6685 SDValue SubV1 = DAG.getExtractSubvector(DL, VT: M1VT, Vec: V1, Idx: 0);
6686 SDValue Gather = DAG.getUNDEF(VT: ContainerVT);
6687 for (int i = 0; i < N; i++) {
6688 if (i != 0)
6689 LHSIndices = getVSlidedown(DAG, Subtarget, DL, VT: IndexContainerVT,
6690 Passthru: DAG.getUNDEF(VT: IndexContainerVT), Op: LHSIndices,
6691 Offset: SlideAmt, Mask: TrueMask, VL);
6692 SDValue SubIndex =
6693 DAG.getExtractSubvector(DL, VT: SubIndexVT, Vec: LHSIndices, Idx: 0);
6694 SDValue SubVec =
6695 DAG.getNode(Opcode: GatherVVOpc, DL, VT: M1VT, N1: SubV1, N2: SubIndex,
6696 N3: DAG.getUNDEF(VT: M1VT), N4: InnerTrueMask, N5: InnerVL);
6697 Gather = DAG.getInsertSubvector(DL, Vec: Gather, SubVec,
6698 Idx: M1VT.getVectorMinNumElements() * i);
6699 }
6700 return convertFromScalableVector(VT, V: Gather, DAG, Subtarget);
6701 }
6702
    // Fall back to a generic vrgather if we can't find anything better.
    // On many machines, this will be O(LMUL^2).
6705 SDValue Gather = DAG.getNode(Opcode: GatherVVOpc, DL, VT: ContainerVT, N1: V1, N2: LHSIndices,
6706 N3: DAG.getUNDEF(VT: ContainerVT), N4: TrueMask, N5: VL);
6707 return convertFromScalableVector(VT, V: Gather, DAG, Subtarget);
6708 }
6709
6710 // As a backup, shuffles can be lowered via a vrgather instruction, possibly
6711 // merged with a second vrgather.
6712 SmallVector<int> ShuffleMaskLHS, ShuffleMaskRHS;
6713
  // Split the shuffle mask into the indices used from each source; positions
  // taken from the other source become undef (-1), so each source can be
  // shuffled independently and the results blended back together below.
6716 for (int MaskIndex : Mask) {
6717 bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
6718 ShuffleMaskLHS.push_back(Elt: IsLHSOrUndefIndex && MaskIndex >= 0
6719 ? MaskIndex : -1);
6720 ShuffleMaskRHS.push_back(Elt: IsLHSOrUndefIndex ? -1 : (MaskIndex - NumElts));
6721 }
6722
6723 // If the mask indices are disjoint between the two sources, we can lower it
6724 // as a vselect + a single source vrgather.vv. Don't do this if we think the
6725 // operands may end up being lowered to something cheaper than a vrgather.vv.
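  // E.g., for the mask <2, 7, 1, 4> on two v4 sources, V1 only supplies source
  // positions 1 and 2 while V2 only supplies positions 0 and 3 (7 and 4 modulo
  // 4), so the sources can be merged element-wise with a vselect and the
  // result permuted with a single vrgather.vv.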
6726 if (!DAG.isSplatValue(V: V2) && !DAG.isSplatValue(V: V1) &&
6727 !ShuffleVectorSDNode::isSplatMask(Mask: ShuffleMaskLHS) &&
6728 !ShuffleVectorSDNode::isSplatMask(Mask: ShuffleMaskRHS) &&
6729 !ShuffleVectorInst::isIdentityMask(Mask: ShuffleMaskLHS, NumSrcElts: NumElts) &&
6730 !ShuffleVectorInst::isIdentityMask(Mask: ShuffleMaskRHS, NumSrcElts: NumElts))
6731 if (SDValue V = lowerDisjointIndicesShuffle(SVN, DAG, Subtarget))
6732 return V;
6733
6734 // Before hitting generic lowering fallbacks, try to widen the mask
6735 // to a wider SEW.
6736 if (SDValue V = tryWidenMaskForShuffle(Op, DAG))
6737 return V;
6738
6739 // Try to pick a profitable operand order.
6740 bool SwapOps = DAG.isSplatValue(V: V2) && !DAG.isSplatValue(V: V1);
6741 SwapOps = SwapOps ^ ShuffleVectorInst::isIdentityMask(Mask: ShuffleMaskRHS, NumSrcElts: NumElts);
6742
6743 // Recursively invoke lowering for each operand if we had two
6744 // independent single source shuffles, and then combine the result via a
6745 // vselect. Note that the vselect will likely be folded back into the
6746 // second permute (vrgather, or other) by the post-isel combine.
6747 V1 = DAG.getVectorShuffle(VT, dl: DL, N1: V1, N2: DAG.getUNDEF(VT), Mask: ShuffleMaskLHS);
6748 V2 = DAG.getVectorShuffle(VT, dl: DL, N1: V2, N2: DAG.getUNDEF(VT), Mask: ShuffleMaskRHS);
6749
6750 SmallVector<SDValue> MaskVals;
6751 for (int MaskIndex : Mask) {
6752 bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ !SwapOps;
6753 MaskVals.push_back(Elt: DAG.getConstant(Val: SelectMaskVal, DL, VT: XLenVT));
6754 }
6755
6756 assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
6757 MVT MaskVT = MVT::getVectorVT(VT: MVT::i1, NumElements: NumElts);
6758 SDValue SelectMask = DAG.getBuildVector(VT: MaskVT, DL, Ops: MaskVals);
6759
6760 if (SwapOps)
6761 return DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: SelectMask, N2: V1, N3: V2);
6762 return DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: SelectMask, N2: V2, N3: V1);
6763}
6764
6765bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
6766 // Only support legal VTs for other shuffles for now.
6767 if (!isTypeLegal(VT) || !Subtarget.hasVInstructions())
6768 return false;
6769
6770 // Support splats for any type. These should type legalize well.
6771 if (ShuffleVectorSDNode::isSplatMask(Mask: M))
6772 return true;
6773
6774 const unsigned NumElts = M.size();
6775 MVT SVT = VT.getSimpleVT();
6776
6777 // Not for i1 vectors.
6778 if (SVT.getScalarType() == MVT::i1)
6779 return false;
6780
6781 std::array<std::pair<int, int>, 2> SrcInfo;
6782 int Dummy1, Dummy2;
6783 return ShuffleVectorInst::isReverseMask(Mask: M, NumSrcElts: NumElts) ||
6784 (::isMaskedSlidePair(Mask: M, SrcInfo) &&
6785 isElementRotate(SrcInfo, NumElts)) ||
6786 isInterleaveShuffle(Mask: M, VT: SVT, EvenSrc&: Dummy1, OddSrc&: Dummy2, Subtarget);
6787}
6788
6789// Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
6790// the exponent.
6791SDValue
6792RISCVTargetLowering::lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op,
6793 SelectionDAG &DAG) const {
6794 MVT VT = Op.getSimpleValueType();
6795 unsigned EltSize = VT.getScalarSizeInBits();
6796 SDValue Src = Op.getOperand(i: 0);
6797 SDLoc DL(Op);
6798 MVT ContainerVT = VT;
6799
6800 SDValue Mask, VL;
6801 if (Op->isVPOpcode()) {
6802 Mask = Op.getOperand(i: 1);
6803 if (VT.isFixedLengthVector())
6804 Mask = convertToScalableVector(VT: getMaskTypeFor(VecVT: ContainerVT), V: Mask, DAG,
6805 Subtarget);
6806 VL = Op.getOperand(i: 2);
6807 }
6808
  // We choose an FP type that can represent the value exactly if possible.
  // Otherwise, we use a round-towards-zero conversion so that the exponent of
  // the result is still correct.
6811 // TODO: Use f16 for i8 when possible?
6812 MVT FloatEltVT = (EltSize >= 32) ? MVT::f64 : MVT::f32;
6813 if (!isTypeLegal(VT: MVT::getVectorVT(VT: FloatEltVT, EC: VT.getVectorElementCount())))
6814 FloatEltVT = MVT::f32;
6815 MVT FloatVT = MVT::getVectorVT(VT: FloatEltVT, EC: VT.getVectorElementCount());
6816
6817 // Legal types should have been checked in the RISCVTargetLowering
6818 // constructor.
6819 // TODO: Splitting may make sense in some cases.
6820 assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
6821 "Expected legal float type!");
6822
6823 // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
6824 // The trailing zero count is equal to log2 of this single bit value.
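  // E.g., for x = 0b0110100, x & -x = 0b100 = 2^2; the converted value has a
  // biased exponent of Bias + 2, so subtracting the bias yields cttz(x) = 2.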
6825 if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
6826 SDValue Neg = DAG.getNegative(Val: Src, DL, VT);
6827 Src = DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Src, N2: Neg);
6828 } else if (Op.getOpcode() == ISD::VP_CTTZ_ZERO_UNDEF) {
6829 SDValue Neg = DAG.getNode(Opcode: ISD::VP_SUB, DL, VT, N1: DAG.getConstant(Val: 0, DL, VT),
6830 N2: Src, N3: Mask, N4: VL);
6831 Src = DAG.getNode(Opcode: ISD::VP_AND, DL, VT, N1: Src, N2: Neg, N3: Mask, N4: VL);
6832 }
6833
6834 // We have a legal FP type, convert to it.
6835 SDValue FloatVal;
6836 if (FloatVT.bitsGT(VT)) {
6837 if (Op->isVPOpcode())
6838 FloatVal = DAG.getNode(Opcode: ISD::VP_UINT_TO_FP, DL, VT: FloatVT, N1: Src, N2: Mask, N3: VL);
6839 else
6840 FloatVal = DAG.getNode(Opcode: ISD::UINT_TO_FP, DL, VT: FloatVT, Operand: Src);
6841 } else {
    // Use RTZ to avoid rounding from influencing the exponent of FloatVal.
6843 if (VT.isFixedLengthVector()) {
6844 ContainerVT = getContainerForFixedLengthVector(VT);
6845 Src = convertToScalableVector(VT: ContainerVT, V: Src, DAG, Subtarget);
6846 }
6847 if (!Op->isVPOpcode())
6848 std::tie(args&: Mask, args&: VL) = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
6849 SDValue RTZRM =
6850 DAG.getTargetConstant(Val: RISCVFPRndMode::RTZ, DL, VT: Subtarget.getXLenVT());
6851 MVT ContainerFloatVT =
6852 MVT::getVectorVT(VT: FloatEltVT, EC: ContainerVT.getVectorElementCount());
6853 FloatVal = DAG.getNode(Opcode: RISCVISD::VFCVT_RM_F_XU_VL, DL, VT: ContainerFloatVT,
6854 N1: Src, N2: Mask, N3: RTZRM, N4: VL);
6855 if (VT.isFixedLengthVector())
6856 FloatVal = convertFromScalableVector(VT: FloatVT, V: FloatVal, DAG, Subtarget);
6857 }
6858 // Bitcast to integer and shift the exponent to the LSB.
6859 EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
6860 SDValue Bitcast = DAG.getBitcast(VT: IntVT, V: FloatVal);
6861 unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
6862
6863 SDValue Exp;
  // Restore to the original type. The truncation after the SRL is there so it
  // can be selected as a vnsrl.
6865 if (Op->isVPOpcode()) {
6866 Exp = DAG.getNode(Opcode: ISD::VP_SRL, DL, VT: IntVT, N1: Bitcast,
6867 N2: DAG.getConstant(Val: ShiftAmt, DL, VT: IntVT), N3: Mask, N4: VL);
6868 Exp = DAG.getVPZExtOrTrunc(DL, VT, Op: Exp, Mask, EVL: VL);
6869 } else {
6870 Exp = DAG.getNode(Opcode: ISD::SRL, DL, VT: IntVT, N1: Bitcast,
6871 N2: DAG.getConstant(Val: ShiftAmt, DL, VT: IntVT));
6872 if (IntVT.bitsLT(VT))
6873 Exp = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT, Operand: Exp);
6874 else if (IntVT.bitsGT(VT))
6875 Exp = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT, Operand: Exp);
6876 }
6877
6878 // The exponent contains log2 of the value in biased form.
6879 unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
6880 // For trailing zeros, we just need to subtract the bias.
6881 if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
6882 return DAG.getNode(Opcode: ISD::SUB, DL, VT, N1: Exp,
6883 N2: DAG.getConstant(Val: ExponentBias, DL, VT));
6884 if (Op.getOpcode() == ISD::VP_CTTZ_ZERO_UNDEF)
6885 return DAG.getNode(Opcode: ISD::VP_SUB, DL, VT, N1: Exp,
6886 N2: DAG.getConstant(Val: ExponentBias, DL, VT), N3: Mask, N4: VL);
6887
6888 // For leading zeros, we need to remove the bias and convert from log2 to
6889 // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
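  // E.g., for a 32-bit element with value 12, the converted value has a biased
  // exponent of Bias + 3, and (Bias + 31) - (Bias + 3) = 28, the correct
  // leading zero count.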
6890 unsigned Adjust = ExponentBias + (EltSize - 1);
6891 SDValue Res;
6892 if (Op->isVPOpcode())
6893 Res = DAG.getNode(Opcode: ISD::VP_SUB, DL, VT, N1: DAG.getConstant(Val: Adjust, DL, VT), N2: Exp,
6894 N3: Mask, N4: VL);
6895 else
6896 Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, N1: DAG.getConstant(Val: Adjust, DL, VT), N2: Exp);
6897
  // With a zero input, the result above equals Adjust, which is greater than
  // EltSize. Hence we can use min(Res, EltSize) for CTLZ.
6900 if (Op.getOpcode() == ISD::CTLZ)
6901 Res = DAG.getNode(Opcode: ISD::UMIN, DL, VT, N1: Res, N2: DAG.getConstant(Val: EltSize, DL, VT));
6902 else if (Op.getOpcode() == ISD::VP_CTLZ)
6903 Res = DAG.getNode(Opcode: ISD::VP_UMIN, DL, VT, N1: Res,
6904 N2: DAG.getConstant(Val: EltSize, DL, VT), N3: Mask, N4: VL);
6905 return Res;
6906}
6907
6908SDValue RISCVTargetLowering::lowerVPCttzElements(SDValue Op,
6909 SelectionDAG &DAG) const {
6910 SDLoc DL(Op);
6911 MVT XLenVT = Subtarget.getXLenVT();
6912 SDValue Source = Op->getOperand(Num: 0);
6913 MVT SrcVT = Source.getSimpleValueType();
6914 SDValue Mask = Op->getOperand(Num: 1);
6915 SDValue EVL = Op->getOperand(Num: 2);
6916
6917 if (SrcVT.isFixedLengthVector()) {
6918 MVT ContainerVT = getContainerForFixedLengthVector(VT: SrcVT);
6919 Source = convertToScalableVector(VT: ContainerVT, V: Source, DAG, Subtarget);
6920 Mask = convertToScalableVector(VT: getMaskTypeFor(VecVT: ContainerVT), V: Mask, DAG,
6921 Subtarget);
6922 SrcVT = ContainerVT;
6923 }
6924
6925 // Convert to boolean vector.
6926 if (SrcVT.getScalarType() != MVT::i1) {
6927 SDValue AllZero = DAG.getConstant(Val: 0, DL, VT: SrcVT);
6928 SrcVT = MVT::getVectorVT(VT: MVT::i1, EC: SrcVT.getVectorElementCount());
6929 Source = DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: SrcVT,
6930 Ops: {Source, AllZero, DAG.getCondCode(Cond: ISD::SETNE),
6931 DAG.getUNDEF(VT: SrcVT), Mask, EVL});
6932 }
6933
6934 SDValue Res = DAG.getNode(Opcode: RISCVISD::VFIRST_VL, DL, VT: XLenVT, N1: Source, N2: Mask, N3: EVL);
6935 if (Op->getOpcode() == ISD::VP_CTTZ_ELTS_ZERO_UNDEF)
    // In this case we can interpret poison as -1, so there is nothing further
    // to do.
6937 return Res;
6938
6939 // Convert -1 to VL.
6940 SDValue SetCC =
6941 DAG.getSetCC(DL, VT: XLenVT, LHS: Res, RHS: DAG.getConstant(Val: 0, DL, VT: XLenVT), Cond: ISD::SETLT);
6942 Res = DAG.getSelect(DL, VT: XLenVT, Cond: SetCC, LHS: EVL, RHS: Res);
6943 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: Op.getValueType(), Operand: Res);
6944}
6945
6946// While RVV has alignment restrictions, we should always be able to load as a
6947// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
// the load is already correctly aligned, it returns SDValue().
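// For example, a <vscale x 2 x i32> load with only byte alignment is
// re-expressed as a <vscale x 8 x i8> load of the same size, and the result is
// bitcast back to <vscale x 2 x i32>.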
6950SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
6951 SelectionDAG &DAG) const {
6952 auto *Load = cast<LoadSDNode>(Val&: Op);
6953 assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
6954
6955 if (allowsMemoryAccessForAlignment(Context&: *DAG.getContext(), DL: DAG.getDataLayout(),
6956 VT: Load->getMemoryVT(),
6957 MMO: *Load->getMemOperand()))
6958 return SDValue();
6959
6960 SDLoc DL(Op);
6961 MVT VT = Op.getSimpleValueType();
6962 unsigned EltSizeBits = VT.getScalarSizeInBits();
6963 assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
6964 "Unexpected unaligned RVV load type");
6965 MVT NewVT =
6966 MVT::getVectorVT(VT: MVT::i8, EC: VT.getVectorElementCount() * (EltSizeBits / 8));
6967 assert(NewVT.isValid() &&
6968 "Expecting equally-sized RVV vector types to be legal");
6969 SDValue L = DAG.getLoad(VT: NewVT, dl: DL, Chain: Load->getChain(), Ptr: Load->getBasePtr(),
6970 PtrInfo: Load->getPointerInfo(), Alignment: Load->getBaseAlign(),
6971 MMOFlags: Load->getMemOperand()->getFlags());
6972 return DAG.getMergeValues(Ops: {DAG.getBitcast(VT, V: L), L.getValue(R: 1)}, dl: DL);
6973}
6974
6975// While RVV has alignment restrictions, we should always be able to store as a
6976// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
6978// returns SDValue() if the store is already correctly aligned.
6979SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
6980 SelectionDAG &DAG) const {
6981 auto *Store = cast<StoreSDNode>(Val&: Op);
6982 assert(Store && Store->getValue().getValueType().isVector() &&
6983 "Expected vector store");
6984
6985 if (allowsMemoryAccessForAlignment(Context&: *DAG.getContext(), DL: DAG.getDataLayout(),
6986 VT: Store->getMemoryVT(),
6987 MMO: *Store->getMemOperand()))
6988 return SDValue();
6989
6990 SDLoc DL(Op);
6991 SDValue StoredVal = Store->getValue();
6992 MVT VT = StoredVal.getSimpleValueType();
6993 unsigned EltSizeBits = VT.getScalarSizeInBits();
6994 assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
6995 "Unexpected unaligned RVV store type");
6996 MVT NewVT =
6997 MVT::getVectorVT(VT: MVT::i8, EC: VT.getVectorElementCount() * (EltSizeBits / 8));
6998 assert(NewVT.isValid() &&
6999 "Expecting equally-sized RVV vector types to be legal");
7000 StoredVal = DAG.getBitcast(VT: NewVT, V: StoredVal);
7001 return DAG.getStore(Chain: Store->getChain(), dl: DL, Val: StoredVal, Ptr: Store->getBasePtr(),
7002 PtrInfo: Store->getPointerInfo(), Alignment: Store->getBaseAlign(),
7003 MMOFlags: Store->getMemOperand()->getFlags());
7004}
7005
7006// While RVV has alignment restrictions, we should always be able to load as a
7007// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::VP_LOAD via a correctly-aligned type.
// If the load is already correctly aligned, it returns SDValue().
7010SDValue RISCVTargetLowering::expandUnalignedVPLoad(SDValue Op,
7011 SelectionDAG &DAG) const {
7012 auto *Load = cast<VPLoadSDNode>(Val&: Op);
7013 assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
7014
7015 if (allowsMemoryAccessForAlignment(Context&: *DAG.getContext(), DL: DAG.getDataLayout(),
7016 VT: Load->getMemoryVT(),
7017 MMO: *Load->getMemOperand()))
7018 return SDValue();
7019
7020 SDValue Mask = Load->getMask();
7021
  // FIXME: Handle masked loads somehow.
7023 if (!ISD::isConstantSplatVectorAllOnes(N: Mask.getNode()))
7024 return SDValue();
7025
7026 SDLoc DL(Op);
7027 MVT VT = Op.getSimpleValueType();
7028 unsigned EltSizeBits = VT.getScalarSizeInBits();
7029 assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
7030 "Unexpected unaligned RVV load type");
7031 MVT NewVT =
7032 MVT::getVectorVT(VT: MVT::i8, EC: VT.getVectorElementCount() * (EltSizeBits / 8));
7033 assert(NewVT.isValid() &&
7034 "Expecting equally-sized RVV vector types to be legal");
7035
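  // The EVL counts elements of the original type; after retyping the access to
  // bytes, each original element covers EltSizeBits / 8 lanes, so scale the
  // EVL accordingly.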
7036 SDValue VL = Load->getVectorLength();
7037 VL = DAG.getNode(Opcode: ISD::MUL, DL, VT: VL.getValueType(), N1: VL,
7038 N2: DAG.getConstant(Val: (EltSizeBits / 8), DL, VT: VL.getValueType()));
7039
7040 MVT MaskVT = MVT::getVectorVT(VT: MVT::i1, EC: NewVT.getVectorElementCount());
7041 SDValue L = DAG.getLoadVP(VT: NewVT, dl: DL, Chain: Load->getChain(), Ptr: Load->getBasePtr(),
7042 Mask: DAG.getAllOnesConstant(DL, VT: MaskVT), EVL: VL,
7043 PtrInfo: Load->getPointerInfo(), Alignment: Load->getBaseAlign(),
7044 MMOFlags: Load->getMemOperand()->getFlags(), AAInfo: AAMDNodes());
7045 return DAG.getMergeValues(Ops: {DAG.getBitcast(VT, V: L), L.getValue(R: 1)}, dl: DL);
7046}
7047
7048// While RVV has alignment restrictions, we should always be able to store as a
7049// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::VP_STORE via a correctly-aligned type.
// It returns SDValue() if the store is already correctly aligned.
7052SDValue RISCVTargetLowering::expandUnalignedVPStore(SDValue Op,
7053 SelectionDAG &DAG) const {
7054 auto *Store = cast<VPStoreSDNode>(Val&: Op);
7055 assert(Store && Store->getValue().getValueType().isVector() &&
7056 "Expected vector store");
7057
7058 if (allowsMemoryAccessForAlignment(Context&: *DAG.getContext(), DL: DAG.getDataLayout(),
7059 VT: Store->getMemoryVT(),
7060 MMO: *Store->getMemOperand()))
7061 return SDValue();
7062
7063 SDValue Mask = Store->getMask();
7064
  // FIXME: Handle masked stores somehow.
7066 if (!ISD::isConstantSplatVectorAllOnes(N: Mask.getNode()))
7067 return SDValue();
7068
7069 SDLoc DL(Op);
7070 SDValue StoredVal = Store->getValue();
7071 MVT VT = StoredVal.getSimpleValueType();
7072 unsigned EltSizeBits = VT.getScalarSizeInBits();
7073 assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
7074 "Unexpected unaligned RVV store type");
7075 MVT NewVT =
7076 MVT::getVectorVT(VT: MVT::i8, EC: VT.getVectorElementCount() * (EltSizeBits / 8));
7077 assert(NewVT.isValid() &&
7078 "Expecting equally-sized RVV vector types to be legal");
7079
7080 SDValue VL = Store->getVectorLength();
7081 VL = DAG.getNode(Opcode: ISD::MUL, DL, VT: VL.getValueType(), N1: VL,
7082 N2: DAG.getConstant(Val: (EltSizeBits / 8), DL, VT: VL.getValueType()));
7083
7084 StoredVal = DAG.getBitcast(VT: NewVT, V: StoredVal);
7085
7086 LocationSize Size = LocationSize::precise(Value: NewVT.getStoreSize());
7087 MachineFunction &MF = DAG.getMachineFunction();
7088 MachineMemOperand *MMO = MF.getMachineMemOperand(
7089 PtrInfo: Store->getPointerInfo(), F: Store->getMemOperand()->getFlags(), Size,
7090 BaseAlignment: Store->getBaseAlign());
7091
7092 MVT MaskVT = MVT::getVectorVT(VT: MVT::i1, EC: NewVT.getVectorElementCount());
7093 return DAG.getStoreVP(Chain: Store->getChain(), dl: DL, Val: StoredVal, Ptr: Store->getBasePtr(),
7094 Offset: DAG.getUNDEF(VT: Store->getBasePtr().getValueType()),
7095 Mask: DAG.getAllOnesConstant(DL, VT: MaskVT), EVL: VL, MemVT: NewVT, MMO,
7096 AM: ISD::UNINDEXED);
7097}
7098
7099static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG,
7100 const RISCVSubtarget &Subtarget) {
7101 assert(Op.getValueType() == MVT::i64 && "Unexpected VT");
7102
7103 int64_t Imm = cast<ConstantSDNode>(Val&: Op)->getSExtValue();
7104
7105 // All simm32 constants should be handled by isel.
  // NOTE: The getMaxBuildIntsCost call below should return a value >= 2,
  // making this check redundant, but small immediates are common, so checking
  // here first improves compile time.
7109 if (isInt<32>(x: Imm))
7110 return Op;
7111
  // We only need to cost the immediate if constant pool lowering is enabled.
7113 if (!Subtarget.useConstantPoolForLargeInts())
7114 return Op;
7115
7116 RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val: Imm, STI: Subtarget);
7117 if (Seq.size() <= Subtarget.getMaxBuildIntsCost())
7118 return Op;
7119
7120 // Optimizations below are disabled for opt size. If we're optimizing for
7121 // size, use a constant pool.
7122 if (DAG.shouldOptForSize())
7123 return SDValue();
7124
  // Special case: see if we can build the constant as (ADD (SLLI X, C), X);
  // do that if it will avoid a constant pool, even though it requires an extra
  // temporary register.
  // If we have Zba, we can use (ADD_UW X, (SLLI X, 32)) to handle cases where
  // the low and high 32 bits are the same and bits 31 and 63 are set.
7130 unsigned ShiftAmt, AddOpc;
7131 RISCVMatInt::InstSeq SeqLo =
7132 RISCVMatInt::generateTwoRegInstSeq(Val: Imm, STI: Subtarget, ShiftAmt, AddOpc);
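  // SeqLo materializes the repeated sub-value X; the +2 below accounts for the
  // extra SLLI and ADD (or ADD_UW) needed to combine the two copies.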
7133 if (!SeqLo.empty() && (SeqLo.size() + 2) <= Subtarget.getMaxBuildIntsCost())
7134 return Op;
7135
7136 return SDValue();
7137}
7138
7139SDValue RISCVTargetLowering::lowerConstantFP(SDValue Op,
7140 SelectionDAG &DAG) const {
7141 MVT VT = Op.getSimpleValueType();
7142 const APFloat &Imm = cast<ConstantFPSDNode>(Val&: Op)->getValueAPF();
7143
7144 // Can this constant be selected by a Zfa FLI instruction?
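  // Zfa's fli.{h,s,d} loads one of 32 predefined floating-point constants;
  // getLegalZfaFPImm returns that table index, or a negative value if the
  // constant is not in the table.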
7145 bool Negate = false;
7146 int Index = getLegalZfaFPImm(Imm, VT);
7147
7148 // If the constant is negative, try negating.
7149 if (Index < 0 && Imm.isNegative()) {
7150 Index = getLegalZfaFPImm(Imm: -Imm, VT);
7151 Negate = true;
7152 }
7153
7154 // If we couldn't find a FLI lowering, fall back to generic code.
7155 if (Index < 0)
7156 return SDValue();
7157
  // Emit an FLI+FNEG. We use a custom node to hide it from constant folding.
7159 SDLoc DL(Op);
7160 SDValue Const =
7161 DAG.getNode(Opcode: RISCVISD::FLI, DL, VT,
7162 Operand: DAG.getTargetConstant(Val: Index, DL, VT: Subtarget.getXLenVT()));
7163 if (!Negate)
7164 return Const;
7165
7166 return DAG.getNode(Opcode: ISD::FNEG, DL, VT, Operand: Const);
7167}
7168
7169static SDValue LowerPREFETCH(SDValue Op, const RISCVSubtarget &Subtarget,
7170 SelectionDAG &DAG) {
7171
7172 unsigned IsData = Op.getConstantOperandVal(i: 4);
7173
  // On the MIPS P8700 (XMIPSCBOP), only data prefetches are supported for now;
  // drop instruction prefetches.
7175 if (Subtarget.hasVendorXMIPSCBOP() && !IsData)
7176 return Op.getOperand(i: 0);
7177 return Op;
7178}
7179
7180static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
7181 const RISCVSubtarget &Subtarget) {
7182 SDLoc dl(Op);
7183 AtomicOrdering FenceOrdering =
7184 static_cast<AtomicOrdering>(Op.getConstantOperandVal(i: 1));
7185 SyncScope::ID FenceSSID =
7186 static_cast<SyncScope::ID>(Op.getConstantOperandVal(i: 2));
7187
7188 if (Subtarget.hasStdExtZtso()) {
7189 // The only fence that needs an instruction is a sequentially-consistent
7190 // cross-thread fence.
7191 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
7192 FenceSSID == SyncScope::System)
7193 return Op;
7194
7195 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
7196 return DAG.getNode(Opcode: ISD::MEMBARRIER, DL: dl, VT: MVT::Other, Operand: Op.getOperand(i: 0));
7197 }
7198
  // Singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
7202 if (FenceSSID == SyncScope::SingleThread)
7203 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
7204 return DAG.getNode(Opcode: ISD::MEMBARRIER, DL: dl, VT: MVT::Other, Operand: Op.getOperand(i: 0));
7205
7206 return Op;
7207}
7208
7209SDValue RISCVTargetLowering::LowerIS_FPCLASS(SDValue Op,
7210 SelectionDAG &DAG) const {
7211 SDLoc DL(Op);
7212 MVT VT = Op.getSimpleValueType();
7213 MVT XLenVT = Subtarget.getXLenVT();
7214 unsigned Check = Op.getConstantOperandVal(i: 1);
7215 unsigned TDCMask = 0;
7216 if (Check & fcSNan)
7217 TDCMask |= RISCV::FPMASK_Signaling_NaN;
7218 if (Check & fcQNan)
7219 TDCMask |= RISCV::FPMASK_Quiet_NaN;
7220 if (Check & fcPosInf)
7221 TDCMask |= RISCV::FPMASK_Positive_Infinity;
7222 if (Check & fcNegInf)
7223 TDCMask |= RISCV::FPMASK_Negative_Infinity;
7224 if (Check & fcPosNormal)
7225 TDCMask |= RISCV::FPMASK_Positive_Normal;
7226 if (Check & fcNegNormal)
7227 TDCMask |= RISCV::FPMASK_Negative_Normal;
7228 if (Check & fcPosSubnormal)
7229 TDCMask |= RISCV::FPMASK_Positive_Subnormal;
7230 if (Check & fcNegSubnormal)
7231 TDCMask |= RISCV::FPMASK_Negative_Subnormal;
7232 if (Check & fcPosZero)
7233 TDCMask |= RISCV::FPMASK_Positive_Zero;
7234 if (Check & fcNegZero)
7235 TDCMask |= RISCV::FPMASK_Negative_Zero;
7236
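  // fclass (scalar) and vfclass (vector) set exactly one of ten class bits per
  // input, so IS_FPCLASS becomes (fclass & TDCMask) != 0, or a direct equality
  // compare with TDCMask when only a single class bit is requested.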
7237 bool IsOneBitMask = isPowerOf2_32(Value: TDCMask);
7238
7239 SDValue TDCMaskV = DAG.getConstant(Val: TDCMask, DL, VT: XLenVT);
7240
7241 if (VT.isVector()) {
7242 SDValue Op0 = Op.getOperand(i: 0);
7243 MVT VT0 = Op.getOperand(i: 0).getSimpleValueType();
7244
7245 if (VT.isScalableVector()) {
7246 MVT DstVT = VT0.changeVectorElementTypeToInteger();
7247 auto [Mask, VL] = getDefaultScalableVLOps(VecVT: VT0, DL, DAG, Subtarget);
7248 if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {
7249 Mask = Op.getOperand(i: 2);
7250 VL = Op.getOperand(i: 3);
7251 }
7252 SDValue FPCLASS = DAG.getNode(Opcode: RISCVISD::FCLASS_VL, DL, VT: DstVT, N1: Op0, N2: Mask,
7253 N3: VL, Flags: Op->getFlags());
7254 if (IsOneBitMask)
7255 return DAG.getSetCC(DL, VT, LHS: FPCLASS,
7256 RHS: DAG.getConstant(Val: TDCMask, DL, VT: DstVT),
7257 Cond: ISD::CondCode::SETEQ);
7258 SDValue AND = DAG.getNode(Opcode: ISD::AND, DL, VT: DstVT, N1: FPCLASS,
7259 N2: DAG.getConstant(Val: TDCMask, DL, VT: DstVT));
7260 return DAG.getSetCC(DL, VT, LHS: AND, RHS: DAG.getConstant(Val: 0, DL, VT: DstVT),
7261 Cond: ISD::SETNE);
7262 }
7263
7264 MVT ContainerVT0 = getContainerForFixedLengthVector(VT: VT0);
7265 MVT ContainerVT = getContainerForFixedLengthVector(VT);
7266 MVT ContainerDstVT = ContainerVT0.changeVectorElementTypeToInteger();
7267 auto [Mask, VL] = getDefaultVLOps(VecVT: VT0, ContainerVT: ContainerVT0, DL, DAG, Subtarget);
7268 if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {
7269 Mask = Op.getOperand(i: 2);
7270 MVT MaskContainerVT =
7271 getContainerForFixedLengthVector(VT: Mask.getSimpleValueType());
7272 Mask = convertToScalableVector(VT: MaskContainerVT, V: Mask, DAG, Subtarget);
7273 VL = Op.getOperand(i: 3);
7274 }
7275 Op0 = convertToScalableVector(VT: ContainerVT0, V: Op0, DAG, Subtarget);
7276
7277 SDValue FPCLASS = DAG.getNode(Opcode: RISCVISD::FCLASS_VL, DL, VT: ContainerDstVT, N1: Op0,
7278 N2: Mask, N3: VL, Flags: Op->getFlags());
7279
7280 TDCMaskV = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerDstVT,
7281 N1: DAG.getUNDEF(VT: ContainerDstVT), N2: TDCMaskV, N3: VL);
7282 if (IsOneBitMask) {
7283 SDValue VMSEQ =
7284 DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: ContainerVT,
7285 Ops: {FPCLASS, TDCMaskV, DAG.getCondCode(Cond: ISD::SETEQ),
7286 DAG.getUNDEF(VT: ContainerVT), Mask, VL});
7287 return convertFromScalableVector(VT, V: VMSEQ, DAG, Subtarget);
7288 }
7289 SDValue AND = DAG.getNode(Opcode: RISCVISD::AND_VL, DL, VT: ContainerDstVT, N1: FPCLASS,
7290 N2: TDCMaskV, N3: DAG.getUNDEF(VT: ContainerDstVT), N4: Mask, N5: VL);
7291
7292 SDValue SplatZero = DAG.getConstant(Val: 0, DL, VT: XLenVT);
7293 SplatZero = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerDstVT,
7294 N1: DAG.getUNDEF(VT: ContainerDstVT), N2: SplatZero, N3: VL);
7295
7296 SDValue VMSNE = DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: ContainerVT,
7297 Ops: {AND, SplatZero, DAG.getCondCode(Cond: ISD::SETNE),
7298 DAG.getUNDEF(VT: ContainerVT), Mask, VL});
7299 return convertFromScalableVector(VT, V: VMSNE, DAG, Subtarget);
7300 }
7301
7302 SDValue FCLASS = DAG.getNode(Opcode: RISCVISD::FCLASS, DL, VT: XLenVT, Operand: Op.getOperand(i: 0));
7303 SDValue AND = DAG.getNode(Opcode: ISD::AND, DL, VT: XLenVT, N1: FCLASS, N2: TDCMaskV);
7304 SDValue Res = DAG.getSetCC(DL, VT: XLenVT, LHS: AND, RHS: DAG.getConstant(Val: 0, DL, VT: XLenVT),
7305 Cond: ISD::CondCode::SETNE);
7306 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT, Operand: Res);
7307}
7308
// Lower fmaximum and fminimum. Unlike our fmax and fmin instructions, these
// operations propagate NaNs.
7311static SDValue lowerFMAXIMUM_FMINIMUM(SDValue Op, SelectionDAG &DAG,
7312 const RISCVSubtarget &Subtarget) {
7313 SDLoc DL(Op);
7314 MVT VT = Op.getSimpleValueType();
7315
7316 SDValue X = Op.getOperand(i: 0);
7317 SDValue Y = Op.getOperand(i: 1);
7318
7319 if (!VT.isVector()) {
7320 MVT XLenVT = Subtarget.getXLenVT();
7321
    // If X is a NaN, replace Y with X. If Y is a NaN, replace X with Y. This
    // ensures that when one input is a NaN, the other will also be a NaN,
    // allowing the NaN to propagate. If both inputs are NaN, this will swap
    // the inputs, which is harmless.
7326
7327 SDValue NewY = Y;
7328 if (!Op->getFlags().hasNoNaNs() && !DAG.isKnownNeverNaN(Op: X)) {
7329 SDValue XIsNonNan = DAG.getSetCC(DL, VT: XLenVT, LHS: X, RHS: X, Cond: ISD::SETOEQ);
7330 NewY = DAG.getSelect(DL, VT, Cond: XIsNonNan, LHS: Y, RHS: X);
7331 }
7332
7333 SDValue NewX = X;
7334 if (!Op->getFlags().hasNoNaNs() && !DAG.isKnownNeverNaN(Op: Y)) {
7335 SDValue YIsNonNan = DAG.getSetCC(DL, VT: XLenVT, LHS: Y, RHS: Y, Cond: ISD::SETOEQ);
7336 NewX = DAG.getSelect(DL, VT, Cond: YIsNonNan, LHS: X, RHS: Y);
7337 }
7338
7339 unsigned Opc =
7340 Op.getOpcode() == ISD::FMAXIMUM ? RISCVISD::FMAX : RISCVISD::FMIN;
7341 return DAG.getNode(Opcode: Opc, DL, VT, N1: NewX, N2: NewY);
7342 }
7343
  // Check the no-NaNs property before converting the fixed-length vectors to
  // scalable form.
7345 bool XIsNeverNan = Op->getFlags().hasNoNaNs() || DAG.isKnownNeverNaN(Op: X);
7346 bool YIsNeverNan = Op->getFlags().hasNoNaNs() || DAG.isKnownNeverNaN(Op: Y);
7347
7348 MVT ContainerVT = VT;
7349 if (VT.isFixedLengthVector()) {
7350 ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
7351 X = convertToScalableVector(VT: ContainerVT, V: X, DAG, Subtarget);
7352 Y = convertToScalableVector(VT: ContainerVT, V: Y, DAG, Subtarget);
7353 }
7354
7355 SDValue Mask, VL;
7356 if (Op->isVPOpcode()) {
7357 Mask = Op.getOperand(i: 2);
7358 if (VT.isFixedLengthVector())
7359 Mask = convertToScalableVector(VT: getMaskTypeFor(VecVT: ContainerVT), V: Mask, DAG,
7360 Subtarget);
7361 VL = Op.getOperand(i: 3);
7362 } else {
7363 std::tie(args&: Mask, args&: VL) = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
7364 }
7365
7366 SDValue NewY = Y;
7367 if (!XIsNeverNan) {
7368 SDValue XIsNonNan = DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: Mask.getValueType(),
7369 Ops: {X, X, DAG.getCondCode(Cond: ISD::SETOEQ),
7370 DAG.getUNDEF(VT: ContainerVT), Mask, VL});
7371 NewY = DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: ContainerVT, N1: XIsNonNan, N2: Y, N3: X,
7372 N4: DAG.getUNDEF(VT: ContainerVT), N5: VL);
7373 }
7374
7375 SDValue NewX = X;
7376 if (!YIsNeverNan) {
7377 SDValue YIsNonNan = DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: Mask.getValueType(),
7378 Ops: {Y, Y, DAG.getCondCode(Cond: ISD::SETOEQ),
7379 DAG.getUNDEF(VT: ContainerVT), Mask, VL});
7380 NewX = DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: ContainerVT, N1: YIsNonNan, N2: X, N3: Y,
7381 N4: DAG.getUNDEF(VT: ContainerVT), N5: VL);
7382 }
7383
7384 unsigned Opc =
7385 Op.getOpcode() == ISD::FMAXIMUM || Op->getOpcode() == ISD::VP_FMAXIMUM
7386 ? RISCVISD::VFMAX_VL
7387 : RISCVISD::VFMIN_VL;
7388 SDValue Res = DAG.getNode(Opcode: Opc, DL, VT: ContainerVT, N1: NewX, N2: NewY,
7389 N3: DAG.getUNDEF(VT: ContainerVT), N4: Mask, N5: VL);
7390 if (VT.isFixedLengthVector())
7391 Res = convertFromScalableVector(VT, V: Res, DAG, Subtarget);
7392 return Res;
7393}
7394
7395static SDValue lowerFABSorFNEG(SDValue Op, SelectionDAG &DAG,
7396 const RISCVSubtarget &Subtarget) {
7397 bool IsFABS = Op.getOpcode() == ISD::FABS;
7398 assert((IsFABS || Op.getOpcode() == ISD::FNEG) &&
7399 "Wrong opcode for lowering FABS or FNEG.");
7400
7401 MVT XLenVT = Subtarget.getXLenVT();
7402 MVT VT = Op.getSimpleValueType();
7403 assert((VT == MVT::f16 || VT == MVT::bf16) && "Unexpected type");
7404
7405 SDLoc DL(Op);
7406 SDValue Fmv =
7407 DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTH, DL, VT: XLenVT, Operand: Op.getOperand(i: 0));
7408
7409 APInt Mask = IsFABS ? APInt::getSignedMaxValue(numBits: 16) : APInt::getSignMask(BitWidth: 16);
7410 Mask = Mask.sext(width: Subtarget.getXLen());
7411
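  // fabs clears the sign bit (AND with 0x7fff); fneg flips it (XOR with
  // 0x8000). The 16-bit mask is sign-extended above to match the XLEN-wide
  // GPR holding the value.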
7412 unsigned LogicOpc = IsFABS ? ISD::AND : ISD::XOR;
7413 SDValue Logic =
7414 DAG.getNode(Opcode: LogicOpc, DL, VT: XLenVT, N1: Fmv, N2: DAG.getConstant(Val: Mask, DL, VT: XLenVT));
7415 return DAG.getNode(Opcode: RISCVISD::FMV_H_X, DL, VT, Operand: Logic);
7416}
7417
7418static SDValue lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG,
7419 const RISCVSubtarget &Subtarget) {
7420 assert(Op.getOpcode() == ISD::FCOPYSIGN && "Unexpected opcode");
7421
7422 MVT XLenVT = Subtarget.getXLenVT();
7423 MVT VT = Op.getSimpleValueType();
7424 assert((VT == MVT::f16 || VT == MVT::bf16) && "Unexpected type");
7425
7426 SDValue Mag = Op.getOperand(i: 0);
7427 SDValue Sign = Op.getOperand(i: 1);
7428
7429 SDLoc DL(Op);
7430
7431 // Get sign bit into an integer value.
7432 unsigned SignSize = Sign.getValueSizeInBits();
7433 SDValue SignAsInt = [&]() {
7434 if (SignSize == Subtarget.getXLen())
7435 return DAG.getNode(Opcode: ISD::BITCAST, DL, VT: XLenVT, Operand: Sign);
7436 switch (SignSize) {
7437 case 16:
7438 return DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTH, DL, VT: XLenVT, Operand: Sign);
7439 case 32:
7440 return DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTW_RV64, DL, VT: XLenVT, Operand: Sign);
7441 case 64: {
7442 assert(XLenVT == MVT::i32 && "Unexpected type");
7443 // Copy the upper word to integer.
7444 SignSize = 32;
7445 return DAG.getNode(Opcode: RISCVISD::SplitF64, DL, ResultTys: {MVT::i32, MVT::i32}, Ops: Sign)
7446 .getValue(R: 1);
7447 }
7448 default:
7449 llvm_unreachable("Unexpected sign size");
7450 }
7451 }();
7452
7453 // Get the signbit at the right position for MagAsInt.
7454 if (int ShiftAmount = (int)SignSize - (int)Mag.getValueSizeInBits())
7455 SignAsInt = DAG.getNode(Opcode: ShiftAmount > 0 ? ISD::SRL : ISD::SHL, DL, VT: XLenVT,
7456 N1: SignAsInt,
7457 N2: DAG.getConstant(Val: std::abs(x: ShiftAmount), DL, VT: XLenVT));
7458
7459 // Mask the sign bit and any bits above it. The extra bits will be dropped
7460 // when we convert back to FP.
7461 SDValue SignMask = DAG.getConstant(
7462 Val: APInt::getSignMask(BitWidth: 16).sext(width: Subtarget.getXLen()), DL, VT: XLenVT);
7463 SDValue SignBit = DAG.getNode(Opcode: ISD::AND, DL, VT: XLenVT, N1: SignAsInt, N2: SignMask);
7464
7465 // Transform Mag value to integer, and clear the sign bit.
7466 SDValue MagAsInt = DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTH, DL, VT: XLenVT, Operand: Mag);
7467 SDValue ClearSignMask = DAG.getConstant(
7468 Val: APInt::getSignedMaxValue(numBits: 16).sext(width: Subtarget.getXLen()), DL, VT: XLenVT);
7469 SDValue ClearedSign =
7470 DAG.getNode(Opcode: ISD::AND, DL, VT: XLenVT, N1: MagAsInt, N2: ClearSignMask);
7471
7472 SDValue CopiedSign = DAG.getNode(Opcode: ISD::OR, DL, VT: XLenVT, N1: ClearedSign, N2: SignBit,
7473 Flags: SDNodeFlags::Disjoint);
7474
7475 return DAG.getNode(Opcode: RISCVISD::FMV_H_X, DL, VT, Operand: CopiedSign);
7476}
7477
/// Get the RISC-V target-specific VL op for a given SDNode.
7479static unsigned getRISCVVLOp(SDValue Op) {
7480#define OP_CASE(NODE) \
7481 case ISD::NODE: \
7482 return RISCVISD::NODE##_VL;
7483#define VP_CASE(NODE) \
7484 case ISD::VP_##NODE: \
7485 return RISCVISD::NODE##_VL;
7486 // clang-format off
7487 switch (Op.getOpcode()) {
7488 default:
7489 llvm_unreachable("don't have RISC-V specified VL op for this SDNode");
7490 OP_CASE(ADD)
7491 OP_CASE(SUB)
7492 OP_CASE(MUL)
7493 OP_CASE(MULHS)
7494 OP_CASE(MULHU)
7495 OP_CASE(SDIV)
7496 OP_CASE(SREM)
7497 OP_CASE(UDIV)
7498 OP_CASE(UREM)
7499 OP_CASE(SHL)
7500 OP_CASE(SRA)
7501 OP_CASE(SRL)
7502 OP_CASE(ROTL)
7503 OP_CASE(ROTR)
7504 OP_CASE(BSWAP)
7505 OP_CASE(CTTZ)
7506 OP_CASE(CTLZ)
7507 OP_CASE(CTPOP)
7508 OP_CASE(BITREVERSE)
7509 OP_CASE(SADDSAT)
7510 OP_CASE(UADDSAT)
7511 OP_CASE(SSUBSAT)
7512 OP_CASE(USUBSAT)
7513 OP_CASE(AVGFLOORS)
7514 OP_CASE(AVGFLOORU)
7515 OP_CASE(AVGCEILS)
7516 OP_CASE(AVGCEILU)
7517 OP_CASE(FADD)
7518 OP_CASE(FSUB)
7519 OP_CASE(FMUL)
7520 OP_CASE(FDIV)
7521 OP_CASE(FNEG)
7522 OP_CASE(FABS)
7523 OP_CASE(FCOPYSIGN)
7524 OP_CASE(FSQRT)
7525 OP_CASE(SMIN)
7526 OP_CASE(SMAX)
7527 OP_CASE(UMIN)
7528 OP_CASE(UMAX)
7529 OP_CASE(STRICT_FADD)
7530 OP_CASE(STRICT_FSUB)
7531 OP_CASE(STRICT_FMUL)
7532 OP_CASE(STRICT_FDIV)
7533 OP_CASE(STRICT_FSQRT)
7534 VP_CASE(ADD) // VP_ADD
7535 VP_CASE(SUB) // VP_SUB
7536 VP_CASE(MUL) // VP_MUL
7537 VP_CASE(SDIV) // VP_SDIV
7538 VP_CASE(SREM) // VP_SREM
7539 VP_CASE(UDIV) // VP_UDIV
7540 VP_CASE(UREM) // VP_UREM
7541 VP_CASE(SHL) // VP_SHL
7542 VP_CASE(FADD) // VP_FADD
7543 VP_CASE(FSUB) // VP_FSUB
7544 VP_CASE(FMUL) // VP_FMUL
7545 VP_CASE(FDIV) // VP_FDIV
7546 VP_CASE(FNEG) // VP_FNEG
7547 VP_CASE(FABS) // VP_FABS
7548 VP_CASE(SMIN) // VP_SMIN
7549 VP_CASE(SMAX) // VP_SMAX
7550 VP_CASE(UMIN) // VP_UMIN
7551 VP_CASE(UMAX) // VP_UMAX
7552 VP_CASE(FCOPYSIGN) // VP_FCOPYSIGN
7553 VP_CASE(SETCC) // VP_SETCC
7554 VP_CASE(SINT_TO_FP) // VP_SINT_TO_FP
7555 VP_CASE(UINT_TO_FP) // VP_UINT_TO_FP
7556 VP_CASE(BITREVERSE) // VP_BITREVERSE
7557 VP_CASE(SADDSAT) // VP_SADDSAT
7558 VP_CASE(UADDSAT) // VP_UADDSAT
7559 VP_CASE(SSUBSAT) // VP_SSUBSAT
7560 VP_CASE(USUBSAT) // VP_USUBSAT
7561 VP_CASE(BSWAP) // VP_BSWAP
7562 VP_CASE(CTLZ) // VP_CTLZ
7563 VP_CASE(CTTZ) // VP_CTTZ
7564 VP_CASE(CTPOP) // VP_CTPOP
7565 case ISD::CTLZ_ZERO_UNDEF:
7566 case ISD::VP_CTLZ_ZERO_UNDEF:
7567 return RISCVISD::CTLZ_VL;
7568 case ISD::CTTZ_ZERO_UNDEF:
7569 case ISD::VP_CTTZ_ZERO_UNDEF:
7570 return RISCVISD::CTTZ_VL;
7571 case ISD::FMA:
7572 case ISD::VP_FMA:
7573 return RISCVISD::VFMADD_VL;
7574 case ISD::STRICT_FMA:
7575 return RISCVISD::STRICT_VFMADD_VL;
7576 case ISD::AND:
7577 case ISD::VP_AND:
7578 if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)
7579 return RISCVISD::VMAND_VL;
7580 return RISCVISD::AND_VL;
7581 case ISD::OR:
7582 case ISD::VP_OR:
7583 if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)
7584 return RISCVISD::VMOR_VL;
7585 return RISCVISD::OR_VL;
7586 case ISD::XOR:
7587 case ISD::VP_XOR:
7588 if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)
7589 return RISCVISD::VMXOR_VL;
7590 return RISCVISD::XOR_VL;
7591 case ISD::ANY_EXTEND:
7592 case ISD::ZERO_EXTEND:
7593 return RISCVISD::VZEXT_VL;
7594 case ISD::SIGN_EXTEND:
7595 return RISCVISD::VSEXT_VL;
7596 case ISD::SETCC:
7597 return RISCVISD::SETCC_VL;
7598 case ISD::VSELECT:
7599 return RISCVISD::VMERGE_VL;
7600 case ISD::VP_SELECT:
7601 case ISD::VP_MERGE:
7602 return RISCVISD::VMERGE_VL;
7603 case ISD::VP_SRA:
7604 return RISCVISD::SRA_VL;
7605 case ISD::VP_SRL:
7606 return RISCVISD::SRL_VL;
7607 case ISD::VP_SQRT:
7608 return RISCVISD::FSQRT_VL;
7609 case ISD::VP_SIGN_EXTEND:
7610 return RISCVISD::VSEXT_VL;
7611 case ISD::VP_ZERO_EXTEND:
7612 return RISCVISD::VZEXT_VL;
7613 case ISD::VP_FP_TO_SINT:
7614 return RISCVISD::VFCVT_RTZ_X_F_VL;
7615 case ISD::VP_FP_TO_UINT:
7616 return RISCVISD::VFCVT_RTZ_XU_F_VL;
7617 case ISD::FMINNUM:
7618 case ISD::FMINIMUMNUM:
7619 case ISD::VP_FMINNUM:
7620 return RISCVISD::VFMIN_VL;
7621 case ISD::FMAXNUM:
7622 case ISD::FMAXIMUMNUM:
7623 case ISD::VP_FMAXNUM:
7624 return RISCVISD::VFMAX_VL;
7625 case ISD::LRINT:
7626 case ISD::VP_LRINT:
7627 case ISD::LLRINT:
7628 case ISD::VP_LLRINT:
7629 return RISCVISD::VFCVT_RM_X_F_VL;
7630 }
7631 // clang-format on
7632#undef OP_CASE
7633#undef VP_CASE
7634}
7635
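// nxv32f16/nxv32bf16 operations would normally be promoted to f32, but the
// promoted type (nxv32f32) is not legal (it would need LMUL=16), so such
// operations have to be split into two nxv16 halves instead. When native bf16
// vector arithmetic is available, the operations listed in ZvfbfaOps and
// ZvfbfaVPOps are handled directly and do not need the split.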
7636static bool isPromotedOpNeedingSplit(SDValue Op,
7637 const RISCVSubtarget &Subtarget) {
7638 return (Op.getValueType() == MVT::nxv32f16 &&
7639 (Subtarget.hasVInstructionsF16Minimal() &&
7640 !Subtarget.hasVInstructionsF16())) ||
7641 (Op.getValueType() == MVT::nxv32bf16 &&
7642 Subtarget.hasVInstructionsBF16Minimal() &&
7643 (!Subtarget.hasVInstructionsBF16() ||
7644 (!llvm::is_contained(Range: ZvfbfaOps, Element: Op.getOpcode()) &&
7645 !llvm::is_contained(Range: ZvfbfaVPOps, Element: Op.getOpcode()))));
7646}
7647
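// Lower an operation by splitting each vector operand (and the result type) in
// half, applying the operation to each half, and concatenating the results.
// Non-vector operands are passed through unchanged.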
7648static SDValue SplitVectorOp(SDValue Op, SelectionDAG &DAG) {
7649 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VT: Op.getValueType());
7650 SDLoc DL(Op);
7651
7652 SmallVector<SDValue, 4> LoOperands(Op.getNumOperands());
7653 SmallVector<SDValue, 4> HiOperands(Op.getNumOperands());
7654
7655 for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
7656 if (!Op.getOperand(i: j).getValueType().isVector()) {
7657 LoOperands[j] = Op.getOperand(i: j);
7658 HiOperands[j] = Op.getOperand(i: j);
7659 continue;
7660 }
7661 std::tie(args&: LoOperands[j], args&: HiOperands[j]) =
7662 DAG.SplitVector(N: Op.getOperand(i: j), DL);
7663 }
7664
7665 SDValue LoRes =
7666 DAG.getNode(Opcode: Op.getOpcode(), DL, VT: LoVT, Ops: LoOperands, Flags: Op->getFlags());
7667 SDValue HiRes =
7668 DAG.getNode(Opcode: Op.getOpcode(), DL, VT: HiVT, Ops: HiOperands, Flags: Op->getFlags());
7669
7670 return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: Op.getValueType(), N1: LoRes, N2: HiRes);
7671}
7672
7673static SDValue SplitVPOp(SDValue Op, SelectionDAG &DAG) {
7674 assert(ISD::isVPOpcode(Op.getOpcode()) && "Not a VP op");
7675 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VT: Op.getValueType());
7676 SDLoc DL(Op);
7677
7678 SmallVector<SDValue, 4> LoOperands(Op.getNumOperands());
7679 SmallVector<SDValue, 4> HiOperands(Op.getNumOperands());
7680
7681 for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
7682 if (ISD::getVPExplicitVectorLengthIdx(Opcode: Op.getOpcode()) == j) {
7683 std::tie(args&: LoOperands[j], args&: HiOperands[j]) =
7684 DAG.SplitEVL(N: Op.getOperand(i: j), VecVT: Op.getValueType(), DL);
7685 continue;
7686 }
7687 if (!Op.getOperand(i: j).getValueType().isVector()) {
7688 LoOperands[j] = Op.getOperand(i: j);
7689 HiOperands[j] = Op.getOperand(i: j);
7690 continue;
7691 }
7692 std::tie(args&: LoOperands[j], args&: HiOperands[j]) =
7693 DAG.SplitVector(N: Op.getOperand(i: j), DL);
7694 }
7695
7696 SDValue LoRes =
7697 DAG.getNode(Opcode: Op.getOpcode(), DL, VT: LoVT, Ops: LoOperands, Flags: Op->getFlags());
7698 SDValue HiRes =
7699 DAG.getNode(Opcode: Op.getOpcode(), DL, VT: HiVT, Ops: HiOperands, Flags: Op->getFlags());
7700
7701 return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: Op.getValueType(), N1: LoRes, N2: HiRes);
7702}
7703
7704static SDValue SplitVectorReductionOp(SDValue Op, SelectionDAG &DAG) {
7705 SDLoc DL(Op);
7706
7707 auto [Lo, Hi] = DAG.SplitVector(N: Op.getOperand(i: 1), DL);
7708 auto [MaskLo, MaskHi] = DAG.SplitVector(N: Op.getOperand(i: 2), DL);
7709 auto [EVLLo, EVLHi] =
7710 DAG.SplitEVL(N: Op.getOperand(i: 3), VecVT: Op.getOperand(i: 1).getValueType(), DL);
7711
7712 SDValue ResLo =
7713 DAG.getNode(Opcode: Op.getOpcode(), DL, VT: Op.getValueType(),
7714 Ops: {Op.getOperand(i: 0), Lo, MaskLo, EVLLo}, Flags: Op->getFlags());
7715 return DAG.getNode(Opcode: Op.getOpcode(), DL, VT: Op.getValueType(),
7716 Ops: {ResLo, Hi, MaskHi, EVLHi}, Flags: Op->getFlags());
7717}
7718
7719static SDValue SplitStrictFPVectorOp(SDValue Op, SelectionDAG &DAG) {
7720
7721 assert(Op->isStrictFPOpcode());
7722
7723 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VT: Op->getValueType(ResNo: 0));
7724
7725 SDVTList LoVTs = DAG.getVTList(VT1: LoVT, VT2: Op->getValueType(ResNo: 1));
7726 SDVTList HiVTs = DAG.getVTList(VT1: HiVT, VT2: Op->getValueType(ResNo: 1));
7727
7728 SDLoc DL(Op);
7729
7730 SmallVector<SDValue, 4> LoOperands(Op.getNumOperands());
7731 SmallVector<SDValue, 4> HiOperands(Op.getNumOperands());
7732
7733 for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
7734 if (!Op.getOperand(i: j).getValueType().isVector()) {
7735 LoOperands[j] = Op.getOperand(i: j);
7736 HiOperands[j] = Op.getOperand(i: j);
7737 continue;
7738 }
7739 std::tie(args&: LoOperands[j], args&: HiOperands[j]) =
7740 DAG.SplitVector(N: Op.getOperand(i: j), DL);
7741 }
7742
7743 SDValue LoRes =
7744 DAG.getNode(Opcode: Op.getOpcode(), DL, VTList: LoVTs, Ops: LoOperands, Flags: Op->getFlags());
7745 HiOperands[0] = LoRes.getValue(R: 1);
7746 SDValue HiRes =
7747 DAG.getNode(Opcode: Op.getOpcode(), DL, VTList: HiVTs, Ops: HiOperands, Flags: Op->getFlags());
7748
7749 SDValue V = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: Op->getValueType(ResNo: 0),
7750 N1: LoRes.getValue(R: 0), N2: HiRes.getValue(R: 0));
7751 return DAG.getMergeValues(Ops: {V, HiRes.getValue(R: 1)}, dl: DL);
7752}
7753
7754SDValue
7755RISCVTargetLowering::lowerXAndesBfHCvtBFloat16Load(SDValue Op,
7756 SelectionDAG &DAG) const {
7757 assert(Subtarget.hasVendorXAndesBFHCvt() && !Subtarget.hasStdExtZfh() &&
7758 "Unexpected bfloat16 load lowering");
7759
7760 SDLoc DL(Op);
7761 LoadSDNode *LD = cast<LoadSDNode>(Val: Op.getNode());
7762 EVT MemVT = LD->getMemoryVT();
7763 SDValue Load = DAG.getExtLoad(
7764 ExtType: ISD::ZEXTLOAD, dl: DL, VT: Subtarget.getXLenVT(), Chain: LD->getChain(),
7765 Ptr: LD->getBasePtr(),
7766 MemVT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: MemVT.getSizeInBits()),
7767 MMO: LD->getMemOperand());
  // Use a mask to make the bf16 value properly NaN-boxed when we don't have
  // the flh instruction. -65536 is a simm32 value, so it can be materialized
  // directly with a single lui.
7771 SDValue mask = DAG.getSignedConstant(Val: -65536, DL, VT: Subtarget.getXLenVT());
7772 SDValue OrSixteenOne =
7773 DAG.getNode(Opcode: ISD::OR, DL, VT: Load.getValueType(), Ops: {Load, mask});
7774 SDValue ConvertedResult =
7775 DAG.getNode(Opcode: RISCVISD::NDS_FMV_BF16_X, DL, VT: MVT::bf16, Operand: OrSixteenOne);
7776 return DAG.getMergeValues(Ops: {ConvertedResult, Load.getValue(R: 1)}, dl: DL);
7777}
7778
7779SDValue
7780RISCVTargetLowering::lowerXAndesBfHCvtBFloat16Store(SDValue Op,
7781 SelectionDAG &DAG) const {
7782 assert(Subtarget.hasVendorXAndesBFHCvt() && !Subtarget.hasStdExtZfh() &&
7783 "Unexpected bfloat16 store lowering");
7784
7785 StoreSDNode *ST = cast<StoreSDNode>(Val: Op.getNode());
7786 SDLoc DL(Op);
7787 SDValue FMV = DAG.getNode(Opcode: RISCVISD::NDS_FMV_X_ANYEXTBF16, DL,
7788 VT: Subtarget.getXLenVT(), Operand: ST->getValue());
7789 return DAG.getTruncStore(
7790 Chain: ST->getChain(), dl: DL, Val: FMV, Ptr: ST->getBasePtr(),
7791 SVT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: ST->getMemoryVT().getSizeInBits()),
7792 MMO: ST->getMemOperand());
7793}
7794
7795SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
7796 SelectionDAG &DAG) const {
7797 switch (Op.getOpcode()) {
7798 default:
7799 reportFatalInternalError(
7800 reason: "Unimplemented RISCVTargetLowering::LowerOperation Case");
7801 case ISD::PREFETCH:
7802 return LowerPREFETCH(Op, Subtarget, DAG);
7803 case ISD::ATOMIC_FENCE:
7804 return LowerATOMIC_FENCE(Op, DAG, Subtarget);
7805 case ISD::GlobalAddress:
7806 return lowerGlobalAddress(Op, DAG);
7807 case ISD::BlockAddress:
7808 return lowerBlockAddress(Op, DAG);
7809 case ISD::ConstantPool:
7810 return lowerConstantPool(Op, DAG);
7811 case ISD::JumpTable:
7812 return lowerJumpTable(Op, DAG);
7813 case ISD::GlobalTLSAddress:
7814 return lowerGlobalTLSAddress(Op, DAG);
7815 case ISD::Constant:
7816 return lowerConstant(Op, DAG, Subtarget);
7817 case ISD::ConstantFP:
7818 return lowerConstantFP(Op, DAG);
7819 case ISD::SELECT:
7820 return lowerSELECT(Op, DAG);
7821 case ISD::BRCOND:
7822 return lowerBRCOND(Op, DAG);
7823 case ISD::VASTART:
7824 return lowerVASTART(Op, DAG);
7825 case ISD::FRAMEADDR:
7826 return lowerFRAMEADDR(Op, DAG);
7827 case ISD::RETURNADDR:
7828 return lowerRETURNADDR(Op, DAG);
7829 case ISD::SHL_PARTS:
7830 return lowerShiftLeftParts(Op, DAG);
7831 case ISD::SRA_PARTS:
7832 return lowerShiftRightParts(Op, DAG, IsSRA: true);
7833 case ISD::SRL_PARTS:
7834 return lowerShiftRightParts(Op, DAG, IsSRA: false);
7835 case ISD::ROTL:
7836 case ISD::ROTR:
7837 if (Op.getValueType().isFixedLengthVector()) {
7838 assert(Subtarget.hasStdExtZvkb());
7839 return lowerToScalableOp(Op, DAG);
7840 }
7841 assert(Subtarget.hasVendorXTHeadBb() &&
7842 !(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) &&
7843 "Unexpected custom legalization");
7844 // XTHeadBb only supports rotate by constant.
7845 if (!isa<ConstantSDNode>(Val: Op.getOperand(i: 1)))
7846 return SDValue();
7847 return Op;
7848 case ISD::BITCAST: {
7849 SDLoc DL(Op);
7850 EVT VT = Op.getValueType();
7851 SDValue Op0 = Op.getOperand(i: 0);
7852 EVT Op0VT = Op0.getValueType();
7853 MVT XLenVT = Subtarget.getXLenVT();
7854 if (Op0VT == MVT::i16 &&
7855 ((VT == MVT::f16 && Subtarget.hasStdExtZfhminOrZhinxmin()) ||
7856 (VT == MVT::bf16 && Subtarget.hasStdExtZfbfmin()))) {
7857 SDValue NewOp0 = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: XLenVT, Operand: Op0);
7858 return DAG.getNode(Opcode: RISCVISD::FMV_H_X, DL, VT, Operand: NewOp0);
7859 }
7860 if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
7861 Subtarget.hasStdExtFOrZfinx()) {
7862 SDValue NewOp0 = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: Op0);
7863 return DAG.getNode(Opcode: RISCVISD::FMV_W_X_RV64, DL, VT: MVT::f32, Operand: NewOp0);
7864 }
7865 if (VT == MVT::f64 && Op0VT == MVT::i64 && !Subtarget.is64Bit() &&
7866 Subtarget.hasStdExtDOrZdinx()) {
7867 SDValue Lo, Hi;
7868 std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Op0, DL, LoVT: MVT::i32, HiVT: MVT::i32);
7869 return DAG.getNode(Opcode: RISCVISD::BuildPairF64, DL, VT: MVT::f64, N1: Lo, N2: Hi);
7870 }
7871
7872 if (Subtarget.enablePExtSIMDCodeGen()) {
7873 bool Is32BitCast =
7874 (VT == MVT::i32 && (Op0VT == MVT::v4i8 || Op0VT == MVT::v2i16)) ||
7875 (Op0VT == MVT::i32 && (VT == MVT::v4i8 || VT == MVT::v2i16));
7876 bool Is64BitCast =
7877 (VT == MVT::i64 && (Op0VT == MVT::v8i8 || Op0VT == MVT::v4i16 ||
7878 Op0VT == MVT::v2i32)) ||
7879 (Op0VT == MVT::i64 &&
7880 (VT == MVT::v8i8 || VT == MVT::v4i16 || VT == MVT::v2i32));
7881 if (Is32BitCast || Is64BitCast)
7882 return Op;
7883 }
7884
7885 // Consider other scalar<->scalar casts as legal if the types are legal.
7886 // Otherwise expand them.
7887 if (!VT.isVector() && !Op0VT.isVector()) {
7888 if (isTypeLegal(VT) && isTypeLegal(VT: Op0VT))
7889 return Op;
7890 return SDValue();
7891 }
7892
7893 assert(!VT.isScalableVector() && !Op0VT.isScalableVector() &&
7894 "Unexpected types");
7895
7896 if (VT.isFixedLengthVector()) {
7897 // We can handle fixed length vector bitcasts with a simple replacement
7898 // in isel.
7899 if (Op0VT.isFixedLengthVector())
7900 return Op;
7901 // When bitcasting from scalar to fixed-length vector, insert the scalar
7902 // into a one-element vector of the result type, and perform a vector
7903 // bitcast.
7904 if (!Op0VT.isVector()) {
7905 EVT BVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: Op0VT, NumElements: 1);
7906 if (!isTypeLegal(VT: BVT))
7907 return SDValue();
7908 return DAG.getBitcast(
7909 VT, V: DAG.getInsertVectorElt(DL, Vec: DAG.getUNDEF(VT: BVT), Elt: Op0, Idx: 0));
7910 }
7911 return SDValue();
7912 }
7913 // Custom-legalize bitcasts from fixed-length vector types to scalar types
7914 // thus: bitcast the vector to a one-element vector type whose element type
7915 // is the same as the result type, and extract the first element.
7916 if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
7917 EVT BVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT, NumElements: 1);
7918 if (!isTypeLegal(VT: BVT))
7919 return SDValue();
7920 SDValue BVec = DAG.getBitcast(VT: BVT, V: Op0);
7921 return DAG.getExtractVectorElt(DL, VT, Vec: BVec, Idx: 0);
7922 }
7923 return SDValue();
7924 }
7925 case ISD::INTRINSIC_WO_CHAIN:
7926 return LowerINTRINSIC_WO_CHAIN(Op, DAG);
7927 case ISD::INTRINSIC_W_CHAIN:
7928 return LowerINTRINSIC_W_CHAIN(Op, DAG);
7929 case ISD::INTRINSIC_VOID:
7930 return LowerINTRINSIC_VOID(Op, DAG);
7931 case ISD::IS_FPCLASS:
7932 return LowerIS_FPCLASS(Op, DAG);
7933 case ISD::BITREVERSE: {
7934 MVT VT = Op.getSimpleValueType();
7935 if (VT.isFixedLengthVector()) {
7936 assert(Subtarget.hasStdExtZvbb());
7937 return lowerToScalableOp(Op, DAG);
7938 }
7939 SDLoc DL(Op);
7940 assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
7941 assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
7942 // Expand bitreverse to a bswap(rev8) followed by brev8.
7943 SDValue BSwap = DAG.getNode(Opcode: ISD::BSWAP, DL, VT, Operand: Op.getOperand(i: 0));
7944 return DAG.getNode(Opcode: RISCVISD::BREV8, DL, VT, Operand: BSwap);
7945 }
7946 case ISD::TRUNCATE:
7947 case ISD::TRUNCATE_SSAT_S:
7948 case ISD::TRUNCATE_USAT_U:
7949 // Only custom-lower vector truncates
7950 if (!Op.getSimpleValueType().isVector())
7951 return Op;
7952 return lowerVectorTruncLike(Op, DAG);
7953 case ISD::ANY_EXTEND:
7954 case ISD::ZERO_EXTEND:
7955 if (Op.getOperand(i: 0).getValueType().isVector() &&
7956 Op.getOperand(i: 0).getValueType().getVectorElementType() == MVT::i1)
7957 return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ ExtTrueVal: 1);
7958 if (Op.getValueType().isScalableVector())
7959 return Op;
7960 return lowerToScalableOp(Op, DAG);
7961 case ISD::SIGN_EXTEND:
7962 if (Op.getOperand(i: 0).getValueType().isVector() &&
7963 Op.getOperand(i: 0).getValueType().getVectorElementType() == MVT::i1)
7964 return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ ExtTrueVal: -1);
7965 if (Op.getValueType().isScalableVector())
7966 return Op;
7967 return lowerToScalableOp(Op, DAG);
7968 case ISD::SPLAT_VECTOR_PARTS:
7969 return lowerSPLAT_VECTOR_PARTS(Op, DAG);
7970 case ISD::INSERT_VECTOR_ELT:
7971 return lowerINSERT_VECTOR_ELT(Op, DAG);
7972 case ISD::EXTRACT_VECTOR_ELT:
7973 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
7974 case ISD::SCALAR_TO_VECTOR: {
7975 MVT VT = Op.getSimpleValueType();
7976 SDLoc DL(Op);
7977 SDValue Scalar = Op.getOperand(i: 0);
7978 if (VT.getVectorElementType() == MVT::i1) {
7979 MVT WideVT = VT.changeVectorElementType(EltVT: MVT::i8);
7980 SDValue V = DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL, VT: WideVT, Operand: Scalar);
7981 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT, Operand: V);
7982 }
7983 MVT ContainerVT = VT;
7984 if (VT.isFixedLengthVector())
7985 ContainerVT = getContainerForFixedLengthVector(VT);
7986 SDValue VL = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget).second;
7987
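// Move the scalar into element 0 of the container vector: FP scalars use
// vfmv.s.f (VFMV_S_F_VL); integer scalars are any-extended to XLEN and use
// vmv.s.x (VMV_S_X_VL).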
7988 SDValue V;
7989 if (VT.isFloatingPoint()) {
7990 V = DAG.getNode(Opcode: RISCVISD::VFMV_S_F_VL, DL, VT: ContainerVT,
7991 N1: DAG.getUNDEF(VT: ContainerVT), N2: Scalar, N3: VL);
7992 } else {
7993 Scalar = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: Subtarget.getXLenVT(), Operand: Scalar);
7994 V = DAG.getNode(Opcode: RISCVISD::VMV_S_X_VL, DL, VT: ContainerVT,
7995 N1: DAG.getUNDEF(VT: ContainerVT), N2: Scalar, N3: VL);
7996 }
7997 if (VT.isFixedLengthVector())
7998 V = convertFromScalableVector(VT, V, DAG, Subtarget);
7999 return V;
8000 }
8001 case ISD::VSCALE: {
8002 MVT XLenVT = Subtarget.getXLenVT();
8003 MVT VT = Op.getSimpleValueType();
8004 SDLoc DL(Op);
8005 SDValue Res = DAG.getNode(Opcode: RISCVISD::READ_VLENB, DL, VT: XLenVT);
8006 // We define our scalable vector types for lmul=1 to use a 64 bit known
8007 // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
8008 // vscale as VLENB / 8.
8009 static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
8010 if (Subtarget.getRealMinVLen() < RISCV::RVVBitsPerBlock)
8011 reportFatalInternalError(reason: "Support for VLEN==32 is incomplete.");
8012 // We assume VLENB is a multiple of 8. We manually choose the best shift
8013 // here because SimplifyDemandedBits isn't always able to simplify it.
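// For example, (vscale * 4) becomes (srl VLENB, 1), (vscale * 16) becomes
// (shl VLENB, 1), and (vscale * 24) becomes (mul VLENB, 3).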
8014 uint64_t Val = Op.getConstantOperandVal(i: 0);
8015 if (isPowerOf2_64(Value: Val)) {
8016 uint64_t Log2 = Log2_64(Value: Val);
8017 if (Log2 < 3) {
8018 SDNodeFlags Flags;
8019 Flags.setExact(true);
8020 Res = DAG.getNode(Opcode: ISD::SRL, DL, VT: XLenVT, N1: Res,
8021 N2: DAG.getConstant(Val: 3 - Log2, DL, VT: XLenVT), Flags);
8022 } else if (Log2 > 3) {
8023 Res = DAG.getNode(Opcode: ISD::SHL, DL, VT: XLenVT, N1: Res,
8024 N2: DAG.getConstant(Val: Log2 - 3, DL, VT: XLenVT));
8025 }
8026 } else if ((Val % 8) == 0) {
8027 // If the multiplier is a multiple of 8, scale it down to avoid needing
8028 // to shift the VLENB value.
8029 Res = DAG.getNode(Opcode: ISD::MUL, DL, VT: XLenVT, N1: Res,
8030 N2: DAG.getConstant(Val: Val / 8, DL, VT: XLenVT));
8031 } else {
8032 SDNodeFlags Flags;
8033 Flags.setExact(true);
8034 SDValue VScale = DAG.getNode(Opcode: ISD::SRL, DL, VT: XLenVT, N1: Res,
8035 N2: DAG.getConstant(Val: 3, DL, VT: XLenVT), Flags);
8036 Res = DAG.getNode(Opcode: ISD::MUL, DL, VT: XLenVT, N1: VScale,
8037 N2: DAG.getConstant(Val, DL, VT: XLenVT));
8038 }
8039 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT, Operand: Res);
8040 }
8041 case ISD::FPOWI: {
8042 // Custom promote f16 powi with illegal i32 integer type on RV64. Once
8043 // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
8044 if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
8045 Op.getOperand(i: 1).getValueType() == MVT::i32) {
8046 SDLoc DL(Op);
8047 SDValue Op0 = DAG.getNode(Opcode: ISD::FP_EXTEND, DL, VT: MVT::f32, Operand: Op.getOperand(i: 0));
8048 SDValue Powi =
8049 DAG.getNode(Opcode: ISD::FPOWI, DL, VT: MVT::f32, N1: Op0, N2: Op.getOperand(i: 1));
8050 return DAG.getNode(Opcode: ISD::FP_ROUND, DL, VT: MVT::f16, N1: Powi,
8051 N2: DAG.getIntPtrConstant(Val: 0, DL, /*isTarget=*/true));
8052 }
8053 return SDValue();
8054 }
8055 case ISD::FMAXIMUM:
8056 case ISD::FMINIMUM:
8057 if (isPromotedOpNeedingSplit(Op, Subtarget))
8058 return SplitVectorOp(Op, DAG);
8059 return lowerFMAXIMUM_FMINIMUM(Op, DAG, Subtarget);
8060 case ISD::FP_EXTEND:
8061 case ISD::FP_ROUND:
8062 return lowerVectorFPExtendOrRoundLike(Op, DAG);
8063 case ISD::STRICT_FP_ROUND:
8064 case ISD::STRICT_FP_EXTEND:
8065 return lowerStrictFPExtendOrRoundLike(Op, DAG);
8066 case ISD::SINT_TO_FP:
8067 case ISD::UINT_TO_FP:
8068 if (Op.getValueType().isVector() &&
8069 ((Op.getValueType().getScalarType() == MVT::f16 &&
8070 (Subtarget.hasVInstructionsF16Minimal() &&
8071 !Subtarget.hasVInstructionsF16())) ||
8072 Op.getValueType().getScalarType() == MVT::bf16)) {
8073 if (isPromotedOpNeedingSplit(Op, Subtarget))
8074 return SplitVectorOp(Op, DAG);
8075 // int -> f32
8076 SDLoc DL(Op);
8077 MVT NVT =
8078 MVT::getVectorVT(VT: MVT::f32, EC: Op.getValueType().getVectorElementCount());
8079 SDValue NC = DAG.getNode(Opcode: Op.getOpcode(), DL, VT: NVT, Ops: Op->ops());
8080 // f32 -> [b]f16
8081 return DAG.getNode(Opcode: ISD::FP_ROUND, DL, VT: Op.getValueType(), N1: NC,
8082 N2: DAG.getIntPtrConstant(Val: 0, DL, /*isTarget=*/true));
8083 }
8084 [[fallthrough]];
8085 case ISD::FP_TO_SINT:
8086 case ISD::FP_TO_UINT:
8087 if (SDValue Op1 = Op.getOperand(i: 0);
8088 Op1.getValueType().isVector() &&
8089 ((Op1.getValueType().getScalarType() == MVT::f16 &&
8090 (Subtarget.hasVInstructionsF16Minimal() &&
8091 !Subtarget.hasVInstructionsF16())) ||
8092 Op1.getValueType().getScalarType() == MVT::bf16)) {
8093 if (isPromotedOpNeedingSplit(Op: Op1, Subtarget))
8094 return SplitVectorOp(Op, DAG);
8095 // [b]f16 -> f32
8096 SDLoc DL(Op);
8097 MVT NVT = MVT::getVectorVT(VT: MVT::f32,
8098 EC: Op1.getValueType().getVectorElementCount());
8099 SDValue WidenVec = DAG.getNode(Opcode: ISD::FP_EXTEND, DL, VT: NVT, Operand: Op1);
8100 // f32 -> int
8101 return DAG.getNode(Opcode: Op.getOpcode(), DL, VT: Op.getValueType(), Operand: WidenVec);
8102 }
8103 [[fallthrough]];
8104 case ISD::STRICT_FP_TO_SINT:
8105 case ISD::STRICT_FP_TO_UINT:
8106 case ISD::STRICT_SINT_TO_FP:
8107 case ISD::STRICT_UINT_TO_FP: {
8108 // RVV can only do fp<->int conversions to types half/double the size as
8109 // the source. We custom-lower any conversions that do two hops into
8110 // sequences.
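// For example, i8 -> f64 is lowered as an integer extension to i32 followed
// by an i32 -> f64 conversion, and f64 -> i8 as an f64 -> i32 conversion
// followed by an integer truncate.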
8111 MVT VT = Op.getSimpleValueType();
8112 if (VT.isScalarInteger())
8113 return lowerFP_TO_INT(Op, DAG, Subtarget);
8114 bool IsStrict = Op->isStrictFPOpcode();
8115 SDValue Src = Op.getOperand(i: 0 + IsStrict);
8116 MVT SrcVT = Src.getSimpleValueType();
8117 if (SrcVT.isScalarInteger())
8118 return lowerINT_TO_FP(Op, DAG, Subtarget);
8119 if (!VT.isVector())
8120 return Op;
8121 SDLoc DL(Op);
8122 MVT EltVT = VT.getVectorElementType();
8123 MVT SrcEltVT = SrcVT.getVectorElementType();
8124 unsigned EltSize = EltVT.getSizeInBits();
8125 unsigned SrcEltSize = SrcEltVT.getSizeInBits();
8126 assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
8127 "Unexpected vector element types");
8128
8129 bool IsInt2FP = SrcEltVT.isInteger();
8130 // Widening conversions
8131 if (EltSize > (2 * SrcEltSize)) {
8132 if (IsInt2FP) {
8133 // Do a regular integer sign/zero extension then convert to float.
8134 MVT IVecVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: EltSize / 2),
8135 EC: VT.getVectorElementCount());
8136 unsigned ExtOpcode = (Op.getOpcode() == ISD::UINT_TO_FP ||
8137 Op.getOpcode() == ISD::STRICT_UINT_TO_FP)
8138 ? ISD::ZERO_EXTEND
8139 : ISD::SIGN_EXTEND;
8140 SDValue Ext = DAG.getNode(Opcode: ExtOpcode, DL, VT: IVecVT, Operand: Src);
8141 if (IsStrict)
8142 return DAG.getNode(Opcode: Op.getOpcode(), DL, VTList: Op->getVTList(),
8143 N1: Op.getOperand(i: 0), N2: Ext);
8144 return DAG.getNode(Opcode: Op.getOpcode(), DL, VT, Operand: Ext);
8145 }
8146 // FP2Int
8147 assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
8148 // Do one doubling fp_extend then complete the operation by converting
8149 // to int.
8150 MVT InterimFVT = MVT::getVectorVT(VT: MVT::f32, EC: VT.getVectorElementCount());
8151 if (IsStrict) {
8152 auto [FExt, Chain] =
8153 DAG.getStrictFPExtendOrRound(Op: Src, Chain: Op.getOperand(i: 0), DL, VT: InterimFVT);
8154 return DAG.getNode(Opcode: Op.getOpcode(), DL, VTList: Op->getVTList(), N1: Chain, N2: FExt);
8155 }
8156 SDValue FExt = DAG.getFPExtendOrRound(Op: Src, DL, VT: InterimFVT);
8157 return DAG.getNode(Opcode: Op.getOpcode(), DL, VT, Operand: FExt);
8158 }
8159
8160 // Narrowing conversions
8161 if (SrcEltSize > (2 * EltSize)) {
8162 if (IsInt2FP) {
8163 // One narrowing int_to_fp, then an fp_round.
8164 assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
8165 MVT InterimFVT = MVT::getVectorVT(VT: MVT::f32, EC: VT.getVectorElementCount());
8166 if (IsStrict) {
8167 SDValue Int2FP = DAG.getNode(Opcode: Op.getOpcode(), DL,
8168 VTList: DAG.getVTList(VT1: InterimFVT, VT2: MVT::Other),
8169 N1: Op.getOperand(i: 0), N2: Src);
8170 SDValue Chain = Int2FP.getValue(R: 1);
8171 return DAG.getStrictFPExtendOrRound(Op: Int2FP, Chain, DL, VT).first;
8172 }
8173 SDValue Int2FP = DAG.getNode(Opcode: Op.getOpcode(), DL, VT: InterimFVT, Operand: Src);
8174 return DAG.getFPExtendOrRound(Op: Int2FP, DL, VT);
8175 }
8176 // FP2Int
8177 // One narrowing fp_to_int, then truncate the integer. If the float isn't
8178 // representable by the integer, the result is poison.
8179 MVT IVecVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: SrcEltSize / 2),
8180 EC: VT.getVectorElementCount());
8181 if (IsStrict) {
8182 SDValue FP2Int =
8183 DAG.getNode(Opcode: Op.getOpcode(), DL, VTList: DAG.getVTList(VT1: IVecVT, VT2: MVT::Other),
8184 N1: Op.getOperand(i: 0), N2: Src);
8185 SDValue Res = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT, Operand: FP2Int);
8186 return DAG.getMergeValues(Ops: {Res, FP2Int.getValue(R: 1)}, dl: DL);
8187 }
8188 SDValue FP2Int = DAG.getNode(Opcode: Op.getOpcode(), DL, VT: IVecVT, Operand: Src);
8189 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT, Operand: FP2Int);
8190 }
8191
8192 // Scalable vectors can exit here. Patterns will handle equally-sized
8193 // conversions as well as halving/doubling ones.
8194 if (!VT.isFixedLengthVector())
8195 return Op;
8196
8197 // For fixed-length vectors we lower to a custom "VL" node.
8198 unsigned RVVOpc = 0;
8199 switch (Op.getOpcode()) {
8200 default:
8201 llvm_unreachable("Impossible opcode");
8202 case ISD::FP_TO_SINT:
8203 RVVOpc = RISCVISD::VFCVT_RTZ_X_F_VL;
8204 break;
8205 case ISD::FP_TO_UINT:
8206 RVVOpc = RISCVISD::VFCVT_RTZ_XU_F_VL;
8207 break;
8208 case ISD::SINT_TO_FP:
8209 RVVOpc = RISCVISD::SINT_TO_FP_VL;
8210 break;
8211 case ISD::UINT_TO_FP:
8212 RVVOpc = RISCVISD::UINT_TO_FP_VL;
8213 break;
8214 case ISD::STRICT_FP_TO_SINT:
8215 RVVOpc = RISCVISD::STRICT_VFCVT_RTZ_X_F_VL;
8216 break;
8217 case ISD::STRICT_FP_TO_UINT:
8218 RVVOpc = RISCVISD::STRICT_VFCVT_RTZ_XU_F_VL;
8219 break;
8220 case ISD::STRICT_SINT_TO_FP:
8221 RVVOpc = RISCVISD::STRICT_SINT_TO_FP_VL;
8222 break;
8223 case ISD::STRICT_UINT_TO_FP:
8224 RVVOpc = RISCVISD::STRICT_UINT_TO_FP_VL;
8225 break;
8226 }
8227
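// Convert the operand to its scalable container type, emit the VL node on
// the container type, then convert the result back to the original
// fixed-length type.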
8228 MVT ContainerVT = getContainerForFixedLengthVector(VT);
8229 MVT SrcContainerVT = getContainerForFixedLengthVector(VT: SrcVT);
8230 assert(ContainerVT.getVectorElementCount() == SrcContainerVT.getVectorElementCount() &&
8231 "Expected same element count");
8232
8233 auto [Mask, VL] = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
8234
8235 Src = convertToScalableVector(VT: SrcContainerVT, V: Src, DAG, Subtarget);
8236 if (IsStrict) {
8237 Src = DAG.getNode(Opcode: RVVOpc, DL, VTList: DAG.getVTList(VT1: ContainerVT, VT2: MVT::Other),
8238 N1: Op.getOperand(i: 0), N2: Src, N3: Mask, N4: VL);
8239 SDValue SubVec = convertFromScalableVector(VT, V: Src, DAG, Subtarget);
8240 return DAG.getMergeValues(Ops: {SubVec, Src.getValue(R: 1)}, dl: DL);
8241 }
8242 Src = DAG.getNode(Opcode: RVVOpc, DL, VT: ContainerVT, N1: Src, N2: Mask, N3: VL);
8243 return convertFromScalableVector(VT, V: Src, DAG, Subtarget);
8244 }
8245 case ISD::FP_TO_SINT_SAT:
8246 case ISD::FP_TO_UINT_SAT:
8247 return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
8248 case ISD::FP_TO_BF16: {
8249 // Custom lower to ensure the libcall return is passed in an FPR on hard
8250 // float ABIs.
8251 assert(!Subtarget.isSoftFPABI() && "Unexpected custom legalization");
8252 SDLoc DL(Op);
8253 MakeLibCallOptions CallOptions;
8254 RTLIB::Libcall LC =
8255 RTLIB::getFPROUND(OpVT: Op.getOperand(i: 0).getValueType(), RetVT: MVT::bf16);
8256 SDValue Res =
8257 makeLibCall(DAG, LC, RetVT: MVT::f32, Ops: Op.getOperand(i: 0), CallOptions, dl: DL).first;
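// The result comes back in an FPR; move it into an integer register with
// fmv.x.w on RV64 or a plain bitcast on RV32.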
8258 if (Subtarget.is64Bit())
8259 return DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTW_RV64, DL, VT: MVT::i64, Operand: Res);
8260 return DAG.getBitcast(VT: MVT::i32, V: Res);
8261 }
8262 case ISD::BF16_TO_FP: {
8263 assert(Subtarget.hasStdExtFOrZfinx() && "Unexpected custom legalization");
8264 MVT VT = Op.getSimpleValueType();
8265 SDLoc DL(Op);
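// bf16 shares the sign/exponent layout of the upper 16 bits of an f32, so
// shifting the bf16 bits left by 16 and reinterpreting them as f32 performs
// the extension exactly.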
8266 Op = DAG.getNode(
8267 Opcode: ISD::SHL, DL, VT: Op.getOperand(i: 0).getValueType(), N1: Op.getOperand(i: 0),
8268 N2: DAG.getShiftAmountConstant(Val: 16, VT: Op.getOperand(i: 0).getValueType(), DL));
8269 SDValue Res = Subtarget.is64Bit()
8270 ? DAG.getNode(Opcode: RISCVISD::FMV_W_X_RV64, DL, VT: MVT::f32, Operand: Op)
8271 : DAG.getBitcast(VT: MVT::f32, V: Op);
8272 // fp_extend if the target VT is bigger than f32.
8273 if (VT != MVT::f32)
8274 return DAG.getNode(Opcode: ISD::FP_EXTEND, DL, VT, Operand: Res);
8275 return Res;
8276 }
8277 case ISD::STRICT_FP_TO_FP16:
8278 case ISD::FP_TO_FP16: {
8279 // Custom lower to ensure the libcall return is passed in an FPR on hard
8280 // float ABIs.
8281 assert(Subtarget.hasStdExtFOrZfinx() && "Unexpected custom legalisation");
8282 SDLoc DL(Op);
8283 MakeLibCallOptions CallOptions;
8284 bool IsStrict = Op->isStrictFPOpcode();
8285 SDValue Op0 = IsStrict ? Op.getOperand(i: 1) : Op.getOperand(i: 0);
8286 SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue();
8287 RTLIB::Libcall LC = RTLIB::getFPROUND(OpVT: Op0.getValueType(), RetVT: MVT::f16);
8288 SDValue Res;
8289 std::tie(args&: Res, args&: Chain) =
8290 makeLibCall(DAG, LC, RetVT: MVT::f32, Ops: Op0, CallOptions, dl: DL, Chain);
8291 if (Subtarget.is64Bit())
8292 return DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTW_RV64, DL, VT: MVT::i64, Operand: Res);
8293 SDValue Result = DAG.getBitcast(VT: MVT::i32, V: IsStrict ? Res.getValue(R: 0) : Res);
8294 if (IsStrict)
8295 return DAG.getMergeValues(Ops: {Result, Chain}, dl: DL);
8296 return Result;
8297 }
8298 case ISD::STRICT_FP16_TO_FP:
8299 case ISD::FP16_TO_FP: {
8300 // Custom lower to ensure the libcall argument is passed in an FPR on hard
8301 // float ABIs.
8302 assert(Subtarget.hasStdExtFOrZfinx() && "Unexpected custom legalisation");
8303 SDLoc DL(Op);
8304 MakeLibCallOptions CallOptions;
8305 bool IsStrict = Op->isStrictFPOpcode();
8306 SDValue Op0 = IsStrict ? Op.getOperand(i: 1) : Op.getOperand(i: 0);
8307 SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue();
8308 SDValue Arg = Subtarget.is64Bit()
8309 ? DAG.getNode(Opcode: RISCVISD::FMV_W_X_RV64, DL, VT: MVT::f32, Operand: Op0)
8310 : DAG.getBitcast(VT: MVT::f32, V: Op0);
8311 SDValue Res;
8312 std::tie(args&: Res, args&: Chain) = makeLibCall(DAG, LC: RTLIB::FPEXT_F16_F32, RetVT: MVT::f32, Ops: Arg,
8313 CallOptions, dl: DL, Chain);
8314 if (IsStrict)
8315 return DAG.getMergeValues(Ops: {Res, Chain}, dl: DL);
8316 return Res;
8317 }
8318 case ISD::FTRUNC:
8319 case ISD::FCEIL:
8320 case ISD::FFLOOR:
8321 case ISD::FNEARBYINT:
8322 case ISD::FRINT:
8323 case ISD::FROUND:
8324 case ISD::FROUNDEVEN:
8325 if (isPromotedOpNeedingSplit(Op, Subtarget))
8326 return SplitVectorOp(Op, DAG);
8327 return lowerFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
8328 case ISD::LRINT:
8329 case ISD::LLRINT:
8330 case ISD::LROUND:
8331 case ISD::LLROUND: {
8332 if (Op.getValueType().isVector())
8333 return lowerVectorXRINT_XROUND(Op, DAG, Subtarget);
8334 assert(Op.getOperand(0).getValueType() == MVT::f16 &&
8335 "Unexpected custom legalisation");
8336 SDLoc DL(Op);
8337 SDValue Ext = DAG.getNode(Opcode: ISD::FP_EXTEND, DL, VT: MVT::f32, Operand: Op.getOperand(i: 0));
8338 return DAG.getNode(Opcode: Op.getOpcode(), DL, VT: Op.getValueType(), Operand: Ext);
8339 }
8340 case ISD::STRICT_LRINT:
8341 case ISD::STRICT_LLRINT:
8342 case ISD::STRICT_LROUND:
8343 case ISD::STRICT_LLROUND: {
8344 assert(Op.getOperand(1).getValueType() == MVT::f16 &&
8345 "Unexpected custom legalisation");
8346 SDLoc DL(Op);
8347 SDValue Ext = DAG.getNode(Opcode: ISD::STRICT_FP_EXTEND, DL, ResultTys: {MVT::f32, MVT::Other},
8348 Ops: {Op.getOperand(i: 0), Op.getOperand(i: 1)});
8349 return DAG.getNode(Opcode: Op.getOpcode(), DL, ResultTys: {Op.getValueType(), MVT::Other},
8350 Ops: {Ext.getValue(R: 1), Ext.getValue(R: 0)});
8351 }
8352 case ISD::VECREDUCE_ADD:
8353 case ISD::VECREDUCE_UMAX:
8354 case ISD::VECREDUCE_SMAX:
8355 case ISD::VECREDUCE_UMIN:
8356 case ISD::VECREDUCE_SMIN:
8357 return lowerVECREDUCE(Op, DAG);
8358 case ISD::VECREDUCE_AND:
8359 case ISD::VECREDUCE_OR:
8360 case ISD::VECREDUCE_XOR:
8361 if (Op.getOperand(i: 0).getValueType().getVectorElementType() == MVT::i1)
8362 return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
8363 return lowerVECREDUCE(Op, DAG);
8364 case ISD::VECREDUCE_FADD:
8365 case ISD::VECREDUCE_SEQ_FADD:
8366 case ISD::VECREDUCE_FMIN:
8367 case ISD::VECREDUCE_FMAX:
8368 case ISD::VECREDUCE_FMAXIMUM:
8369 case ISD::VECREDUCE_FMINIMUM:
8370 return lowerFPVECREDUCE(Op, DAG);
8371 case ISD::VP_REDUCE_ADD:
8372 case ISD::VP_REDUCE_UMAX:
8373 case ISD::VP_REDUCE_SMAX:
8374 case ISD::VP_REDUCE_UMIN:
8375 case ISD::VP_REDUCE_SMIN:
8376 case ISD::VP_REDUCE_FADD:
8377 case ISD::VP_REDUCE_SEQ_FADD:
8378 case ISD::VP_REDUCE_FMIN:
8379 case ISD::VP_REDUCE_FMAX:
8380 case ISD::VP_REDUCE_FMINIMUM:
8381 case ISD::VP_REDUCE_FMAXIMUM:
8382 if (isPromotedOpNeedingSplit(Op: Op.getOperand(i: 1), Subtarget))
8383 return SplitVectorReductionOp(Op, DAG);
8384 return lowerVPREDUCE(Op, DAG);
8385 case ISD::VP_REDUCE_AND:
8386 case ISD::VP_REDUCE_OR:
8387 case ISD::VP_REDUCE_XOR:
8388 if (Op.getOperand(i: 1).getValueType().getVectorElementType() == MVT::i1)
8389 return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
8390 return lowerVPREDUCE(Op, DAG);
8391 case ISD::VP_CTTZ_ELTS:
8392 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8393 return lowerVPCttzElements(Op, DAG);
8394 case ISD::UNDEF: {
8395 MVT ContainerVT = getContainerForFixedLengthVector(VT: Op.getSimpleValueType());
8396 return convertFromScalableVector(VT: Op.getSimpleValueType(),
8397 V: DAG.getUNDEF(VT: ContainerVT), DAG, Subtarget);
8398 }
8399 case ISD::INSERT_SUBVECTOR:
8400 return lowerINSERT_SUBVECTOR(Op, DAG);
8401 case ISD::EXTRACT_SUBVECTOR:
8402 return lowerEXTRACT_SUBVECTOR(Op, DAG);
8403 case ISD::VECTOR_DEINTERLEAVE:
8404 return lowerVECTOR_DEINTERLEAVE(Op, DAG);
8405 case ISD::VECTOR_INTERLEAVE:
8406 return lowerVECTOR_INTERLEAVE(Op, DAG);
8407 case ISD::STEP_VECTOR:
8408 return lowerSTEP_VECTOR(Op, DAG);
8409 case ISD::VECTOR_REVERSE:
8410 return lowerVECTOR_REVERSE(Op, DAG);
8411 case ISD::VECTOR_SPLICE_LEFT:
8412 case ISD::VECTOR_SPLICE_RIGHT:
8413 return lowerVECTOR_SPLICE(Op, DAG);
8414 case ISD::BUILD_VECTOR: {
8415 MVT VT = Op.getSimpleValueType();
8416 MVT EltVT = VT.getVectorElementType();
8417 if (!Subtarget.is64Bit() && EltVT == MVT::i64)
8418 return lowerBuildVectorViaVID(Op, DAG, Subtarget);
8419 return lowerBUILD_VECTOR(Op, DAG, Subtarget);
8420 }
8421 case ISD::SPLAT_VECTOR: {
8422 MVT VT = Op.getSimpleValueType();
8423 MVT EltVT = VT.getVectorElementType();
8424 if ((EltVT == MVT::f16 && !Subtarget.hasStdExtZvfh()) ||
8425 EltVT == MVT::bf16) {
8426 SDLoc DL(Op);
8427 SDValue Elt;
8428 if ((EltVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin()) ||
8429 (EltVT == MVT::f16 && Subtarget.hasStdExtZfhmin()))
8430 Elt = DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTH, DL, VT: Subtarget.getXLenVT(),
8431 Operand: Op.getOperand(i: 0));
8432 else
8433 Elt = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::i16, Operand: Op.getOperand(i: 0));
8434 MVT IVT = VT.changeVectorElementType(EltVT: MVT::i16);
8435 return DAG.getNode(Opcode: ISD::BITCAST, DL, VT,
8436 Operand: DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL, VT: IVT, Operand: Elt));
8437 }
8438
8439 if (EltVT == MVT::i1)
8440 return lowerVectorMaskSplat(Op, DAG);
8441 return SDValue();
8442 }
8443 case ISD::VECTOR_SHUFFLE:
8444 return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
8445 case ISD::CONCAT_VECTORS: {
8446 // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
8447 // better than going through the stack, as the default expansion does.
8448 SDLoc DL(Op);
8449 MVT VT = Op.getSimpleValueType();
8450 MVT ContainerVT = VT;
8451 if (VT.isFixedLengthVector())
8452 ContainerVT = ::getContainerForFixedLengthVector(DAG, VT, Subtarget);
8453
8454 // Recursively split concat_vectors with more than 2 operands:
8455 //
8456 // concat_vector op1, op2, op3, op4
8457 // ->
8458 // concat_vector (concat_vector op1, op2), (concat_vector op3, op4)
8459 //
8460 // This reduces the length of the chain of vslideups and allows us to
8461 // perform the vslideups at a smaller LMUL, limited to MF2.
8462 if (Op.getNumOperands() > 2 &&
8463 ContainerVT.bitsGE(VT: RISCVTargetLowering::getM1VT(VT: ContainerVT))) {
8464 MVT HalfVT = VT.getHalfNumVectorElementsVT();
8465 assert(isPowerOf2_32(Op.getNumOperands()));
8466 size_t HalfNumOps = Op.getNumOperands() / 2;
8467 SDValue Lo = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: HalfVT,
8468 Ops: Op->ops().take_front(N: HalfNumOps));
8469 SDValue Hi = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: HalfVT,
8470 Ops: Op->ops().drop_front(N: HalfNumOps));
8471 return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT, N1: Lo, N2: Hi);
8472 }
8473
8474 unsigned NumOpElts =
8475 Op.getOperand(i: 0).getSimpleValueType().getVectorMinNumElements();
8476 SDValue Vec = DAG.getUNDEF(VT);
8477 for (const auto &OpIdx : enumerate(First: Op->ops())) {
8478 SDValue SubVec = OpIdx.value();
8479 // Don't insert undef subvectors.
8480 if (SubVec.isUndef())
8481 continue;
8482 Vec = DAG.getInsertSubvector(DL, Vec, SubVec, Idx: OpIdx.index() * NumOpElts);
8483 }
8484 return Vec;
8485 }
8486 case ISD::LOAD: {
8487 auto *Load = cast<LoadSDNode>(Val&: Op);
8488 EVT VT = Load->getValueType(ResNo: 0);
8489 if (VT == MVT::f64) {
8490 assert(Subtarget.hasStdExtZdinx() && !Subtarget.hasStdExtZilsd() &&
8491 !Subtarget.is64Bit() && "Unexpected custom legalisation");
8492
8493 // Replace a double precision load with two i32 loads and a BuildPairF64.
8494 SDLoc DL(Op);
8495 SDValue BasePtr = Load->getBasePtr();
8496 SDValue Chain = Load->getChain();
8497
8498 SDValue Lo =
8499 DAG.getLoad(VT: MVT::i32, dl: DL, Chain, Ptr: BasePtr, PtrInfo: Load->getPointerInfo(),
8500 Alignment: Load->getBaseAlign(), MMOFlags: Load->getMemOperand()->getFlags());
8501 BasePtr = DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: 4));
8502 SDValue Hi = DAG.getLoad(
8503 VT: MVT::i32, dl: DL, Chain, Ptr: BasePtr, PtrInfo: Load->getPointerInfo().getWithOffset(O: 4),
8504 Alignment: Load->getBaseAlign(), MMOFlags: Load->getMemOperand()->getFlags());
8505 Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, N1: Lo.getValue(R: 1),
8506 N2: Hi.getValue(R: 1));
8507
8508 // For big-endian, swap the order of Lo and Hi.
8509 if (!Subtarget.isLittleEndian())
8510 std::swap(a&: Lo, b&: Hi);
8511
8512 SDValue Pair = DAG.getNode(Opcode: RISCVISD::BuildPairF64, DL, VT: MVT::f64, N1: Lo, N2: Hi);
8513 return DAG.getMergeValues(Ops: {Pair, Chain}, dl: DL);
8514 }
8515
8516 if (VT == MVT::bf16)
8517 return lowerXAndesBfHCvtBFloat16Load(Op, DAG);
8518
8519 // Handle normal vector tuple load.
8520 if (VT.isRISCVVectorTuple()) {
8521 SDLoc DL(Op);
8522 MVT XLenVT = Subtarget.getXLenVT();
8523 unsigned NF = VT.getRISCVVectorTupleNumFields();
8524 unsigned Sz = VT.getSizeInBits().getKnownMinValue();
8525 unsigned NumElts = Sz / (NF * 8);
8526 int Log2LMUL = Log2_64(Value: NumElts) - 3;
8527
8528 auto Flag = SDNodeFlags();
8529 Flag.setNoUnsignedWrap(true);
8530 SDValue Ret = DAG.getUNDEF(VT);
8531 SDValue BasePtr = Load->getBasePtr();
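// Each field of the tuple occupies LMUL vector registers, so the byte
// stride between consecutive fields is VLENB scaled by LMUL (clamped to a
// whole register for fractional LMUL).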
8532 SDValue VROffset = DAG.getNode(Opcode: RISCVISD::READ_VLENB, DL, VT: XLenVT);
8533 VROffset =
8534 DAG.getNode(Opcode: ISD::SHL, DL, VT: XLenVT, N1: VROffset,
8535 N2: DAG.getConstant(Val: std::max(a: Log2LMUL, b: 0), DL, VT: XLenVT));
8536 SmallVector<SDValue, 8> OutChains;
8537
8538 // Load NF vector registers and combine them to a vector tuple.
8539 for (unsigned i = 0; i < NF; ++i) {
8540 SDValue LoadVal = DAG.getLoad(
8541 VT: MVT::getScalableVectorVT(VT: MVT::i8, NumElements: NumElts), dl: DL, Chain: Load->getChain(),
8542 Ptr: BasePtr, PtrInfo: MachinePointerInfo(Load->getAddressSpace()), Alignment: Align(8));
8543 OutChains.push_back(Elt: LoadVal.getValue(R: 1));
8544 Ret = DAG.getNode(Opcode: RISCVISD::TUPLE_INSERT, DL, VT, N1: Ret, N2: LoadVal,
8545 N3: DAG.getTargetConstant(Val: i, DL, VT: MVT::i32));
8546 BasePtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: XLenVT, N1: BasePtr, N2: VROffset, Flags: Flag);
8547 }
8548 return DAG.getMergeValues(
8549 Ops: {Ret, DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: OutChains)}, dl: DL);
8550 }
8551
8552 if (auto V = expandUnalignedRVVLoad(Op, DAG))
8553 return V;
8554 if (Op.getValueType().isFixedLengthVector())
8555 return lowerFixedLengthVectorLoadToRVV(Op, DAG);
8556 return Op;
8557 }
8558 case ISD::STORE: {
8559 auto *Store = cast<StoreSDNode>(Val&: Op);
8560 SDValue StoredVal = Store->getValue();
8561 EVT VT = StoredVal.getValueType();
8562 if (Subtarget.enablePExtSIMDCodeGen()) {
8563 if (VT == MVT::v2i16 || VT == MVT::v4i8) {
8564 SDLoc DL(Op);
8565 SDValue Cast = DAG.getBitcast(VT: MVT::i32, V: StoredVal);
8566 SDValue NewStore =
8567 DAG.getStore(Chain: Store->getChain(), dl: DL, Val: Cast, Ptr: Store->getBasePtr(),
8568 PtrInfo: Store->getPointerInfo(), Alignment: Store->getBaseAlign(),
8569 MMOFlags: Store->getMemOperand()->getFlags());
8570 return NewStore;
8571 }
8572 }
8573 if (VT == MVT::f64) {
8574 assert(Subtarget.hasStdExtZdinx() && !Subtarget.hasStdExtZilsd() &&
8575 !Subtarget.is64Bit() && "Unexpected custom legalisation");
8576
8577 // Replace a double precision store with a SplitF64 and i32 stores.
8578 SDLoc DL(Op);
8579 SDValue BasePtr = Store->getBasePtr();
8580 SDValue Chain = Store->getChain();
8581 SDValue Split = DAG.getNode(Opcode: RISCVISD::SplitF64, DL,
8582 VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: StoredVal);
8583
8584 SDValue Lo = Split.getValue(R: 0);
8585 SDValue Hi = Split.getValue(R: 1);
8586
8587 // For big-endian, swap the order of Lo and Hi before storing.
8588 if (!Subtarget.isLittleEndian())
8589 std::swap(a&: Lo, b&: Hi);
8590
8591 SDValue LoStore = DAG.getStore(
8592 Chain, dl: DL, Val: Lo, Ptr: BasePtr, PtrInfo: Store->getPointerInfo(),
8593 Alignment: Store->getBaseAlign(), MMOFlags: Store->getMemOperand()->getFlags());
8594 BasePtr = DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: 4));
8595 SDValue HiStore = DAG.getStore(
8596 Chain, dl: DL, Val: Hi, Ptr: BasePtr, PtrInfo: Store->getPointerInfo().getWithOffset(O: 4),
8597 Alignment: Store->getBaseAlign(), MMOFlags: Store->getMemOperand()->getFlags());
8598 return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, N1: LoStore, N2: HiStore);
8599 }
8600 if (VT == MVT::i64) {
8601 assert(Subtarget.hasStdExtZilsd() && !Subtarget.is64Bit() &&
8602 "Unexpected custom legalisation");
8603 if (Store->isTruncatingStore())
8604 return SDValue();
8605
8606 if (Store->getAlign() < Subtarget.getZilsdAlign())
8607 return SDValue();
8608
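// Split the i64 value into its two i32 halves and emit a single SD_RV32
// node, which selects to the Zilsd sd instruction storing a register pair.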
8609 SDLoc DL(Op);
8610 SDValue Lo = DAG.getNode(Opcode: ISD::EXTRACT_ELEMENT, DL, VT: MVT::i32, N1: StoredVal,
8611 N2: DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32));
8612 SDValue Hi = DAG.getNode(Opcode: ISD::EXTRACT_ELEMENT, DL, VT: MVT::i32, N1: StoredVal,
8613 N2: DAG.getTargetConstant(Val: 1, DL, VT: MVT::i32));
8614
8615 return DAG.getMemIntrinsicNode(
8616 Opcode: RISCVISD::SD_RV32, dl: DL, VTList: DAG.getVTList(VT: MVT::Other),
8617 Ops: {Store->getChain(), Lo, Hi, Store->getBasePtr()}, MemVT: MVT::i64,
8618 MMO: Store->getMemOperand());
8619 }
8620
8621 if (VT == MVT::bf16)
8622 return lowerXAndesBfHCvtBFloat16Store(Op, DAG);
8623
8624 // Handle normal vector tuple store.
8625 if (VT.isRISCVVectorTuple()) {
8626 SDLoc DL(Op);
8627 MVT XLenVT = Subtarget.getXLenVT();
8628 unsigned NF = VT.getRISCVVectorTupleNumFields();
8629 unsigned Sz = VT.getSizeInBits().getKnownMinValue();
8630 unsigned NumElts = Sz / (NF * 8);
8631 int Log2LMUL = Log2_64(Value: NumElts) - 3;
8632
8633 auto Flag = SDNodeFlags();
8634 Flag.setNoUnsignedWrap(true);
8635 SDValue Ret;
8636 SDValue Chain = Store->getChain();
8637 SDValue BasePtr = Store->getBasePtr();
8638 SDValue VROffset = DAG.getNode(Opcode: RISCVISD::READ_VLENB, DL, VT: XLenVT);
8639 VROffset =
8640 DAG.getNode(Opcode: ISD::SHL, DL, VT: XLenVT, N1: VROffset,
8641 N2: DAG.getConstant(Val: std::max(a: Log2LMUL, b: 0), DL, VT: XLenVT));
8642
8643 // Extract subregisters in a vector tuple and store them individually.
8644 for (unsigned i = 0; i < NF; ++i) {
8645 auto Extract =
8646 DAG.getNode(Opcode: RISCVISD::TUPLE_EXTRACT, DL,
8647 VT: MVT::getScalableVectorVT(VT: MVT::i8, NumElements: NumElts), N1: StoredVal,
8648 N2: DAG.getTargetConstant(Val: i, DL, VT: MVT::i32));
8649 Ret = DAG.getStore(Chain, dl: DL, Val: Extract, Ptr: BasePtr,
8650 PtrInfo: MachinePointerInfo(Store->getAddressSpace()),
8651 Alignment: Store->getBaseAlign(),
8652 MMOFlags: Store->getMemOperand()->getFlags());
8653 Chain = Ret.getValue(R: 0);
8654 BasePtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: XLenVT, N1: BasePtr, N2: VROffset, Flags: Flag);
8655 }
8656 return Ret;
8657 }
8658
8659 if (auto V = expandUnalignedRVVStore(Op, DAG))
8660 return V;
8661 if (Op.getOperand(i: 1).getValueType().isFixedLengthVector())
8662 return lowerFixedLengthVectorStoreToRVV(Op, DAG);
8663 return Op;
8664 }
8665 case ISD::VP_LOAD:
8666 if (SDValue V = expandUnalignedVPLoad(Op, DAG))
8667 return V;
8668 [[fallthrough]];
8669 case ISD::MLOAD:
8670 return lowerMaskedLoad(Op, DAG);
8671 case ISD::VP_LOAD_FF:
8672 return lowerLoadFF(Op, DAG);
8673 case ISD::VP_STORE:
8674 if (SDValue V = expandUnalignedVPStore(Op, DAG))
8675 return V;
8676 [[fallthrough]];
8677 case ISD::MSTORE:
8678 return lowerMaskedStore(Op, DAG);
8679 case ISD::VECTOR_COMPRESS:
8680 return lowerVectorCompress(Op, DAG);
8681 case ISD::SELECT_CC: {
8682 // This occurs because we custom legalize SETGT and SETUGT for setcc. That
8683 // causes LegalizeDAG to think we need to custom legalize select_cc. Expand
8684 // into separate SETCC+SELECT just like LegalizeDAG.
8685 SDValue Tmp1 = Op.getOperand(i: 0);
8686 SDValue Tmp2 = Op.getOperand(i: 1);
8687 SDValue True = Op.getOperand(i: 2);
8688 SDValue False = Op.getOperand(i: 3);
8689 EVT VT = Op.getValueType();
8690 SDValue CC = Op.getOperand(i: 4);
8691 EVT CmpVT = Tmp1.getValueType();
8692 EVT CCVT =
8693 getSetCCResultType(DL: DAG.getDataLayout(), Context&: *DAG.getContext(), VT: CmpVT);
8694 SDLoc DL(Op);
8695 SDValue Cond =
8696 DAG.getNode(Opcode: ISD::SETCC, DL, VT: CCVT, N1: Tmp1, N2: Tmp2, N3: CC, Flags: Op->getFlags());
8697 return DAG.getSelect(DL, VT, Cond, LHS: True, RHS: False);
8698 }
8699 case ISD::SETCC: {
8700 MVT OpVT = Op.getOperand(i: 0).getSimpleValueType();
8701 if (OpVT.isScalarInteger()) {
8702 MVT VT = Op.getSimpleValueType();
8703 SDValue LHS = Op.getOperand(i: 0);
8704 SDValue RHS = Op.getOperand(i: 1);
8705 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val: Op.getOperand(i: 2))->get();
8706 assert((CCVal == ISD::SETGT || CCVal == ISD::SETUGT) &&
8707 "Unexpected CondCode");
8708
8709 SDLoc DL(Op);
8710
8711 // If the RHS is a constant in the range [-2049, 0) or (0, 2046], we can
8712 // convert this to the equivalent of (set(u)ge X, C+1) by using
8713 // (xori (slti(u) X, C+1), 1). This avoids materializing a small constant
8714 // in a register.
8715 if (isa<ConstantSDNode>(Val: RHS)) {
8716 int64_t Imm = cast<ConstantSDNode>(Val&: RHS)->getSExtValue();
8717 if (Imm != 0 && isInt<12>(x: (uint64_t)Imm + 1)) {
8718 // If this is an unsigned compare and the constant is -1, incrementing
8719 // the constant would change behavior. The result should be false.
8720 if (CCVal == ISD::SETUGT && Imm == -1)
8721 return DAG.getConstant(Val: 0, DL, VT);
8722 // Using getSetCCSwappedOperands will convert SET(U)GT->SET(U)LT.
8723 CCVal = ISD::getSetCCSwappedOperands(Operation: CCVal);
8724 SDValue SetCC = DAG.getSetCC(
8725 DL, VT, LHS, RHS: DAG.getSignedConstant(Val: Imm + 1, DL, VT: OpVT), Cond: CCVal);
8726 return DAG.getLogicalNOT(DL, Val: SetCC, VT);
8727 }
8728 // Lower (setugt X, 2047) as (setne (srl X, 11), 0).
8729 if (CCVal == ISD::SETUGT && Imm == 2047) {
8730 SDValue Shift = DAG.getNode(Opcode: ISD::SRL, DL, VT: OpVT, N1: LHS,
8731 N2: DAG.getShiftAmountConstant(Val: 11, VT: OpVT, DL));
8732 return DAG.getSetCC(DL, VT, LHS: Shift, RHS: DAG.getConstant(Val: 0, DL, VT: OpVT),
8733 Cond: ISD::SETNE);
8734 }
8735 }
8736
8737 // Not a constant we could handle, swap the operands and condition code to
8738 // SETLT/SETULT.
8739 CCVal = ISD::getSetCCSwappedOperands(Operation: CCVal);
8740 return DAG.getSetCC(DL, VT, LHS: RHS, RHS: LHS, Cond: CCVal);
8741 }
8742
8743 if (isPromotedOpNeedingSplit(Op: Op.getOperand(i: 0), Subtarget))
8744 return SplitVectorOp(Op, DAG);
8745
8746 return lowerToScalableOp(Op, DAG);
8747 }
8748 case ISD::ADD:
8749 case ISD::SUB:
8750 case ISD::MUL:
8751 case ISD::MULHS:
8752 case ISD::MULHU:
8753 case ISD::AND:
8754 case ISD::OR:
8755 case ISD::XOR:
8756 case ISD::SDIV:
8757 case ISD::SREM:
8758 case ISD::UDIV:
8759 case ISD::UREM:
8760 case ISD::BSWAP:
8761 case ISD::CTPOP:
8762 case ISD::VSELECT:
8763 return lowerToScalableOp(Op, DAG);
8764 case ISD::SHL:
8765 case ISD::SRL:
8766 case ISD::SRA:
8767 if (Op.getSimpleValueType().isFixedLengthVector()) {
8768 if (Subtarget.enablePExtSIMDCodeGen()) {
8769 // We have patterns for scalar/immediate shift amount, so no lowering
8770 // needed.
8771 if (Op.getOperand(i: 1)->getOpcode() == ISD::SPLAT_VECTOR)
8772 return Op;
8773
8774 // There is no vector-vector shift instruction in the P extension, so we
8775 // need to unroll to scalar computations and pack the results back.
8776 return DAG.UnrollVectorOp(N: Op.getNode());
8777 }
8778 return lowerToScalableOp(Op, DAG);
8779 }
8780 // This can be called for an i32 shift amount that needs to be promoted.
8781 assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
8782 "Unexpected custom legalisation");
8783 return SDValue();
8784 case ISD::FABS:
8785 case ISD::FNEG:
8786 if (Op.getValueType() == MVT::f16 || Op.getValueType() == MVT::bf16)
8787 return lowerFABSorFNEG(Op, DAG, Subtarget);
8788 [[fallthrough]];
8789 case ISD::FADD:
8790 case ISD::FSUB:
8791 case ISD::FMUL:
8792 case ISD::FDIV:
8793 case ISD::FSQRT:
8794 case ISD::FMA:
8795 case ISD::FMINNUM:
8796 case ISD::FMAXNUM:
8797 case ISD::FMINIMUMNUM:
8798 case ISD::FMAXIMUMNUM:
8799 if (isPromotedOpNeedingSplit(Op, Subtarget))
8800 return SplitVectorOp(Op, DAG);
8801 [[fallthrough]];
8802 case ISD::AVGFLOORS:
8803 case ISD::AVGFLOORU:
8804 case ISD::AVGCEILS:
8805 case ISD::AVGCEILU:
8806 case ISD::SMIN:
8807 case ISD::SMAX:
8808 case ISD::UMIN:
8809 case ISD::UMAX:
8810 case ISD::UADDSAT:
8811 case ISD::USUBSAT:
8812 case ISD::SADDSAT:
8813 case ISD::SSUBSAT:
8814 return lowerToScalableOp(Op, DAG);
8815 case ISD::ABDS:
8816 case ISD::ABDU: {
8817 SDLoc dl(Op);
8818 EVT VT = Op->getValueType(ResNo: 0);
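// Freeze the operands so that the two uses below (in the max and the min)
// are guaranteed to observe the same value even if the input is poison or
// undef.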
8819 SDValue LHS = DAG.getFreeze(V: Op->getOperand(Num: 0));
8820 SDValue RHS = DAG.getFreeze(V: Op->getOperand(Num: 1));
8821 bool IsSigned = Op->getOpcode() == ISD::ABDS;
8822
8823 // abds(lhs, rhs) -> sub(smax(lhs,rhs), smin(lhs,rhs))
8824 // abdu(lhs, rhs) -> sub(umax(lhs,rhs), umin(lhs,rhs))
8825 unsigned MaxOpc = IsSigned ? ISD::SMAX : ISD::UMAX;
8826 unsigned MinOpc = IsSigned ? ISD::SMIN : ISD::UMIN;
8827 SDValue Max = DAG.getNode(Opcode: MaxOpc, DL: dl, VT, N1: LHS, N2: RHS);
8828 SDValue Min = DAG.getNode(Opcode: MinOpc, DL: dl, VT, N1: LHS, N2: RHS);
8829 return DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: Max, N2: Min);
8830 }
8831 case ISD::ABS:
8832 case ISD::VP_ABS:
8833 return lowerABS(Op, DAG);
8834 case ISD::CTLZ:
8835 case ISD::CTLZ_ZERO_UNDEF:
8836 case ISD::CTTZ:
8837 case ISD::CTTZ_ZERO_UNDEF:
8838 if (Subtarget.hasStdExtZvbb())
8839 return lowerToScalableOp(Op, DAG);
8840 assert(Op.getOpcode() != ISD::CTTZ);
8841 return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
8842 case ISD::FCOPYSIGN:
8843 if (Op.getValueType() == MVT::f16 || Op.getValueType() == MVT::bf16)
8844 return lowerFCOPYSIGN(Op, DAG, Subtarget);
8845 if (isPromotedOpNeedingSplit(Op, Subtarget))
8846 return SplitVectorOp(Op, DAG);
8847 return lowerToScalableOp(Op, DAG);
8848 case ISD::STRICT_FADD:
8849 case ISD::STRICT_FSUB:
8850 case ISD::STRICT_FMUL:
8851 case ISD::STRICT_FDIV:
8852 case ISD::STRICT_FSQRT:
8853 case ISD::STRICT_FMA:
8854 if (isPromotedOpNeedingSplit(Op, Subtarget))
8855 return SplitStrictFPVectorOp(Op, DAG);
8856 return lowerToScalableOp(Op, DAG);
8857 case ISD::STRICT_FSETCC:
8858 case ISD::STRICT_FSETCCS:
8859 return lowerVectorStrictFSetcc(Op, DAG);
8860 case ISD::STRICT_FCEIL:
8861 case ISD::STRICT_FRINT:
8862 case ISD::STRICT_FFLOOR:
8863 case ISD::STRICT_FTRUNC:
8864 case ISD::STRICT_FNEARBYINT:
8865 case ISD::STRICT_FROUND:
8866 case ISD::STRICT_FROUNDEVEN:
8867 return lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
8868 case ISD::MGATHER:
8869 case ISD::VP_GATHER:
8870 return lowerMaskedGather(Op, DAG);
8871 case ISD::MSCATTER:
8872 case ISD::VP_SCATTER:
8873 return lowerMaskedScatter(Op, DAG);
8874 case ISD::GET_ROUNDING:
8875 return lowerGET_ROUNDING(Op, DAG);
8876 case ISD::SET_ROUNDING:
8877 return lowerSET_ROUNDING(Op, DAG);
8878 case ISD::GET_FPENV:
8879 return lowerGET_FPENV(Op, DAG);
8880 case ISD::SET_FPENV:
8881 return lowerSET_FPENV(Op, DAG);
8882 case ISD::RESET_FPENV:
8883 return lowerRESET_FPENV(Op, DAG);
8884 case ISD::GET_FPMODE:
8885 return lowerGET_FPMODE(Op, DAG);
8886 case ISD::SET_FPMODE:
8887 return lowerSET_FPMODE(Op, DAG);
8888 case ISD::RESET_FPMODE:
8889 return lowerRESET_FPMODE(Op, DAG);
8890 case ISD::EH_DWARF_CFA:
8891 return lowerEH_DWARF_CFA(Op, DAG);
8892 case ISD::VP_MERGE:
8893 if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)
8894 return lowerVPMergeMask(Op, DAG);
8895 [[fallthrough]];
8896 case ISD::VP_SELECT:
8897 case ISD::VP_ADD:
8898 case ISD::VP_SUB:
8899 case ISD::VP_MUL:
8900 case ISD::VP_SDIV:
8901 case ISD::VP_UDIV:
8902 case ISD::VP_SREM:
8903 case ISD::VP_UREM:
8904 case ISD::VP_UADDSAT:
8905 case ISD::VP_USUBSAT:
8906 case ISD::VP_SADDSAT:
8907 case ISD::VP_SSUBSAT:
8908 case ISD::VP_LRINT:
8909 case ISD::VP_LLRINT:
8910 return lowerVPOp(Op, DAG);
8911 case ISD::VP_AND:
8912 case ISD::VP_OR:
8913 case ISD::VP_XOR:
8914 return lowerLogicVPOp(Op, DAG);
8915 case ISD::VP_FADD:
8916 case ISD::VP_FSUB:
8917 case ISD::VP_FMUL:
8918 case ISD::VP_FDIV:
8919 case ISD::VP_FNEG:
8920 case ISD::VP_FABS:
8921 case ISD::VP_SQRT:
8922 case ISD::VP_FMA:
8923 case ISD::VP_FMINNUM:
8924 case ISD::VP_FMAXNUM:
8925 case ISD::VP_FCOPYSIGN:
8926 if (isPromotedOpNeedingSplit(Op, Subtarget))
8927 return SplitVPOp(Op, DAG);
8928 [[fallthrough]];
8929 case ISD::VP_SRA:
8930 case ISD::VP_SRL:
8931 case ISD::VP_SHL:
8932 return lowerVPOp(Op, DAG);
8933 case ISD::VP_IS_FPCLASS:
8934 return LowerIS_FPCLASS(Op, DAG);
8935 case ISD::VP_SIGN_EXTEND:
8936 case ISD::VP_ZERO_EXTEND:
8937 if (Op.getOperand(i: 0).getSimpleValueType().getVectorElementType() == MVT::i1)
8938 return lowerVPExtMaskOp(Op, DAG);
8939 return lowerVPOp(Op, DAG);
8940 case ISD::VP_TRUNCATE:
8941 return lowerVectorTruncLike(Op, DAG);
8942 case ISD::VP_FP_EXTEND:
8943 case ISD::VP_FP_ROUND:
8944 return lowerVectorFPExtendOrRoundLike(Op, DAG);
8945 case ISD::VP_SINT_TO_FP:
8946 case ISD::VP_UINT_TO_FP:
8947 if (Op.getValueType().isVector() &&
8948 ((Op.getValueType().getScalarType() == MVT::f16 &&
8949 (Subtarget.hasVInstructionsF16Minimal() &&
8950 !Subtarget.hasVInstructionsF16())) ||
8951 Op.getValueType().getScalarType() == MVT::bf16)) {
8952 if (isPromotedOpNeedingSplit(Op, Subtarget))
8953 return SplitVectorOp(Op, DAG);
8954 // int -> f32
8955 SDLoc DL(Op);
8956 MVT NVT =
8957 MVT::getVectorVT(VT: MVT::f32, EC: Op.getValueType().getVectorElementCount());
8958 auto NC = DAG.getNode(Opcode: Op.getOpcode(), DL, VT: NVT, Ops: Op->ops());
8959 // f32 -> [b]f16
8960 return DAG.getNode(Opcode: ISD::FP_ROUND, DL, VT: Op.getValueType(), N1: NC,
8961 N2: DAG.getIntPtrConstant(Val: 0, DL, /*isTarget=*/true));
8962 }
8963 [[fallthrough]];
8964 case ISD::VP_FP_TO_SINT:
8965 case ISD::VP_FP_TO_UINT:
8966 if (SDValue Op1 = Op.getOperand(i: 0);
8967 Op1.getValueType().isVector() &&
8968 ((Op1.getValueType().getScalarType() == MVT::f16 &&
8969 (Subtarget.hasVInstructionsF16Minimal() &&
8970 !Subtarget.hasVInstructionsF16())) ||
8971 Op1.getValueType().getScalarType() == MVT::bf16)) {
8972 if (isPromotedOpNeedingSplit(Op: Op1, Subtarget))
8973 return SplitVectorOp(Op, DAG);
8974 // [b]f16 -> f32
8975 SDLoc DL(Op);
8976 MVT NVT = MVT::getVectorVT(VT: MVT::f32,
8977 EC: Op1.getValueType().getVectorElementCount());
8978 SDValue WidenVec = DAG.getNode(Opcode: ISD::FP_EXTEND, DL, VT: NVT, Operand: Op1);
8979 // f32 -> int
8980 return DAG.getNode(Opcode: Op.getOpcode(), DL, VT: Op.getValueType(),
8981 Ops: {WidenVec, Op.getOperand(i: 1), Op.getOperand(i: 2)});
8982 }
8983 return lowerVPFPIntConvOp(Op, DAG);
8984 case ISD::VP_SETCC:
8985 if (isPromotedOpNeedingSplit(Op: Op.getOperand(i: 0), Subtarget))
8986 return SplitVPOp(Op, DAG);
8987 if (Op.getOperand(i: 0).getSimpleValueType().getVectorElementType() == MVT::i1)
8988 return lowerVPSetCCMaskOp(Op, DAG);
8989 [[fallthrough]];
8990 case ISD::VP_SMIN:
8991 case ISD::VP_SMAX:
8992 case ISD::VP_UMIN:
8993 case ISD::VP_UMAX:
8994 case ISD::VP_BITREVERSE:
8995 case ISD::VP_BSWAP:
8996 return lowerVPOp(Op, DAG);
8997 case ISD::VP_CTLZ:
8998 case ISD::VP_CTLZ_ZERO_UNDEF:
8999 if (Subtarget.hasStdExtZvbb())
9000 return lowerVPOp(Op, DAG);
9001 return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
9002 case ISD::VP_CTTZ:
9003 case ISD::VP_CTTZ_ZERO_UNDEF:
9004 if (Subtarget.hasStdExtZvbb())
9005 return lowerVPOp(Op, DAG);
9006 return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
9007 case ISD::VP_CTPOP:
9008 return lowerVPOp(Op, DAG);
9009 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
9010 return lowerVPStridedLoad(Op, DAG);
9011 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
9012 return lowerVPStridedStore(Op, DAG);
9013 case ISD::VP_FCEIL:
9014 case ISD::VP_FFLOOR:
9015 case ISD::VP_FRINT:
9016 case ISD::VP_FNEARBYINT:
9017 case ISD::VP_FROUND:
9018 case ISD::VP_FROUNDEVEN:
9019 case ISD::VP_FROUNDTOZERO:
9020 if (isPromotedOpNeedingSplit(Op, Subtarget))
9021 return SplitVPOp(Op, DAG);
9022 return lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
9023 case ISD::VP_FMAXIMUM:
9024 case ISD::VP_FMINIMUM:
9025 if (isPromotedOpNeedingSplit(Op, Subtarget))
9026 return SplitVPOp(Op, DAG);
9027 return lowerFMAXIMUM_FMINIMUM(Op, DAG, Subtarget);
9028 case ISD::EXPERIMENTAL_VP_SPLICE:
9029 return lowerVPSpliceExperimental(Op, DAG);
9030 case ISD::EXPERIMENTAL_VP_REVERSE:
9031 return lowerVPReverseExperimental(Op, DAG);
9032 case ISD::CLEAR_CACHE: {
9033 assert(getTargetMachine().getTargetTriple().isOSLinux() &&
9034 "llvm.clear_cache only needs custom lower on Linux targets");
9035 SDLoc DL(Op);
9036 SDValue Flags = DAG.getConstant(Val: 0, DL, VT: Subtarget.getXLenVT());
9037 return emitFlushICache(DAG, InChain: Op.getOperand(i: 0), Start: Op.getOperand(i: 1),
9038 End: Op.getOperand(i: 2), Flags, DL);
9039 }
9040 case ISD::DYNAMIC_STACKALLOC:
9041 return lowerDYNAMIC_STACKALLOC(Op, DAG);
9042 case ISD::INIT_TRAMPOLINE:
9043 return lowerINIT_TRAMPOLINE(Op, DAG);
9044 case ISD::ADJUST_TRAMPOLINE:
9045 return lowerADJUST_TRAMPOLINE(Op, DAG);
9046 case ISD::PARTIAL_REDUCE_UMLA:
9047 case ISD::PARTIAL_REDUCE_SMLA:
9048 case ISD::PARTIAL_REDUCE_SUMLA:
9049 return lowerPARTIAL_REDUCE_MLA(Op, DAG);
9050 }
9051}
9052
9053SDValue RISCVTargetLowering::emitFlushICache(SelectionDAG &DAG, SDValue InChain,
9054 SDValue Start, SDValue End,
9055 SDValue Flags, SDLoc DL) const {
9056 MakeLibCallOptions CallOptions;
9057 std::pair<SDValue, SDValue> CallResult =
9058 makeLibCall(DAG, LC: RTLIB::RISCV_FLUSH_ICACHE, RetVT: MVT::isVoid,
9059 Ops: {Start, End, Flags}, CallOptions, dl: DL, Chain: InChain);
9060
9061 // This function returns void so only the out chain matters.
9062 return CallResult.second;
9063}
9064
9065SDValue RISCVTargetLowering::lowerINIT_TRAMPOLINE(SDValue Op,
9066 SelectionDAG &DAG) const {
9067 if (!Subtarget.is64Bit())
9068 llvm::reportFatalUsageError(reason: "Trampolines only implemented for RV64");
9069
9070 // Create an MCCodeEmitter to encode instructions.
9071 TargetLoweringObjectFile *TLO = getTargetMachine().getObjFileLowering();
9072 assert(TLO);
9073 MCContext &MCCtx = TLO->getContext();
9074
9075 std::unique_ptr<MCCodeEmitter> CodeEmitter(
9076 createRISCVMCCodeEmitter(MCII: *getTargetMachine().getMCInstrInfo(), Ctx&: MCCtx));
9077
9078 SDValue Root = Op.getOperand(i: 0);
9079 SDValue Trmp = Op.getOperand(i: 1); // trampoline
9080 SDLoc dl(Op);
9081
9082 const Value *TrmpAddr = cast<SrcValueSDNode>(Val: Op.getOperand(i: 4))->getValue();
9083
9084 // We store in the trampoline buffer the following instructions and data.
9085 // Offset:
9086 // 0: auipc t2, 0
9087 // 4: ld t0, 24(t2)
9088 // 8: ld t2, 16(t2)
9089 // 12: jalr t0
9090 // 16: <StaticChainOffset>
9091 // 24: <FunctionAddressOffset>
9092 // 32:
9093 // Offset with branch control flow protection enabled:
9094 // 0: lpad <imm20>
9095 // 4: auipc t3, 0
9096 // 8: ld t2, 24(t3)
9097 // 12: ld t3, 16(t3)
9098 // 16: jalr t2
9099 // 20: <StaticChainOffset>
9100 // 28: <FunctionAddressOffset>
9101 // 36:
9102
9103 const bool HasCFBranch =
9104 Subtarget.hasStdExtZicfilp() &&
9105 DAG.getMachineFunction().getFunction().getParent()->getModuleFlag(
9106 Key: "cf-protection-branch");
9107 const unsigned StaticChainIdx = HasCFBranch ? 5 : 4;
9108 const unsigned StaticChainOffset = StaticChainIdx * 4;
9109 const unsigned FunctionAddressOffset = StaticChainOffset + 8;
9110
9111 const MCSubtargetInfo *STI = getTargetMachine().getMCSubtargetInfo();
9112 assert(STI);
9113 auto GetEncoding = [&](const MCInst &MC) {
9114 SmallVector<char, 4> CB;
9115 SmallVector<MCFixup> Fixups;
9116 CodeEmitter->encodeInstruction(Inst: MC, CB, Fixups, STI: *STI);
9117 uint32_t Encoding = support::endian::read32le(P: CB.data());
9118 return Encoding;
9119 };
9120
9121 SmallVector<SDValue> OutChains;
9122
9123 SmallVector<uint32_t> Encodings;
9124 if (!HasCFBranch) {
9125 Encodings.append(
9126 IL: {// auipc t2, 0
9127 // Loads the current PC into t2.
9128 GetEncoding(MCInstBuilder(RISCV::AUIPC).addReg(Reg: RISCV::X7).addImm(Val: 0)),
9129 // ld t0, 24(t2)
9130 // Loads the function address into t0. Note that we are using offsets
9131 // pc-relative to the first instruction of the trampoline.
9132 GetEncoding(MCInstBuilder(RISCV::LD)
9133 .addReg(Reg: RISCV::X5)
9134 .addReg(Reg: RISCV::X7)
9135 .addImm(Val: FunctionAddressOffset)),
9136 // ld t2, 16(t2)
9137 // Load the value of the static chain.
9138 GetEncoding(MCInstBuilder(RISCV::LD)
9139 .addReg(Reg: RISCV::X7)
9140 .addReg(Reg: RISCV::X7)
9141 .addImm(Val: StaticChainOffset)),
9142 // jalr t0
9143 // Jump to the function.
9144 GetEncoding(MCInstBuilder(RISCV::JALR)
9145 .addReg(Reg: RISCV::X0)
9146 .addReg(Reg: RISCV::X5)
9147 .addImm(Val: 0))});
9148 } else {
9149 Encodings.append(
9150 IL: {// auipc x0, <imm20> (lpad <imm20>)
9151 // Landing pad.
9152 GetEncoding(MCInstBuilder(RISCV::AUIPC).addReg(Reg: RISCV::X0).addImm(Val: 0)),
9153 // auipc t3, 0
9154 // Loads the current PC into t3.
9155 GetEncoding(MCInstBuilder(RISCV::AUIPC).addReg(Reg: RISCV::X28).addImm(Val: 0)),
9156 // ld t2, (FunctionAddressOffset - 4)(t3)
9157 // Loads the function address into t2. Note that we are using offsets
9158 // pc-relative to the SECOND instruction of the trampoline.
9159 GetEncoding(MCInstBuilder(RISCV::LD)
9160 .addReg(Reg: RISCV::X7)
9161 .addReg(Reg: RISCV::X28)
9162 .addImm(Val: FunctionAddressOffset - 4)),
9163 // ld t3, (StaticChainOffset - 4)(t3)
9164 // Load the value of the static chain.
9165 GetEncoding(MCInstBuilder(RISCV::LD)
9166 .addReg(Reg: RISCV::X28)
9167 .addReg(Reg: RISCV::X28)
9168 .addImm(Val: StaticChainOffset - 4)),
9169 // jalr t2
9170 // Software-guarded jump to the function.
9171 GetEncoding(MCInstBuilder(RISCV::JALR)
9172 .addReg(Reg: RISCV::X0)
9173 .addReg(Reg: RISCV::X7)
9174 .addImm(Val: 0))});
9175 }
9176
9177 // Store encoded instructions.
9178 for (auto [Idx, Encoding] : llvm::enumerate(First&: Encodings)) {
9179 SDValue Addr = Idx > 0 ? DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i64, N1: Trmp,
9180 N2: DAG.getConstant(Val: Idx * 4, DL: dl, VT: MVT::i64))
9181 : Trmp;
9182 OutChains.push_back(Elt: DAG.getTruncStore(
9183 Chain: Root, dl, Val: DAG.getConstant(Val: Encoding, DL: dl, VT: MVT::i64), Ptr: Addr,
9184 PtrInfo: MachinePointerInfo(TrmpAddr, Idx * 4), SVT: MVT::i32));
9185 }
9186
9187 // Now store the variable part of the trampoline.
9188 SDValue FunctionAddress = Op.getOperand(i: 2);
9189 SDValue StaticChain = Op.getOperand(i: 3);
9190
9191 // Store the given static chain and function pointer in the trampoline buffer.
9192 struct OffsetValuePair {
9193 const unsigned Offset;
9194 const SDValue Value;
9195 SDValue Addr = SDValue(); // Used to cache the address.
9196 } OffsetValues[] = {
9197 {.Offset: StaticChainOffset, .Value: StaticChain},
9198 {.Offset: FunctionAddressOffset, .Value: FunctionAddress},
9199 };
9200 for (auto &OffsetValue : OffsetValues) {
9201 SDValue Addr =
9202 DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i64, N1: Trmp,
9203 N2: DAG.getConstant(Val: OffsetValue.Offset, DL: dl, VT: MVT::i64));
9204 OffsetValue.Addr = Addr;
9205 OutChains.push_back(
9206 Elt: DAG.getStore(Chain: Root, dl, Val: OffsetValue.Value, Ptr: Addr,
9207 PtrInfo: MachinePointerInfo(TrmpAddr, OffsetValue.Offset)));
9208 }
9209
9210 assert(OutChains.size() == StaticChainIdx + 2 &&
9211 "Size of OutChains mismatch");
9212 SDValue StoreToken = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: OutChains);
9213
9214 // The end of the trampoline's instructions is the same as the static chain
9215 // address that we computed earlier.
9216 SDValue EndOfTrmp = OffsetValues[0].Addr;
9217
9218 // Call clear cache on the trampoline instructions.
9219 SDValue Chain = DAG.getNode(Opcode: ISD::CLEAR_CACHE, DL: dl, VT: MVT::Other, N1: StoreToken,
9220 N2: Trmp, N3: EndOfTrmp);
9221
9222 return Chain;
9223}
9224
9225SDValue RISCVTargetLowering::lowerADJUST_TRAMPOLINE(SDValue Op,
9226 SelectionDAG &DAG) const {
9227 if (!Subtarget.is64Bit())
9228 llvm::reportFatalUsageError(reason: "Trampolines only implemented for RV64");
9229
9230 return Op.getOperand(i: 0);
9231}
9232
9233SDValue RISCVTargetLowering::lowerPARTIAL_REDUCE_MLA(SDValue Op,
9234 SelectionDAG &DAG) const {
9235 // Currently, only the vqdot, vqdotu, and vqdotsu cases (from zvqdotq) should be legal.
9236 // TODO: There are many other sub-cases we could potentially lower, are
9237 // any of them worthwhile? Ex: via vredsum, vwredsum, vwwmaccu, etc..
9238 SDLoc DL(Op);
9239 MVT VT = Op.getSimpleValueType();
9240 SDValue Accum = Op.getOperand(i: 0);
9241 assert(Accum.getSimpleValueType() == VT &&
9242 VT.getVectorElementType() == MVT::i32);
9243 SDValue A = Op.getOperand(i: 1);
9244 SDValue B = Op.getOperand(i: 2);
9245 MVT ArgVT = A.getSimpleValueType();
9246 assert(ArgVT == B.getSimpleValueType() &&
9247 ArgVT.getVectorElementType() == MVT::i8);
9248 (void)ArgVT;
9249
9250 // The zvqdotq pseudos are defined with sources and destination both
9251 // being i32. This cast is needed for correctness to avoid incorrect
9252 // .vx matching of i8 splats.
9253 A = DAG.getBitcast(VT, V: A);
9254 B = DAG.getBitcast(VT, V: B);
9255
9256 MVT ContainerVT = VT;
9257 if (VT.isFixedLengthVector()) {
9258 ContainerVT = getContainerForFixedLengthVector(VT);
9259 Accum = convertToScalableVector(VT: ContainerVT, V: Accum, DAG, Subtarget);
9260 A = convertToScalableVector(VT: ContainerVT, V: A, DAG, Subtarget);
9261 B = convertToScalableVector(VT: ContainerVT, V: B, DAG, Subtarget);
9262 }
9263
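// Map the generic partial-reduction opcode onto the matching Zvqdotq VL
// node: signed (vqdot), unsigned (vqdotu), or mixed signed/unsigned
// (vqdotsu).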
9264 unsigned Opc;
9265 switch (Op.getOpcode()) {
9266 case ISD::PARTIAL_REDUCE_SMLA:
9267 Opc = RISCVISD::VQDOT_VL;
9268 break;
9269 case ISD::PARTIAL_REDUCE_UMLA:
9270 Opc = RISCVISD::VQDOTU_VL;
9271 break;
9272 case ISD::PARTIAL_REDUCE_SUMLA:
9273 Opc = RISCVISD::VQDOTSU_VL;
9274 break;
9275 default:
9276 llvm_unreachable("Unexpected opcode");
9277 }
9278 auto [Mask, VL] = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
9279 SDValue Res = DAG.getNode(Opcode: Opc, DL, VT: ContainerVT, Ops: {A, B, Accum, Mask, VL});
9280 if (VT.isFixedLengthVector())
9281 Res = convertFromScalableVector(VT, V: Res, DAG, Subtarget);
9282 return Res;
9283}
9284
9285static SDValue getTargetNode(GlobalAddressSDNode *N, const SDLoc &DL, EVT Ty,
9286 SelectionDAG &DAG, unsigned Flags) {
9287 return DAG.getTargetGlobalAddress(GV: N->getGlobal(), DL, VT: Ty, offset: 0, TargetFlags: Flags);
9288}
9289
9290static SDValue getTargetNode(BlockAddressSDNode *N, const SDLoc &DL, EVT Ty,
9291 SelectionDAG &DAG, unsigned Flags) {
9292 return DAG.getTargetBlockAddress(BA: N->getBlockAddress(), VT: Ty, Offset: N->getOffset(),
9293 TargetFlags: Flags);
9294}
9295
9296static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty,
9297 SelectionDAG &DAG, unsigned Flags) {
9298 return DAG.getTargetConstantPool(C: N->getConstVal(), VT: Ty, Align: N->getAlign(),
9299 Offset: N->getOffset(), TargetFlags: Flags);
9300}
9301
9302static SDValue getTargetNode(JumpTableSDNode *N, const SDLoc &DL, EVT Ty,
9303 SelectionDAG &DAG, unsigned Flags) {
9304 return DAG.getTargetJumpTable(JTI: N->getIndex(), VT: Ty, TargetFlags: Flags);
9305}
9306
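// For the large code model, materialize the address by placing it in the
// constant pool and loading it through a pc-relative address (LLA) of the
// pool entry.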
9307static SDValue getLargeGlobalAddress(GlobalAddressSDNode *N, const SDLoc &DL,
9308 EVT Ty, SelectionDAG &DAG) {
9309 RISCVConstantPoolValue *CPV = RISCVConstantPoolValue::Create(GV: N->getGlobal());
9310 SDValue CPAddr = DAG.getTargetConstantPool(C: CPV, VT: Ty, Align: Align(8));
9311 SDValue LC = DAG.getNode(Opcode: RISCVISD::LLA, DL, VT: Ty, Operand: CPAddr);
9312 return DAG.getLoad(
9313 VT: Ty, dl: DL, Chain: DAG.getEntryNode(), Ptr: LC,
9314 PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction()));
9315}
9316
9317static SDValue getLargeExternalSymbol(ExternalSymbolSDNode *N, const SDLoc &DL,
9318 EVT Ty, SelectionDAG &DAG) {
9319 RISCVConstantPoolValue *CPV =
9320 RISCVConstantPoolValue::Create(C&: *DAG.getContext(), S: N->getSymbol());
9321 SDValue CPAddr = DAG.getTargetConstantPool(C: CPV, VT: Ty, Align: Align(8));
9322 SDValue LC = DAG.getNode(Opcode: RISCVISD::LLA, DL, VT: Ty, Operand: CPAddr);
9323 return DAG.getLoad(
9324 VT: Ty, dl: DL, Chain: DAG.getEntryNode(), Ptr: LC,
9325 PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction()));
9326}
9327
9328template <class NodeTy>
9329SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
9330 bool IsLocal, bool IsExternWeak) const {
9331 SDLoc DL(N);
9332 EVT Ty = getPointerTy(DL: DAG.getDataLayout());
9333
9334 // When HWASAN is used and tagging of global variables is enabled
9335 // they should be accessed via the GOT, since the tagged address of a global
9336 // is incompatible with existing code models. This also applies to non-pic
9337 // mode.
9338 if (isPositionIndependent() || Subtarget.allowTaggedGlobals()) {
9339 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
9340 if (IsLocal && !Subtarget.allowTaggedGlobals())
9341 // Use PC-relative addressing to access the symbol. This generates the
9342 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
9343 // %pcrel_lo(auipc)).
9344 return DAG.getNode(Opcode: RISCVISD::LLA, DL, VT: Ty, Operand: Addr);
9345
9346 // Use PC-relative addressing to access the GOT for this symbol, then load
9347 // the address from the GOT. This generates the pattern (PseudoLGA sym),
9348 // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
9349 SDValue Load =
9350 SDValue(DAG.getMachineNode(Opcode: RISCV::PseudoLGA, dl: DL, VT: Ty, Op1: Addr), 0);
9351 MachineFunction &MF = DAG.getMachineFunction();
9352 MachineMemOperand *MemOp = MF.getMachineMemOperand(
9353 PtrInfo: MachinePointerInfo::getGOT(MF),
9354 f: MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
9355 MachineMemOperand::MOInvariant,
9356 MemTy: LLT(Ty.getSimpleVT()), base_alignment: Align(Ty.getFixedSizeInBits() / 8));
9357 DAG.setNodeMemRefs(N: cast<MachineSDNode>(Val: Load.getNode()), NewMemRefs: {MemOp});
9358 return Load;
9359 }
9360
9361 switch (getTargetMachine().getCodeModel()) {
9362 default:
9363 reportFatalUsageError(reason: "Unsupported code model for lowering");
9364 case CodeModel::Small: {
9365 // Generate a sequence for accessing addresses within the first 2 GiB of
9366 // address space.
9367 if (Subtarget.hasVendorXqcili()) {
9368 // Use QC.E.LI to generate the address, as this is easier to relax than
9369 // LUI/ADDI.
9370 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
9371 return DAG.getNode(Opcode: RISCVISD::QC_E_LI, DL, VT: Ty, Operand: Addr);
9372 }
9373
9374 // This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
9375 SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
9376 SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
9377 SDValue MNHi = DAG.getNode(Opcode: RISCVISD::HI, DL, VT: Ty, Operand: AddrHi);
9378 return DAG.getNode(Opcode: RISCVISD::ADD_LO, DL, VT: Ty, N1: MNHi, N2: AddrLo);
9379 }
9380 case CodeModel::Medium: {
9381 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
9382 if (IsExternWeak) {
9383 // An extern weak symbol may be undefined, i.e. have value 0, which may
9384 // not be within 2GiB of PC, so use GOT-indirect addressing to access the
9385 // symbol. This generates the pattern (PseudoLGA sym), which expands to
9386 // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
9387 SDValue Load =
9388 SDValue(DAG.getMachineNode(Opcode: RISCV::PseudoLGA, dl: DL, VT: Ty, Op1: Addr), 0);
9389 MachineFunction &MF = DAG.getMachineFunction();
9390 MachineMemOperand *MemOp = MF.getMachineMemOperand(
9391 PtrInfo: MachinePointerInfo::getGOT(MF),
9392 f: MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
9393 MachineMemOperand::MOInvariant,
9394 MemTy: LLT(Ty.getSimpleVT()), base_alignment: Align(Ty.getFixedSizeInBits() / 8));
9395 DAG.setNodeMemRefs(N: cast<MachineSDNode>(Val: Load.getNode()), NewMemRefs: {MemOp});
9396 return Load;
9397 }
9398
9399 // Generate a sequence for accessing addresses within any 2GiB range within
9400 // the address space. This generates the pattern (PseudoLLA sym), which
9401 // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
9402 return DAG.getNode(Opcode: RISCVISD::LLA, DL, VT: Ty, Operand: Addr);
9403 }
9404 case CodeModel::Large: {
9405 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N))
9406 return getLargeGlobalAddress(N: G, DL, Ty, DAG);
9407
    // Use PC-relative addressing for other node types.
9409 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
9410 return DAG.getNode(Opcode: RISCVISD::LLA, DL, VT: Ty, Operand: Addr);
9411 }
9412 }
9413}
9414
9415SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
9416 SelectionDAG &DAG) const {
9417 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Val&: Op);
9418 assert(N->getOffset() == 0 && "unexpected offset in global node");
9419 const GlobalValue *GV = N->getGlobal();
9420 bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(GV);
9421 return getAddr(N, DAG, IsLocal, IsExternWeak: GV->hasExternalWeakLinkage());
9422}
9423
9424SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
9425 SelectionDAG &DAG) const {
9426 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Val&: Op);
9427
9428 return getAddr(N, DAG);
9429}
9430
9431SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
9432 SelectionDAG &DAG) const {
9433 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Val&: Op);
9434
9435 return getAddr(N, DAG);
9436}
9437
9438SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
9439 SelectionDAG &DAG) const {
9440 JumpTableSDNode *N = cast<JumpTableSDNode>(Val&: Op);
9441
9442 return getAddr(N, DAG);
9443}
9444
9445SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
9446 SelectionDAG &DAG,
9447 bool UseGOT) const {
9448 SDLoc DL(N);
9449 EVT Ty = getPointerTy(DL: DAG.getDataLayout());
9450 const GlobalValue *GV = N->getGlobal();
9451 MVT XLenVT = Subtarget.getXLenVT();
9452
9453 if (UseGOT) {
9454 // Use PC-relative addressing to access the GOT for this TLS symbol, then
9455 // load the address from the GOT and add the thread pointer. This generates
9456 // the pattern (PseudoLA_TLS_IE sym), which expands to
9457 // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
9458 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, VT: Ty, offset: 0, TargetFlags: 0);
9459 SDValue Load =
9460 SDValue(DAG.getMachineNode(Opcode: RISCV::PseudoLA_TLS_IE, dl: DL, VT: Ty, Op1: Addr), 0);
9461 MachineFunction &MF = DAG.getMachineFunction();
9462 MachineMemOperand *MemOp = MF.getMachineMemOperand(
9463 PtrInfo: MachinePointerInfo::getGOT(MF),
9464 f: MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
9465 MachineMemOperand::MOInvariant,
9466 MemTy: LLT(Ty.getSimpleVT()), base_alignment: Align(Ty.getFixedSizeInBits() / 8));
9467 DAG.setNodeMemRefs(N: cast<MachineSDNode>(Val: Load.getNode()), NewMemRefs: {MemOp});
9468
9469 // Add the thread pointer.
9470 SDValue TPReg = DAG.getRegister(Reg: RISCV::X4, VT: XLenVT);
9471 return DAG.getNode(Opcode: ISD::ADD, DL, VT: Ty, N1: Load, N2: TPReg);
9472 }
9473
9474 // Generate a sequence for accessing the address relative to the thread
9475 // pointer, with the appropriate adjustment for the thread pointer offset.
9476 // This generates the pattern
9477 // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
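  // i.e., roughly (tmp and rd are placeholder registers):
  //   lui   tmp, %tprel_hi(sym)
  //   add   tmp, tmp, tp, %tprel_add(sym)
  //   addi  rd, tmp, %tprel_lo(sym)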
9478 SDValue AddrHi =
9479 DAG.getTargetGlobalAddress(GV, DL, VT: Ty, offset: 0, TargetFlags: RISCVII::MO_TPREL_HI);
9480 SDValue AddrAdd =
9481 DAG.getTargetGlobalAddress(GV, DL, VT: Ty, offset: 0, TargetFlags: RISCVII::MO_TPREL_ADD);
9482 SDValue AddrLo =
9483 DAG.getTargetGlobalAddress(GV, DL, VT: Ty, offset: 0, TargetFlags: RISCVII::MO_TPREL_LO);
9484
9485 SDValue MNHi = DAG.getNode(Opcode: RISCVISD::HI, DL, VT: Ty, Operand: AddrHi);
9486 SDValue TPReg = DAG.getRegister(Reg: RISCV::X4, VT: XLenVT);
9487 SDValue MNAdd =
9488 DAG.getNode(Opcode: RISCVISD::ADD_TPREL, DL, VT: Ty, N1: MNHi, N2: TPReg, N3: AddrAdd);
9489 return DAG.getNode(Opcode: RISCVISD::ADD_LO, DL, VT: Ty, N1: MNAdd, N2: AddrLo);
9490}
9491
9492SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
9493 SelectionDAG &DAG) const {
9494 SDLoc DL(N);
9495 EVT Ty = getPointerTy(DL: DAG.getDataLayout());
9496 IntegerType *CallTy = Type::getIntNTy(C&: *DAG.getContext(), N: Ty.getSizeInBits());
9497 const GlobalValue *GV = N->getGlobal();
9498
9499 // Use a PC-relative addressing mode to access the global dynamic GOT address.
9500 // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
9501 // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
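  // The resulting address is then passed to __tls_get_addr below; the full
  // sequence is roughly:
  //   auipc a0, %tls_gd_pcrel_hi(sym)
  //   addi  a0, a0, %pcrel_lo(label)
  //   call  __tls_get_addr@plt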
9502 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, VT: Ty, offset: 0, TargetFlags: 0);
9503 SDValue Load =
9504 SDValue(DAG.getMachineNode(Opcode: RISCV::PseudoLA_TLS_GD, dl: DL, VT: Ty, Op1: Addr), 0);
9505
9506 // Prepare argument list to generate call.
9507 ArgListTy Args;
9508 Args.emplace_back(args&: Load, args&: CallTy);
9509
9510 // Setup call to __tls_get_addr.
9511 TargetLowering::CallLoweringInfo CLI(DAG);
9512 CLI.setDebugLoc(DL)
9513 .setChain(DAG.getEntryNode())
9514 .setLibCallee(CC: CallingConv::C, ResultType: CallTy,
9515 Target: DAG.getExternalSymbol(Sym: "__tls_get_addr", VT: Ty),
9516 ArgsList: std::move(Args));
9517
9518 return LowerCallTo(CLI).first;
9519}
9520
9521SDValue RISCVTargetLowering::getTLSDescAddr(GlobalAddressSDNode *N,
9522 SelectionDAG &DAG) const {
9523 SDLoc DL(N);
9524 EVT Ty = getPointerTy(DL: DAG.getDataLayout());
9525 const GlobalValue *GV = N->getGlobal();
9526
9527 // Use a PC-relative addressing mode to access the global dynamic GOT address.
9528 // This generates the pattern (PseudoLA_TLSDESC sym), which expands to
9529 //
9530 // auipc tX, %tlsdesc_hi(symbol) // R_RISCV_TLSDESC_HI20(symbol)
9531 // lw tY, tX, %tlsdesc_load_lo(label) // R_RISCV_TLSDESC_LOAD_LO12(label)
9532 // addi a0, tX, %tlsdesc_add_lo(label) // R_RISCV_TLSDESC_ADD_LO12(label)
9533 // jalr t0, tY // R_RISCV_TLSDESC_CALL(label)
9534 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, VT: Ty, offset: 0, TargetFlags: 0);
9535 return SDValue(DAG.getMachineNode(Opcode: RISCV::PseudoLA_TLSDESC, dl: DL, VT: Ty, Op1: Addr), 0);
9536}
9537
9538SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
9539 SelectionDAG &DAG) const {
9540 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Val&: Op);
9541 assert(N->getOffset() == 0 && "unexpected offset in global node");
9542
9543 if (DAG.getTarget().useEmulatedTLS())
9544 return LowerToTLSEmulatedModel(GA: N, DAG);
9545
9546 TLSModel::Model Model = getTargetMachine().getTLSModel(GV: N->getGlobal());
9547
9548 if (DAG.getMachineFunction().getFunction().getCallingConv() ==
9549 CallingConv::GHC)
9550 reportFatalUsageError(reason: "In GHC calling convention TLS is not supported");
9551
9552 SDValue Addr;
9553 switch (Model) {
9554 case TLSModel::LocalExec:
9555 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
9556 break;
9557 case TLSModel::InitialExec:
9558 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
9559 break;
9560 case TLSModel::LocalDynamic:
9561 case TLSModel::GeneralDynamic:
9562 Addr = DAG.getTarget().useTLSDESC() ? getTLSDescAddr(N, DAG)
9563 : getDynamicTLSAddr(N, DAG);
9564 break;
9565 }
9566
9567 return Addr;
9568}
9569
9570// Return true if Val is equal to (setcc LHS, RHS, CC).
9571// Return false if Val is the inverse of (setcc LHS, RHS, CC).
9572// Otherwise, return std::nullopt.
9573static std::optional<bool> matchSetCC(SDValue LHS, SDValue RHS,
9574 ISD::CondCode CC, SDValue Val) {
9575 assert(Val->getOpcode() == ISD::SETCC);
9576 SDValue LHS2 = Val.getOperand(i: 0);
9577 SDValue RHS2 = Val.getOperand(i: 1);
9578 ISD::CondCode CC2 = cast<CondCodeSDNode>(Val: Val.getOperand(i: 2))->get();
9579
9580 if (LHS == LHS2 && RHS == RHS2) {
9581 if (CC == CC2)
9582 return true;
9583 if (CC == ISD::getSetCCInverse(Operation: CC2, Type: LHS2.getValueType()))
9584 return false;
9585 } else if (LHS == RHS2 && RHS == LHS2) {
9586 CC2 = ISD::getSetCCSwappedOperands(Operation: CC2);
9587 if (CC == CC2)
9588 return true;
9589 if (CC == ISD::getSetCCInverse(Operation: CC2, Type: LHS2.getValueType()))
9590 return false;
9591 }
9592
9593 return std::nullopt;
9594}
9595
9596static bool isSimm12Constant(SDValue V) {
9597 return isa<ConstantSDNode>(Val: V) && V->getAsAPIntVal().isSignedIntN(N: 12);
9598}
9599
9600static SDValue lowerSelectToBinOp(SDNode *N, SelectionDAG &DAG,
9601 const RISCVSubtarget &Subtarget) {
9602 SDValue CondV = N->getOperand(Num: 0);
9603 SDValue TrueV = N->getOperand(Num: 1);
9604 SDValue FalseV = N->getOperand(Num: 2);
9605 MVT VT = N->getSimpleValueType(ResNo: 0);
9606 SDLoc DL(N);
9607
9608 if (!Subtarget.hasConditionalMoveFusion()) {
9609 // (select c, -1, y) -> -c | y
9610 if (isAllOnesConstant(V: TrueV)) {
9611 SDValue Neg = DAG.getNegative(Val: CondV, DL, VT);
9612 return DAG.getNode(Opcode: ISD::OR, DL, VT, N1: Neg, N2: DAG.getFreeze(V: FalseV));
9613 }
9614 // (select c, y, -1) -> (c-1) | y
9615 if (isAllOnesConstant(V: FalseV)) {
9616 SDValue Neg = DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: CondV,
9617 N2: DAG.getAllOnesConstant(DL, VT));
9618 return DAG.getNode(Opcode: ISD::OR, DL, VT, N1: Neg, N2: DAG.getFreeze(V: TrueV));
9619 }
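    // Both folds rely on c being 0 or 1: -c is then 0 or all-ones and (c - 1)
    // is all-ones or 0, so the OR yields either the other (frozen) operand or
    // -1, matching the original select.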
9620
9621 const bool HasCZero = VT.isScalarInteger() && Subtarget.hasCZEROLike();
9622
9623 // (select c, 0, y) -> (c-1) & y
9624 if (isNullConstant(V: TrueV) && (!HasCZero || isSimm12Constant(V: FalseV))) {
9625 SDValue Neg =
9626 DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: CondV, N2: DAG.getAllOnesConstant(DL, VT));
9627 return DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Neg, N2: DAG.getFreeze(V: FalseV));
9628 }
9629 if (isNullConstant(V: FalseV)) {
9630 // (select c, (1 << ShAmount) + 1, 0) -> (c << ShAmount) + c
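      // e.g. with Zba, (select c, 9, 0) becomes (sh3add c, c), i.e. (c << 3) + c.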
9631 if (auto *TrueC = dyn_cast<ConstantSDNode>(Val&: TrueV)) {
9632 uint64_t TrueM1 = TrueC->getZExtValue() - 1;
9633 if (isPowerOf2_64(Value: TrueM1)) {
9634 unsigned ShAmount = Log2_64(Value: TrueM1);
9635 if (Subtarget.hasShlAdd(ShAmt: ShAmount))
9636 return DAG.getNode(Opcode: RISCVISD::SHL_ADD, DL, VT, N1: CondV,
9637 N2: DAG.getTargetConstant(Val: ShAmount, DL, VT), N3: CondV);
9638 }
9639 }
9640 // (select c, y, 0) -> -c & y
9641 if (!HasCZero || isSimm12Constant(V: TrueV)) {
9642 SDValue Neg = DAG.getNegative(Val: CondV, DL, VT);
9643 return DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Neg, N2: DAG.getFreeze(V: TrueV));
9644 }
9645 }
9646 }
9647
9648 // select c, ~x, x --> xor -c, x
9649 if (isa<ConstantSDNode>(Val: TrueV) && isa<ConstantSDNode>(Val: FalseV)) {
9650 const APInt &TrueVal = TrueV->getAsAPIntVal();
9651 const APInt &FalseVal = FalseV->getAsAPIntVal();
9652 if (~TrueVal == FalseVal) {
9653 SDValue Neg = DAG.getNegative(Val: CondV, DL, VT);
9654 return DAG.getNode(Opcode: ISD::XOR, DL, VT, N1: Neg, N2: FalseV);
9655 }
9656 }
9657
9658 // Try to fold (select (setcc lhs, rhs, cc), truev, falsev) into bitwise ops
9659 // when both truev and falsev are also setcc.
9660 if (CondV.getOpcode() == ISD::SETCC && TrueV.getOpcode() == ISD::SETCC &&
9661 FalseV.getOpcode() == ISD::SETCC) {
9662 SDValue LHS = CondV.getOperand(i: 0);
9663 SDValue RHS = CondV.getOperand(i: 1);
9664 ISD::CondCode CC = cast<CondCodeSDNode>(Val: CondV.getOperand(i: 2))->get();
9665
9666 // (select x, x, y) -> x | y
9667 // (select !x, x, y) -> x & y
9668 if (std::optional<bool> MatchResult = matchSetCC(LHS, RHS, CC, Val: TrueV)) {
9669 return DAG.getNode(Opcode: *MatchResult ? ISD::OR : ISD::AND, DL, VT, N1: TrueV,
9670 N2: DAG.getFreeze(V: FalseV));
9671 }
9672 // (select x, y, x) -> x & y
9673 // (select !x, y, x) -> x | y
9674 if (std::optional<bool> MatchResult = matchSetCC(LHS, RHS, CC, Val: FalseV)) {
9675 return DAG.getNode(Opcode: *MatchResult ? ISD::AND : ISD::OR, DL, VT,
9676 N1: DAG.getFreeze(V: TrueV), N2: FalseV);
9677 }
9678 }
9679
9680 return SDValue();
9681}
9682
9683// Transform `binOp (select cond, x, c0), c1` where `c0` and `c1` are constants
9684// into `select cond, binOp(x, c1), binOp(c0, c1)` if profitable.
9685// For now we only consider transformation profitable if `binOp(c0, c1)` ends up
9686// being `0` or `-1`. In such cases we can replace `select` with `and`.
9687// TODO: Should we also do this if `binOp(c0, c1)` is cheaper to materialize
9688// than `c0`?
9689static SDValue
9690foldBinOpIntoSelectIfProfitable(SDNode *BO, SelectionDAG &DAG,
9691 const RISCVSubtarget &Subtarget) {
9692 if (Subtarget.hasShortForwardBranchIALU())
9693 return SDValue();
9694
9695 unsigned SelOpNo = 0;
9696 SDValue Sel = BO->getOperand(Num: 0);
9697 if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse()) {
9698 SelOpNo = 1;
9699 Sel = BO->getOperand(Num: 1);
9700 }
9701
9702 if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse())
9703 return SDValue();
9704
9705 unsigned ConstSelOpNo = 1;
9706 unsigned OtherSelOpNo = 2;
9707 if (!isa<ConstantSDNode>(Val: Sel->getOperand(Num: ConstSelOpNo))) {
9708 ConstSelOpNo = 2;
9709 OtherSelOpNo = 1;
9710 }
9711 SDValue ConstSelOp = Sel->getOperand(Num: ConstSelOpNo);
9712 ConstantSDNode *ConstSelOpNode = dyn_cast<ConstantSDNode>(Val&: ConstSelOp);
9713 if (!ConstSelOpNode || ConstSelOpNode->isOpaque())
9714 return SDValue();
9715
9716 SDValue ConstBinOp = BO->getOperand(Num: SelOpNo ^ 1);
9717 ConstantSDNode *ConstBinOpNode = dyn_cast<ConstantSDNode>(Val&: ConstBinOp);
9718 if (!ConstBinOpNode || ConstBinOpNode->isOpaque())
9719 return SDValue();
9720
9721 SDLoc DL(Sel);
9722 EVT VT = BO->getValueType(ResNo: 0);
9723
9724 SDValue NewConstOps[2] = {ConstSelOp, ConstBinOp};
9725 if (SelOpNo == 1)
9726 std::swap(a&: NewConstOps[0], b&: NewConstOps[1]);
9727
9728 SDValue NewConstOp =
9729 DAG.FoldConstantArithmetic(Opcode: BO->getOpcode(), DL, VT, Ops: NewConstOps);
9730 if (!NewConstOp)
9731 return SDValue();
9732
9733 const APInt &NewConstAPInt = NewConstOp->getAsAPIntVal();
9734 if (!NewConstAPInt.isZero() && !NewConstAPInt.isAllOnes())
9735 return SDValue();
9736
9737 SDValue OtherSelOp = Sel->getOperand(Num: OtherSelOpNo);
9738 SDValue NewNonConstOps[2] = {OtherSelOp, ConstBinOp};
9739 if (SelOpNo == 1)
9740 std::swap(a&: NewNonConstOps[0], b&: NewNonConstOps[1]);
9741 SDValue NewNonConstOp = DAG.getNode(Opcode: BO->getOpcode(), DL, VT, Ops: NewNonConstOps);
9742
9743 SDValue NewT = (ConstSelOpNo == 1) ? NewConstOp : NewNonConstOp;
9744 SDValue NewF = (ConstSelOpNo == 1) ? NewNonConstOp : NewConstOp;
9745 return DAG.getSelect(DL, VT, Cond: Sel.getOperand(i: 0), LHS: NewT, RHS: NewF);
9746}
9747
9748SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
9749 SDValue CondV = Op.getOperand(i: 0);
9750 SDValue TrueV = Op.getOperand(i: 1);
9751 SDValue FalseV = Op.getOperand(i: 2);
9752 SDLoc DL(Op);
9753 MVT VT = Op.getSimpleValueType();
9754 MVT XLenVT = Subtarget.getXLenVT();
9755
9756 // Handle P extension packed types by bitcasting to XLenVT for selection,
9757 // e.g. select i1 %cond, <2 x i16> %TrueV, <2 x i16> %FalseV
  // These types fit in a single GPR, so they can use the same selection
  // mechanism as scalars.
9760 if (Subtarget.isPExtPackedType(VT)) {
9761 SDValue TrueVInt = DAG.getBitcast(VT: XLenVT, V: TrueV);
9762 SDValue FalseVInt = DAG.getBitcast(VT: XLenVT, V: FalseV);
9763 SDValue ResultInt =
9764 DAG.getNode(Opcode: ISD::SELECT, DL, VT: XLenVT, N1: CondV, N2: TrueVInt, N3: FalseVInt);
9765 return DAG.getBitcast(VT, V: ResultInt);
9766 }
9767
9768 // Lower vector SELECTs to VSELECTs by splatting the condition.
9769 if (VT.isVector()) {
9770 MVT SplatCondVT = VT.changeVectorElementType(EltVT: MVT::i1);
9771 SDValue CondSplat = DAG.getSplat(VT: SplatCondVT, DL, Op: CondV);
9772 return DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: CondSplat, N2: TrueV, N3: FalseV);
9773 }
9774
9775 // Try some other optimizations before falling back to generic lowering.
9776 if (SDValue V = lowerSelectToBinOp(N: Op.getNode(), DAG, Subtarget))
9777 return V;
9778
  // When there is no cost for moving between GPRs and FPRs (i.e. with Zfinx),
  // we can use a Zicond-based select for floating-point values when CondV is
  // an integer.
9781 bool FPinGPR = Subtarget.hasStdExtZfinx();
9782
  // Only FP types that fit in a single GPR can be handled without splitting
  // the value into hi/lo parts.
9784 bool FitsInGPR = TypeSize::isKnownLE(LHS: VT.getSizeInBits(),
9785 RHS: Subtarget.getXLenVT().getSizeInBits());
9786
9787 bool UseZicondForFPSel = Subtarget.hasStdExtZicond() && FPinGPR &&
9788 VT.isFloatingPoint() && FitsInGPR;
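  // For example, on RV32 with Zfinx and Zicond, (select i1 %c, float %a,
  // float %b) is bitcast to i32 and handled by the integer CZERO-based
  // lowering below, with no GPR<->FPR moves.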
9789
  if (UseZicondForFPSel) {
9792 auto CastToInt = [&](SDValue V) -> SDValue {
9793 // Treat +0.0 as int 0 to enable single 'czero' instruction generation.
9794 if (isNullFPConstant(V))
9795 return DAG.getConstant(Val: 0, DL, VT: XLenVT);
9796
9797 if (VT == MVT::f16)
9798 return DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTH, DL, VT: XLenVT, Operand: V);
9799
9800 if (VT == MVT::f32 && Subtarget.is64Bit())
9801 return DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTW_RV64, DL, VT: XLenVT, Operand: V);
9802
9803 return DAG.getBitcast(VT: XLenVT, V);
9804 };
9805
9806 SDValue TrueVInt = CastToInt(TrueV);
9807 SDValue FalseVInt = CastToInt(FalseV);
9808
9809 // Emit integer SELECT (lowers to Zicond)
9810 SDValue ResultInt =
9811 DAG.getNode(Opcode: ISD::SELECT, DL, VT: XLenVT, N1: CondV, N2: TrueVInt, N3: FalseVInt);
9812
9813 // Convert back to floating VT
9814 if (VT == MVT::f32 && Subtarget.is64Bit())
9815 return DAG.getNode(Opcode: RISCVISD::FMV_W_X_RV64, DL, VT, Operand: ResultInt);
9816
9817 if (VT == MVT::f16)
9818 return DAG.getNode(Opcode: RISCVISD::FMV_H_X, DL, VT, Operand: ResultInt);
9819
9820 return DAG.getBitcast(VT, V: ResultInt);
9821 }
9822
9823 // When Zicond or XVentanaCondOps is present, emit CZERO_EQZ and CZERO_NEZ
9824 // nodes to implement the SELECT. Performing the lowering here allows for
9825 // greater control over when CZERO_{EQZ/NEZ} are used vs another branchless
9826 // sequence or RISCVISD::SELECT_CC node (branch-based select).
  if (Subtarget.hasCZEROLike() && VT.isScalarInteger()) {
9829 // (select c, t, 0) -> (czero_eqz t, c)
9830 if (isNullConstant(V: FalseV))
9831 return DAG.getNode(Opcode: RISCVISD::CZERO_EQZ, DL, VT, N1: TrueV, N2: CondV);
9832 // (select c, 0, f) -> (czero_nez f, c)
9833 if (isNullConstant(V: TrueV))
9834 return DAG.getNode(Opcode: RISCVISD::CZERO_NEZ, DL, VT, N1: FalseV, N2: CondV);
9835
    // Check whether a given operation is a 'NOT'; if so, return the operand
    // being inverted.
9838 auto getNotOperand = [](const SDValue &Op) -> std::optional<const SDValue> {
9839 using namespace llvm::SDPatternMatch;
9840 SDValue Xor;
9841 if (sd_match(N: Op, P: m_OneUse(P: m_Not(V: m_Value(N&: Xor))))) {
9842 return Xor;
9843 }
9844 return std::nullopt;
9845 };
9846 // (select c, (and f, x), f) -> (or (and f, x), (czero_nez f, c))
9847 // (select c, (and f, ~x), f) -> (andn f, (czero_eqz x, c))
9848 if (TrueV.getOpcode() == ISD::AND &&
9849 (TrueV.getOperand(i: 0) == FalseV || TrueV.getOperand(i: 1) == FalseV)) {
9850 auto NotOperand = (TrueV.getOperand(i: 0) == FalseV)
9851 ? getNotOperand(TrueV.getOperand(i: 1))
9852 : getNotOperand(TrueV.getOperand(i: 0));
9853 if (NotOperand) {
9854 SDValue CMOV =
9855 DAG.getNode(Opcode: RISCVISD::CZERO_EQZ, DL, VT, N1: *NotOperand, N2: CondV);
9856 SDValue NOT = DAG.getNOT(DL, Val: CMOV, VT);
9857 return DAG.getNode(Opcode: ISD::AND, DL, VT, N1: FalseV, N2: NOT);
9858 }
9859 return DAG.getNode(
9860 Opcode: ISD::OR, DL, VT, N1: TrueV,
9861 N2: DAG.getNode(Opcode: RISCVISD::CZERO_NEZ, DL, VT, N1: FalseV, N2: CondV));
9862 }
9863
9864 // (select c, t, (and t, x)) -> (or (czero_eqz t, c), (and t, x))
9865 // (select c, t, (and t, ~x)) -> (andn t, (czero_nez x, c))
9866 if (FalseV.getOpcode() == ISD::AND &&
9867 (FalseV.getOperand(i: 0) == TrueV || FalseV.getOperand(i: 1) == TrueV)) {
9868 auto NotOperand = (FalseV.getOperand(i: 0) == TrueV)
9869 ? getNotOperand(FalseV.getOperand(i: 1))
9870 : getNotOperand(FalseV.getOperand(i: 0));
9871 if (NotOperand) {
9872 SDValue CMOV =
9873 DAG.getNode(Opcode: RISCVISD::CZERO_NEZ, DL, VT, N1: *NotOperand, N2: CondV);
9874 SDValue NOT = DAG.getNOT(DL, Val: CMOV, VT);
9875 return DAG.getNode(Opcode: ISD::AND, DL, VT, N1: TrueV, N2: NOT);
9876 }
9877 return DAG.getNode(
9878 Opcode: ISD::OR, DL, VT, N1: FalseV,
9879 N2: DAG.getNode(Opcode: RISCVISD::CZERO_EQZ, DL, VT, N1: TrueV, N2: CondV));
9880 }
9881
9882 // (select c, c1, c2) -> (add (czero_nez c2 - c1, c), c1)
9883 // (select c, c1, c2) -> (add (czero_eqz c1 - c2, c), c2)
9884 if (isa<ConstantSDNode>(Val: TrueV) && isa<ConstantSDNode>(Val: FalseV)) {
9885 const APInt &TrueVal = TrueV->getAsAPIntVal();
9886 const APInt &FalseVal = FalseV->getAsAPIntVal();
9887
9888 // Prefer these over Zicond to avoid materializing an immediate:
9889 // (select (x < 0), y, z) -> x >> (XLEN - 1) & (y - z) + z
9890 // (select (x > -1), z, y) -> x >> (XLEN - 1) & (y - z) + z
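      // The arithmetic shift yields 0 or -1, so the AND keeps either 0 or
      // (y - z), and the final ADD produces z or y respectively.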
9891 if (CondV.getOpcode() == ISD::SETCC &&
9892 CondV.getOperand(i: 0).getValueType() == VT && CondV.hasOneUse()) {
9893 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val: CondV.getOperand(i: 2))->get();
9894 if ((CCVal == ISD::SETLT && isNullConstant(V: CondV.getOperand(i: 1))) ||
9895 (CCVal == ISD::SETGT && isAllOnesConstant(V: CondV.getOperand(i: 1)))) {
9896 int64_t TrueImm = TrueVal.getSExtValue();
9897 int64_t FalseImm = FalseVal.getSExtValue();
9898 if (CCVal == ISD::SETGT)
9899 std::swap(a&: TrueImm, b&: FalseImm);
9900 if (isInt<12>(x: TrueImm) && isInt<12>(x: FalseImm) &&
9901 isInt<12>(x: TrueImm - FalseImm)) {
9902 SDValue SRA =
9903 DAG.getNode(Opcode: ISD::SRA, DL, VT, N1: CondV.getOperand(i: 0),
9904 N2: DAG.getConstant(Val: Subtarget.getXLen() - 1, DL, VT));
9905 SDValue AND =
9906 DAG.getNode(Opcode: ISD::AND, DL, VT, N1: SRA,
9907 N2: DAG.getSignedConstant(Val: TrueImm - FalseImm, DL, VT));
9908 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: AND,
9909 N2: DAG.getSignedConstant(Val: FalseImm, DL, VT));
9910 }
9911 }
9912 }
9913
      // Use SHL/ADDI (and possibly an XORI) to avoid having to materialize
      // a constant in a register.
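      // e.g. (select c, 17, 1) -> (add (shl c, 4), 1), and (select c, 1, 17)
      // additionally inverts c first.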
9916 if ((TrueVal - FalseVal).isPowerOf2() && FalseVal.isSignedIntN(N: 12)) {
9917 SDValue Log2 = DAG.getConstant(Val: (TrueVal - FalseVal).logBase2(), DL, VT);
9918 SDValue BitDiff = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: CondV, N2: Log2);
9919 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: FalseV, N2: BitDiff);
9920 }
9921 if ((FalseVal - TrueVal).isPowerOf2() && TrueVal.isSignedIntN(N: 12)) {
9922 SDValue Log2 = DAG.getConstant(Val: (FalseVal - TrueVal).logBase2(), DL, VT);
9923 CondV = DAG.getLogicalNOT(DL, Val: CondV, VT: CondV->getValueType(ResNo: 0));
9924 SDValue BitDiff = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: CondV, N2: Log2);
9925 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: TrueV, N2: BitDiff);
9926 }
9927
9928 auto getCost = [&](const APInt &Delta, const APInt &Addend) {
9929 const int DeltaCost = RISCVMatInt::getIntMatCost(
9930 Val: Delta, Size: Subtarget.getXLen(), STI: Subtarget, /*CompressionCost=*/true);
        // Does the addend fold into an ADDI?
9932 if (Addend.isSignedIntN(N: 12))
9933 return DeltaCost;
9934 const int AddendCost = RISCVMatInt::getIntMatCost(
9935 Val: Addend, Size: Subtarget.getXLen(), STI: Subtarget, /*CompressionCost=*/true);
9936 return AddendCost + DeltaCost;
9937 };
9938 bool IsCZERO_NEZ = getCost(FalseVal - TrueVal, TrueVal) <=
9939 getCost(TrueVal - FalseVal, FalseVal);
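      // e.g. (select c, 10, 3) picks CZERO_NEZ and emits roughly:
      //   li t, -7 ; czero.nez t, t, c ; addi rd, t, 10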
9940 SDValue LHSVal = DAG.getConstant(
9941 Val: IsCZERO_NEZ ? FalseVal - TrueVal : TrueVal - FalseVal, DL, VT);
9942 SDValue CMOV =
9943 DAG.getNode(Opcode: IsCZERO_NEZ ? RISCVISD::CZERO_NEZ : RISCVISD::CZERO_EQZ,
9944 DL, VT, N1: LHSVal, N2: CondV);
9945 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: CMOV, N2: IsCZERO_NEZ ? TrueV : FalseV);
9946 }
9947
9948 // (select c, c1, t) -> (add (czero_nez t - c1, c), c1)
9949 // (select c, t, c1) -> (add (czero_eqz t - c1, c), c1)
9950 if (isa<ConstantSDNode>(Val: TrueV) != isa<ConstantSDNode>(Val: FalseV)) {
9951 bool IsCZERO_NEZ = isa<ConstantSDNode>(Val: TrueV);
9952 SDValue ConstVal = IsCZERO_NEZ ? TrueV : FalseV;
9953 SDValue RegV = IsCZERO_NEZ ? FalseV : TrueV;
9954 int64_t RawConstVal = cast<ConstantSDNode>(Val&: ConstVal)->getSExtValue();
      // This is only efficient if the constant and its negation fit into an
      // `ADDI`. Prefer ADD/SUB over XOR since they can be compressed for small
      // immediates.
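      // e.g. (select c, %t, 5) becomes roughly:
      //   addi tmp, t, -5 ; czero.eqz tmp, tmp, c ; addi rd, tmp, 5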
9957 if (isInt<12>(x: RawConstVal)) {
9958 // Fall back to XORI if Const == -0x800 since we don't have SUBI.
9959 unsigned SubOpc = (RawConstVal == -0x800) ? ISD::XOR : ISD::SUB;
9960 unsigned AddOpc = (RawConstVal == -0x800) ? ISD::XOR : ISD::ADD;
9961 SDValue SubOp = DAG.getNode(Opcode: SubOpc, DL, VT, N1: RegV, N2: ConstVal);
9962 SDValue CZERO =
9963 DAG.getNode(Opcode: IsCZERO_NEZ ? RISCVISD::CZERO_NEZ : RISCVISD::CZERO_EQZ,
9964 DL, VT, N1: SubOp, N2: CondV);
9965 return DAG.getNode(Opcode: AddOpc, DL, VT, N1: CZERO, N2: ConstVal);
9966 }
9967 }
9968
9969 // (select c, t, f) -> (or (czero_eqz t, c), (czero_nez f, c))
9970 // Unless we have the short forward branch optimization.
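    // At least one of the two CZERO results is always zero, so the OR can be
    // marked disjoint.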
9971 if (!Subtarget.hasConditionalMoveFusion())
9972 return DAG.getNode(
9973 Opcode: ISD::OR, DL, VT,
9974 N1: DAG.getNode(Opcode: RISCVISD::CZERO_EQZ, DL, VT, N1: TrueV, N2: CondV),
9975 N2: DAG.getNode(Opcode: RISCVISD::CZERO_NEZ, DL, VT, N1: FalseV, N2: CondV),
9976 Flags: SDNodeFlags::Disjoint);
9977 }
9978
9979 if (Op.hasOneUse()) {
9980 unsigned UseOpc = Op->user_begin()->getOpcode();
9981 if (isBinOp(Opcode: UseOpc) && DAG.isSafeToSpeculativelyExecute(Opcode: UseOpc)) {
9982 SDNode *BinOp = *Op->user_begin();
9983 if (SDValue NewSel = foldBinOpIntoSelectIfProfitable(BO: *Op->user_begin(),
9984 DAG, Subtarget)) {
9985 DAG.ReplaceAllUsesWith(From: BinOp, To: &NewSel);
        // The opcode check is necessary because foldBinOpIntoSelectIfProfitable
        // may return a constant node, which would crash lowerSELECT.
9988 if (NewSel.getOpcode() == ISD::SELECT)
9989 return lowerSELECT(Op: NewSel, DAG);
9990 return NewSel;
9991 }
9992 }
9993 }
9994
9995 // (select cc, 1.0, 0.0) -> (sint_to_fp (zext cc))
9996 // (select cc, 0.0, 1.0) -> (sint_to_fp (zext (xor cc, 1)))
9997 const ConstantFPSDNode *FPTV = dyn_cast<ConstantFPSDNode>(Val&: TrueV);
9998 const ConstantFPSDNode *FPFV = dyn_cast<ConstantFPSDNode>(Val&: FalseV);
9999 if (FPTV && FPFV) {
10000 if (FPTV->isExactlyValue(V: 1.0) && FPFV->isExactlyValue(V: 0.0))
10001 return DAG.getNode(Opcode: ISD::SINT_TO_FP, DL, VT, Operand: CondV);
10002 if (FPTV->isExactlyValue(V: 0.0) && FPFV->isExactlyValue(V: 1.0)) {
10003 SDValue XOR = DAG.getNode(Opcode: ISD::XOR, DL, VT: XLenVT, N1: CondV,
10004 N2: DAG.getConstant(Val: 1, DL, VT: XLenVT));
10005 return DAG.getNode(Opcode: ISD::SINT_TO_FP, DL, VT, Operand: XOR);
10006 }
10007 }
10008
10009 // If the condition is not an integer SETCC which operates on XLenVT, we need
10010 // to emit a RISCVISD::SELECT_CC comparing the condition to zero. i.e.:
10011 // (select condv, truev, falsev)
10012 // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
10013 if (CondV.getOpcode() != ISD::SETCC ||
10014 CondV.getOperand(i: 0).getSimpleValueType() != XLenVT) {
10015 SDValue Zero = DAG.getConstant(Val: 0, DL, VT: XLenVT);
10016 SDValue SetNE = DAG.getCondCode(Cond: ISD::SETNE);
10017
10018 SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
10019
10020 return DAG.getNode(Opcode: RISCVISD::SELECT_CC, DL, VT, Ops);
10021 }
10022
10023 // If the CondV is the output of a SETCC node which operates on XLenVT inputs,
10024 // then merge the SETCC node into the lowered RISCVISD::SELECT_CC to take
10025 // advantage of the integer compare+branch instructions. i.e.:
10026 // (select (setcc lhs, rhs, cc), truev, falsev)
10027 // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
10028 SDValue LHS = CondV.getOperand(i: 0);
10029 SDValue RHS = CondV.getOperand(i: 1);
10030 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val: CondV.getOperand(i: 2))->get();
10031
10032 // Special case for a select of 2 constants that have a difference of 1.
10033 // Normally this is done by DAGCombine, but if the select is introduced by
10034 // type legalization or op legalization, we miss it. Restricting to SETLT
10035 // case for now because that is what signed saturating add/sub need.
10036 // FIXME: We don't need the condition to be SETLT or even a SETCC,
10037 // but we would probably want to swap the true/false values if the condition
10038 // is SETGE/SETLE to avoid an XORI.
10039 if (isa<ConstantSDNode>(Val: TrueV) && isa<ConstantSDNode>(Val: FalseV) &&
10040 CCVal == ISD::SETLT) {
10041 const APInt &TrueVal = TrueV->getAsAPIntVal();
10042 const APInt &FalseVal = FalseV->getAsAPIntVal();
10043 if (TrueVal - 1 == FalseVal)
10044 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: CondV, N2: FalseV);
10045 if (TrueVal + 1 == FalseVal)
10046 return DAG.getNode(Opcode: ISD::SUB, DL, VT, N1: FalseV, N2: CondV);
10047 }
10048
10049 translateSetCCForBranch(DL, LHS, RHS, CC&: CCVal, DAG, Subtarget);
10050 // 1 < x ? x : 1 -> 0 < x ? x : 1
10051 if (isOneConstant(V: LHS) && (CCVal == ISD::SETLT || CCVal == ISD::SETULT) &&
10052 RHS == TrueV && LHS == FalseV) {
10053 LHS = DAG.getConstant(Val: 0, DL, VT);
10054 // 0 <u x is the same as x != 0.
10055 if (CCVal == ISD::SETULT) {
10056 std::swap(a&: LHS, b&: RHS);
10057 CCVal = ISD::SETNE;
10058 }
10059 }
10060
10061 // x <s -1 ? x : -1 -> x <s 0 ? x : -1
10062 if (isAllOnesConstant(V: RHS) && CCVal == ISD::SETLT && LHS == TrueV &&
10063 RHS == FalseV) {
10064 RHS = DAG.getConstant(Val: 0, DL, VT);
10065 }
10066
10067 SDValue TargetCC = DAG.getCondCode(Cond: CCVal);
10068
10069 if (isa<ConstantSDNode>(Val: TrueV) && !isa<ConstantSDNode>(Val: FalseV)) {
10070 // (select (setcc lhs, rhs, CC), constant, falsev)
10071 // -> (select (setcc lhs, rhs, InverseCC), falsev, constant)
10072 std::swap(a&: TrueV, b&: FalseV);
10073 TargetCC = DAG.getCondCode(Cond: ISD::getSetCCInverse(Operation: CCVal, Type: LHS.getValueType()));
10074 }
10075
10076 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
10077 return DAG.getNode(Opcode: RISCVISD::SELECT_CC, DL, VT, Ops);
10078}
10079
10080SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
10081 SDValue CondV = Op.getOperand(i: 1);
10082 SDLoc DL(Op);
10083 MVT XLenVT = Subtarget.getXLenVT();
10084
10085 if (CondV.getOpcode() == ISD::SETCC &&
10086 CondV.getOperand(i: 0).getValueType() == XLenVT) {
10087 SDValue LHS = CondV.getOperand(i: 0);
10088 SDValue RHS = CondV.getOperand(i: 1);
10089 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val: CondV.getOperand(i: 2))->get();
10090
10091 translateSetCCForBranch(DL, LHS, RHS, CC&: CCVal, DAG, Subtarget);
10092
10093 SDValue TargetCC = DAG.getCondCode(Cond: CCVal);
10094 return DAG.getNode(Opcode: RISCVISD::BR_CC, DL, VT: Op.getValueType(), N1: Op.getOperand(i: 0),
10095 N2: LHS, N3: RHS, N4: TargetCC, N5: Op.getOperand(i: 2));
10096 }
10097
10098 return DAG.getNode(Opcode: RISCVISD::BR_CC, DL, VT: Op.getValueType(), N1: Op.getOperand(i: 0),
10099 N2: CondV, N3: DAG.getConstant(Val: 0, DL, VT: XLenVT),
10100 N4: DAG.getCondCode(Cond: ISD::SETNE), N5: Op.getOperand(i: 2));
10101}
10102
10103SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
10104 MachineFunction &MF = DAG.getMachineFunction();
10105 RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
10106
10107 SDLoc DL(Op);
10108 SDValue FI = DAG.getFrameIndex(FI: FuncInfo->getVarArgsFrameIndex(),
10109 VT: getPointerTy(DL: MF.getDataLayout()));
10110
10111 // vastart just stores the address of the VarArgsFrameIndex slot into the
10112 // memory location argument.
10113 const Value *SV = cast<SrcValueSDNode>(Val: Op.getOperand(i: 2))->getValue();
10114 return DAG.getStore(Chain: Op.getOperand(i: 0), dl: DL, Val: FI, Ptr: Op.getOperand(i: 1),
10115 PtrInfo: MachinePointerInfo(SV));
10116}
10117
10118SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
10119 SelectionDAG &DAG) const {
10120 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
10121 MachineFunction &MF = DAG.getMachineFunction();
10122 MachineFrameInfo &MFI = MF.getFrameInfo();
10123 MFI.setFrameAddressIsTaken(true);
10124 Register FrameReg = RI.getFrameRegister(MF);
10125 int XLenInBytes = Subtarget.getXLen() / 8;
10126
10127 EVT VT = Op.getValueType();
10128 SDLoc DL(Op);
10129 SDValue FrameAddr = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl: DL, Reg: FrameReg, VT);
10130 unsigned Depth = Op.getConstantOperandVal(i: 0);
10131 while (Depth--) {
10132 int Offset = -(XLenInBytes * 2);
10133 SDValue Ptr = DAG.getNode(
10134 Opcode: ISD::ADD, DL, VT, N1: FrameAddr,
10135 N2: DAG.getSignedConstant(Val: Offset, DL, VT: getPointerTy(DL: DAG.getDataLayout())));
10136 FrameAddr =
10137 DAG.getLoad(VT, dl: DL, Chain: DAG.getEntryNode(), Ptr, PtrInfo: MachinePointerInfo());
10138 }
10139 return FrameAddr;
10140}
10141
10142SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
10143 SelectionDAG &DAG) const {
10144 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
10145 MachineFunction &MF = DAG.getMachineFunction();
10146 MachineFrameInfo &MFI = MF.getFrameInfo();
10147 MFI.setReturnAddressIsTaken(true);
10148 MVT XLenVT = Subtarget.getXLenVT();
10149 int XLenInBytes = Subtarget.getXLen() / 8;
10150
10151 EVT VT = Op.getValueType();
10152 SDLoc DL(Op);
10153 unsigned Depth = Op.getConstantOperandVal(i: 0);
10154 if (Depth) {
10155 int Off = -XLenInBytes;
10156 SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
10157 SDValue Offset = DAG.getSignedConstant(Val: Off, DL, VT);
10158 return DAG.getLoad(VT, dl: DL, Chain: DAG.getEntryNode(),
10159 Ptr: DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: FrameAddr, N2: Offset),
10160 PtrInfo: MachinePointerInfo());
10161 }
10162
10163 // Return the value of the return address register, marking it an implicit
10164 // live-in.
10165 Register Reg = MF.addLiveIn(PReg: RI.getRARegister(), RC: getRegClassFor(VT: XLenVT));
10166 return DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl: DL, Reg, VT: XLenVT);
10167}
10168
10169SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
10170 SelectionDAG &DAG) const {
10171 SDLoc DL(Op);
10172 SDValue Lo = Op.getOperand(i: 0);
10173 SDValue Hi = Op.getOperand(i: 1);
10174 SDValue Shamt = Op.getOperand(i: 2);
10175 EVT VT = Lo.getValueType();
10176
10177 // if Shamt-XLEN < 0: // Shamt < XLEN
10178 // Lo = Lo << Shamt
10179 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
10180 // else:
10181 // Lo = 0
10182 // Hi = Lo << (Shamt-XLEN)
10183
10184 SDValue Zero = DAG.getConstant(Val: 0, DL, VT);
10185 SDValue One = DAG.getConstant(Val: 1, DL, VT);
10186 SDValue MinusXLen = DAG.getSignedConstant(Val: -(int)Subtarget.getXLen(), DL, VT);
10187 SDValue XLenMinus1 = DAG.getConstant(Val: Subtarget.getXLen() - 1, DL, VT);
10188 SDValue ShamtMinusXLen = DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: Shamt, N2: MinusXLen);
10189 SDValue XLenMinus1Shamt = DAG.getNode(Opcode: ISD::SUB, DL, VT, N1: XLenMinus1, N2: Shamt);
10190
10191 SDValue LoTrue = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: Lo, N2: Shamt);
10192 SDValue ShiftRight1Lo = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: Lo, N2: One);
10193 SDValue ShiftRightLo =
10194 DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: ShiftRight1Lo, N2: XLenMinus1Shamt);
10195 SDValue ShiftLeftHi = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: Hi, N2: Shamt);
10196 SDValue HiTrue = DAG.getNode(Opcode: ISD::OR, DL, VT, N1: ShiftLeftHi, N2: ShiftRightLo);
10197 SDValue HiFalse = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: Lo, N2: ShamtMinusXLen);
10198
10199 SDValue CC = DAG.getSetCC(DL, VT, LHS: ShamtMinusXLen, RHS: Zero, Cond: ISD::SETLT);
10200
10201 Lo = DAG.getNode(Opcode: ISD::SELECT, DL, VT, N1: CC, N2: LoTrue, N3: Zero);
10202 Hi = DAG.getNode(Opcode: ISD::SELECT, DL, VT, N1: CC, N2: HiTrue, N3: HiFalse);
10203
10204 SDValue Parts[2] = {Lo, Hi};
10205 return DAG.getMergeValues(Ops: Parts, dl: DL);
10206}
10207
10208SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
10209 bool IsSRA) const {
10210 SDLoc DL(Op);
10211 SDValue Lo = Op.getOperand(i: 0);
10212 SDValue Hi = Op.getOperand(i: 1);
10213 SDValue Shamt = Op.getOperand(i: 2);
10214 EVT VT = Lo.getValueType();
10215
10216 // SRA expansion:
10217 // if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
10219 // Hi = Hi >>s Shamt
10220 // else:
10221 // Lo = Hi >>s (Shamt-XLEN);
10222 // Hi = Hi >>s (XLEN-1)
10223 //
10224 // SRL expansion:
10225 // if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
10227 // Hi = Hi >>u Shamt
10228 // else:
10229 // Lo = Hi >>u (Shamt-XLEN);
10230 // Hi = 0;
10231
10232 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
10233
10234 SDValue Zero = DAG.getConstant(Val: 0, DL, VT);
10235 SDValue One = DAG.getConstant(Val: 1, DL, VT);
10236 SDValue MinusXLen = DAG.getSignedConstant(Val: -(int)Subtarget.getXLen(), DL, VT);
10237 SDValue XLenMinus1 = DAG.getConstant(Val: Subtarget.getXLen() - 1, DL, VT);
10238 SDValue ShamtMinusXLen = DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: Shamt, N2: MinusXLen);
10239 SDValue XLenMinus1Shamt = DAG.getNode(Opcode: ISD::SUB, DL, VT, N1: XLenMinus1, N2: Shamt);
10240
10241 SDValue ShiftRightLo = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: Lo, N2: Shamt);
10242 SDValue ShiftLeftHi1 = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: Hi, N2: One);
10243 SDValue ShiftLeftHi =
10244 DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: ShiftLeftHi1, N2: XLenMinus1Shamt);
10245 SDValue LoTrue = DAG.getNode(Opcode: ISD::OR, DL, VT, N1: ShiftRightLo, N2: ShiftLeftHi);
10246 SDValue HiTrue = DAG.getNode(Opcode: ShiftRightOp, DL, VT, N1: Hi, N2: Shamt);
10247 SDValue LoFalse = DAG.getNode(Opcode: ShiftRightOp, DL, VT, N1: Hi, N2: ShamtMinusXLen);
10248 SDValue HiFalse =
10249 IsSRA ? DAG.getNode(Opcode: ISD::SRA, DL, VT, N1: Hi, N2: XLenMinus1) : Zero;
10250
10251 SDValue CC = DAG.getSetCC(DL, VT, LHS: ShamtMinusXLen, RHS: Zero, Cond: ISD::SETLT);
10252
10253 Lo = DAG.getNode(Opcode: ISD::SELECT, DL, VT, N1: CC, N2: LoTrue, N3: LoFalse);
10254 Hi = DAG.getNode(Opcode: ISD::SELECT, DL, VT, N1: CC, N2: HiTrue, N3: HiFalse);
10255
10256 SDValue Parts[2] = {Lo, Hi};
10257 return DAG.getMergeValues(Ops: Parts, dl: DL);
10258}
10259
10260// Lower splats of i1 types to SETCC. For each mask vector type, we have a
10261// legal equivalently-sized i8 type, so we can use that as a go-between.
10262SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
10263 SelectionDAG &DAG) const {
10264 SDLoc DL(Op);
10265 MVT VT = Op.getSimpleValueType();
10266 SDValue SplatVal = Op.getOperand(i: 0);
10267 // All-zeros or all-ones splats are handled specially.
10268 if (ISD::isConstantSplatVectorAllOnes(N: Op.getNode())) {
10269 SDValue VL = getDefaultScalableVLOps(VecVT: VT, DL, DAG, Subtarget).second;
10270 return DAG.getNode(Opcode: RISCVISD::VMSET_VL, DL, VT, Operand: VL);
10271 }
10272 if (ISD::isConstantSplatVectorAllZeros(N: Op.getNode())) {
10273 SDValue VL = getDefaultScalableVLOps(VecVT: VT, DL, DAG, Subtarget).second;
10274 return DAG.getNode(Opcode: RISCVISD::VMCLR_VL, DL, VT, Operand: VL);
10275 }
10276 MVT InterVT = VT.changeVectorElementType(EltVT: MVT::i8);
10277 SplatVal = DAG.getNode(Opcode: ISD::AND, DL, VT: SplatVal.getValueType(), N1: SplatVal,
10278 N2: DAG.getConstant(Val: 1, DL, VT: SplatVal.getValueType()));
10279 SDValue LHS = DAG.getSplatVector(VT: InterVT, DL, Op: SplatVal);
10280 SDValue Zero = DAG.getConstant(Val: 0, DL, VT: InterVT);
10281 return DAG.getSetCC(DL, VT, LHS, RHS: Zero, Cond: ISD::SETNE);
10282}
10283
10284// Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
10285// illegal (currently only vXi64 RV32).
10286// FIXME: We could also catch non-constant sign-extended i32 values and lower
10287// them to VMV_V_X_VL.
10288SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
10289 SelectionDAG &DAG) const {
10290 SDLoc DL(Op);
10291 MVT VecVT = Op.getSimpleValueType();
10292 assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
10293 "Unexpected SPLAT_VECTOR_PARTS lowering");
10294
10295 assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
10296 SDValue Lo = Op.getOperand(i: 0);
10297 SDValue Hi = Op.getOperand(i: 1);
10298
10299 MVT ContainerVT = VecVT;
10300 if (VecVT.isFixedLengthVector())
10301 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
10302
10303 auto VL = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).second;
10304
10305 SDValue Res =
10306 splatPartsI64WithVL(DL, VT: ContainerVT, Passthru: SDValue(), Lo, Hi, VL, DAG);
10307
10308 if (VecVT.isFixedLengthVector())
10309 Res = convertFromScalableVector(VT: VecVT, V: Res, DAG, Subtarget);
10310
10311 return Res;
10312}
10313
10314// Custom-lower extensions from mask vectors by using a vselect either with 1
10315// for zero/any-extension or -1 for sign-extension:
10316// (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
10317// Note that any-extension is lowered identically to zero-extension.
10318SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
10319 int64_t ExtTrueVal) const {
10320 SDLoc DL(Op);
10321 MVT VecVT = Op.getSimpleValueType();
10322 SDValue Src = Op.getOperand(i: 0);
10323 // Only custom-lower extensions from mask types
10324 assert(Src.getValueType().isVector() &&
10325 Src.getValueType().getVectorElementType() == MVT::i1);
10326
10327 if (VecVT.isScalableVector()) {
10328 SDValue SplatZero = DAG.getConstant(Val: 0, DL, VT: VecVT);
10329 SDValue SplatTrueVal = DAG.getSignedConstant(Val: ExtTrueVal, DL, VT: VecVT);
10330 if (Src.getOpcode() == ISD::XOR &&
10331 ISD::isConstantSplatVectorAllOnes(N: Src.getOperand(i: 1).getNode()))
10332 return DAG.getNode(Opcode: ISD::VSELECT, DL, VT: VecVT, N1: Src.getOperand(i: 0), N2: SplatZero,
10333 N3: SplatTrueVal);
10334 return DAG.getNode(Opcode: ISD::VSELECT, DL, VT: VecVT, N1: Src, N2: SplatTrueVal, N3: SplatZero);
10335 }
10336
10337 MVT ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
10338 MVT I1ContainerVT =
10339 MVT::getVectorVT(VT: MVT::i1, EC: ContainerVT.getVectorElementCount());
10340
10341 SDValue CC = convertToScalableVector(VT: I1ContainerVT, V: Src, DAG, Subtarget);
10342
10343 SDValue VL = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).second;
10344
10345 MVT XLenVT = Subtarget.getXLenVT();
10346 SDValue SplatZero = DAG.getConstant(Val: 0, DL, VT: XLenVT);
10347 SDValue SplatTrueVal = DAG.getSignedConstant(Val: ExtTrueVal, DL, VT: XLenVT);
10348
10349 if (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
10350 SDValue Xor = Src.getOperand(i: 0);
10351 if (Xor.getOpcode() == RISCVISD::VMXOR_VL) {
10352 SDValue ScalableOnes = Xor.getOperand(i: 1);
10353 if (ScalableOnes.getOpcode() == ISD::INSERT_SUBVECTOR &&
10354 ScalableOnes.getOperand(i: 0).isUndef() &&
10355 ISD::isConstantSplatVectorAllOnes(
10356 N: ScalableOnes.getOperand(i: 1).getNode())) {
10357 CC = Xor.getOperand(i: 0);
10358 std::swap(a&: SplatZero, b&: SplatTrueVal);
10359 }
10360 }
10361 }
10362
10363 SplatZero = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT,
10364 N1: DAG.getUNDEF(VT: ContainerVT), N2: SplatZero, N3: VL);
10365 SplatTrueVal = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT,
10366 N1: DAG.getUNDEF(VT: ContainerVT), N2: SplatTrueVal, N3: VL);
10367 SDValue Select =
10368 DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: ContainerVT, N1: CC, N2: SplatTrueVal,
10369 N3: SplatZero, N4: DAG.getUNDEF(VT: ContainerVT), N5: VL);
10370
10371 return convertFromScalableVector(VT: VecVT, V: Select, DAG, Subtarget);
10372}
10373
10374// Custom-lower truncations from vectors to mask vectors by using a mask and a
10375// setcc operation:
10376// (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
10377SDValue RISCVTargetLowering::lowerVectorMaskTruncLike(SDValue Op,
10378 SelectionDAG &DAG) const {
10379 bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
10380 SDLoc DL(Op);
10381 EVT MaskVT = Op.getValueType();
10382 // Only expect to custom-lower truncations to mask types
10383 assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
10384 "Unexpected type for vector mask lowering");
10385 SDValue Src = Op.getOperand(i: 0);
10386 MVT VecVT = Src.getSimpleValueType();
10387 SDValue Mask, VL;
10388 if (IsVPTrunc) {
10389 Mask = Op.getOperand(i: 1);
10390 VL = Op.getOperand(i: 2);
10391 }
10392 // If this is a fixed vector, we need to convert it to a scalable vector.
10393 MVT ContainerVT = VecVT;
10394
10395 if (VecVT.isFixedLengthVector()) {
10396 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
10397 Src = convertToScalableVector(VT: ContainerVT, V: Src, DAG, Subtarget);
10398 if (IsVPTrunc) {
10399 MVT MaskContainerVT =
10400 getContainerForFixedLengthVector(VT: Mask.getSimpleValueType());
10401 Mask = convertToScalableVector(VT: MaskContainerVT, V: Mask, DAG, Subtarget);
10402 }
10403 }
10404
10405 if (!IsVPTrunc) {
10406 std::tie(args&: Mask, args&: VL) =
10407 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
10408 }
10409
10410 SDValue SplatOne = DAG.getConstant(Val: 1, DL, VT: Subtarget.getXLenVT());
10411 SDValue SplatZero = DAG.getConstant(Val: 0, DL, VT: Subtarget.getXLenVT());
10412
10413 SplatOne = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT,
10414 N1: DAG.getUNDEF(VT: ContainerVT), N2: SplatOne, N3: VL);
10415 SplatZero = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT,
10416 N1: DAG.getUNDEF(VT: ContainerVT), N2: SplatZero, N3: VL);
10417
10418 MVT MaskContainerVT = ContainerVT.changeVectorElementType(EltVT: MVT::i1);
10419 SDValue Trunc = DAG.getNode(Opcode: RISCVISD::AND_VL, DL, VT: ContainerVT, N1: Src, N2: SplatOne,
10420 N3: DAG.getUNDEF(VT: ContainerVT), N4: Mask, N5: VL);
10421 Trunc = DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: MaskContainerVT,
10422 Ops: {Trunc, SplatZero, DAG.getCondCode(Cond: ISD::SETNE),
10423 DAG.getUNDEF(VT: MaskContainerVT), Mask, VL});
10424 if (MaskVT.isFixedLengthVector())
10425 Trunc = convertFromScalableVector(VT: MaskVT, V: Trunc, DAG, Subtarget);
10426 return Trunc;
10427}
10428
10429SDValue RISCVTargetLowering::lowerVectorTruncLike(SDValue Op,
10430 SelectionDAG &DAG) const {
10431 unsigned Opc = Op.getOpcode();
10432 bool IsVPTrunc = Opc == ISD::VP_TRUNCATE;
10433 SDLoc DL(Op);
10434
10435 MVT VT = Op.getSimpleValueType();
10436 // Only custom-lower vector truncates
10437 assert(VT.isVector() && "Unexpected type for vector truncate lowering");
10438
10439 // Truncates to mask types are handled differently
10440 if (VT.getVectorElementType() == MVT::i1)
10441 return lowerVectorMaskTruncLike(Op, DAG);
10442
10443 // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
10444 // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
10445 // truncate by one power of two at a time.
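// e.g. an i64 -> i8 element truncate is emitted as three SEW-halving steps
// (i64 -> i32 -> i16 -> i8), each typically selected as a vnsrl.wi with a
// zero shift amount.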
10446 MVT DstEltVT = VT.getVectorElementType();
10447
10448 SDValue Src = Op.getOperand(i: 0);
10449 MVT SrcVT = Src.getSimpleValueType();
10450 MVT SrcEltVT = SrcVT.getVectorElementType();
10451
10452 assert(DstEltVT.bitsLT(SrcEltVT) && isPowerOf2_64(DstEltVT.getSizeInBits()) &&
10453 isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
10454 "Unexpected vector truncate lowering");
10455
10456 MVT ContainerVT = SrcVT;
10457 SDValue Mask, VL;
10458 if (IsVPTrunc) {
10459 Mask = Op.getOperand(i: 1);
10460 VL = Op.getOperand(i: 2);
10461 }
10462 if (SrcVT.isFixedLengthVector()) {
10463 ContainerVT = getContainerForFixedLengthVector(VT: SrcVT);
10464 Src = convertToScalableVector(VT: ContainerVT, V: Src, DAG, Subtarget);
10465 if (IsVPTrunc) {
10466 MVT MaskVT = getMaskTypeFor(VecVT: ContainerVT);
10467 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
10468 }
10469 }
10470
10471 SDValue Result = Src;
10472 if (!IsVPTrunc) {
10473 std::tie(args&: Mask, args&: VL) =
10474 getDefaultVLOps(VecVT: SrcVT, ContainerVT, DL, DAG, Subtarget);
10475 }
10476
10477 unsigned NewOpc;
10478 if (Opc == ISD::TRUNCATE_SSAT_S)
10479 NewOpc = RISCVISD::TRUNCATE_VECTOR_VL_SSAT;
10480 else if (Opc == ISD::TRUNCATE_USAT_U)
10481 NewOpc = RISCVISD::TRUNCATE_VECTOR_VL_USAT;
10482 else
10483 NewOpc = RISCVISD::TRUNCATE_VECTOR_VL;
10484
10485 do {
10486 SrcEltVT = MVT::getIntegerVT(BitWidth: SrcEltVT.getSizeInBits() / 2);
10487 MVT ResultVT = ContainerVT.changeVectorElementType(EltVT: SrcEltVT);
10488 Result = DAG.getNode(Opcode: NewOpc, DL, VT: ResultVT, N1: Result, N2: Mask, N3: VL);
10489 } while (SrcEltVT != DstEltVT);
10490
10491 if (SrcVT.isFixedLengthVector())
10492 Result = convertFromScalableVector(VT, V: Result, DAG, Subtarget);
10493
10494 return Result;
10495}
10496
10497SDValue
10498RISCVTargetLowering::lowerStrictFPExtendOrRoundLike(SDValue Op,
10499 SelectionDAG &DAG) const {
10500 SDLoc DL(Op);
10501 SDValue Chain = Op.getOperand(i: 0);
10502 SDValue Src = Op.getOperand(i: 1);
10503 MVT VT = Op.getSimpleValueType();
10504 MVT SrcVT = Src.getSimpleValueType();
10505 MVT ContainerVT = VT;
10506 if (VT.isFixedLengthVector()) {
10507 MVT SrcContainerVT = getContainerForFixedLengthVector(VT: SrcVT);
10508 ContainerVT =
10509 SrcContainerVT.changeVectorElementType(EltVT: VT.getVectorElementType());
10510 Src = convertToScalableVector(VT: SrcContainerVT, V: Src, DAG, Subtarget);
10511 }
10512
10513 auto [Mask, VL] = getDefaultVLOps(VecVT: SrcVT, ContainerVT, DL, DAG, Subtarget);
10514
  // RVV can only widen/truncate fp to types double/half the size of the
  // source.
10516 if ((VT.getVectorElementType() == MVT::f64 &&
10517 (SrcVT.getVectorElementType() == MVT::f16 ||
10518 SrcVT.getVectorElementType() == MVT::bf16)) ||
10519 ((VT.getVectorElementType() == MVT::f16 ||
10520 VT.getVectorElementType() == MVT::bf16) &&
10521 SrcVT.getVectorElementType() == MVT::f64)) {
10522 // For double rounding, the intermediate rounding should be round-to-odd.
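    // Rounding to odd in the wide step keeps the sticky information, so the
    // second (final) rounding cannot introduce a double-rounding error.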
10523 unsigned InterConvOpc = Op.getOpcode() == ISD::STRICT_FP_EXTEND
10524 ? RISCVISD::STRICT_FP_EXTEND_VL
10525 : RISCVISD::STRICT_VFNCVT_ROD_VL;
10526 MVT InterVT = ContainerVT.changeVectorElementType(EltVT: MVT::f32);
10527 Src = DAG.getNode(Opcode: InterConvOpc, DL, VTList: DAG.getVTList(VT1: InterVT, VT2: MVT::Other),
10528 N1: Chain, N2: Src, N3: Mask, N4: VL);
10529 Chain = Src.getValue(R: 1);
10530 }
10531
10532 unsigned ConvOpc = Op.getOpcode() == ISD::STRICT_FP_EXTEND
10533 ? RISCVISD::STRICT_FP_EXTEND_VL
10534 : RISCVISD::STRICT_FP_ROUND_VL;
10535 SDValue Res = DAG.getNode(Opcode: ConvOpc, DL, VTList: DAG.getVTList(VT1: ContainerVT, VT2: MVT::Other),
10536 N1: Chain, N2: Src, N3: Mask, N4: VL);
10537 if (VT.isFixedLengthVector()) {
    // StrictFP operations have two result values. Their lowered result should
    // have the same result count.
10540 SDValue SubVec = convertFromScalableVector(VT, V: Res, DAG, Subtarget);
10541 Res = DAG.getMergeValues(Ops: {SubVec, Res.getValue(R: 1)}, dl: DL);
10542 }
10543 return Res;
10544}
10545
10546SDValue
10547RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op,
10548 SelectionDAG &DAG) const {
10549 bool IsVP =
10550 Op.getOpcode() == ISD::VP_FP_ROUND || Op.getOpcode() == ISD::VP_FP_EXTEND;
10551 bool IsExtend =
10552 Op.getOpcode() == ISD::VP_FP_EXTEND || Op.getOpcode() == ISD::FP_EXTEND;
  // RVV can only truncate fp to types half the size of the source. We
  // custom-lower f64->f16 rounds via RVV's round-to-odd float conversion
  // instruction.
10556 SDLoc DL(Op);
10557 MVT VT = Op.getSimpleValueType();
10558
10559 assert(VT.isVector() && "Unexpected type for vector truncate lowering");
10560
10561 SDValue Src = Op.getOperand(i: 0);
10562 MVT SrcVT = Src.getSimpleValueType();
10563
10564 bool IsDirectExtend =
10565 IsExtend && (VT.getVectorElementType() != MVT::f64 ||
10566 (SrcVT.getVectorElementType() != MVT::f16 &&
10567 SrcVT.getVectorElementType() != MVT::bf16));
10568 bool IsDirectTrunc = !IsExtend && ((VT.getVectorElementType() != MVT::f16 &&
10569 VT.getVectorElementType() != MVT::bf16) ||
10570 SrcVT.getVectorElementType() != MVT::f64);
10571
10572 bool IsDirectConv = IsDirectExtend || IsDirectTrunc;
10573
  // We have regular SD node patterns for direct non-VL converts.
10575 if (VT.isScalableVector() && IsDirectConv && !IsVP)
10576 return Op;
10577
10578 // Prepare any fixed-length vector operands.
10579 MVT ContainerVT = VT;
10580 SDValue Mask, VL;
10581 if (IsVP) {
10582 Mask = Op.getOperand(i: 1);
10583 VL = Op.getOperand(i: 2);
10584 }
10585 if (VT.isFixedLengthVector()) {
10586 MVT SrcContainerVT = getContainerForFixedLengthVector(VT: SrcVT);
10587 ContainerVT =
10588 SrcContainerVT.changeVectorElementType(EltVT: VT.getVectorElementType());
10589 Src = convertToScalableVector(VT: SrcContainerVT, V: Src, DAG, Subtarget);
10590 if (IsVP) {
10591 MVT MaskVT = getMaskTypeFor(VecVT: ContainerVT);
10592 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
10593 }
10594 }
10595
10596 if (!IsVP)
10597 std::tie(args&: Mask, args&: VL) =
10598 getDefaultVLOps(VecVT: SrcVT, ContainerVT, DL, DAG, Subtarget);
10599
10600 unsigned ConvOpc = IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::FP_ROUND_VL;
10601
10602 if (IsDirectConv) {
10603 Src = DAG.getNode(Opcode: ConvOpc, DL, VT: ContainerVT, N1: Src, N2: Mask, N3: VL);
10604 if (VT.isFixedLengthVector())
10605 Src = convertFromScalableVector(VT, V: Src, DAG, Subtarget);
10606 return Src;
10607 }
10608
10609 unsigned InterConvOpc =
10610 IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::VFNCVT_ROD_VL;
10611
10612 MVT InterVT = ContainerVT.changeVectorElementType(EltVT: MVT::f32);
10613 SDValue IntermediateConv =
10614 DAG.getNode(Opcode: InterConvOpc, DL, VT: InterVT, N1: Src, N2: Mask, N3: VL);
10615 SDValue Result =
10616 DAG.getNode(Opcode: ConvOpc, DL, VT: ContainerVT, N1: IntermediateConv, N2: Mask, N3: VL);
10617 if (VT.isFixedLengthVector())
10618 return convertFromScalableVector(VT, V: Result, DAG, Subtarget);
10619 return Result;
10620}
10621
10622// Given a scalable vector type and an index into it, returns the type for the
10623// smallest subvector that the index fits in. This can be used to reduce LMUL
10624// for operations like vslidedown.
10625//
10626// E.g. With Zvl128b, index 3 in a nxv4i32 fits within the first nxv2i32.
10627static std::optional<MVT>
10628getSmallestVTForIndex(MVT VecVT, unsigned MaxIdx, SDLoc DL, SelectionDAG &DAG,
10629 const RISCVSubtarget &Subtarget) {
10630 assert(VecVT.isScalableVector());
10631 const unsigned EltSize = VecVT.getScalarSizeInBits();
10632 const unsigned VectorBitsMin = Subtarget.getRealMinVLen();
10633 const unsigned MinVLMAX = VectorBitsMin / EltSize;
10634 MVT SmallerVT;
10635 if (MaxIdx < MinVLMAX)
10636 SmallerVT = RISCVTargetLowering::getM1VT(VT: VecVT);
10637 else if (MaxIdx < MinVLMAX * 2)
10638 SmallerVT =
10639 RISCVTargetLowering::getM1VT(VT: VecVT).getDoubleNumVectorElementsVT();
10640 else if (MaxIdx < MinVLMAX * 4)
10641 SmallerVT = RISCVTargetLowering::getM1VT(VT: VecVT)
10642 .getDoubleNumVectorElementsVT()
10643 .getDoubleNumVectorElementsVT();
10644 if (!SmallerVT.isValid() || !VecVT.bitsGT(VT: SmallerVT))
10645 return std::nullopt;
10646 return SmallerVT;
10647}
10648
10649static bool isValidVisniInsertExtractIndex(SDValue Idx) {
10650 auto *IdxC = dyn_cast<ConstantSDNode>(Val&: Idx);
10651 if (!IdxC || isNullConstant(V: Idx))
10652 return false;
10653 return isUInt<5>(x: IdxC->getZExtValue());
10654}
10655
10656// Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
10657// first position of a vector, and that vector is slid up to the insert index.
10658// By limiting the active vector length to index+1 and merging with the
10659// original vector (with an undisturbed tail policy for elements >= VL), we
10660// achieve the desired result of leaving all elements untouched except the one
10661// at VL-1, which is replaced with the desired value.
10662SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
10663 SelectionDAG &DAG) const {
10664 SDLoc DL(Op);
10665 MVT VecVT = Op.getSimpleValueType();
10666 MVT XLenVT = Subtarget.getXLenVT();
10667 SDValue Vec = Op.getOperand(i: 0);
10668 SDValue Val = Op.getOperand(i: 1);
10669 MVT ValVT = Val.getSimpleValueType();
10670 SDValue Idx = Op.getOperand(i: 2);
10671
10672 if (VecVT.getVectorElementType() == MVT::i1) {
10673 // FIXME: For now we just promote to an i8 vector and insert into that,
10674 // but this is probably not optimal.
10675 MVT WideVT = MVT::getVectorVT(VT: MVT::i8, EC: VecVT.getVectorElementCount());
10676 Vec = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: WideVT, Operand: Vec);
10677 Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL, VT: WideVT, N1: Vec, N2: Val, N3: Idx);
10678 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: VecVT, Operand: Vec);
10679 }
10680
10681 if ((ValVT == MVT::f16 && !Subtarget.hasVInstructionsF16()) ||
10682 (ValVT == MVT::bf16 && !Subtarget.hasVInstructionsBF16())) {
10683 // If we don't have vfmv.s.f for f16/bf16, use fmv.x.h first.
10684 MVT IntVT = VecVT.changeTypeToInteger();
10685 SDValue IntInsert = DAG.getNode(
10686 Opcode: ISD::INSERT_VECTOR_ELT, DL, VT: IntVT, N1: DAG.getBitcast(VT: IntVT, V: Vec),
10687 N2: DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTH, DL, VT: XLenVT, Operand: Val), N3: Idx);
10688 return DAG.getBitcast(VT: VecVT, V: IntInsert);
10689 }
10690
10691 MVT ContainerVT = VecVT;
10692 // If the operand is a fixed-length vector, convert to a scalable one.
10693 if (VecVT.isFixedLengthVector()) {
10694 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
10695 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
10696 }
10697
10698 // If we know the index we're going to insert at, we can shrink Vec so that
10699 // we're performing the scalar inserts and slideup on a smaller LMUL.
10700 SDValue OrigVec = Vec;
10701 std::optional<unsigned> AlignedIdx;
10702 if (auto *IdxC = dyn_cast<ConstantSDNode>(Val&: Idx)) {
10703 const unsigned OrigIdx = IdxC->getZExtValue();
10704 // Do we know an upper bound on LMUL?
10705 if (auto ShrunkVT = getSmallestVTForIndex(VecVT: ContainerVT, MaxIdx: OrigIdx,
10706 DL, DAG, Subtarget)) {
10707 ContainerVT = *ShrunkVT;
10708 AlignedIdx = 0;
10709 }
10710
10711 // If we're compiling for an exact VLEN value, we can always perform
10712 // the insert in m1 as we can determine the register corresponding to
10713 // the index in the register group.
10714 const MVT M1VT = RISCVTargetLowering::getM1VT(VT: ContainerVT);
10715 if (auto VLEN = Subtarget.getRealVLen(); VLEN && ContainerVT.bitsGT(VT: M1VT)) {
10716 EVT ElemVT = VecVT.getVectorElementType();
10717 unsigned ElemsPerVReg = *VLEN / ElemVT.getFixedSizeInBits();
10718 unsigned RemIdx = OrigIdx % ElemsPerVReg;
10719 unsigned SubRegIdx = OrigIdx / ElemsPerVReg;
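      // AlignedIdx is the element offset, within the original register group,
      // of the M1 register that holds the target element.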
10720 AlignedIdx = SubRegIdx * M1VT.getVectorElementCount().getKnownMinValue();
10721 Idx = DAG.getVectorIdxConstant(Val: RemIdx, DL);
10722 ContainerVT = M1VT;
10723 }
10724
10725 if (AlignedIdx)
10726 Vec = DAG.getExtractSubvector(DL, VT: ContainerVT, Vec, Idx: *AlignedIdx);
10727 }
10728
10729 bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
10730 // Even i64-element vectors on RV32 can be lowered without scalar
10731 // legalization if the most-significant 32 bits of the value are not affected
10732 // by the sign-extension of the lower 32 bits.
10733 // TODO: We could also catch sign extensions of a 32-bit value.
10734 if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
10735 const auto *CVal = cast<ConstantSDNode>(Val);
10736 if (isInt<32>(x: CVal->getSExtValue())) {
10737 IsLegalInsert = true;
10738 Val = DAG.getSignedConstant(Val: CVal->getSExtValue(), DL, VT: MVT::i32);
10739 }
10740 }
10741
10742 auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
10743
10744 SDValue ValInVec;
10745
10746 if (IsLegalInsert) {
10747 unsigned Opc =
10748 VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
10749 if (isNullConstant(V: Idx)) {
10750 if (!VecVT.isFloatingPoint())
10751 Val = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: XLenVT, Operand: Val);
10752 Vec = DAG.getNode(Opcode: Opc, DL, VT: ContainerVT, N1: Vec, N2: Val, N3: VL);
10753
10754 if (AlignedIdx)
10755 Vec = DAG.getInsertSubvector(DL, Vec: OrigVec, SubVec: Vec, Idx: *AlignedIdx);
10756 if (!VecVT.isFixedLengthVector())
10757 return Vec;
10758 return convertFromScalableVector(VT: VecVT, V: Vec, DAG, Subtarget);
10759 }
10760
10761 // Use ri.vinsert.v.x if available.
10762 if (Subtarget.hasVendorXRivosVisni() && VecVT.isInteger() &&
10763 isValidVisniInsertExtractIndex(Idx)) {
10764 // Tail policy applies to elements past VLMAX (by assumption Idx < VLMAX)
10765 SDValue PolicyOp =
10766 DAG.getTargetConstant(Val: RISCVVType::TAIL_AGNOSTIC, DL, VT: XLenVT);
10767 Vec = DAG.getNode(Opcode: RISCVISD::RI_VINSERT_VL, DL, VT: ContainerVT, N1: Vec, N2: Val, N3: Idx,
10768 N4: VL, N5: PolicyOp);
10769 if (AlignedIdx)
10770 Vec = DAG.getInsertSubvector(DL, Vec: OrigVec, SubVec: Vec, Idx: *AlignedIdx);
10771 if (!VecVT.isFixedLengthVector())
10772 return Vec;
10773 return convertFromScalableVector(VT: VecVT, V: Vec, DAG, Subtarget);
10774 }
10775
10776 ValInVec = lowerScalarInsert(Scalar: Val, VL, VT: ContainerVT, DL, DAG, Subtarget);
10777 } else {
10778 // On RV32, i64-element vectors must be specially handled to place the
10779 // value at element 0, by using two vslide1down instructions in sequence on
10780 // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
10781 // this.
10782 SDValue ValLo, ValHi;
10783 std::tie(args&: ValLo, args&: ValHi) = DAG.SplitScalar(N: Val, DL, LoVT: MVT::i32, HiVT: MVT::i32);
10784 MVT I32ContainerVT =
10785 MVT::getVectorVT(VT: MVT::i32, EC: ContainerVT.getVectorElementCount() * 2);
10786 SDValue I32Mask =
10787 getDefaultScalableVLOps(VecVT: I32ContainerVT, DL, DAG, Subtarget).first;
10788 // Limit the active VL to two.
10789 SDValue InsertI64VL = DAG.getConstant(Val: 2, DL, VT: XLenVT);
10790 // If the Idx is 0 we can insert directly into the vector.
10791 if (isNullConstant(V: Idx)) {
10792      // First slide in the lo value, then the hi value above it. We use slide1down
10793      // to avoid the register group overlap constraint of vslide1up.
10794 ValInVec = DAG.getNode(Opcode: RISCVISD::VSLIDE1DOWN_VL, DL, VT: I32ContainerVT,
10795 N1: Vec, N2: Vec, N3: ValLo, N4: I32Mask, N5: InsertI64VL);
10796 // If the source vector is undef don't pass along the tail elements from
10797 // the previous slide1down.
10798 SDValue Tail = Vec.isUndef() ? Vec : ValInVec;
10799 ValInVec = DAG.getNode(Opcode: RISCVISD::VSLIDE1DOWN_VL, DL, VT: I32ContainerVT,
10800 N1: Tail, N2: ValInVec, N3: ValHi, N4: I32Mask, N5: InsertI64VL);
10801 // Bitcast back to the right container type.
10802 ValInVec = DAG.getBitcast(VT: ContainerVT, V: ValInVec);
10803
10804 if (AlignedIdx)
10805 ValInVec = DAG.getInsertSubvector(DL, Vec: OrigVec, SubVec: ValInVec, Idx: *AlignedIdx);
10806 if (!VecVT.isFixedLengthVector())
10807 return ValInVec;
10808 return convertFromScalableVector(VT: VecVT, V: ValInVec, DAG, Subtarget);
10809 }
10810
10811    // First slide in the lo value, then the hi value above it. We use slide1down
10812    // to avoid the register group overlap constraint of vslide1up.
10813 ValInVec = DAG.getNode(Opcode: RISCVISD::VSLIDE1DOWN_VL, DL, VT: I32ContainerVT,
10814 N1: DAG.getUNDEF(VT: I32ContainerVT),
10815 N2: DAG.getUNDEF(VT: I32ContainerVT), N3: ValLo,
10816 N4: I32Mask, N5: InsertI64VL);
10817 ValInVec = DAG.getNode(Opcode: RISCVISD::VSLIDE1DOWN_VL, DL, VT: I32ContainerVT,
10818 N1: DAG.getUNDEF(VT: I32ContainerVT), N2: ValInVec, N3: ValHi,
10819 N4: I32Mask, N5: InsertI64VL);
10820 // Bitcast back to the right container type.
10821 ValInVec = DAG.getBitcast(VT: ContainerVT, V: ValInVec);
10822 }
10823
10824 // Now that the value is in a vector, slide it into position.
10825 SDValue InsertVL =
10826 DAG.getNode(Opcode: ISD::ADD, DL, VT: XLenVT, N1: Idx, N2: DAG.getConstant(Val: 1, DL, VT: XLenVT));
10827
10828 // Use tail agnostic policy if Idx is the last index of Vec.
10829 unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
10830 if (VecVT.isFixedLengthVector() && isa<ConstantSDNode>(Val: Idx) &&
10831 Idx->getAsZExtVal() + 1 == VecVT.getVectorNumElements())
10832 Policy = RISCVVType::TAIL_AGNOSTIC;
10833 SDValue Slideup = getVSlideup(DAG, Subtarget, DL, VT: ContainerVT, Passthru: Vec, Op: ValInVec,
10834 Offset: Idx, Mask, VL: InsertVL, Policy);
10835
10836 if (AlignedIdx)
10837 Slideup = DAG.getInsertSubvector(DL, Vec: OrigVec, SubVec: Slideup, Idx: *AlignedIdx);
10838 if (!VecVT.isFixedLengthVector())
10839 return Slideup;
10840 return convertFromScalableVector(VT: VecVT, V: Slideup, DAG, Subtarget);
10841}
10842
10843// Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
10844// extract the first element: (extractelt (slidedown vec, idx), 0). For integer
10845// types this is done using VMV_X_S to allow us to glean information about the
10846// sign bits of the result.
10847SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
10848 SelectionDAG &DAG) const {
10849 SDLoc DL(Op);
10850 SDValue Idx = Op.getOperand(i: 1);
10851 SDValue Vec = Op.getOperand(i: 0);
10852 EVT EltVT = Op.getValueType();
10853 MVT VecVT = Vec.getSimpleValueType();
10854 MVT XLenVT = Subtarget.getXLenVT();
10855
10856 if (VecVT.getVectorElementType() == MVT::i1) {
10857 // Use vfirst.m to extract the first bit.
10858 if (isNullConstant(V: Idx)) {
10859 MVT ContainerVT = VecVT;
10860 if (VecVT.isFixedLengthVector()) {
10861 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
10862 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
10863 }
10864 auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
10865 SDValue Vfirst =
10866 DAG.getNode(Opcode: RISCVISD::VFIRST_VL, DL, VT: XLenVT, N1: Vec, N2: Mask, N3: VL);
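      // vfirst returns the index of the first set mask bit, or -1 if none are
      // set, so element 0 is set if and only if the result equals zero.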
10867 SDValue Res = DAG.getSetCC(DL, VT: XLenVT, LHS: Vfirst,
10868 RHS: DAG.getConstant(Val: 0, DL, VT: XLenVT), Cond: ISD::SETEQ);
10869 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: EltVT, Operand: Res);
10870 }
10871 if (VecVT.isFixedLengthVector()) {
10872 unsigned NumElts = VecVT.getVectorNumElements();
10873 if (NumElts >= 8) {
10874 MVT WideEltVT;
10875 unsigned WidenVecLen;
10876 SDValue ExtractElementIdx;
10877 SDValue ExtractBitIdx;
10878 unsigned MaxEEW = Subtarget.getELen();
10879 MVT LargestEltVT = MVT::getIntegerVT(
10880 BitWidth: std::min(a: MaxEEW, b: unsigned(XLenVT.getSizeInBits())));
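        // LargestEltVT is the widest integer element we can both form in the
        // vector unit (ELEN) and move into a scalar register (XLEN).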
10881 if (NumElts <= LargestEltVT.getSizeInBits()) {
10882 assert(isPowerOf2_32(NumElts) &&
10883 "the number of elements should be power of 2");
10884 WideEltVT = MVT::getIntegerVT(BitWidth: NumElts);
10885 WidenVecLen = 1;
10886 ExtractElementIdx = DAG.getConstant(Val: 0, DL, VT: XLenVT);
10887 ExtractBitIdx = Idx;
10888 } else {
10889 WideEltVT = LargestEltVT;
10890 WidenVecLen = NumElts / WideEltVT.getSizeInBits();
10891 // extract element index = index / element width
10892 ExtractElementIdx = DAG.getNode(
10893 Opcode: ISD::SRL, DL, VT: XLenVT, N1: Idx,
10894 N2: DAG.getConstant(Val: Log2_64(Value: WideEltVT.getSizeInBits()), DL, VT: XLenVT));
10895 // mask bit index = index % element width
10896 ExtractBitIdx = DAG.getNode(
10897 Opcode: ISD::AND, DL, VT: XLenVT, N1: Idx,
10898 N2: DAG.getConstant(Val: WideEltVT.getSizeInBits() - 1, DL, VT: XLenVT));
10899 }
10900 MVT WideVT = MVT::getVectorVT(VT: WideEltVT, NumElements: WidenVecLen);
10901 Vec = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: WideVT, Operand: Vec);
10902 SDValue ExtractElt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: XLenVT,
10903 N1: Vec, N2: ExtractElementIdx);
10904 // Extract the bit from GPR.
10905 SDValue ShiftRight =
10906 DAG.getNode(Opcode: ISD::SRL, DL, VT: XLenVT, N1: ExtractElt, N2: ExtractBitIdx);
10907 SDValue Res = DAG.getNode(Opcode: ISD::AND, DL, VT: XLenVT, N1: ShiftRight,
10908 N2: DAG.getConstant(Val: 1, DL, VT: XLenVT));
10909 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: EltVT, Operand: Res);
10910 }
10911 }
10912 // Otherwise, promote to an i8 vector and extract from that.
10913 MVT WideVT = MVT::getVectorVT(VT: MVT::i8, EC: VecVT.getVectorElementCount());
10914 Vec = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: WideVT, Operand: Vec);
10915 return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: EltVT, N1: Vec, N2: Idx);
10916 }
10917
10918 if ((EltVT == MVT::f16 && !Subtarget.hasVInstructionsF16()) ||
10919 (EltVT == MVT::bf16 && !Subtarget.hasVInstructionsBF16())) {
10920    // If we don't have vfmv.f.s for f16/bf16, extract to a GPR then use fmv.h.x.
10921 MVT IntVT = VecVT.changeTypeToInteger();
10922 SDValue IntVec = DAG.getBitcast(VT: IntVT, V: Vec);
10923 SDValue IntExtract =
10924 DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: XLenVT, N1: IntVec, N2: Idx);
10925 return DAG.getNode(Opcode: RISCVISD::FMV_H_X, DL, VT: EltVT, Operand: IntExtract);
10926 }
10927
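  // With the P extension, these small fixed-length vectors are held in scalar
  // registers, so extract an element by bitcasting to XLenVT and shifting the
  // requested element down to bit 0.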
10928 if (Subtarget.enablePExtSIMDCodeGen() && VecVT.isFixedLengthVector()) {
10929 if (VecVT != MVT::v4i16 && VecVT != MVT::v2i16 && VecVT != MVT::v8i8 &&
10930 VecVT != MVT::v4i8 && VecVT != MVT::v2i32)
10931 return SDValue();
10932 SDValue Extracted = DAG.getBitcast(VT: XLenVT, V: Vec);
10933 unsigned ElemWidth = VecVT.getVectorElementType().getSizeInBits();
10934 SDValue Shamt = DAG.getNode(Opcode: ISD::MUL, DL, VT: XLenVT, N1: Idx,
10935 N2: DAG.getConstant(Val: ElemWidth, DL, VT: XLenVT));
10936 return DAG.getNode(Opcode: ISD::SRL, DL, VT: XLenVT, N1: Extracted, N2: Shamt);
10937 }
10938
10939 // If this is a fixed vector, we need to convert it to a scalable vector.
10940 MVT ContainerVT = VecVT;
10941 if (VecVT.isFixedLengthVector()) {
10942 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
10943 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
10944 }
10945
10946 // If we're compiling for an exact VLEN value and we have a known
10947 // constant index, we can always perform the extract in m1 (or
10948 // smaller) as we can determine the register corresponding to
10949 // the index in the register group.
10950 const auto VLen = Subtarget.getRealVLen();
10951 if (auto *IdxC = dyn_cast<ConstantSDNode>(Val&: Idx);
10952 IdxC && VLen && VecVT.getSizeInBits().getKnownMinValue() > *VLen) {
10953 MVT M1VT = RISCVTargetLowering::getM1VT(VT: ContainerVT);
10954 unsigned OrigIdx = IdxC->getZExtValue();
10955 EVT ElemVT = VecVT.getVectorElementType();
10956 unsigned ElemsPerVReg = *VLen / ElemVT.getFixedSizeInBits();
10957 unsigned RemIdx = OrigIdx % ElemsPerVReg;
10958 unsigned SubRegIdx = OrigIdx / ElemsPerVReg;
10959 unsigned ExtractIdx =
10960 SubRegIdx * M1VT.getVectorElementCount().getKnownMinValue();
10961 Vec = DAG.getExtractSubvector(DL, VT: M1VT, Vec, Idx: ExtractIdx);
10962 Idx = DAG.getVectorIdxConstant(Val: RemIdx, DL);
10963 ContainerVT = M1VT;
10964 }
10965
10966 // Reduce the LMUL of our slidedown and vmv.x.s to the smallest LMUL which
10967 // contains our index.
10968 std::optional<uint64_t> MaxIdx;
10969 if (VecVT.isFixedLengthVector())
10970 MaxIdx = VecVT.getVectorNumElements() - 1;
10971 if (auto *IdxC = dyn_cast<ConstantSDNode>(Val&: Idx))
10972 MaxIdx = IdxC->getZExtValue();
10973 if (MaxIdx) {
10974 if (auto SmallerVT =
10975 getSmallestVTForIndex(VecVT: ContainerVT, MaxIdx: *MaxIdx, DL, DAG, Subtarget)) {
10976 ContainerVT = *SmallerVT;
10977 Vec = DAG.getExtractSubvector(DL, VT: ContainerVT, Vec, Idx: 0);
10978 }
10979 }
10980
10981 // Use ri.vextract.x.v if available.
10982 // TODO: Avoid index 0 and just use the vmv.x.s
10983 if (Subtarget.hasVendorXRivosVisni() && EltVT.isInteger() &&
10984 isValidVisniInsertExtractIndex(Idx)) {
10985 SDValue Elt = DAG.getNode(Opcode: RISCVISD::RI_VEXTRACT, DL, VT: XLenVT, N1: Vec, N2: Idx);
10986 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: EltVT, Operand: Elt);
10987 }
10988
10989 // If after narrowing, the required slide is still greater than LMUL2,
10990 // fallback to generic expansion and go through the stack. This is done
10991 // for a subtle reason: extracting *all* elements out of a vector is
10992 // widely expected to be linear in vector size, but because vslidedown
10993 // is linear in LMUL, performing N extracts using vslidedown becomes
10994 // O(n^2) / (VLEN/ETYPE) work. On the surface, going through the stack
10995 // seems to have the same problem (the store is linear in LMUL), but the
10996 // generic expansion *memoizes* the store, and thus for many extracts of
10997 // the same vector we end up with one store and a bunch of loads.
10998 // TODO: We don't have the same code for insert_vector_elt because we
10999 // have BUILD_VECTOR and handle the degenerate case there. Should we
11000 // consider adding an inverse BUILD_VECTOR node?
11001 MVT LMUL2VT =
11002 RISCVTargetLowering::getM1VT(VT: ContainerVT).getDoubleNumVectorElementsVT();
11003 if (ContainerVT.bitsGT(VT: LMUL2VT) && VecVT.isFixedLengthVector())
11004 return SDValue();
11005
11006 // If the index is 0, the vector is already in the right position.
11007 if (!isNullConstant(V: Idx)) {
11008 // Use a VL of 1 to avoid processing more elements than we need.
11009 auto [Mask, VL] = getDefaultVLOps(NumElts: 1, ContainerVT, DL, DAG, Subtarget);
11010 Vec = getVSlidedown(DAG, Subtarget, DL, VT: ContainerVT,
11011 Passthru: DAG.getUNDEF(VT: ContainerVT), Op: Vec, Offset: Idx, Mask, VL);
11012 }
11013
11014 if (!EltVT.isInteger()) {
11015 // Floating-point extracts are handled in TableGen.
11016 return DAG.getExtractVectorElt(DL, VT: EltVT, Vec, Idx: 0);
11017 }
11018
11019 SDValue Elt0 = DAG.getNode(Opcode: RISCVISD::VMV_X_S, DL, VT: XLenVT, Operand: Vec);
11020 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: EltVT, Operand: Elt0);
11021}
11022
11023// Some RVV intrinsics may claim that they want an integer operand to be
11024// promoted or expanded.
11025static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
11026 const RISCVSubtarget &Subtarget) {
11027 assert((Op.getOpcode() == ISD::INTRINSIC_VOID ||
11028 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
11029 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
11030 "Unexpected opcode");
11031
11032 if (!Subtarget.hasVInstructions())
11033 return SDValue();
11034
11035 bool HasChain = Op.getOpcode() == ISD::INTRINSIC_VOID ||
11036 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
11037 unsigned IntNo = Op.getConstantOperandVal(i: HasChain ? 1 : 0);
11038
11039 SDLoc DL(Op);
11040
11041 const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
11042 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID: IntNo);
11043 if (!II || !II->hasScalarOperand())
11044 return SDValue();
11045
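  // ScalarOperand is an argument index; skip the chain (if present) and the
  // intrinsic ID operand to get the corresponding SDNode operand index.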
11046 unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
11047 assert(SplatOp < Op.getNumOperands());
11048
11049 SmallVector<SDValue, 8> Operands(Op->ops());
11050 SDValue &ScalarOp = Operands[SplatOp];
11051 MVT OpVT = ScalarOp.getSimpleValueType();
11052 MVT XLenVT = Subtarget.getXLenVT();
11053
11054  // If this isn't a scalar, or its type is XLenVT, we're done.
11055 if (!OpVT.isScalarInteger() || OpVT == XLenVT)
11056 return SDValue();
11057
11058 // Simplest case is that the operand needs to be promoted to XLenVT.
11059 if (OpVT.bitsLT(VT: XLenVT)) {
11060 // If the operand is a constant, sign extend to increase our chances
11061 // of being able to use a .vi instruction. ANY_EXTEND would become a
11062    // zero extend and the simm5 check in isel would fail.
11063 // FIXME: Should we ignore the upper bits in isel instead?
11064 unsigned ExtOpc =
11065 isa<ConstantSDNode>(Val: ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
11066 ScalarOp = DAG.getNode(Opcode: ExtOpc, DL, VT: XLenVT, Operand: ScalarOp);
11067 return DAG.getNode(Opcode: Op->getOpcode(), DL, VTList: Op->getVTList(), Ops: Operands);
11068 }
11069
11070 // Use the previous operand to get the vXi64 VT. The result might be a mask
11071 // VT for compares. Using the previous operand assumes that the previous
11072 // operand will never have a smaller element size than a scalar operand and
11073 // that a widening operation never uses SEW=64.
11074 // NOTE: If this fails the below assert, we can probably just find the
11075 // element count from any operand or result and use it to construct the VT.
11076 assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
11077 MVT VT = Op.getOperand(i: SplatOp - 1).getSimpleValueType();
11078
11079 // The more complex case is when the scalar is larger than XLenVT.
11080 assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
11081 VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
11082
11083 // If this is a sign-extended 32-bit value, we can truncate it and rely on the
11084 // instruction to sign-extend since SEW>XLEN.
11085 if (DAG.ComputeNumSignBits(Op: ScalarOp) > 32) {
11086 ScalarOp = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: ScalarOp);
11087 return DAG.getNode(Opcode: Op->getOpcode(), DL, VTList: Op->getVTList(), Ops: Operands);
11088 }
11089
11090 switch (IntNo) {
11091 case Intrinsic::riscv_vslide1up:
11092 case Intrinsic::riscv_vslide1down:
11093 case Intrinsic::riscv_vslide1up_mask:
11094 case Intrinsic::riscv_vslide1down_mask: {
11095 // We need to special case these when the scalar is larger than XLen.
11096 unsigned NumOps = Op.getNumOperands();
11097 bool IsMasked = NumOps == 7;
11098
11099 // Convert the vector source to the equivalent nxvXi32 vector.
11100 MVT I32VT = MVT::getVectorVT(VT: MVT::i32, EC: VT.getVectorElementCount() * 2);
11101 SDValue Vec = DAG.getBitcast(VT: I32VT, V: Operands[2]);
11102 SDValue ScalarLo, ScalarHi;
11103 std::tie(args&: ScalarLo, args&: ScalarHi) =
11104 DAG.SplitScalar(N: ScalarOp, DL, LoVT: MVT::i32, HiVT: MVT::i32);
11105
11106 // Double the VL since we halved SEW.
11107 SDValue AVL = getVLOperand(Op);
11108 SDValue I32VL;
11109
11110 // Optimize for constant AVL
11111 if (isa<ConstantSDNode>(Val: AVL)) {
11112 const auto [MinVLMAX, MaxVLMAX] =
11113 RISCVTargetLowering::computeVLMAXBounds(VecVT: VT, Subtarget);
11114
11115 uint64_t AVLInt = AVL->getAsZExtVal();
11116 if (AVLInt <= MinVLMAX) {
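        // With AVL <= MinVLMAX, doubling the AVL still cannot exceed VLMAX at
        // the halved SEW, so the requested VL of 2 * AVL is honoured exactly.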
11117 I32VL = DAG.getConstant(Val: 2 * AVLInt, DL, VT: XLenVT);
11118 } else if (AVLInt >= 2 * MaxVLMAX) {
11119 // Just set vl to VLMAX in this situation
11120 I32VL = DAG.getRegister(Reg: RISCV::X0, VT: XLenVT);
11121 } else {
11122        // For AVL between (MinVLMAX, 2 * MaxVLMAX), the actual working VL
11123        // depends on the hardware implementation, so let the vsetvli-based
11124        // code below handle it.
11125 }
11126 }
11127 if (!I32VL) {
11128 RISCVVType::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
11129 SDValue LMUL = DAG.getConstant(Val: Lmul, DL, VT: XLenVT);
11130 unsigned Sew = RISCVVType::encodeSEW(SEW: VT.getScalarSizeInBits());
11131 SDValue SEW = DAG.getConstant(Val: Sew, DL, VT: XLenVT);
11132 SDValue SETVL =
11133 DAG.getTargetConstant(Val: Intrinsic::riscv_vsetvli, DL, VT: MVT::i32);
11134      // Use a vsetvli instruction to get the actual VL used, which depends on
11135      // the hardware implementation.
11136 SDValue VL = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: XLenVT, N1: SETVL, N2: AVL,
11137 N3: SEW, N4: LMUL);
11138 I32VL =
11139 DAG.getNode(Opcode: ISD::SHL, DL, VT: XLenVT, N1: VL, N2: DAG.getConstant(Val: 1, DL, VT: XLenVT));
11140 }
11141
11142 SDValue I32Mask = getAllOnesMask(VecVT: I32VT, VL: I32VL, DL, DAG);
11143
11144 // Shift the two scalar parts in using SEW=32 slide1up/slide1down
11145 // instructions.
11146 SDValue Passthru;
11147 if (IsMasked)
11148 Passthru = DAG.getUNDEF(VT: I32VT);
11149 else
11150 Passthru = DAG.getBitcast(VT: I32VT, V: Operands[1]);
11151
11152 if (IntNo == Intrinsic::riscv_vslide1up ||
11153 IntNo == Intrinsic::riscv_vslide1up_mask) {
11154 Vec = DAG.getNode(Opcode: RISCVISD::VSLIDE1UP_VL, DL, VT: I32VT, N1: Passthru, N2: Vec,
11155 N3: ScalarHi, N4: I32Mask, N5: I32VL);
11156 Vec = DAG.getNode(Opcode: RISCVISD::VSLIDE1UP_VL, DL, VT: I32VT, N1: Passthru, N2: Vec,
11157 N3: ScalarLo, N4: I32Mask, N5: I32VL);
11158 } else {
11159 Vec = DAG.getNode(Opcode: RISCVISD::VSLIDE1DOWN_VL, DL, VT: I32VT, N1: Passthru, N2: Vec,
11160 N3: ScalarLo, N4: I32Mask, N5: I32VL);
11161 Vec = DAG.getNode(Opcode: RISCVISD::VSLIDE1DOWN_VL, DL, VT: I32VT, N1: Passthru, N2: Vec,
11162 N3: ScalarHi, N4: I32Mask, N5: I32VL);
11163 }
11164
11165 // Convert back to nxvXi64.
11166 Vec = DAG.getBitcast(VT, V: Vec);
11167
11168 if (!IsMasked)
11169 return Vec;
11170 // Apply mask after the operation.
11171 SDValue Mask = Operands[NumOps - 3];
11172 SDValue MaskedOff = Operands[1];
11173 // Assume Policy operand is the last operand.
11174 uint64_t Policy = Operands[NumOps - 1]->getAsZExtVal();
11175 // We don't need to select maskedoff if it's undef.
11176 if (MaskedOff.isUndef())
11177 return Vec;
11178 // TAMU
11179 if (Policy == RISCVVType::TAIL_AGNOSTIC)
11180 return DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT, N1: Mask, N2: Vec, N3: MaskedOff,
11181 N4: DAG.getUNDEF(VT), N5: AVL);
11182 // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
11183    // It's fine because vmerge does not care about the mask policy.
11184 return DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT, N1: Mask, N2: Vec, N3: MaskedOff,
11185 N4: MaskedOff, N5: AVL);
11186 }
11187 }
11188
11189 // We need to convert the scalar to a splat vector.
11190 SDValue VL = getVLOperand(Op);
11191 assert(VL.getValueType() == XLenVT);
11192 ScalarOp = splatSplitI64WithVL(DL, VT, Passthru: SDValue(), Scalar: ScalarOp, VL, DAG);
11193 return DAG.getNode(Opcode: Op->getOpcode(), DL, VTList: Op->getVTList(), Ops: Operands);
11194}
11195
11196// Lower the llvm.get.vector.length intrinsic to vsetvli. We only support
11197// scalable vector llvm.get.vector.length for now.
11198//
11199// We need to convert from a scalable VF to a vsetvli with VLMax equal to
11200// (vscale * VF). The vscale and VF are independent of element width. We use
11201// SEW=8 for the vsetvli because it is the only element width that supports all
11202// fractional LMULs. The LMUL is chosen so that with SEW=8 the VLMax is
11203// (vscale * VF), where vscale is defined as VLEN/RVVBitsPerBlock. The
11204// InsertVSETVLI pass can fix up the vtype of the vsetvli if a different
11205// SEW and LMUL are better for the surrounding vector instructions.
11206static SDValue lowerGetVectorLength(SDNode *N, SelectionDAG &DAG,
11207 const RISCVSubtarget &Subtarget) {
11208 MVT XLenVT = Subtarget.getXLenVT();
11209
11210 // The smallest LMUL is only valid for the smallest element width.
11211 const unsigned ElementWidth = 8;
11212
11213 // Determine the VF that corresponds to LMUL 1 for ElementWidth.
11214 unsigned LMul1VF = RISCV::RVVBitsPerBlock / ElementWidth;
11215 // We don't support VF==1 with ELEN==32.
11216 [[maybe_unused]] unsigned MinVF =
11217 RISCV::RVVBitsPerBlock / Subtarget.getELen();
11218
11219 [[maybe_unused]] unsigned VF = N->getConstantOperandVal(Num: 2);
11220 assert(VF >= MinVF && VF <= (LMul1VF * 8) && isPowerOf2_32(VF) &&
11221 "Unexpected VF");
11222
11223 bool Fractional = VF < LMul1VF;
11224 unsigned LMulVal = Fractional ? LMul1VF / VF : VF / LMul1VF;
11225 unsigned VLMUL = (unsigned)RISCVVType::encodeLMUL(LMUL: LMulVal, Fractional);
11226 unsigned VSEW = RISCVVType::encodeSEW(SEW: ElementWidth);
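  // E.g. VF=2 gives LMUL=mf4: at SEW=8 that is VLEN/32 = 2 * (VLEN/64)
  // = vscale * VF elements, as required.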
11227
11228 SDLoc DL(N);
11229
11230 SDValue LMul = DAG.getTargetConstant(Val: VLMUL, DL, VT: XLenVT);
11231 SDValue Sew = DAG.getTargetConstant(Val: VSEW, DL, VT: XLenVT);
11232
11233 SDValue AVL = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: XLenVT, Operand: N->getOperand(Num: 1));
11234
11235 SDValue ID = DAG.getTargetConstant(Val: Intrinsic::riscv_vsetvli, DL, VT: XLenVT);
11236 SDValue Res =
11237 DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: XLenVT, N1: ID, N2: AVL, N3: Sew, N4: LMul);
11238 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: N->getValueType(ResNo: 0), Operand: Res);
11239}
11240
11241static SDValue lowerCttzElts(SDNode *N, SelectionDAG &DAG,
11242 const RISCVSubtarget &Subtarget) {
11243 SDValue Op0 = N->getOperand(Num: 1);
11244 MVT OpVT = Op0.getSimpleValueType();
11245 MVT ContainerVT = OpVT;
11246 if (OpVT.isFixedLengthVector()) {
11247 ContainerVT = getContainerForFixedLengthVector(DAG, VT: OpVT, Subtarget);
11248 Op0 = convertToScalableVector(VT: ContainerVT, V: Op0, DAG, Subtarget);
11249 }
11250 MVT XLenVT = Subtarget.getXLenVT();
11251 SDLoc DL(N);
11252 auto [Mask, VL] = getDefaultVLOps(VecVT: OpVT, ContainerVT, DL, DAG, Subtarget);
11253 SDValue Res = DAG.getNode(Opcode: RISCVISD::VFIRST_VL, DL, VT: XLenVT, N1: Op0, N2: Mask, N3: VL);
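  // If the zero-is-poison flag (operand 2) is set, an all-false input is
  // poison, so vfirst's -1 result can be returned directly.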
11254 if (isOneConstant(V: N->getOperand(Num: 2)))
11255 return Res;
11256
11257 // Convert -1 to VL.
11258 SDValue Setcc =
11259 DAG.getSetCC(DL, VT: XLenVT, LHS: Res, RHS: DAG.getConstant(Val: 0, DL, VT: XLenVT), Cond: ISD::SETLT);
11260 VL = DAG.getElementCount(DL, VT: XLenVT, EC: OpVT.getVectorElementCount());
11261 return DAG.getSelect(DL, VT: XLenVT, Cond: Setcc, LHS: VL, RHS: Res);
11262}
11263
11264static inline void promoteVCIXScalar(SDValue Op,
11265 MutableArrayRef<SDValue> Operands,
11266 SelectionDAG &DAG) {
11267 const RISCVSubtarget &Subtarget =
11268 DAG.getMachineFunction().getSubtarget<RISCVSubtarget>();
11269
11270 bool HasChain = Op.getOpcode() == ISD::INTRINSIC_VOID ||
11271 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
11272 unsigned IntNo = Op.getConstantOperandVal(i: HasChain ? 1 : 0);
11273 SDLoc DL(Op);
11274
11275 const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
11276 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID: IntNo);
11277 if (!II || !II->hasScalarOperand())
11278 return;
11279
11280 unsigned SplatOp = II->ScalarOperand + 1;
11281 assert(SplatOp < Op.getNumOperands());
11282
11283 SDValue &ScalarOp = Operands[SplatOp];
11284 MVT OpVT = ScalarOp.getSimpleValueType();
11285 MVT XLenVT = Subtarget.getXLenVT();
11286
11287 // The code below is partially copied from lowerVectorIntrinsicScalars.
11288  // If this isn't a scalar, or its type is XLenVT, we're done.
11289 if (!OpVT.isScalarInteger() || OpVT == XLenVT)
11290 return;
11291
11292  // Manually promote the scalar operand to XLenVT.
11293 if (OpVT.bitsLT(VT: XLenVT)) {
11294 unsigned ExtOpc =
11295 isa<ConstantSDNode>(Val: ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
11296 ScalarOp = DAG.getNode(Opcode: ExtOpc, DL, VT: XLenVT, Operand: ScalarOp);
11297 }
11298}
11299
11300static void processVCIXOperands(SDValue OrigOp,
11301 MutableArrayRef<SDValue> Operands,
11302 SelectionDAG &DAG) {
11303 promoteVCIXScalar(Op: OrigOp, Operands, DAG);
11304 const RISCVSubtarget &Subtarget =
11305 DAG.getMachineFunction().getSubtarget<RISCVSubtarget>();
11306 for (SDValue &V : Operands) {
11307 EVT ValType = V.getValueType();
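    // VCIX nodes are built with integer vector types, so bitcast any
    // floating-point operands to equally sized integer vectors.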
11308 if (ValType.isVector() && ValType.isFloatingPoint()) {
11309 MVT InterimIVT =
11310 MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: ValType.getScalarSizeInBits()),
11311 EC: ValType.getVectorElementCount());
11312 V = DAG.getBitcast(VT: InterimIVT, V);
11313 }
11314 if (ValType.isFixedLengthVector()) {
11315 MVT OpContainerVT = getContainerForFixedLengthVector(
11316 DAG, VT: V.getSimpleValueType(), Subtarget);
11317 V = convertToScalableVector(VT: OpContainerVT, V, DAG, Subtarget);
11318 }
11319 }
11320}
11321
11322// LMUL * VLEN should be greater than or equal to EGS * SEW
11323static inline bool isValidEGW(int EGS, EVT VT,
11324 const RISCVSubtarget &Subtarget) {
11325 return (Subtarget.getRealMinVLen() *
11326 VT.getSizeInBits().getKnownMinValue()) / RISCV::RVVBitsPerBlock >=
11327 EGS * VT.getScalarSizeInBits();
11328}
11329
11330SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
11331 SelectionDAG &DAG) const {
11332 unsigned IntNo = Op.getConstantOperandVal(i: 0);
11333 SDLoc DL(Op);
11334 MVT XLenVT = Subtarget.getXLenVT();
11335
11336 switch (IntNo) {
11337 default:
11338 break; // Don't custom lower most intrinsics.
11339 case Intrinsic::riscv_tuple_insert: {
11340 SDValue Vec = Op.getOperand(i: 1);
11341 SDValue SubVec = Op.getOperand(i: 2);
11342 SDValue Index = Op.getOperand(i: 3);
11343
11344 return DAG.getNode(Opcode: RISCVISD::TUPLE_INSERT, DL, VT: Op.getValueType(), N1: Vec,
11345 N2: SubVec, N3: Index);
11346 }
11347 case Intrinsic::riscv_tuple_extract: {
11348 SDValue Vec = Op.getOperand(i: 1);
11349 SDValue Index = Op.getOperand(i: 2);
11350
11351 return DAG.getNode(Opcode: RISCVISD::TUPLE_EXTRACT, DL, VT: Op.getValueType(), N1: Vec,
11352 N2: Index);
11353 }
11354 case Intrinsic::thread_pointer: {
11355 EVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
11356 return DAG.getRegister(Reg: RISCV::X4, VT: PtrVT);
11357 }
11358 case Intrinsic::riscv_orc_b:
11359 case Intrinsic::riscv_brev8:
11360 case Intrinsic::riscv_sha256sig0:
11361 case Intrinsic::riscv_sha256sig1:
11362 case Intrinsic::riscv_sha256sum0:
11363 case Intrinsic::riscv_sha256sum1:
11364 case Intrinsic::riscv_sm3p0:
11365 case Intrinsic::riscv_sm3p1: {
11366 unsigned Opc;
11367 switch (IntNo) {
11368 case Intrinsic::riscv_orc_b: Opc = RISCVISD::ORC_B; break;
11369 case Intrinsic::riscv_brev8: Opc = RISCVISD::BREV8; break;
11370 case Intrinsic::riscv_sha256sig0: Opc = RISCVISD::SHA256SIG0; break;
11371 case Intrinsic::riscv_sha256sig1: Opc = RISCVISD::SHA256SIG1; break;
11372 case Intrinsic::riscv_sha256sum0: Opc = RISCVISD::SHA256SUM0; break;
11373 case Intrinsic::riscv_sha256sum1: Opc = RISCVISD::SHA256SUM1; break;
11374 case Intrinsic::riscv_sm3p0: Opc = RISCVISD::SM3P0; break;
11375 case Intrinsic::riscv_sm3p1: Opc = RISCVISD::SM3P1; break;
11376 }
11377
11378 return DAG.getNode(Opcode: Opc, DL, VT: XLenVT, Operand: Op.getOperand(i: 1));
11379 }
11380 case Intrinsic::riscv_sm4ks:
11381 case Intrinsic::riscv_sm4ed: {
11382 unsigned Opc =
11383 IntNo == Intrinsic::riscv_sm4ks ? RISCVISD::SM4KS : RISCVISD::SM4ED;
11384
11385 return DAG.getNode(Opcode: Opc, DL, VT: XLenVT, N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2),
11386 N3: Op.getOperand(i: 3));
11387 }
11388 case Intrinsic::riscv_zip:
11389 case Intrinsic::riscv_unzip: {
11390 unsigned Opc =
11391 IntNo == Intrinsic::riscv_zip ? RISCVISD::ZIP : RISCVISD::UNZIP;
11392 return DAG.getNode(Opcode: Opc, DL, VT: XLenVT, Operand: Op.getOperand(i: 1));
11393 }
11394 case Intrinsic::riscv_mopr:
11395 return DAG.getNode(Opcode: RISCVISD::MOP_R, DL, VT: XLenVT, N1: Op.getOperand(i: 1),
11396 N2: Op.getOperand(i: 2));
11397
11398 case Intrinsic::riscv_moprr: {
11399 return DAG.getNode(Opcode: RISCVISD::MOP_RR, DL, VT: XLenVT, N1: Op.getOperand(i: 1),
11400 N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3));
11401 }
11402 case Intrinsic::riscv_clmulh:
11403 case Intrinsic::riscv_clmulr: {
11404 unsigned Opc = IntNo == Intrinsic::riscv_clmulh ? ISD::CLMULH : ISD::CLMULR;
11405 return DAG.getNode(Opcode: Opc, DL, VT: XLenVT, N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2));
11406 }
11407 case Intrinsic::experimental_get_vector_length:
11408 return lowerGetVectorLength(N: Op.getNode(), DAG, Subtarget);
11409 case Intrinsic::experimental_cttz_elts:
11410 return lowerCttzElts(N: Op.getNode(), DAG, Subtarget);
11411 case Intrinsic::riscv_vmv_x_s: {
11412 SDValue Res = DAG.getNode(Opcode: RISCVISD::VMV_X_S, DL, VT: XLenVT, Operand: Op.getOperand(i: 1));
11413 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: Op.getValueType(), Operand: Res);
11414 }
11415 case Intrinsic::riscv_vfmv_f_s:
11416 return DAG.getExtractVectorElt(DL, VT: Op.getValueType(), Vec: Op.getOperand(i: 1), Idx: 0);
11417 case Intrinsic::riscv_vmv_v_x:
11418 return lowerScalarSplat(Passthru: Op.getOperand(i: 1), Scalar: Op.getOperand(i: 2),
11419 VL: Op.getOperand(i: 3), VT: Op.getSimpleValueType(), DL, DAG,
11420 Subtarget);
11421 case Intrinsic::riscv_vfmv_v_f:
11422 return DAG.getNode(Opcode: RISCVISD::VFMV_V_F_VL, DL, VT: Op.getValueType(),
11423 N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3));
11424 case Intrinsic::riscv_vmv_s_x: {
11425 SDValue Scalar = Op.getOperand(i: 2);
11426
11427 if (Scalar.getValueType().bitsLE(VT: XLenVT)) {
11428 Scalar = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: XLenVT, Operand: Scalar);
11429 return DAG.getNode(Opcode: RISCVISD::VMV_S_X_VL, DL, VT: Op.getValueType(),
11430 N1: Op.getOperand(i: 1), N2: Scalar, N3: Op.getOperand(i: 3));
11431 }
11432
11433 assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
11434
11435 // This is an i64 value that lives in two scalar registers. We have to
11436    // insert this in a convoluted way. First we build a vXi64 splat containing
11437 // the two values that we assemble using some bit math. Next we'll use
11438 // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
11439 // to merge element 0 from our splat into the source vector.
11440 // FIXME: This is probably not the best way to do this, but it is
11441 // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
11442 // point.
11443 // sw lo, (a0)
11444 // sw hi, 4(a0)
11445 // vlse vX, (a0)
11446 //
11447 // vid.v vVid
11448 // vmseq.vx mMask, vVid, 0
11449 // vmerge.vvm vDest, vSrc, vVal, mMask
11450 MVT VT = Op.getSimpleValueType();
11451 SDValue Vec = Op.getOperand(i: 1);
11452 SDValue VL = getVLOperand(Op);
11453
11454 SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Passthru: SDValue(), Scalar, VL, DAG);
11455 if (Op.getOperand(i: 1).isUndef())
11456 return SplattedVal;
11457 SDValue SplattedIdx =
11458 DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT, N1: DAG.getUNDEF(VT),
11459 N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32), N3: VL);
11460
11461 MVT MaskVT = getMaskTypeFor(VecVT: VT);
11462 SDValue Mask = getAllOnesMask(VecVT: VT, VL, DL, DAG);
11463 SDValue VID = DAG.getNode(Opcode: RISCVISD::VID_VL, DL, VT, N1: Mask, N2: VL);
11464 SDValue SelectCond =
11465 DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: MaskVT,
11466 Ops: {VID, SplattedIdx, DAG.getCondCode(Cond: ISD::SETEQ),
11467 DAG.getUNDEF(VT: MaskVT), Mask, VL});
11468 return DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT, N1: SelectCond, N2: SplattedVal,
11469 N3: Vec, N4: DAG.getUNDEF(VT), N5: VL);
11470 }
11471 case Intrinsic::riscv_vfmv_s_f:
11472 return DAG.getNode(Opcode: RISCVISD::VFMV_S_F_VL, DL, VT: Op.getValueType(),
11473 N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3));
11474 // EGS * EEW >= 128 bits
11475 case Intrinsic::riscv_vaesdf_vv:
11476 case Intrinsic::riscv_vaesdf_vs:
11477 case Intrinsic::riscv_vaesdm_vv:
11478 case Intrinsic::riscv_vaesdm_vs:
11479 case Intrinsic::riscv_vaesef_vv:
11480 case Intrinsic::riscv_vaesef_vs:
11481 case Intrinsic::riscv_vaesem_vv:
11482 case Intrinsic::riscv_vaesem_vs:
11483 case Intrinsic::riscv_vaeskf1:
11484 case Intrinsic::riscv_vaeskf2:
11485 case Intrinsic::riscv_vaesz_vs:
11486 case Intrinsic::riscv_vsm4k:
11487 case Intrinsic::riscv_vsm4r_vv:
11488 case Intrinsic::riscv_vsm4r_vs: {
11489 if (!isValidEGW(EGS: 4, VT: Op.getSimpleValueType(), Subtarget) ||
11490 !isValidEGW(EGS: 4, VT: Op->getOperand(Num: 1).getSimpleValueType(), Subtarget) ||
11491 !isValidEGW(EGS: 4, VT: Op->getOperand(Num: 2).getSimpleValueType(), Subtarget))
11492 reportFatalUsageError(reason: "EGW should be greater than or equal to 4 * SEW.");
11493 return Op;
11494 }
11495 // EGS * EEW >= 256 bits
11496 case Intrinsic::riscv_vsm3c:
11497 case Intrinsic::riscv_vsm3me: {
11498 if (!isValidEGW(EGS: 8, VT: Op.getSimpleValueType(), Subtarget) ||
11499 !isValidEGW(EGS: 8, VT: Op->getOperand(Num: 1).getSimpleValueType(), Subtarget))
11500 reportFatalUsageError(reason: "EGW should be greater than or equal to 8 * SEW.");
11501 return Op;
11502 }
11503 // zvknha(SEW=32)/zvknhb(SEW=[32|64])
11504 case Intrinsic::riscv_vsha2ch:
11505 case Intrinsic::riscv_vsha2cl:
11506 case Intrinsic::riscv_vsha2ms: {
11507 if (Op->getSimpleValueType(ResNo: 0).getScalarSizeInBits() == 64 &&
11508 !Subtarget.hasStdExtZvknhb())
11509 reportFatalUsageError(reason: "SEW=64 needs Zvknhb to be enabled.");
11510 if (!isValidEGW(EGS: 4, VT: Op.getSimpleValueType(), Subtarget) ||
11511 !isValidEGW(EGS: 4, VT: Op->getOperand(Num: 1).getSimpleValueType(), Subtarget) ||
11512 !isValidEGW(EGS: 4, VT: Op->getOperand(Num: 2).getSimpleValueType(), Subtarget))
11513 reportFatalUsageError(reason: "EGW should be greater than or equal to 4 * SEW.");
11514 return Op;
11515 }
11516 case Intrinsic::riscv_sf_vc_v_x:
11517 case Intrinsic::riscv_sf_vc_v_i:
11518 case Intrinsic::riscv_sf_vc_v_xv:
11519 case Intrinsic::riscv_sf_vc_v_iv:
11520 case Intrinsic::riscv_sf_vc_v_vv:
11521 case Intrinsic::riscv_sf_vc_v_fv:
11522 case Intrinsic::riscv_sf_vc_v_xvv:
11523 case Intrinsic::riscv_sf_vc_v_ivv:
11524 case Intrinsic::riscv_sf_vc_v_vvv:
11525 case Intrinsic::riscv_sf_vc_v_fvv:
11526 case Intrinsic::riscv_sf_vc_v_xvw:
11527 case Intrinsic::riscv_sf_vc_v_ivw:
11528 case Intrinsic::riscv_sf_vc_v_vvw:
11529 case Intrinsic::riscv_sf_vc_v_fvw: {
11530 MVT VT = Op.getSimpleValueType();
11531
11532 SmallVector<SDValue> Operands{Op->op_values()};
11533 processVCIXOperands(OrigOp: Op, Operands, DAG);
11534
11535 MVT RetVT = VT;
11536 if (VT.isFixedLengthVector())
11537 RetVT = getContainerForFixedLengthVector(VT);
11538 else if (VT.isFloatingPoint())
11539 RetVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: VT.getScalarSizeInBits()),
11540 EC: VT.getVectorElementCount());
11541
11542 SDValue NewNode = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: RetVT, Ops: Operands);
11543
11544 if (VT.isFixedLengthVector())
11545 NewNode = convertFromScalableVector(VT, V: NewNode, DAG, Subtarget);
11546 else if (VT.isFloatingPoint())
11547 NewNode = DAG.getBitcast(VT, V: NewNode);
11548
11549 if (Op == NewNode)
11550 break;
11551
11552 return NewNode;
11553 }
11554 }
11555
11556 return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
11557}
11558
11559static inline SDValue getVCIXISDNodeWCHAIN(SDValue Op, SelectionDAG &DAG,
11560 unsigned Type) {
11561 SDLoc DL(Op);
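  // Copy the operands but drop the intrinsic ID (operand 1); the VCIX opcode
  // passed in as Type already identifies the operation.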
11562 SmallVector<SDValue> Operands{Op->op_values()};
11563 Operands.erase(CI: Operands.begin() + 1);
11564
11565 const RISCVSubtarget &Subtarget =
11566 DAG.getMachineFunction().getSubtarget<RISCVSubtarget>();
11567 MVT VT = Op.getSimpleValueType();
11568 MVT RetVT = VT;
11569 MVT FloatVT = VT;
11570
11571 if (VT.isFloatingPoint()) {
11572 RetVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: VT.getScalarSizeInBits()),
11573 EC: VT.getVectorElementCount());
11574 FloatVT = RetVT;
11575 }
11576 if (VT.isFixedLengthVector())
11577 RetVT = getContainerForFixedLengthVector(TLI: DAG.getTargetLoweringInfo(), VT: RetVT,
11578 Subtarget);
11579
11580 processVCIXOperands(OrigOp: Op, Operands, DAG);
11581
11582 SDVTList VTs = DAG.getVTList(VTs: {RetVT, MVT::Other});
11583 SDValue NewNode = DAG.getNode(Opcode: Type, DL, VTList: VTs, Ops: Operands);
11584 SDValue Chain = NewNode.getValue(R: 1);
11585
11586 if (VT.isFixedLengthVector())
11587 NewNode = convertFromScalableVector(VT: FloatVT, V: NewNode, DAG, Subtarget);
11588 if (VT.isFloatingPoint())
11589 NewNode = DAG.getBitcast(VT, V: NewNode);
11590
11591 NewNode = DAG.getMergeValues(Ops: {NewNode, Chain}, dl: DL);
11592
11593 return NewNode;
11594}
11595
11596static inline SDValue getVCIXISDNodeVOID(SDValue Op, SelectionDAG &DAG,
11597 unsigned Type) {
11598 SmallVector<SDValue> Operands{Op->op_values()};
11599 Operands.erase(CI: Operands.begin() + 1);
11600 processVCIXOperands(OrigOp: Op, Operands, DAG);
11601
11602 return DAG.getNode(Opcode: Type, DL: SDLoc(Op), VT: Op.getValueType(), Ops: Operands);
11603}
11604
11605static SDValue
11606lowerFixedVectorSegLoadIntrinsics(unsigned IntNo, SDValue Op,
11607 const RISCVSubtarget &Subtarget,
11608 SelectionDAG &DAG) {
11609 bool IsStrided;
11610 switch (IntNo) {
11611 case Intrinsic::riscv_seg2_load_mask:
11612 case Intrinsic::riscv_seg3_load_mask:
11613 case Intrinsic::riscv_seg4_load_mask:
11614 case Intrinsic::riscv_seg5_load_mask:
11615 case Intrinsic::riscv_seg6_load_mask:
11616 case Intrinsic::riscv_seg7_load_mask:
11617 case Intrinsic::riscv_seg8_load_mask:
11618 IsStrided = false;
11619 break;
11620 case Intrinsic::riscv_sseg2_load_mask:
11621 case Intrinsic::riscv_sseg3_load_mask:
11622 case Intrinsic::riscv_sseg4_load_mask:
11623 case Intrinsic::riscv_sseg5_load_mask:
11624 case Intrinsic::riscv_sseg6_load_mask:
11625 case Intrinsic::riscv_sseg7_load_mask:
11626 case Intrinsic::riscv_sseg8_load_mask:
11627 IsStrided = true;
11628 break;
11629 default:
11630 llvm_unreachable("unexpected intrinsic ID");
11631  }
11632
11633 static const Intrinsic::ID VlsegInts[7] = {
11634 Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
11635 Intrinsic::riscv_vlseg4_mask, Intrinsic::riscv_vlseg5_mask,
11636 Intrinsic::riscv_vlseg6_mask, Intrinsic::riscv_vlseg7_mask,
11637 Intrinsic::riscv_vlseg8_mask};
11638 static const Intrinsic::ID VlssegInts[7] = {
11639 Intrinsic::riscv_vlsseg2_mask, Intrinsic::riscv_vlsseg3_mask,
11640 Intrinsic::riscv_vlsseg4_mask, Intrinsic::riscv_vlsseg5_mask,
11641 Intrinsic::riscv_vlsseg6_mask, Intrinsic::riscv_vlsseg7_mask,
11642 Intrinsic::riscv_vlsseg8_mask};
11643
11644 SDLoc DL(Op);
11645 unsigned NF = Op->getNumValues() - 1;
11646 assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
11647 MVT XLenVT = Subtarget.getXLenVT();
11648 MVT VT = Op->getSimpleValueType(ResNo: 0);
11649 MVT ContainerVT = ::getContainerForFixedLengthVector(DAG, VT, Subtarget);
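  // The underlying vlseg/vlsseg intrinsics return a RISC-V vector tuple; build
  // the tuple type whose known-minimum size covers NF fields of ContainerVT.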
11650 unsigned Sz = NF * ContainerVT.getVectorMinNumElements() *
11651 ContainerVT.getScalarSizeInBits();
11652 EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NFields: NF);
11653
11654 // Operands: (chain, int_id, pointer, mask, vl) or
11655  // (chain, int_id, pointer, stride, mask, vl)
11656 SDValue VL = Op.getOperand(i: Op.getNumOperands() - 1);
11657 SDValue Mask = Op.getOperand(i: Op.getNumOperands() - 2);
11658 MVT MaskVT = Mask.getSimpleValueType();
11659 MVT MaskContainerVT =
11660 ::getContainerForFixedLengthVector(DAG, VT: MaskVT, Subtarget);
11661 Mask = convertToScalableVector(VT: MaskContainerVT, V: Mask, DAG, Subtarget);
11662
11663 SDValue IntID = DAG.getTargetConstant(
11664 Val: IsStrided ? VlssegInts[NF - 2] : VlsegInts[NF - 2], DL, VT: XLenVT);
11665 auto *Load = cast<MemIntrinsicSDNode>(Val&: Op);
11666
11667 SDVTList VTs = DAG.getVTList(VTs: {VecTupTy, MVT::Other});
11668 SmallVector<SDValue, 9> Ops = {
11669 Load->getChain(),
11670 IntID,
11671 DAG.getUNDEF(VT: VecTupTy),
11672 Op.getOperand(i: 2),
11673 Mask,
11674 VL,
11675 DAG.getTargetConstant(
11676 Val: RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC, DL, VT: XLenVT),
11677 DAG.getTargetConstant(Val: Log2_64(Value: VT.getScalarSizeInBits()), DL, VT: XLenVT)};
11678 // Insert the stride operand.
11679 if (IsStrided)
11680 Ops.insert(I: std::next(x: Ops.begin(), n: 4), Elt: Op.getOperand(i: 3));
11681
11682 SDValue Result =
11683 DAG.getMemIntrinsicNode(Opcode: ISD::INTRINSIC_W_CHAIN, dl: DL, VTList: VTs, Ops,
11684 MemVT: Load->getMemoryVT(), MMO: Load->getMemOperand());
11685 SmallVector<SDValue, 9> Results;
11686 for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++) {
11687 SDValue SubVec = DAG.getNode(Opcode: RISCVISD::TUPLE_EXTRACT, DL, VT: ContainerVT,
11688 N1: Result.getValue(R: 0),
11689 N2: DAG.getTargetConstant(Val: RetIdx, DL, VT: MVT::i32));
11690 Results.push_back(Elt: convertFromScalableVector(VT, V: SubVec, DAG, Subtarget));
11691 }
11692 Results.push_back(Elt: Result.getValue(R: 1));
11693 return DAG.getMergeValues(Ops: Results, dl: DL);
11694}
11695
11696SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
11697 SelectionDAG &DAG) const {
11698 unsigned IntNo = Op.getConstantOperandVal(i: 1);
11699 switch (IntNo) {
11700 default:
11701 break;
11702 case Intrinsic::riscv_seg2_load_mask:
11703 case Intrinsic::riscv_seg3_load_mask:
11704 case Intrinsic::riscv_seg4_load_mask:
11705 case Intrinsic::riscv_seg5_load_mask:
11706 case Intrinsic::riscv_seg6_load_mask:
11707 case Intrinsic::riscv_seg7_load_mask:
11708 case Intrinsic::riscv_seg8_load_mask:
11709 case Intrinsic::riscv_sseg2_load_mask:
11710 case Intrinsic::riscv_sseg3_load_mask:
11711 case Intrinsic::riscv_sseg4_load_mask:
11712 case Intrinsic::riscv_sseg5_load_mask:
11713 case Intrinsic::riscv_sseg6_load_mask:
11714 case Intrinsic::riscv_sseg7_load_mask:
11715 case Intrinsic::riscv_sseg8_load_mask:
11716 return lowerFixedVectorSegLoadIntrinsics(IntNo, Op, Subtarget, DAG);
11717
11718 case Intrinsic::riscv_sf_vc_v_x_se:
11719 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_X_SE);
11720 case Intrinsic::riscv_sf_vc_v_i_se:
11721 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_I_SE);
11722 case Intrinsic::riscv_sf_vc_v_xv_se:
11723 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_XV_SE);
11724 case Intrinsic::riscv_sf_vc_v_iv_se:
11725 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_IV_SE);
11726 case Intrinsic::riscv_sf_vc_v_vv_se:
11727 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_VV_SE);
11728 case Intrinsic::riscv_sf_vc_v_fv_se:
11729 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_FV_SE);
11730 case Intrinsic::riscv_sf_vc_v_xvv_se:
11731 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_XVV_SE);
11732 case Intrinsic::riscv_sf_vc_v_ivv_se:
11733 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_IVV_SE);
11734 case Intrinsic::riscv_sf_vc_v_vvv_se:
11735 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_VVV_SE);
11736 case Intrinsic::riscv_sf_vc_v_fvv_se:
11737 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_FVV_SE);
11738 case Intrinsic::riscv_sf_vc_v_xvw_se:
11739 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_XVW_SE);
11740 case Intrinsic::riscv_sf_vc_v_ivw_se:
11741 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_IVW_SE);
11742 case Intrinsic::riscv_sf_vc_v_vvw_se:
11743 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_VVW_SE);
11744 case Intrinsic::riscv_sf_vc_v_fvw_se:
11745 return getVCIXISDNodeWCHAIN(Op, DAG, Type: RISCVISD::SF_VC_V_FVW_SE);
11746 }
11747
11748 return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
11749}
11750
11751static SDValue
11752lowerFixedVectorSegStoreIntrinsics(unsigned IntNo, SDValue Op,
11753 const RISCVSubtarget &Subtarget,
11754 SelectionDAG &DAG) {
11755 bool IsStrided;
11756 switch (IntNo) {
11757 case Intrinsic::riscv_seg2_store_mask:
11758 case Intrinsic::riscv_seg3_store_mask:
11759 case Intrinsic::riscv_seg4_store_mask:
11760 case Intrinsic::riscv_seg5_store_mask:
11761 case Intrinsic::riscv_seg6_store_mask:
11762 case Intrinsic::riscv_seg7_store_mask:
11763 case Intrinsic::riscv_seg8_store_mask:
11764 IsStrided = false;
11765 break;
11766 case Intrinsic::riscv_sseg2_store_mask:
11767 case Intrinsic::riscv_sseg3_store_mask:
11768 case Intrinsic::riscv_sseg4_store_mask:
11769 case Intrinsic::riscv_sseg5_store_mask:
11770 case Intrinsic::riscv_sseg6_store_mask:
11771 case Intrinsic::riscv_sseg7_store_mask:
11772 case Intrinsic::riscv_sseg8_store_mask:
11773 IsStrided = true;
11774 break;
11775 default:
11776 llvm_unreachable("unexpected intrinsic ID");
11777 }
11778
11779 SDLoc DL(Op);
11780 static const Intrinsic::ID VssegInts[] = {
11781 Intrinsic::riscv_vsseg2_mask, Intrinsic::riscv_vsseg3_mask,
11782 Intrinsic::riscv_vsseg4_mask, Intrinsic::riscv_vsseg5_mask,
11783 Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
11784 Intrinsic::riscv_vsseg8_mask};
11785 static const Intrinsic::ID VsssegInts[] = {
11786 Intrinsic::riscv_vssseg2_mask, Intrinsic::riscv_vssseg3_mask,
11787 Intrinsic::riscv_vssseg4_mask, Intrinsic::riscv_vssseg5_mask,
11788 Intrinsic::riscv_vssseg6_mask, Intrinsic::riscv_vssseg7_mask,
11789 Intrinsic::riscv_vssseg8_mask};
11790
11791 // Operands: (chain, int_id, vec*, ptr, mask, vl) or
11792 // (chain, int_id, vec*, ptr, stride, mask, vl)
11793 unsigned NF = Op->getNumOperands() - (IsStrided ? 6 : 5);
11794 assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
11795 MVT XLenVT = Subtarget.getXLenVT();
11796 MVT VT = Op->getOperand(Num: 2).getSimpleValueType();
11797 MVT ContainerVT = ::getContainerForFixedLengthVector(DAG, VT, Subtarget);
11798 unsigned Sz = NF * ContainerVT.getVectorMinNumElements() *
11799 ContainerVT.getScalarSizeInBits();
11800 EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NFields: NF);
11801
11802 SDValue VL = Op.getOperand(i: Op.getNumOperands() - 1);
11803 SDValue Mask = Op.getOperand(i: Op.getNumOperands() - 2);
11804 MVT MaskVT = Mask.getSimpleValueType();
11805 MVT MaskContainerVT =
11806 ::getContainerForFixedLengthVector(DAG, VT: MaskVT, Subtarget);
11807 Mask = convertToScalableVector(VT: MaskContainerVT, V: Mask, DAG, Subtarget);
11808
11809 SDValue IntID = DAG.getTargetConstant(
11810 Val: IsStrided ? VsssegInts[NF - 2] : VssegInts[NF - 2], DL, VT: XLenVT);
11811 SDValue Ptr = Op->getOperand(Num: NF + 2);
11812
11813 auto *FixedIntrinsic = cast<MemIntrinsicSDNode>(Val&: Op);
11814
11815 SDValue StoredVal = DAG.getUNDEF(VT: VecTupTy);
11816 for (unsigned i = 0; i < NF; i++)
11817 StoredVal = DAG.getNode(
11818 Opcode: RISCVISD::TUPLE_INSERT, DL, VT: VecTupTy, N1: StoredVal,
11819 N2: convertToScalableVector(VT: ContainerVT, V: FixedIntrinsic->getOperand(Num: 2 + i),
11820 DAG, Subtarget),
11821 N3: DAG.getTargetConstant(Val: i, DL, VT: MVT::i32));
11822
11823 SmallVector<SDValue, 10> Ops = {
11824 FixedIntrinsic->getChain(),
11825 IntID,
11826 StoredVal,
11827 Ptr,
11828 Mask,
11829 VL,
11830 DAG.getTargetConstant(Val: Log2_64(Value: VT.getScalarSizeInBits()), DL, VT: XLenVT)};
11831 // Insert the stride operand.
11832 if (IsStrided)
11833 Ops.insert(I: std::next(x: Ops.begin(), n: 4),
11834 Elt: Op.getOperand(i: Op.getNumOperands() - 3));
11835
11836 return DAG.getMemIntrinsicNode(
11837 Opcode: ISD::INTRINSIC_VOID, dl: DL, VTList: DAG.getVTList(VT: MVT::Other), Ops,
11838 MemVT: FixedIntrinsic->getMemoryVT(), MMO: FixedIntrinsic->getMemOperand());
11839}
11840
11841SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
11842 SelectionDAG &DAG) const {
11843 unsigned IntNo = Op.getConstantOperandVal(i: 1);
11844 switch (IntNo) {
11845 default:
11846 break;
11847 case Intrinsic::riscv_seg2_store_mask:
11848 case Intrinsic::riscv_seg3_store_mask:
11849 case Intrinsic::riscv_seg4_store_mask:
11850 case Intrinsic::riscv_seg5_store_mask:
11851 case Intrinsic::riscv_seg6_store_mask:
11852 case Intrinsic::riscv_seg7_store_mask:
11853 case Intrinsic::riscv_seg8_store_mask:
11854 case Intrinsic::riscv_sseg2_store_mask:
11855 case Intrinsic::riscv_sseg3_store_mask:
11856 case Intrinsic::riscv_sseg4_store_mask:
11857 case Intrinsic::riscv_sseg5_store_mask:
11858 case Intrinsic::riscv_sseg6_store_mask:
11859 case Intrinsic::riscv_sseg7_store_mask:
11860 case Intrinsic::riscv_sseg8_store_mask:
11861 return lowerFixedVectorSegStoreIntrinsics(IntNo, Op, Subtarget, DAG);
11862
11863 case Intrinsic::riscv_sf_vc_xv_se:
11864 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_XV_SE);
11865 case Intrinsic::riscv_sf_vc_iv_se:
11866 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_IV_SE);
11867 case Intrinsic::riscv_sf_vc_vv_se:
11868 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_VV_SE);
11869 case Intrinsic::riscv_sf_vc_fv_se:
11870 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_FV_SE);
11871 case Intrinsic::riscv_sf_vc_xvv_se:
11872 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_XVV_SE);
11873 case Intrinsic::riscv_sf_vc_ivv_se:
11874 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_IVV_SE);
11875 case Intrinsic::riscv_sf_vc_vvv_se:
11876 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_VVV_SE);
11877 case Intrinsic::riscv_sf_vc_fvv_se:
11878 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_FVV_SE);
11879 case Intrinsic::riscv_sf_vc_xvw_se:
11880 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_XVW_SE);
11881 case Intrinsic::riscv_sf_vc_ivw_se:
11882 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_IVW_SE);
11883 case Intrinsic::riscv_sf_vc_vvw_se:
11884 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_VVW_SE);
11885 case Intrinsic::riscv_sf_vc_fvw_se:
11886 return getVCIXISDNodeVOID(Op, DAG, Type: RISCVISD::SF_VC_FVW_SE);
11887 }
11888
11889 return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
11890}
11891
11892static unsigned getRVVReductionOp(unsigned ISDOpcode) {
11893 switch (ISDOpcode) {
11894 default:
11895 llvm_unreachable("Unhandled reduction");
11896 case ISD::VP_REDUCE_ADD:
11897 case ISD::VECREDUCE_ADD:
11898 return RISCVISD::VECREDUCE_ADD_VL;
11899 case ISD::VP_REDUCE_UMAX:
11900 case ISD::VECREDUCE_UMAX:
11901 return RISCVISD::VECREDUCE_UMAX_VL;
11902 case ISD::VP_REDUCE_SMAX:
11903 case ISD::VECREDUCE_SMAX:
11904 return RISCVISD::VECREDUCE_SMAX_VL;
11905 case ISD::VP_REDUCE_UMIN:
11906 case ISD::VECREDUCE_UMIN:
11907 return RISCVISD::VECREDUCE_UMIN_VL;
11908 case ISD::VP_REDUCE_SMIN:
11909 case ISD::VECREDUCE_SMIN:
11910 return RISCVISD::VECREDUCE_SMIN_VL;
11911 case ISD::VP_REDUCE_AND:
11912 case ISD::VECREDUCE_AND:
11913 return RISCVISD::VECREDUCE_AND_VL;
11914 case ISD::VP_REDUCE_OR:
11915 case ISD::VECREDUCE_OR:
11916 return RISCVISD::VECREDUCE_OR_VL;
11917 case ISD::VP_REDUCE_XOR:
11918 case ISD::VECREDUCE_XOR:
11919 return RISCVISD::VECREDUCE_XOR_VL;
11920 case ISD::VP_REDUCE_FADD:
11921 return RISCVISD::VECREDUCE_FADD_VL;
11922 case ISD::VP_REDUCE_SEQ_FADD:
11923 return RISCVISD::VECREDUCE_SEQ_FADD_VL;
11924 case ISD::VP_REDUCE_FMAX:
11925 case ISD::VP_REDUCE_FMAXIMUM:
11926 return RISCVISD::VECREDUCE_FMAX_VL;
11927 case ISD::VP_REDUCE_FMIN:
11928 case ISD::VP_REDUCE_FMINIMUM:
11929 return RISCVISD::VECREDUCE_FMIN_VL;
11930 }
11932 }
11933
11934SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
11935 SelectionDAG &DAG,
11936 bool IsVP) const {
11937 SDLoc DL(Op);
11938 SDValue Vec = Op.getOperand(i: IsVP ? 1 : 0);
11939 MVT VecVT = Vec.getSimpleValueType();
11940 assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
11941 Op.getOpcode() == ISD::VECREDUCE_OR ||
11942 Op.getOpcode() == ISD::VECREDUCE_XOR ||
11943 Op.getOpcode() == ISD::VP_REDUCE_AND ||
11944 Op.getOpcode() == ISD::VP_REDUCE_OR ||
11945 Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
11946 "Unexpected reduction lowering");
11947
11948 MVT XLenVT = Subtarget.getXLenVT();
11949
11950 MVT ContainerVT = VecVT;
11951 if (VecVT.isFixedLengthVector()) {
11952 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
11953 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
11954 }
11955
11956 SDValue Mask, VL;
11957 if (IsVP) {
11958 Mask = Op.getOperand(i: 2);
11959 VL = Op.getOperand(i: 3);
11960 } else {
11961 std::tie(args&: Mask, args&: VL) =
11962 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
11963 }
11964
11965 ISD::CondCode CC;
11966 switch (Op.getOpcode()) {
11967 default:
11968 llvm_unreachable("Unhandled reduction");
11969 case ISD::VECREDUCE_AND:
11970 case ISD::VP_REDUCE_AND: {
11971 // vcpop ~x == 0
11972 SDValue TrueMask = DAG.getNode(Opcode: RISCVISD::VMSET_VL, DL, VT: ContainerVT, Operand: VL);
11973 if (IsVP || VecVT.isFixedLengthVector())
11974 Vec = DAG.getNode(Opcode: RISCVISD::VMXOR_VL, DL, VT: ContainerVT, N1: Vec, N2: TrueMask, N3: VL);
11975 else
11976 Vec = DAG.getNode(Opcode: ISD::XOR, DL, VT: ContainerVT, N1: Vec, N2: TrueMask);
11977 Vec = DAG.getNode(Opcode: RISCVISD::VCPOP_VL, DL, VT: XLenVT, N1: Vec, N2: Mask, N3: VL);
11978 CC = ISD::SETEQ;
11979 break;
11980 }
11981 case ISD::VECREDUCE_OR:
11982 case ISD::VP_REDUCE_OR:
11983 // vcpop x != 0
11984 Vec = DAG.getNode(Opcode: RISCVISD::VCPOP_VL, DL, VT: XLenVT, N1: Vec, N2: Mask, N3: VL);
11985 CC = ISD::SETNE;
11986 break;
11987 case ISD::VECREDUCE_XOR:
11988 case ISD::VP_REDUCE_XOR: {
11989 // ((vcpop x) & 1) != 0
11990 SDValue One = DAG.getConstant(Val: 1, DL, VT: XLenVT);
11991 Vec = DAG.getNode(Opcode: RISCVISD::VCPOP_VL, DL, VT: XLenVT, N1: Vec, N2: Mask, N3: VL);
11992 Vec = DAG.getNode(Opcode: ISD::AND, DL, VT: XLenVT, N1: Vec, N2: One);
11993 CC = ISD::SETNE;
11994 break;
11995 }
11996 }
11997
11998 SDValue Zero = DAG.getConstant(Val: 0, DL, VT: XLenVT);
11999 SDValue SetCC = DAG.getSetCC(DL, VT: XLenVT, LHS: Vec, RHS: Zero, Cond: CC);
12000 SetCC = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: Op.getValueType(), Operand: SetCC);
12001
12002 if (!IsVP)
12003 return SetCC;
12004
12005 // Now include the start value in the operation.
12006 // Note that we must return the start value when no elements are operated
12007 // upon. The vcpop instructions we've emitted in each case above will return
12008 // 0 for an inactive vector, and so we've already received the neutral value:
12009 // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
12010 // can simply include the start value.
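// For example (illustrative): for vp.reduce.and(start, v, mask, evl=0), the
// vcpop above sees no active elements and returns 0, the SETEQ yields 1 (the
// AND identity), and the final AND with the start value returns start itself.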
12011 unsigned BaseOpc = ISD::getVecReduceBaseOpcode(VecReduceOpcode: Op.getOpcode());
12012 return DAG.getNode(Opcode: BaseOpc, DL, VT: Op.getValueType(), N1: SetCC, N2: Op.getOperand(i: 0));
12013}
12014
12015static bool isNonZeroAVL(SDValue AVL) {
12016 auto *RegisterAVL = dyn_cast<RegisterSDNode>(Val&: AVL);
12017 auto *ImmAVL = dyn_cast<ConstantSDNode>(Val&: AVL);
12018 return (RegisterAVL && RegisterAVL->getReg() == RISCV::X0) ||
12019 (ImmAVL && ImmAVL->getZExtValue() >= 1);
12020}
12021
12022/// Helper to lower a reduction sequence of the form:
12023/// scalar = reduce_op vec, scalar_start
12024static SDValue lowerReductionSeq(unsigned RVVOpcode, MVT ResVT,
12025 SDValue StartValue, SDValue Vec, SDValue Mask,
12026 SDValue VL, const SDLoc &DL, SelectionDAG &DAG,
12027 const RISCVSubtarget &Subtarget) {
12028 const MVT VecVT = Vec.getSimpleValueType();
12029 const MVT M1VT = RISCVTargetLowering::getM1VT(VT: VecVT);
12030 const MVT XLenVT = Subtarget.getXLenVT();
12031 const bool NonZeroAVL = isNonZeroAVL(AVL: VL);
12032
12033 // The reduction needs an LMUL1 input; do the splat at either LMUL1
12034 // or the original VT if fractional.
12035 auto InnerVT = VecVT.bitsLE(VT: M1VT) ? VecVT : M1VT;
12036 // We reuse the VL of the reduction to reduce vsetvli toggles if we can
12037 // prove it is non-zero. For the AVL=0 case, we need the scalar to
12038 // be the result of the reduction operation.
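// For example (illustrative): if VL might be 0, the splat below uses VL=1 and
// the start value also seeds the reduction's passthru, so a zero-length
// reduction still yields the start value in element 0.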
12039 auto InnerVL = NonZeroAVL ? VL : DAG.getConstant(Val: 1, DL, VT: XLenVT);
12040 SDValue InitialValue =
12041 lowerScalarInsert(Scalar: StartValue, VL: InnerVL, VT: InnerVT, DL, DAG, Subtarget);
12042 if (M1VT != InnerVT)
12043 InitialValue =
12044 DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT: M1VT), SubVec: InitialValue, Idx: 0);
12045 SDValue PassThru = NonZeroAVL ? DAG.getUNDEF(VT: M1VT) : InitialValue;
12046 SDValue Policy = DAG.getTargetConstant(Val: RISCVVType::TAIL_AGNOSTIC, DL, VT: XLenVT);
12047 SDValue Ops[] = {PassThru, Vec, InitialValue, Mask, VL, Policy};
12048 SDValue Reduction = DAG.getNode(Opcode: RVVOpcode, DL, VT: M1VT, Ops);
12049 return DAG.getExtractVectorElt(DL, VT: ResVT, Vec: Reduction, Idx: 0);
12050}
12051
12052SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
12053 SelectionDAG &DAG) const {
12054 SDLoc DL(Op);
12055 SDValue Vec = Op.getOperand(i: 0);
12056 EVT VecEVT = Vec.getValueType();
12057
12058 unsigned BaseOpc = ISD::getVecReduceBaseOpcode(VecReduceOpcode: Op.getOpcode());
12059
12060 // Due to ordering in legalize types we may have a vector type that needs to
12061 // be split. Do that manually so we can get down to a legal type.
12062 while (getTypeAction(Context&: *DAG.getContext(), VT: VecEVT) ==
12063 TargetLowering::TypeSplitVector) {
12064 auto [Lo, Hi] = DAG.SplitVector(N: Vec, DL);
12065 VecEVT = Lo.getValueType();
12066 Vec = DAG.getNode(Opcode: BaseOpc, DL, VT: VecEVT, N1: Lo, N2: Hi);
12067 }
12068
12069 // TODO: The type may need to be widened rather than split. Or widened before
12070 // it can be split.
12071 if (!isTypeLegal(VT: VecEVT))
12072 return SDValue();
12073
12074 MVT VecVT = VecEVT.getSimpleVT();
12075 MVT VecEltVT = VecVT.getVectorElementType();
12076 unsigned RVVOpcode = getRVVReductionOp(ISDOpcode: Op.getOpcode());
12077
12078 MVT ContainerVT = VecVT;
12079 if (VecVT.isFixedLengthVector()) {
12080 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
12081 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
12082 }
12083
12084 auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
12085
12086 SDValue StartV = DAG.getNeutralElement(Opcode: BaseOpc, DL, VT: VecEltVT, Flags: SDNodeFlags());
12087 switch (BaseOpc) {
12088 case ISD::AND:
12089 case ISD::OR:
12090 case ISD::UMAX:
12091 case ISD::UMIN:
12092 case ISD::SMAX:
12093 case ISD::SMIN:
12094 StartV = DAG.getExtractVectorElt(DL, VT: VecEltVT, Vec, Idx: 0);
12095 }
12096 return lowerReductionSeq(RVVOpcode, ResVT: Op.getSimpleValueType(), StartValue: StartV, Vec,
12097 Mask, VL, DL, DAG, Subtarget);
12098}
12099
12100// Given a reduction op, this function returns the matching reduction opcode,
12101// the vector SDValue and the scalar SDValue required to lower this to a
12102// RISCVISD node.
12103static std::tuple<unsigned, SDValue, SDValue>
12104getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT,
12105 const RISCVSubtarget &Subtarget) {
12106 SDLoc DL(Op);
12107 auto Flags = Op->getFlags();
12108 unsigned Opcode = Op.getOpcode();
12109 switch (Opcode) {
12110 default:
12111 llvm_unreachable("Unhandled reduction");
12112 case ISD::VECREDUCE_FADD: {
12113 // Use positive zero if we can. It is cheaper to materialize.
12114 SDValue Zero =
12115 DAG.getConstantFP(Val: Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, VT: EltVT);
12116 return std::make_tuple(args: RISCVISD::VECREDUCE_FADD_VL, args: Op.getOperand(i: 0), args&: Zero);
12117 }
12118 case ISD::VECREDUCE_SEQ_FADD:
12119 return std::make_tuple(args: RISCVISD::VECREDUCE_SEQ_FADD_VL, args: Op.getOperand(i: 1),
12120 args: Op.getOperand(i: 0));
12121 case ISD::VECREDUCE_FMINIMUM:
12122 case ISD::VECREDUCE_FMAXIMUM:
12123 case ISD::VECREDUCE_FMIN:
12124 case ISD::VECREDUCE_FMAX: {
12125 SDValue Front = DAG.getExtractVectorElt(DL, VT: EltVT, Vec: Op.getOperand(i: 0), Idx: 0);
12126 unsigned RVVOpc =
12127 (Opcode == ISD::VECREDUCE_FMIN || Opcode == ISD::VECREDUCE_FMINIMUM)
12128 ? RISCVISD::VECREDUCE_FMIN_VL
12129 : RISCVISD::VECREDUCE_FMAX_VL;
12130 return std::make_tuple(args&: RVVOpc, args: Op.getOperand(i: 0), args&: Front);
12131 }
12132 }
12133}
12134
12135SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
12136 SelectionDAG &DAG) const {
12137 SDLoc DL(Op);
12138 MVT VecEltVT = Op.getSimpleValueType();
12139
12140 unsigned RVVOpcode;
12141 SDValue VectorVal, ScalarVal;
12142 std::tie(args&: RVVOpcode, args&: VectorVal, args&: ScalarVal) =
12143 getRVVFPReductionOpAndOperands(Op, DAG, EltVT: VecEltVT, Subtarget);
12144 MVT VecVT = VectorVal.getSimpleValueType();
12145
12146 MVT ContainerVT = VecVT;
12147 if (VecVT.isFixedLengthVector()) {
12148 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
12149 VectorVal = convertToScalableVector(VT: ContainerVT, V: VectorVal, DAG, Subtarget);
12150 }
12151
12152 MVT ResVT = Op.getSimpleValueType();
12153 auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
12154 SDValue Res = lowerReductionSeq(RVVOpcode, ResVT, StartValue: ScalarVal, Vec: VectorVal, Mask,
12155 VL, DL, DAG, Subtarget);
12156 if (Op.getOpcode() != ISD::VECREDUCE_FMINIMUM &&
12157 Op.getOpcode() != ISD::VECREDUCE_FMAXIMUM)
12158 return Res;
12159
12160 if (Op->getFlags().hasNoNaNs())
12161 return Res;
12162
12163 // Force the output to NaN if any element is NaN.
12164 SDValue IsNan =
12165 DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: Mask.getValueType(),
12166 Ops: {VectorVal, VectorVal, DAG.getCondCode(Cond: ISD::SETNE),
12167 DAG.getUNDEF(VT: Mask.getValueType()), Mask, VL});
12168 MVT XLenVT = Subtarget.getXLenVT();
12169 SDValue CPop = DAG.getNode(Opcode: RISCVISD::VCPOP_VL, DL, VT: XLenVT, N1: IsNan, N2: Mask, N3: VL);
12170 SDValue NoNaNs = DAG.getSetCC(DL, VT: XLenVT, LHS: CPop,
12171 RHS: DAG.getConstant(Val: 0, DL, VT: XLenVT), Cond: ISD::SETEQ);
12172 return DAG.getSelect(
12173 DL, VT: ResVT, Cond: NoNaNs, LHS: Res,
12174 RHS: DAG.getConstantFP(Val: APFloat::getNaN(Sem: ResVT.getFltSemantics()), DL, VT: ResVT));
12175}
12176
12177SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
12178 SelectionDAG &DAG) const {
12179 SDLoc DL(Op);
12180 unsigned Opc = Op.getOpcode();
12181 SDValue Start = Op.getOperand(i: 0);
12182 SDValue Vec = Op.getOperand(i: 1);
12183 EVT VecEVT = Vec.getValueType();
12184 MVT XLenVT = Subtarget.getXLenVT();
12185
12186 // TODO: The type may need to be widened rather than split. Or widened before
12187 // it can be split.
12188 if (!isTypeLegal(VT: VecEVT))
12189 return SDValue();
12190
12191 MVT VecVT = VecEVT.getSimpleVT();
12192 unsigned RVVOpcode = getRVVReductionOp(ISDOpcode: Opc);
12193
12194 if (VecVT.isFixedLengthVector()) {
12195 auto ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
12196 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
12197 }
12198
12199 SDValue VL = Op.getOperand(i: 3);
12200 SDValue Mask = Op.getOperand(i: 2);
12201 SDValue Res =
12202 lowerReductionSeq(RVVOpcode, ResVT: Op.getSimpleValueType(), StartValue: Op.getOperand(i: 0),
12203 Vec, Mask, VL, DL, DAG, Subtarget);
12204 if ((Opc != ISD::VP_REDUCE_FMINIMUM && Opc != ISD::VP_REDUCE_FMAXIMUM) ||
12205 Op->getFlags().hasNoNaNs())
12206 return Res;
12207
12208 // Propagate NaNs.
12209 MVT PredVT = getMaskTypeFor(VecVT: Vec.getSimpleValueType());
12210 // Check if any of the elements in Vec is NaN.
12211 SDValue IsNaN = DAG.getNode(
12212 Opcode: RISCVISD::SETCC_VL, DL, VT: PredVT,
12213 Ops: {Vec, Vec, DAG.getCondCode(Cond: ISD::SETNE), DAG.getUNDEF(VT: PredVT), Mask, VL});
12214 SDValue VCPop = DAG.getNode(Opcode: RISCVISD::VCPOP_VL, DL, VT: XLenVT, N1: IsNaN, N2: Mask, N3: VL);
12215 // Check if the start value is NaN.
12216 SDValue StartIsNaN = DAG.getSetCC(DL, VT: XLenVT, LHS: Start, RHS: Start, Cond: ISD::SETUO);
12217 VCPop = DAG.getNode(Opcode: ISD::OR, DL, VT: XLenVT, N1: VCPop, N2: StartIsNaN);
12218 SDValue NoNaNs = DAG.getSetCC(DL, VT: XLenVT, LHS: VCPop,
12219 RHS: DAG.getConstant(Val: 0, DL, VT: XLenVT), Cond: ISD::SETEQ);
12220 MVT ResVT = Res.getSimpleValueType();
12221 return DAG.getSelect(
12222 DL, VT: ResVT, Cond: NoNaNs, LHS: Res,
12223 RHS: DAG.getConstantFP(Val: APFloat::getNaN(Sem: ResVT.getFltSemantics()), DL, VT: ResVT));
12224}
12225
12226SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
12227 SelectionDAG &DAG) const {
12228 SDValue Vec = Op.getOperand(i: 0);
12229 SDValue SubVec = Op.getOperand(i: 1);
12230 MVT VecVT = Vec.getSimpleValueType();
12231 MVT SubVecVT = SubVec.getSimpleValueType();
12232
12233 SDLoc DL(Op);
12234 MVT XLenVT = Subtarget.getXLenVT();
12235 unsigned OrigIdx = Op.getConstantOperandVal(i: 2);
12236 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
12237
12238 if (OrigIdx == 0 && Vec.isUndef())
12239 return Op;
12240
12241 // We don't have the ability to slide mask vectors up indexed by their i1
12242 // elements; the smallest we can do is i8. Often we are able to bitcast to
12243 // equivalent i8 vectors. Note that when inserting a fixed-length vector
12244 // into a scalable one, we might not necessarily have enough scalable
12245 // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
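// For example (illustrative): inserting nxv8i1 into nxv64i1 at index 16 can be
// re-expressed as inserting nxv1i8 into nxv8i8 at index 2.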
12246 if (SubVecVT.getVectorElementType() == MVT::i1) {
12247 if (VecVT.getVectorMinNumElements() >= 8 &&
12248 SubVecVT.getVectorMinNumElements() >= 8) {
12249 assert(OrigIdx % 8 == 0 && "Invalid index");
12250 assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
12251 SubVecVT.getVectorMinNumElements() % 8 == 0 &&
12252 "Unexpected mask vector lowering");
12253 OrigIdx /= 8;
12254 SubVecVT =
12255 MVT::getVectorVT(VT: MVT::i8, NumElements: SubVecVT.getVectorMinNumElements() / 8,
12256 IsScalable: SubVecVT.isScalableVector());
12257 VecVT = MVT::getVectorVT(VT: MVT::i8, NumElements: VecVT.getVectorMinNumElements() / 8,
12258 IsScalable: VecVT.isScalableVector());
12259 Vec = DAG.getBitcast(VT: VecVT, V: Vec);
12260 SubVec = DAG.getBitcast(VT: SubVecVT, V: SubVec);
12261 } else {
12262 // We can't slide this mask vector up indexed by its i1 elements.
12263 // This poses a problem when we wish to insert a scalable vector which
12264 // can't be re-expressed as a larger type. Just choose the slow path and
12265 // extend to a larger type, then truncate back down.
12266 MVT ExtVecVT = VecVT.changeVectorElementType(EltVT: MVT::i8);
12267 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(EltVT: MVT::i8);
12268 Vec = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: ExtVecVT, Operand: Vec);
12269 SubVec = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: ExtSubVecVT, Operand: SubVec);
12270 Vec = DAG.getNode(Opcode: ISD::INSERT_SUBVECTOR, DL, VT: ExtVecVT, N1: Vec, N2: SubVec,
12271 N3: Op.getOperand(i: 2));
12272 SDValue SplatZero = DAG.getConstant(Val: 0, DL, VT: ExtVecVT);
12273 return DAG.getSetCC(DL, VT: VecVT, LHS: Vec, RHS: SplatZero, Cond: ISD::SETNE);
12274 }
12275 }
12276
12277 // If the subvector is a fixed-length type and we don't know VLEN
12278 // exactly, we cannot use subregister manipulation to simplify the codegen; we
12279 // don't know which register of an LMUL group contains the specific subvector
12280 // as we only know the minimum register size. Therefore we must slide the
12281 // vector group up the full amount.
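// For example (illustrative): inserting v2i32 into v8i32 at index 4 with
// unknown VLEN sets VL to 4 + 2 = 6 and emits a vslideup by 4 with a
// tail-undisturbed policy.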
12282 const auto VLen = Subtarget.getRealVLen();
12283 if (SubVecVT.isFixedLengthVector() && !VLen) {
12284 MVT ContainerVT = VecVT;
12285 if (VecVT.isFixedLengthVector()) {
12286 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
12287 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
12288 }
12289
12290 SubVec = DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT: ContainerVT), SubVec, Idx: 0);
12291
12292 SDValue Mask =
12293 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
12294 // Set the vector length to only the number of elements we care about. Note
12295 // that for slideup this includes the offset.
12296 unsigned EndIndex = OrigIdx + SubVecVT.getVectorNumElements();
12297 SDValue VL = DAG.getConstant(Val: EndIndex, DL, VT: XLenVT);
12298
12299 // Use tail agnostic policy if we're inserting over Vec's tail.
12300 unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
12301 if (VecVT.isFixedLengthVector() && EndIndex == VecVT.getVectorNumElements())
12302 Policy = RISCVVType::TAIL_AGNOSTIC;
12303
12304 // If we're inserting into the lowest elements, use a tail undisturbed
12305 // vmv.v.v.
12306 if (OrigIdx == 0) {
12307 SubVec =
12308 DAG.getNode(Opcode: RISCVISD::VMV_V_V_VL, DL, VT: ContainerVT, N1: Vec, N2: SubVec, N3: VL);
12309 } else {
12310 SDValue SlideupAmt = DAG.getConstant(Val: OrigIdx, DL, VT: XLenVT);
12311 SubVec = getVSlideup(DAG, Subtarget, DL, VT: ContainerVT, Passthru: Vec, Op: SubVec,
12312 Offset: SlideupAmt, Mask, VL, Policy);
12313 }
12314
12315 if (VecVT.isFixedLengthVector())
12316 SubVec = convertFromScalableVector(VT: VecVT, V: SubVec, DAG, Subtarget);
12317 return DAG.getBitcast(VT: Op.getValueType(), V: SubVec);
12318 }
12319
12320 MVT ContainerVecVT = VecVT;
12321 if (VecVT.isFixedLengthVector()) {
12322 ContainerVecVT = getContainerForFixedLengthVector(VT: VecVT);
12323 Vec = convertToScalableVector(VT: ContainerVecVT, V: Vec, DAG, Subtarget);
12324 }
12325
12326 MVT ContainerSubVecVT = SubVecVT;
12327 if (SubVecVT.isFixedLengthVector()) {
12328 ContainerSubVecVT = getContainerForFixedLengthVector(VT: SubVecVT);
12329 SubVec = convertToScalableVector(VT: ContainerSubVecVT, V: SubVec, DAG, Subtarget);
12330 }
12331
12332 unsigned SubRegIdx;
12333 ElementCount RemIdx;
12334 // insert_subvector scales the index by vscale if the subvector is scalable,
12335 // and decomposeSubvectorInsertExtractToSubRegs takes this into account. So if
12336 // we have a fixed length subvector, we need to adjust the index by 1/vscale.
12337 if (SubVecVT.isFixedLengthVector()) {
12338 assert(VLen);
12339 unsigned Vscale = *VLen / RISCV::RVVBitsPerBlock;
12340 auto Decompose =
12341 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
12342 VecVT: ContainerVecVT, SubVecVT: ContainerSubVecVT, InsertExtractIdx: OrigIdx / Vscale, TRI);
12343 SubRegIdx = Decompose.first;
12344 RemIdx = ElementCount::getFixed(MinVal: (Decompose.second * Vscale) +
12345 (OrigIdx % Vscale));
12346 } else {
12347 auto Decompose =
12348 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
12349 VecVT: ContainerVecVT, SubVecVT: ContainerSubVecVT, InsertExtractIdx: OrigIdx, TRI);
12350 SubRegIdx = Decompose.first;
12351 RemIdx = ElementCount::getScalable(MinVal: Decompose.second);
12352 }
12353
12354 TypeSize VecRegSize = TypeSize::getScalable(MinimumSize: RISCV::RVVBitsPerBlock);
12355 assert(isPowerOf2_64(
12356 Subtarget.expandVScale(SubVecVT.getSizeInBits()).getKnownMinValue()));
12357 bool ExactlyVecRegSized =
12358 Subtarget.expandVScale(X: SubVecVT.getSizeInBits())
12359 .isKnownMultipleOf(RHS: Subtarget.expandVScale(X: VecRegSize));
12360
12361 // 1. If the Idx has been completely eliminated and this subvector's size is
12362 // a vector register or a multiple thereof, or the surrounding elements are
12363 // undef, then this is a subvector insert which naturally aligns to a vector
12364 // register. These can easily be handled using subregister manipulation.
12365 // 2. If the subvector isn't an exact multiple of a valid register group size,
12366 // then the insertion must preserve the undisturbed elements of the register.
12367 // We do this by lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1
12368 // vector type (which resolves to a subregister copy), performing a VSLIDEUP
12369 // to place the subvector within the vector register, and an INSERT_SUBVECTOR
12370 // of that LMUL=1 type back into the larger vector (resolving to another
12371 // subregister operation). See below for how our VSLIDEUP works. We go via a
12372 // LMUL=1 type to avoid allocating a large register group to hold our
12373 // subvector.
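// For example (illustrative): inserting nxv2i32 into nxv8i32 at index 4 lands
// exactly on a full register boundary of the LMUL=4 group (its third
// register), so case 1 applies and no slide is needed.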
12374 if (RemIdx.isZero() && (ExactlyVecRegSized || Vec.isUndef())) {
12375 if (SubVecVT.isFixedLengthVector()) {
12376 // We may get NoSubRegister if inserting at index 0 and the subvec
12377 // container is the same as the vector, e.g. vec=v4i32,subvec=v4i32,idx=0
12378 if (SubRegIdx == RISCV::NoSubRegister) {
12379 assert(OrigIdx == 0);
12380 return Op;
12381 }
12382
12383 // Use an insert_subvector that will resolve to an insert subreg.
12384 assert(VLen);
12385 unsigned Vscale = *VLen / RISCV::RVVBitsPerBlock;
12386 SDValue Insert =
12387 DAG.getInsertSubvector(DL, Vec, SubVec, Idx: OrigIdx / Vscale);
12388 if (VecVT.isFixedLengthVector())
12389 Insert = convertFromScalableVector(VT: VecVT, V: Insert, DAG, Subtarget);
12390 return Insert;
12391 }
12392 return Op;
12393 }
12394
12395 // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
12396 // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
12397 // (in our case undisturbed). This means we can set up a subvector insertion
12398 // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
12399 // size of the subvector.
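// For example (illustrative): with RemIdx = 2 and a 4-element subvector, the
// vslideup below uses OFFSET = 2 and VL = 2 + 4 = 6, writing elements 2..5 and
// leaving elements 0..1 and the tail undisturbed.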
12400 MVT InterSubVT = ContainerVecVT;
12401 SDValue AlignedExtract = Vec;
12402 unsigned AlignedIdx = OrigIdx - RemIdx.getKnownMinValue();
12403 if (SubVecVT.isFixedLengthVector()) {
12404 assert(VLen);
12405 AlignedIdx /= *VLen / RISCV::RVVBitsPerBlock;
12406 }
12407 if (ContainerVecVT.bitsGT(VT: RISCVTargetLowering::getM1VT(VT: ContainerVecVT))) {
12408 InterSubVT = RISCVTargetLowering::getM1VT(VT: ContainerVecVT);
12409 // Extract a subvector equal to the nearest full vector register type. This
12410 // should resolve to an EXTRACT_SUBREG instruction.
12411 AlignedExtract = DAG.getExtractSubvector(DL, VT: InterSubVT, Vec, Idx: AlignedIdx);
12412 }
12413
12414 SubVec = DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT: InterSubVT), SubVec, Idx: 0);
12415
12416 auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT: ContainerVecVT, DL, DAG, Subtarget);
12417
12418 ElementCount EndIndex = RemIdx + SubVecVT.getVectorElementCount();
12419 VL = DAG.getElementCount(DL, VT: XLenVT, EC: SubVecVT.getVectorElementCount());
12420
12421 // Use tail agnostic policy if we're inserting over InterSubVT's tail.
12422 unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
12423 if (Subtarget.expandVScale(X: EndIndex) ==
12424 Subtarget.expandVScale(X: InterSubVT.getVectorElementCount()))
12425 Policy = RISCVVType::TAIL_AGNOSTIC;
12426
12427 // If we're inserting into the lowest elements, use a tail undisturbed
12428 // vmv.v.v.
12429 if (RemIdx.isZero()) {
12430 SubVec = DAG.getNode(Opcode: RISCVISD::VMV_V_V_VL, DL, VT: InterSubVT, N1: AlignedExtract,
12431 N2: SubVec, N3: VL);
12432 } else {
12433 SDValue SlideupAmt = DAG.getElementCount(DL, VT: XLenVT, EC: RemIdx);
12434
12435 // Construct the vector length corresponding to RemIdx + length(SubVecVT).
12436 VL = DAG.getNode(Opcode: ISD::ADD, DL, VT: XLenVT, N1: SlideupAmt, N2: VL);
12437
12438 SubVec = getVSlideup(DAG, Subtarget, DL, VT: InterSubVT, Passthru: AlignedExtract, Op: SubVec,
12439 Offset: SlideupAmt, Mask, VL, Policy);
12440 }
12441
12442 // If required, insert this subvector back into the correct vector register.
12443 // This should resolve to an INSERT_SUBREG instruction.
12444 if (ContainerVecVT.bitsGT(VT: InterSubVT))
12445 SubVec = DAG.getInsertSubvector(DL, Vec, SubVec, Idx: AlignedIdx);
12446
12447 if (VecVT.isFixedLengthVector())
12448 SubVec = convertFromScalableVector(VT: VecVT, V: SubVec, DAG, Subtarget);
12449
12450 // We might have bitcast from a mask type: cast back to the original type if
12451 // required.
12452 return DAG.getBitcast(VT: Op.getSimpleValueType(), V: SubVec);
12453}
12454
12455SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
12456 SelectionDAG &DAG) const {
12457 SDValue Vec = Op.getOperand(i: 0);
12458 MVT SubVecVT = Op.getSimpleValueType();
12459 MVT VecVT = Vec.getSimpleValueType();
12460
12461 SDLoc DL(Op);
12462 MVT XLenVT = Subtarget.getXLenVT();
12463 unsigned OrigIdx = Op.getConstantOperandVal(i: 1);
12464 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
12465
12466 // With an index of 0 this is a cast-like subvector extract, which can be
12467 // performed with subregister operations.
12468 if (OrigIdx == 0)
12469 return Op;
12470
12471 // We don't have the ability to slide mask vectors down indexed by their i1
12472 // elements; the smallest we can do is i8. Often we are able to bitcast to
12473 // equivalent i8 vectors. Note that when extracting a fixed-length vector
12474 // from a scalable one, we might not necessarily have enough scalable
12475 // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
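// For example (illustrative): extracting nxv8i1 at index 8 from nxv64i1 can be
// re-expressed as extracting nxv1i8 at index 1 from nxv8i8.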
12476 if (SubVecVT.getVectorElementType() == MVT::i1) {
12477 if (VecVT.getVectorMinNumElements() >= 8 &&
12478 SubVecVT.getVectorMinNumElements() >= 8) {
12479 assert(OrigIdx % 8 == 0 && "Invalid index");
12480 assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
12481 SubVecVT.getVectorMinNumElements() % 8 == 0 &&
12482 "Unexpected mask vector lowering");
12483 OrigIdx /= 8;
12484 SubVecVT =
12485 MVT::getVectorVT(VT: MVT::i8, NumElements: SubVecVT.getVectorMinNumElements() / 8,
12486 IsScalable: SubVecVT.isScalableVector());
12487 VecVT = MVT::getVectorVT(VT: MVT::i8, NumElements: VecVT.getVectorMinNumElements() / 8,
12488 IsScalable: VecVT.isScalableVector());
12489 Vec = DAG.getBitcast(VT: VecVT, V: Vec);
12490 } else {
12491 // We can't slide this mask vector down indexed by its i1 elements.
12492 // This poses a problem when we wish to extract a scalable vector which
12493 // can't be re-expressed as a larger type. Just choose the slow path and
12494 // extend to a larger type, then truncate back down.
12495 // TODO: We could probably improve the case of extracting a fixed-length
12496 // vector from a fixed-length vector, where we can extract as i8 and shift
12497 // the correct element right to reach the desired subvector.
12498 MVT ExtVecVT = VecVT.changeVectorElementType(EltVT: MVT::i8);
12499 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(EltVT: MVT::i8);
12500 Vec = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: ExtVecVT, Operand: Vec);
12501 Vec = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: ExtSubVecVT, N1: Vec,
12502 N2: Op.getOperand(i: 1));
12503 SDValue SplatZero = DAG.getConstant(Val: 0, DL, VT: ExtSubVecVT);
12504 return DAG.getSetCC(DL, VT: SubVecVT, LHS: Vec, RHS: SplatZero, Cond: ISD::SETNE);
12505 }
12506 }
12507
12508 const auto VLen = Subtarget.getRealVLen();
12509
12510 // If the subvector is a fixed-length type and we don't know VLEN
12511 // exactly, we cannot use subregister manipulation to simplify the codegen; we
12512 // don't know which register of an LMUL group contains the specific subvector
12513 // as we only know the minimum register size. Therefore we must slide the
12514 // vector group down the full amount.
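// For example (illustrative): extracting v2i32 at index 4 from v8i32 with
// unknown VLEN slides the vector down by 4 with VL = 2 and then takes the low
// v2i32 of the result.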
12515 if (SubVecVT.isFixedLengthVector() && !VLen) {
12516 MVT ContainerVT = VecVT;
12517 if (VecVT.isFixedLengthVector()) {
12518 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
12519 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
12520 }
12521
12522 // Shrink down Vec so we're performing the slidedown on a smaller LMUL.
12523 unsigned LastIdx = OrigIdx + SubVecVT.getVectorNumElements() - 1;
12524 if (auto ShrunkVT =
12525 getSmallestVTForIndex(VecVT: ContainerVT, MaxIdx: LastIdx, DL, DAG, Subtarget)) {
12526 ContainerVT = *ShrunkVT;
12527 Vec = DAG.getExtractSubvector(DL, VT: ContainerVT, Vec, Idx: 0);
12528 }
12529
12530 SDValue Mask =
12531 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
12532 // Set the vector length to only the number of elements we care about. This
12533 // avoids sliding down elements we're going to discard straight away.
12534 SDValue VL = DAG.getConstant(Val: SubVecVT.getVectorNumElements(), DL, VT: XLenVT);
12535 SDValue SlidedownAmt = DAG.getConstant(Val: OrigIdx, DL, VT: XLenVT);
12536 SDValue Slidedown =
12537 getVSlidedown(DAG, Subtarget, DL, VT: ContainerVT,
12538 Passthru: DAG.getUNDEF(VT: ContainerVT), Op: Vec, Offset: SlidedownAmt, Mask, VL);
12539 // Now we can use a cast-like subvector extract to get the result.
12540 Slidedown = DAG.getExtractSubvector(DL, VT: SubVecVT, Vec: Slidedown, Idx: 0);
12541 return DAG.getBitcast(VT: Op.getValueType(), V: Slidedown);
12542 }
12543
12544 if (VecVT.isFixedLengthVector()) {
12545 VecVT = getContainerForFixedLengthVector(VT: VecVT);
12546 Vec = convertToScalableVector(VT: VecVT, V: Vec, DAG, Subtarget);
12547 }
12548
12549 MVT ContainerSubVecVT = SubVecVT;
12550 if (SubVecVT.isFixedLengthVector())
12551 ContainerSubVecVT = getContainerForFixedLengthVector(VT: SubVecVT);
12552
12553 unsigned SubRegIdx;
12554 ElementCount RemIdx;
12555 // extract_subvector scales the index by vscale if the subvector is scalable,
12556 // and decomposeSubvectorInsertExtractToSubRegs takes this into account. So if
12557 // we have a fixed length subvector, we need to adjust the index by 1/vscale.
12558 if (SubVecVT.isFixedLengthVector()) {
12559 assert(VLen);
12560 unsigned Vscale = *VLen / RISCV::RVVBitsPerBlock;
12561 auto Decompose =
12562 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
12563 VecVT, SubVecVT: ContainerSubVecVT, InsertExtractIdx: OrigIdx / Vscale, TRI);
12564 SubRegIdx = Decompose.first;
12565 RemIdx = ElementCount::getFixed(MinVal: (Decompose.second * Vscale) +
12566 (OrigIdx % Vscale));
12567 } else {
12568 auto Decompose =
12569 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
12570 VecVT, SubVecVT: ContainerSubVecVT, InsertExtractIdx: OrigIdx, TRI);
12571 SubRegIdx = Decompose.first;
12572 RemIdx = ElementCount::getScalable(MinVal: Decompose.second);
12573 }
12574
12575 // If the Idx has been completely eliminated then this is a subvector extract
12576 // which naturally aligns to a vector register. These can easily be handled
12577 // using subregister manipulation. We use an extract_subvector that will
12578 // resolve to an extract subreg.
12579 if (RemIdx.isZero()) {
12580 if (SubVecVT.isFixedLengthVector()) {
12581 assert(VLen);
12582 unsigned Vscale = *VLen / RISCV::RVVBitsPerBlock;
12583 Vec =
12584 DAG.getExtractSubvector(DL, VT: ContainerSubVecVT, Vec, Idx: OrigIdx / Vscale);
12585 return convertFromScalableVector(VT: SubVecVT, V: Vec, DAG, Subtarget);
12586 }
12587 return Op;
12588 }
12589
12590 // Else SubVecVT is M1 or smaller and may need to be slid down: if SubVecVT
12591 // was > M1 then the index would need to be a multiple of VLMAX, and so would
12592 // divide exactly.
12593 assert(RISCVVType::decodeVLMUL(getLMUL(ContainerSubVecVT)).second ||
12594 getLMUL(ContainerSubVecVT) == RISCVVType::LMUL_1);
12595
12596 // If the vector type is an LMUL-group type, extract a subvector equal to the
12597 // nearest full vector register type.
12598 MVT InterSubVT = VecVT;
12599 if (VecVT.bitsGT(VT: RISCVTargetLowering::getM1VT(VT: VecVT))) {
12600 // If VecVT has an LMUL > 1, then SubVecVT should have a smaller LMUL, and
12601 // we should have successfully decomposed the extract into a subregister.
12602 // We use an extract_subvector that will resolve to a subreg extract.
12603 assert(SubRegIdx != RISCV::NoSubRegister);
12604 (void)SubRegIdx;
12605 unsigned Idx = OrigIdx - RemIdx.getKnownMinValue();
12606 if (SubVecVT.isFixedLengthVector()) {
12607 assert(VLen);
12608 Idx /= *VLen / RISCV::RVVBitsPerBlock;
12609 }
12610 InterSubVT = RISCVTargetLowering::getM1VT(VT: VecVT);
12611 Vec = DAG.getExtractSubvector(DL, VT: InterSubVT, Vec, Idx);
12612 }
12613
12614 // Slide this vector register down by the desired number of elements in order
12615 // to place the desired subvector starting at element 0.
12616 SDValue SlidedownAmt = DAG.getElementCount(DL, VT: XLenVT, EC: RemIdx);
12617 auto [Mask, VL] = getDefaultScalableVLOps(VecVT: InterSubVT, DL, DAG, Subtarget);
12618 if (SubVecVT.isFixedLengthVector())
12619 VL = DAG.getConstant(Val: SubVecVT.getVectorNumElements(), DL, VT: XLenVT);
12620 SDValue Slidedown =
12621 getVSlidedown(DAG, Subtarget, DL, VT: InterSubVT, Passthru: DAG.getUNDEF(VT: InterSubVT),
12622 Op: Vec, Offset: SlidedownAmt, Mask, VL);
12623
12624 // Now the vector is in the right position, extract our final subvector. This
12625 // should resolve to a COPY.
12626 Slidedown = DAG.getExtractSubvector(DL, VT: SubVecVT, Vec: Slidedown, Idx: 0);
12627
12628 // We might have bitcast from a mask type: cast back to the original type if
12629 // required.
12630 return DAG.getBitcast(VT: Op.getSimpleValueType(), V: Slidedown);
12631}
12632
12633 // Widen a vector operation's operands to i8, then truncate its results back to
12634 // the original type, typically i1. All operand and result types must be the same.
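// For example (illustrative): a VECTOR_DEINTERLEAVE of nxv16i1 operands is
// widened to nxv16i8, and each result is compared against zero (SETNE) to
// produce nxv16i1 values again.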
12635static SDValue widenVectorOpsToi8(SDValue N, const SDLoc &DL,
12636 SelectionDAG &DAG) {
12637 MVT VT = N.getSimpleValueType();
12638 MVT WideVT = VT.changeVectorElementType(EltVT: MVT::i8);
12639 SmallVector<SDValue, 4> WideOps;
12640 for (SDValue Op : N->ops()) {
12641 assert(Op.getSimpleValueType() == VT &&
12642 "Operands and result must be same type");
12643 WideOps.push_back(Elt: DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: WideVT, Operand: Op));
12644 }
12645
12646 unsigned NumVals = N->getNumValues();
12647
12648 SDVTList VTs = DAG.getVTList(VTs: SmallVector<EVT, 4>(
12649 NumVals,
12650 N.getValueType().changeVectorElementType(Context&: *DAG.getContext(), EltVT: MVT::i8)));
12651 SDValue WideN = DAG.getNode(Opcode: N.getOpcode(), DL, VTList: VTs, Ops: WideOps);
12652 SmallVector<SDValue, 4> TruncVals;
12653 for (unsigned I = 0; I < NumVals; I++) {
12654 TruncVals.push_back(
12655 Elt: DAG.getSetCC(DL, VT: N->getSimpleValueType(ResNo: I), LHS: WideN.getValue(R: I),
12656 RHS: DAG.getConstant(Val: 0, DL, VT: WideVT), Cond: ISD::SETNE));
12657 }
12658
12659 if (TruncVals.size() > 1)
12660 return DAG.getMergeValues(Ops: TruncVals, dl: DL);
12661 return TruncVals.front();
12662}
12663
12664SDValue RISCVTargetLowering::lowerVECTOR_DEINTERLEAVE(SDValue Op,
12665 SelectionDAG &DAG) const {
12666 SDLoc DL(Op);
12667 MVT VecVT = Op.getSimpleValueType();
12668
12669 const unsigned Factor = Op->getNumValues();
12670 assert(Factor <= 8);
12671
12672 // i1 element vectors need to be widened to i8
12673 if (VecVT.getVectorElementType() == MVT::i1)
12674 return widenVectorOpsToi8(N: Op, DL, DAG);
12675
12676 // Convert to scalable vectors first.
12677 if (VecVT.isFixedLengthVector()) {
12678 MVT ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
12679 SmallVector<SDValue, 8> Ops(Factor);
12680 for (unsigned i = 0U; i < Factor; ++i)
12681 Ops[i] = convertToScalableVector(VT: ContainerVT, V: Op.getOperand(i), DAG,
12682 Subtarget);
12683
12684 SmallVector<EVT, 8> VTs(Factor, ContainerVT);
12685 SDValue NewDeinterleave =
12686 DAG.getNode(Opcode: ISD::VECTOR_DEINTERLEAVE, DL, ResultTys: VTs, Ops);
12687
12688 SmallVector<SDValue, 8> Res(Factor);
12689 for (unsigned i = 0U; i < Factor; ++i)
12690 Res[i] = convertFromScalableVector(VT: VecVT, V: NewDeinterleave.getValue(R: i),
12691 DAG, Subtarget);
12692 return DAG.getMergeValues(Ops: Res, dl: DL);
12693 }
12694
12695 // If concatenating would exceed LMUL=8, we need to split.
12696 if ((VecVT.getSizeInBits().getKnownMinValue() * Factor) >
12697 (8 * RISCV::RVVBitsPerBlock)) {
12698 SmallVector<SDValue, 8> Ops(Factor * 2);
12699 for (unsigned i = 0; i != Factor; ++i) {
12700 auto [OpLo, OpHi] = DAG.SplitVectorOperand(N: Op.getNode(), OpNo: i);
12701 Ops[i * 2] = OpLo;
12702 Ops[i * 2 + 1] = OpHi;
12703 }
12704
12705 SmallVector<EVT, 8> VTs(Factor, Ops[0].getValueType());
12706
12707 SDValue Lo = DAG.getNode(Opcode: ISD::VECTOR_DEINTERLEAVE, DL, ResultTys: VTs,
12708 Ops: ArrayRef(Ops).slice(N: 0, M: Factor));
12709 SDValue Hi = DAG.getNode(Opcode: ISD::VECTOR_DEINTERLEAVE, DL, ResultTys: VTs,
12710 Ops: ArrayRef(Ops).slice(N: Factor, M: Factor));
12711
12712 SmallVector<SDValue, 8> Res(Factor);
12713 for (unsigned i = 0; i != Factor; ++i)
12714 Res[i] = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: VecVT, N1: Lo.getValue(R: i),
12715 N2: Hi.getValue(R: i));
12716
12717 return DAG.getMergeValues(Ops: Res, dl: DL);
12718 }
12719
12720 if (Subtarget.hasVendorXRivosVizip() && Factor == 2) {
12721 MVT VT = Op->getSimpleValueType(ResNo: 0);
12722 SDValue V1 = Op->getOperand(Num: 0);
12723 SDValue V2 = Op->getOperand(Num: 1);
12724
12725 // For fractional LMUL, check if we can use a higher LMUL
12726 // instruction to avoid a vslidedown.
12727 if (SDValue Src = foldConcatVector(V1, V2);
12728 Src && RISCVTargetLowering::getM1VT(VT).bitsGT(VT)) {
12729 EVT NewVT = VT.getDoubleNumVectorElementsVT();
12730 Src = DAG.getExtractSubvector(DL, VT: NewVT, Vec: Src, Idx: 0);
12731 // Freeze the source so we can increase its use count.
12732 Src = DAG.getFreeze(V: Src);
12733 SDValue Even = lowerVZIP(Opc: RISCVISD::RI_VUNZIP2A_VL, Op0: Src,
12734 Op1: DAG.getUNDEF(VT: NewVT), DL, DAG, Subtarget);
12735 SDValue Odd = lowerVZIP(Opc: RISCVISD::RI_VUNZIP2B_VL, Op0: Src,
12736 Op1: DAG.getUNDEF(VT: NewVT), DL, DAG, Subtarget);
12737 Even = DAG.getExtractSubvector(DL, VT, Vec: Even, Idx: 0);
12738 Odd = DAG.getExtractSubvector(DL, VT, Vec: Odd, Idx: 0);
12739 return DAG.getMergeValues(Ops: {Even, Odd}, dl: DL);
12740 }
12741
12742 // Freeze the sources so we can increase their use count.
12743 V1 = DAG.getFreeze(V: V1);
12744 V2 = DAG.getFreeze(V: V2);
12745 SDValue Even =
12746 lowerVZIP(Opc: RISCVISD::RI_VUNZIP2A_VL, Op0: V1, Op1: V2, DL, DAG, Subtarget);
12747 SDValue Odd =
12748 lowerVZIP(Opc: RISCVISD::RI_VUNZIP2B_VL, Op0: V1, Op1: V2, DL, DAG, Subtarget);
12749 return DAG.getMergeValues(Ops: {Even, Odd}, dl: DL);
12750 }
12751
12752 SmallVector<SDValue, 8> Ops(Op->op_values());
12753
12754 // Concatenate the vectors as one vector to deinterleave
12755 MVT ConcatVT =
12756 MVT::getVectorVT(VT: VecVT.getVectorElementType(),
12757 EC: VecVT.getVectorElementCount().multiplyCoefficientBy(
12758 RHS: PowerOf2Ceil(A: Factor)));
12759 if (Ops.size() < PowerOf2Ceil(A: Factor))
12760 Ops.append(NumInputs: PowerOf2Ceil(A: Factor) - Factor, Elt: DAG.getUNDEF(VT: VecVT));
12761 SDValue Concat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ConcatVT, Ops);
12762
12763 if (Factor == 2) {
12764 // We can deinterleave through vnsrl.wi if the element type is smaller than
12765 // ELEN
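// For example (illustrative, placeholder register names): with SEW=16, viewing
// the concatenation as pairs of 32-bit elements, "vnsrl.wi vEven, vSrc, 0"
// keeps the low halves (even elements) and "vnsrl.wi vOdd, vSrc, 16" keeps the
// high halves (odd elements).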
12766 if (VecVT.getScalarSizeInBits() < Subtarget.getELen()) {
12767 SDValue Even = getDeinterleaveShiftAndTrunc(DL, VT: VecVT, Src: Concat, Factor: 2, Index: 0, DAG);
12768 SDValue Odd = getDeinterleaveShiftAndTrunc(DL, VT: VecVT, Src: Concat, Factor: 2, Index: 1, DAG);
12769 return DAG.getMergeValues(Ops: {Even, Odd}, dl: DL);
12770 }
12771
12772 // For the indices, use the vmv.v.x of an i8 constant to fill the largest
12773 // possible mask vector, then extract the required subvector. Doing this
12774 // (instead of a vid, vmsne sequence) reduces LMUL, and allows the mask
12775 // creation to be rematerialized during register allocation to reduce
12776 // register pressure if needed.
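// For example (illustrative): splatting 0b01010101 across i8 lanes and
// bitcasting to i1 gives the even mask 1,0,1,0,..., while 0b10101010 gives the
// odd mask 0,1,0,1,...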
12777
12778 MVT MaskVT = ConcatVT.changeVectorElementType(EltVT: MVT::i1);
12779
12780 SDValue EvenSplat = DAG.getConstant(Val: 0b01010101, DL, VT: MVT::nxv8i8);
12781 EvenSplat = DAG.getBitcast(VT: MVT::nxv64i1, V: EvenSplat);
12782 SDValue EvenMask = DAG.getExtractSubvector(DL, VT: MaskVT, Vec: EvenSplat, Idx: 0);
12783
12784 SDValue OddSplat = DAG.getConstant(Val: 0b10101010, DL, VT: MVT::nxv8i8);
12785 OddSplat = DAG.getBitcast(VT: MVT::nxv64i1, V: OddSplat);
12786 SDValue OddMask = DAG.getExtractSubvector(DL, VT: MaskVT, Vec: OddSplat, Idx: 0);
12787
12788 // vcompress the even and odd elements into two separate vectors
12789 SDValue EvenWide = DAG.getNode(Opcode: ISD::VECTOR_COMPRESS, DL, VT: ConcatVT, N1: Concat,
12790 N2: EvenMask, N3: DAG.getUNDEF(VT: ConcatVT));
12791 SDValue OddWide = DAG.getNode(Opcode: ISD::VECTOR_COMPRESS, DL, VT: ConcatVT, N1: Concat,
12792 N2: OddMask, N3: DAG.getUNDEF(VT: ConcatVT));
12793
12794 // Extract the low half of each compressed result for even and odd
12795 SDValue Even = DAG.getExtractSubvector(DL, VT: VecVT, Vec: EvenWide, Idx: 0);
12796 SDValue Odd = DAG.getExtractSubvector(DL, VT: VecVT, Vec: OddWide, Idx: 0);
12797
12798 return DAG.getMergeValues(Ops: {Even, Odd}, dl: DL);
12799 }
12800
12801 // Store with a unit-stride store and load it back with a segmented load.
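// For example (illustrative), with Factor = 3 and SEW = 32 this is roughly a
// single "vse32.v" of the concatenation to a stack slot followed by a
// "vlseg3e32.v" that splits it back into three fields.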
12802 MVT XLenVT = Subtarget.getXLenVT();
12803 auto [Mask, VL] = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
12804 SDValue Passthru = DAG.getUNDEF(VT: ConcatVT);
12805
12806 // Allocate a stack slot.
12807 Align Alignment = DAG.getReducedAlign(VT: VecVT, /*UseABI=*/false);
12808 SDValue StackPtr =
12809 DAG.CreateStackTemporary(Bytes: ConcatVT.getStoreSize(), Alignment);
12810 auto &MF = DAG.getMachineFunction();
12811 auto FrameIndex = cast<FrameIndexSDNode>(Val: StackPtr.getNode())->getIndex();
12812 auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FI: FrameIndex);
12813
12814 SDValue StoreOps[] = {DAG.getEntryNode(),
12815 DAG.getTargetConstant(Val: Intrinsic::riscv_vse, DL, VT: XLenVT),
12816 Concat, StackPtr, VL};
12817
12818 SDValue Chain = DAG.getMemIntrinsicNode(
12819 Opcode: ISD::INTRINSIC_VOID, dl: DL, VTList: DAG.getVTList(VT: MVT::Other), Ops: StoreOps,
12820 MemVT: ConcatVT.getVectorElementType(), PtrInfo, Alignment,
12821 Flags: MachineMemOperand::MOStore, Size: LocationSize::beforeOrAfterPointer());
12822
12823 static const Intrinsic::ID VlsegIntrinsicsIds[] = {
12824 Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
12825 Intrinsic::riscv_vlseg4_mask, Intrinsic::riscv_vlseg5_mask,
12826 Intrinsic::riscv_vlseg6_mask, Intrinsic::riscv_vlseg7_mask,
12827 Intrinsic::riscv_vlseg8_mask};
12828
12829 SDValue LoadOps[] = {
12830 Chain,
12831 DAG.getTargetConstant(Val: VlsegIntrinsicsIds[Factor - 2], DL, VT: XLenVT),
12832 Passthru,
12833 StackPtr,
12834 Mask,
12835 VL,
12836 DAG.getTargetConstant(
12837 Val: RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC, DL, VT: XLenVT),
12838 DAG.getTargetConstant(Val: Log2_64(Value: VecVT.getScalarSizeInBits()), DL, VT: XLenVT)};
12839
12840 unsigned Sz =
12841 Factor * VecVT.getVectorMinNumElements() * VecVT.getScalarSizeInBits();
12842 EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NFields: Factor);
12843
12844 SDValue Load = DAG.getMemIntrinsicNode(
12845 Opcode: ISD::INTRINSIC_W_CHAIN, dl: DL, VTList: DAG.getVTList(VTs: {VecTupTy, MVT::Other}),
12846 Ops: LoadOps, MemVT: ConcatVT.getVectorElementType(), PtrInfo, Alignment,
12847 Flags: MachineMemOperand::MOLoad, Size: LocationSize::beforeOrAfterPointer());
12848
12849 SmallVector<SDValue, 8> Res(Factor);
12850
12851 for (unsigned i = 0U; i < Factor; ++i)
12852 Res[i] = DAG.getNode(Opcode: RISCVISD::TUPLE_EXTRACT, DL, VT: VecVT, N1: Load,
12853 N2: DAG.getTargetConstant(Val: i, DL, VT: MVT::i32));
12854
12855 return DAG.getMergeValues(Ops: Res, dl: DL);
12856}
12857
12858SDValue RISCVTargetLowering::lowerVECTOR_INTERLEAVE(SDValue Op,
12859 SelectionDAG &DAG) const {
12860 SDLoc DL(Op);
12861 MVT VecVT = Op.getSimpleValueType();
12862
12863 const unsigned Factor = Op.getNumOperands();
12864 assert(Factor <= 8);
12865
12866 // i1 vectors need to be widened to i8
12867 if (VecVT.getVectorElementType() == MVT::i1)
12868 return widenVectorOpsToi8(N: Op, DL, DAG);
12869
12870 // Convert to scalable vectors first.
12871 if (VecVT.isFixedLengthVector()) {
12872 MVT ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
12873 SmallVector<SDValue, 8> Ops(Factor);
12874 for (unsigned i = 0U; i < Factor; ++i)
12875 Ops[i] = convertToScalableVector(VT: ContainerVT, V: Op.getOperand(i), DAG,
12876 Subtarget);
12877
12878 SmallVector<EVT, 8> VTs(Factor, ContainerVT);
12879 SDValue NewInterleave = DAG.getNode(Opcode: ISD::VECTOR_INTERLEAVE, DL, ResultTys: VTs, Ops);
12880
12881 SmallVector<SDValue, 8> Res(Factor);
12882 for (unsigned i = 0U; i < Factor; ++i)
12883 Res[i] = convertFromScalableVector(VT: VecVT, V: NewInterleave.getValue(R: i), DAG,
12884 Subtarget);
12885 return DAG.getMergeValues(Ops: Res, dl: DL);
12886 }
12887
12888 MVT XLenVT = Subtarget.getXLenVT();
12889 auto [Mask, VL] = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
12890
12891 // If interleaving would exceed LMUL=8, we need to split and reassemble.
12892 if ((VecVT.getSizeInBits().getKnownMinValue() * Factor) >
12893 (8 * RISCV::RVVBitsPerBlock)) {
12894 SmallVector<SDValue, 8> Ops(Factor * 2);
12895 for (unsigned i = 0; i != Factor; ++i) {
12896 auto [OpLo, OpHi] = DAG.SplitVectorOperand(N: Op.getNode(), OpNo: i);
12897 Ops[i] = OpLo;
12898 Ops[i + Factor] = OpHi;
12899 }
12900
12901 SmallVector<EVT, 8> VTs(Factor, Ops[0].getValueType());
12902
12903 SDValue Res[] = {DAG.getNode(Opcode: ISD::VECTOR_INTERLEAVE, DL, ResultTys: VTs,
12904 Ops: ArrayRef(Ops).take_front(N: Factor)),
12905 DAG.getNode(Opcode: ISD::VECTOR_INTERLEAVE, DL, ResultTys: VTs,
12906 Ops: ArrayRef(Ops).drop_front(N: Factor))};
12907
12908 SmallVector<SDValue, 8> Concats(Factor);
12909 for (unsigned i = 0; i != Factor; ++i) {
12910 unsigned IdxLo = 2 * i;
12911 unsigned IdxHi = 2 * i + 1;
12912 Concats[i] = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: VecVT,
12913 N1: Res[IdxLo / Factor].getValue(R: IdxLo % Factor),
12914 N2: Res[IdxHi / Factor].getValue(R: IdxHi % Factor));
12915 }
12916
12917 return DAG.getMergeValues(Ops: Concats, dl: DL);
12918 }
12919
12920 SDValue Interleaved;
12921
12922 // Spill to the stack using a segment store for simplicity.
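// For example (illustrative), with Factor = 3 and SEW = 32 this is roughly a
// "vsseg3e32.v" of the three operands to a stack slot (which interleaves them
// in memory) followed by three unit-stride "vle32.v" loads of the results.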
12923 if (Factor != 2) {
12924 EVT MemVT =
12925 EVT::getVectorVT(Context&: *DAG.getContext(), VT: VecVT.getVectorElementType(),
12926 EC: VecVT.getVectorElementCount() * Factor);
12927
12928 // Allocate a stack slot.
12929 Align Alignment = DAG.getReducedAlign(VT: VecVT, /*UseABI=*/false);
12930 SDValue StackPtr =
12931 DAG.CreateStackTemporary(Bytes: MemVT.getStoreSize(), Alignment);
12932 EVT PtrVT = StackPtr.getValueType();
12933 auto &MF = DAG.getMachineFunction();
12934 auto FrameIndex = cast<FrameIndexSDNode>(Val: StackPtr.getNode())->getIndex();
12935 auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FI: FrameIndex);
12936
12937 static const Intrinsic::ID IntrIds[] = {
12938 Intrinsic::riscv_vsseg2_mask, Intrinsic::riscv_vsseg3_mask,
12939 Intrinsic::riscv_vsseg4_mask, Intrinsic::riscv_vsseg5_mask,
12940 Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
12941 Intrinsic::riscv_vsseg8_mask,
12942 };
12943
12944 unsigned Sz =
12945 Factor * VecVT.getVectorMinNumElements() * VecVT.getScalarSizeInBits();
12946 EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NFields: Factor);
12947
12948 SDValue StoredVal = DAG.getUNDEF(VT: VecTupTy);
12949 for (unsigned i = 0; i < Factor; i++)
12950 StoredVal =
12951 DAG.getNode(Opcode: RISCVISD::TUPLE_INSERT, DL, VT: VecTupTy, N1: StoredVal,
12952 N2: Op.getOperand(i), N3: DAG.getTargetConstant(Val: i, DL, VT: MVT::i32));
12953
12954 SDValue Ops[] = {DAG.getEntryNode(),
12955 DAG.getTargetConstant(Val: IntrIds[Factor - 2], DL, VT: XLenVT),
12956 StoredVal,
12957 StackPtr,
12958 Mask,
12959 VL,
12960 DAG.getTargetConstant(Val: Log2_64(Value: VecVT.getScalarSizeInBits()),
12961 DL, VT: XLenVT)};
12962
12963 SDValue Chain = DAG.getMemIntrinsicNode(
12964 Opcode: ISD::INTRINSIC_VOID, dl: DL, VTList: DAG.getVTList(VT: MVT::Other), Ops,
12965 MemVT: VecVT.getVectorElementType(), PtrInfo, Alignment,
12966 Flags: MachineMemOperand::MOStore, Size: LocationSize::beforeOrAfterPointer());
12967
12968 SmallVector<SDValue, 8> Loads(Factor);
12969
12970 SDValue Increment = DAG.getTypeSize(DL, VT: PtrVT, TS: VecVT.getStoreSize());
12971 for (unsigned i = 0; i != Factor; ++i) {
12972 if (i != 0)
12973 StackPtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: StackPtr, N2: Increment);
12974
12975 Loads[i] = DAG.getLoad(VT: VecVT, dl: DL, Chain, Ptr: StackPtr, PtrInfo);
12976 }
12977
12978 return DAG.getMergeValues(Ops: Loads, dl: DL);
12979 }
12980
12981 // Use ri.vzip2{a,b} if available
12982 // TODO: Figure out the best lowering for the spread variants
12983 if (Subtarget.hasVendorXRivosVizip() && !Op.getOperand(i: 0).isUndef() &&
12984 !Op.getOperand(i: 1).isUndef()) {
12985 // Freeze the sources so we can increase their use count.
12986 SDValue V1 = DAG.getFreeze(V: Op->getOperand(Num: 0));
12987 SDValue V2 = DAG.getFreeze(V: Op->getOperand(Num: 1));
12988 SDValue Lo = lowerVZIP(Opc: RISCVISD::RI_VZIP2A_VL, Op0: V1, Op1: V2, DL, DAG, Subtarget);
12989 SDValue Hi = lowerVZIP(Opc: RISCVISD::RI_VZIP2B_VL, Op0: V1, Op1: V2, DL, DAG, Subtarget);
12990 return DAG.getMergeValues(Ops: {Lo, Hi}, dl: DL);
12991 }
12992
12993 // If the element type is smaller than ELEN, then we can interleave with
12994 // vwaddu.vv and vwmaccu.vx
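// For example (illustrative), for SEW = 8: "vwaddu.vv" computes
// zext(Even) + zext(Odd), and "vwmaccu.vx" with scalar 0xFF adds
// zext(Odd) * 255, so each 16-bit lane holds Even | (Odd << 8), i.e. the
// interleaved pair when viewed as two 8-bit elements.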
12995 if (VecVT.getScalarSizeInBits() < Subtarget.getELen()) {
12996 Interleaved = getWideningInterleave(EvenV: Op.getOperand(i: 0), OddV: Op.getOperand(i: 1), DL,
12997 DAG, Subtarget);
12998 } else {
12999 // Otherwise, fall back to using vrgatherei16.vv
13000 MVT ConcatVT =
13001 MVT::getVectorVT(VT: VecVT.getVectorElementType(),
13002 EC: VecVT.getVectorElementCount().multiplyCoefficientBy(RHS: 2));
13003 SDValue Concat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ConcatVT,
13004 N1: Op.getOperand(i: 0), N2: Op.getOperand(i: 1));
13005
13006 MVT IdxVT = ConcatVT.changeVectorElementType(EltVT: MVT::i16);
13007
13008 // 0 1 2 3 4 5 6 7 ...
13009 SDValue StepVec = DAG.getStepVector(DL, ResVT: IdxVT);
13010
13011 // 1 1 1 1 1 1 1 1 ...
13012 SDValue Ones = DAG.getSplatVector(VT: IdxVT, DL, Op: DAG.getConstant(Val: 1, DL, VT: XLenVT));
13013
13014 // 1 0 1 0 1 0 1 0 ...
13015 SDValue OddMask = DAG.getNode(Opcode: ISD::AND, DL, VT: IdxVT, N1: StepVec, N2: Ones);
13016 OddMask = DAG.getSetCC(
13017 DL, VT: IdxVT.changeVectorElementType(EltVT: MVT::i1), LHS: OddMask,
13018 RHS: DAG.getSplatVector(VT: IdxVT, DL, Op: DAG.getConstant(Val: 0, DL, VT: XLenVT)),
13019 Cond: ISD::CondCode::SETNE);
13020
13021 SDValue VLMax = DAG.getSplatVector(VT: IdxVT, DL, Op: computeVLMax(VecVT, DL, DAG));
13022
13023 // Build up the index vector for interleaving the concatenated vector
13024 // 0 0 1 1 2 2 3 3 ...
13025 SDValue Idx = DAG.getNode(Opcode: ISD::SRL, DL, VT: IdxVT, N1: StepVec, N2: Ones);
13026 // 0 n 1 n+1 2 n+2 3 n+3 ...
13027 Idx =
13028 DAG.getNode(Opcode: RISCVISD::ADD_VL, DL, VT: IdxVT, N1: Idx, N2: VLMax, N3: Idx, N4: OddMask, N5: VL);
13029
13030 // Then perform the interleave
13031 // v[0] v[n] v[1] v[n+1] v[2] v[n+2] v[3] v[n+3] ...
13032 SDValue TrueMask = getAllOnesMask(VecVT: IdxVT, VL, DL, DAG);
13033 Interleaved = DAG.getNode(Opcode: RISCVISD::VRGATHEREI16_VV_VL, DL, VT: ConcatVT,
13034 N1: Concat, N2: Idx, N3: DAG.getUNDEF(VT: ConcatVT), N4: TrueMask, N5: VL);
13035 }
13036
13037 // Extract the two halves from the interleaved result
13038 SDValue Lo = DAG.getExtractSubvector(DL, VT: VecVT, Vec: Interleaved, Idx: 0);
13039 SDValue Hi = DAG.getExtractSubvector(DL, VT: VecVT, Vec: Interleaved,
13040 Idx: VecVT.getVectorMinNumElements());
13041
13042 return DAG.getMergeValues(Ops: {Lo, Hi}, dl: DL);
13043}
13044
13045// Lower step_vector to the vid instruction. Any non-identity step value must
13046 // be accounted for by manual expansion.
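// For example (illustrative): a step of 4 becomes vid.v followed by a shift
// left by 2, while a step of 3 becomes vid.v followed by a multiply by 3.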
13047SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
13048 SelectionDAG &DAG) const {
13049 SDLoc DL(Op);
13050 MVT VT = Op.getSimpleValueType();
13051 assert(VT.isScalableVector() && "Expected scalable vector");
13052 MVT XLenVT = Subtarget.getXLenVT();
13053 auto [Mask, VL] = getDefaultScalableVLOps(VecVT: VT, DL, DAG, Subtarget);
13054 SDValue StepVec = DAG.getNode(Opcode: RISCVISD::VID_VL, DL, VT, N1: Mask, N2: VL);
13055 uint64_t StepValImm = Op.getConstantOperandVal(i: 0);
13056 if (StepValImm != 1) {
13057 if (isPowerOf2_64(Value: StepValImm)) {
13058 SDValue StepVal =
13059 DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT, N1: DAG.getUNDEF(VT),
13060 N2: DAG.getConstant(Val: Log2_64(Value: StepValImm), DL, VT: XLenVT), N3: VL);
13061 StepVec = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: StepVec, N2: StepVal);
13062 } else {
13063 SDValue StepVal = lowerScalarSplat(
13064 Passthru: SDValue(), Scalar: DAG.getConstant(Val: StepValImm, DL, VT: VT.getVectorElementType()),
13065 VL, VT, DL, DAG, Subtarget);
13066 StepVec = DAG.getNode(Opcode: ISD::MUL, DL, VT, N1: StepVec, N2: StepVal);
13067 }
13068 }
13069 return StepVec;
13070}
13071
13072// Implement vector_reverse using vrgather.vv with indices determined by
13073// subtracting the id of each element from (VLMAX-1). This will convert
13074// the indices like so:
13075// (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
13076// TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
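// For example (illustrative), the general path below is roughly "vid.v" to
// materialize the element indices, a "vrsub.vx" against VLMAX-1 to reverse
// them, and a "vrgather.vv" (or "vrgatherei16.vv") to permute the source.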
13077SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
13078 SelectionDAG &DAG) const {
13079 SDLoc DL(Op);
13080 MVT VecVT = Op.getSimpleValueType();
13081 if (VecVT.getVectorElementType() == MVT::i1) {
13082 MVT WidenVT = MVT::getVectorVT(VT: MVT::i8, EC: VecVT.getVectorElementCount());
13083 SDValue Op1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: WidenVT, Operand: Op.getOperand(i: 0));
13084 SDValue Op2 = DAG.getNode(Opcode: ISD::VECTOR_REVERSE, DL, VT: WidenVT, Operand: Op1);
13085 return DAG.getSetCC(DL, VT: VecVT, LHS: Op2,
13086 RHS: DAG.getConstant(Val: 0, DL, VT: Op2.getValueType()), Cond: ISD::SETNE);
13087 }
13088
13089 MVT ContainerVT = VecVT;
13090 SDValue Vec = Op.getOperand(i: 0);
13091 if (VecVT.isFixedLengthVector()) {
13092 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
13093 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
13094 }
13095
13096 MVT XLenVT = Subtarget.getXLenVT();
13097 auto [Mask, VL] = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
13098
13099 // On some uarchs vrgather.vv will read from every input register for each
13100 // output register, regardless of the indices. However, to reverse a vector
13101 // each output register only needs to read from one register. So decompose it
13102 // into LMUL * M1 vrgather.vvs, so we get O(LMUL) performance instead of
13103 // O(LMUL^2).
13104 //
13105 // vsetvli a1, zero, e64, m4, ta, ma
13106 // vrgatherei16.vv v12, v8, v16
13107 // ->
13108 // vsetvli a1, zero, e64, m1, ta, ma
13109 // vrgather.vv v15, v8, v16
13110 // vrgather.vv v14, v9, v16
13111 // vrgather.vv v13, v10, v16
13112 // vrgather.vv v12, v11, v16
13113 if (ContainerVT.bitsGT(VT: RISCVTargetLowering::getM1VT(VT: ContainerVT)) &&
13114 ContainerVT.getVectorElementCount().isKnownMultipleOf(RHS: 2)) {
13115 auto [Lo, Hi] = DAG.SplitVector(N: Vec, DL);
13116 Lo = DAG.getNode(Opcode: ISD::VECTOR_REVERSE, DL, VT: Lo.getValueType(), Operand: Lo);
13117 Hi = DAG.getNode(Opcode: ISD::VECTOR_REVERSE, DL, VT: Hi.getValueType(), Operand: Hi);
13118 SDValue Concat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ContainerVT, N1: Hi, N2: Lo);
13119
13120 // Fixed length vectors might not fit exactly into their container, and so
13121 // leave a gap in the front of the vector after being reversed. Slide this
13122 // away.
13123 //
13124 // x x x x 3 2 1 0 <- v4i16 @ vlen=128
13125 // 0 1 2 3 x x x x <- reverse
13126 // x x x x 0 1 2 3 <- vslidedown.vx
13127 if (VecVT.isFixedLengthVector()) {
13128 SDValue Offset = DAG.getNode(
13129 Opcode: ISD::SUB, DL, VT: XLenVT,
13130 N1: DAG.getElementCount(DL, VT: XLenVT, EC: ContainerVT.getVectorElementCount()),
13131 N2: DAG.getElementCount(DL, VT: XLenVT, EC: VecVT.getVectorElementCount()));
13132 Concat =
13133 getVSlidedown(DAG, Subtarget, DL, VT: ContainerVT,
13134 Passthru: DAG.getUNDEF(VT: ContainerVT), Op: Concat, Offset, Mask, VL);
13135 Concat = convertFromScalableVector(VT: VecVT, V: Concat, DAG, Subtarget);
13136 }
13137 return Concat;
13138 }
13139
13140 unsigned EltSize = ContainerVT.getScalarSizeInBits();
13141 unsigned MinSize = ContainerVT.getSizeInBits().getKnownMinValue();
13142 unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
13143 unsigned MaxVLMAX =
13144 VecVT.isFixedLengthVector()
13145 ? VecVT.getVectorNumElements()
13146 : RISCVTargetLowering::computeVLMAX(VectorBits: VectorBitsMax, EltSize, MinSize);
13147
13148 unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
13149 MVT IntVT = ContainerVT.changeVectorElementTypeToInteger();
13150
13151 // If this is SEW=8 and VLMAX is potentially more than 256, we need
13152 // to use vrgatherei16.vv.
13153 if (MaxVLMAX > 256 && EltSize == 8) {
13154 // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
13155 // Reverse each half, then reassemble them in reverse order.
13156 // NOTE: It's also possible that, after splitting, VLMAX no longer
13157 // requires vrgatherei16.vv.
13158 if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
13159 auto [Lo, Hi] = DAG.SplitVectorOperand(N: Op.getNode(), OpNo: 0);
13160 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VT: VecVT);
13161 Lo = DAG.getNode(Opcode: ISD::VECTOR_REVERSE, DL, VT: LoVT, Operand: Lo);
13162 Hi = DAG.getNode(Opcode: ISD::VECTOR_REVERSE, DL, VT: HiVT, Operand: Hi);
13163 // Reassemble the low and high pieces reversed.
13164 return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: VecVT, N1: Hi, N2: Lo);
13165 }
13166
13167 // Just promote the int type to i16 which will double the LMUL.
13168 IntVT = MVT::getVectorVT(VT: MVT::i16, EC: ContainerVT.getVectorElementCount());
13169 GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
13170 }
13171
13172 // At LMUL > 1, do the index computation in 16 bits to reduce register
13173 // pressure.
13174 if (IntVT.getScalarType().bitsGT(VT: MVT::i16) &&
13175 IntVT.bitsGT(VT: RISCVTargetLowering::getM1VT(VT: IntVT))) {
13176 assert(isUInt<16>(MaxVLMAX - 1)); // Largest VLMAX is 65536 @ zvl65536b
13177 GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
13178 IntVT = IntVT.changeVectorElementType(EltVT: MVT::i16);
13179 }
13180
13181 // Calculate VLMAX-1 for the desired SEW.
13182 SDValue VLMinus1 = DAG.getNode(
13183 Opcode: ISD::SUB, DL, VT: XLenVT,
13184 N1: DAG.getElementCount(DL, VT: XLenVT, EC: VecVT.getVectorElementCount()),
13185 N2: DAG.getConstant(Val: 1, DL, VT: XLenVT));
13186
13187 // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
13188 bool IsRV32E64 =
13189 !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
13190 SDValue SplatVL;
13191 if (!IsRV32E64)
13192 SplatVL = DAG.getSplatVector(VT: IntVT, DL, Op: VLMinus1);
13193 else
13194 SplatVL = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: IntVT, N1: DAG.getUNDEF(VT: IntVT),
13195 N2: VLMinus1, N3: DAG.getRegister(Reg: RISCV::X0, VT: XLenVT));
13196
13197 SDValue VID = DAG.getNode(Opcode: RISCVISD::VID_VL, DL, VT: IntVT, N1: Mask, N2: VL);
13198 SDValue Indices = DAG.getNode(Opcode: RISCVISD::SUB_VL, DL, VT: IntVT, N1: SplatVL, N2: VID,
13199 N3: DAG.getUNDEF(VT: IntVT), N4: Mask, N5: VL);
13200
13201 SDValue Gather = DAG.getNode(Opcode: GatherOpc, DL, VT: ContainerVT, N1: Vec, N2: Indices,
13202 N3: DAG.getUNDEF(VT: ContainerVT), N4: Mask, N5: VL);
13203 if (VecVT.isFixedLengthVector())
13204 Gather = convertFromScalableVector(VT: VecVT, V: Gather, DAG, Subtarget);
13205 return Gather;
13206}
13207
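// Illustrative sketch for the splice lowering below (not from the source):
// splicing <a,b,c,d> and <e,f,g,h> left by 1 yields <b,c,d,e>, lowered
// roughly as
//   vslidedown.vx v8, v8, a0    ; a0 = Offset
//   vslideup.vx   v8, v9, a1    ; a1 = VLMAX - Offset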
13208SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
13209 SelectionDAG &DAG) const {
13210 SDLoc DL(Op);
13211 SDValue V1 = Op.getOperand(i: 0);
13212 SDValue V2 = Op.getOperand(i: 1);
13213 SDValue Offset = Op.getOperand(i: 2);
13214 MVT XLenVT = Subtarget.getXLenVT();
13215 MVT VecVT = Op.getSimpleValueType();
13216
13217 SDValue VLMax = computeVLMax(VecVT, DL, DAG);
13218
13219 SDValue DownOffset, UpOffset;
13220 if (Op.getOpcode() == ISD::VECTOR_SPLICE_LEFT) {
13221 // Splice left by Offset: slide V1 down by Offset and slide V2 up by
13222 // VLMAX - Offset.
13223 DownOffset = Offset;
13224 UpOffset = DAG.getNode(Opcode: ISD::SUB, DL, VT: XLenVT, N1: VLMax, N2: Offset);
13225 } else {
13226 // Splice right by Offset: slide V1 down by VLMAX - Offset and slide V2
13227 // up by Offset.
13228 UpOffset = Offset;
13229 DownOffset = DAG.getNode(Opcode: ISD::SUB, DL, VT: XLenVT, N1: VLMax, N2: Offset);
13230 }
13231
13232 SDValue TrueMask = getAllOnesMask(VecVT, VL: VLMax, DL, DAG);
13233
13234 SDValue SlideDown = getVSlidedown(
13235 DAG, Subtarget, DL, VT: VecVT, Passthru: DAG.getUNDEF(VT: VecVT), Op: V1, Offset: DownOffset, Mask: TrueMask,
13236 VL: Subtarget.hasVLDependentLatency() ? UpOffset
13237 : DAG.getRegister(Reg: RISCV::X0, VT: XLenVT));
13238 return getVSlideup(DAG, Subtarget, DL, VT: VecVT, Passthru: SlideDown, Op: V2, Offset: UpOffset,
13239 Mask: TrueMask, VL: DAG.getRegister(Reg: RISCV::X0, VT: XLenVT),
13240 Policy: RISCVVType::TAIL_AGNOSTIC);
13241}
13242
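// Illustrative sketch: when the exact VLEN is unknown, a v4i32 load takes
// the vle intrinsic path below and becomes roughly (assuming a 128-bit
// minimum VLEN, so the container is an M1 register group)
//   vsetivli zero, 4, e32, m1, ta, ma
//   vle32.v  v8, (a0)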
13243SDValue
13244RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
13245 SelectionDAG &DAG) const {
13246 SDLoc DL(Op);
13247 auto *Load = cast<LoadSDNode>(Val&: Op);
13248
13249 assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
13250 Load->getMemoryVT(),
13251 *Load->getMemOperand()) &&
13252 "Expecting a correctly-aligned load");
13253
13254 MVT VT = Op.getSimpleValueType();
13255 MVT XLenVT = Subtarget.getXLenVT();
13256 MVT ContainerVT = getContainerForFixedLengthVector(VT);
13257
13258 // If we know the exact VLEN and our fixed length vector completely fills
13259 // the container, use a whole register load instead.
13260 const auto [MinVLMAX, MaxVLMAX] =
13261 RISCVTargetLowering::computeVLMAXBounds(VecVT: ContainerVT, Subtarget);
13262 if (MinVLMAX == MaxVLMAX && MinVLMAX == VT.getVectorNumElements() &&
13263 RISCVTargetLowering::getM1VT(VT: ContainerVT).bitsLE(VT: ContainerVT)) {
13264 MachineMemOperand *MMO = Load->getMemOperand();
13265 SDValue NewLoad =
13266 DAG.getLoad(VT: ContainerVT, dl: DL, Chain: Load->getChain(), Ptr: Load->getBasePtr(),
13267 PtrInfo: MMO->getPointerInfo(), Alignment: MMO->getBaseAlign(), MMOFlags: MMO->getFlags(),
13268 AAInfo: MMO->getAAInfo(), Ranges: MMO->getRanges());
13269 SDValue Result = convertFromScalableVector(VT, V: NewLoad, DAG, Subtarget);
13270 return DAG.getMergeValues(Ops: {Result, NewLoad.getValue(R: 1)}, dl: DL);
13271 }
13272
13273 SDValue VL = DAG.getConstant(Val: VT.getVectorNumElements(), DL, VT: XLenVT);
13274
13275 bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
13276 SDValue IntID = DAG.getTargetConstant(
13277 Val: IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, VT: XLenVT);
13278 SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
13279 if (!IsMaskOp)
13280 Ops.push_back(Elt: DAG.getUNDEF(VT: ContainerVT));
13281 Ops.push_back(Elt: Load->getBasePtr());
13282 Ops.push_back(Elt: VL);
13283 SDVTList VTs = DAG.getVTList(VTs: {ContainerVT, MVT::Other});
13284 SDValue NewLoad =
13285 DAG.getMemIntrinsicNode(Opcode: ISD::INTRINSIC_W_CHAIN, dl: DL, VTList: VTs, Ops,
13286 MemVT: Load->getMemoryVT(), MMO: Load->getMemOperand());
13287
13288 SDValue Result = convertFromScalableVector(VT, V: NewLoad, DAG, Subtarget);
13289 return DAG.getMergeValues(Ops: {Result, NewLoad.getValue(R: 1)}, dl: DL);
13290}
13291
13292SDValue
13293RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
13294 SelectionDAG &DAG) const {
13295 SDLoc DL(Op);
13296 auto *Store = cast<StoreSDNode>(Val&: Op);
13297
13298 assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
13299 Store->getMemoryVT(),
13300 *Store->getMemOperand()) &&
13301 "Expecting a correctly-aligned store");
13302
13303 SDValue StoreVal = Store->getValue();
13304 MVT VT = StoreVal.getSimpleValueType();
13305 MVT XLenVT = Subtarget.getXLenVT();
13306
13307 // If the size is less than a byte, pad with zeros to make a full byte.
13308 if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
13309 VT = MVT::v8i1;
13310 StoreVal =
13311 DAG.getInsertSubvector(DL, Vec: DAG.getConstant(Val: 0, DL, VT), SubVec: StoreVal, Idx: 0);
13312 }
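// For example (illustrative): a v4i1 value is widened to v8i1 above, so the
// store below writes a single byte whose low four bits hold the mask and
// whose upper four bits are zero.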
13313
13314 MVT ContainerVT = getContainerForFixedLengthVector(VT);
13315
13316 SDValue NewValue =
13317 convertToScalableVector(VT: ContainerVT, V: StoreVal, DAG, Subtarget);
13318
13319 // If we know the exact VLEN and our fixed length vector completely fills
13320 // the container, use a whole register store instead.
13321 const auto [MinVLMAX, MaxVLMAX] =
13322 RISCVTargetLowering::computeVLMAXBounds(VecVT: ContainerVT, Subtarget);
13323 if (MinVLMAX == MaxVLMAX && MinVLMAX == VT.getVectorNumElements() &&
13324 RISCVTargetLowering::getM1VT(VT: ContainerVT).bitsLE(VT: ContainerVT)) {
13325 MachineMemOperand *MMO = Store->getMemOperand();
13326 return DAG.getStore(Chain: Store->getChain(), dl: DL, Val: NewValue, Ptr: Store->getBasePtr(),
13327 PtrInfo: MMO->getPointerInfo(), Alignment: MMO->getBaseAlign(),
13328 MMOFlags: MMO->getFlags(), AAInfo: MMO->getAAInfo());
13329 }
13330
13331 SDValue VL = DAG.getConstant(Val: VT.getVectorNumElements(), DL, VT: XLenVT);
13332
13333 bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
13334 SDValue IntID = DAG.getTargetConstant(
13335 Val: IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, VT: XLenVT);
13336 return DAG.getMemIntrinsicNode(
13337 Opcode: ISD::INTRINSIC_VOID, dl: DL, VTList: DAG.getVTList(VT: MVT::Other),
13338 Ops: {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
13339 MemVT: Store->getMemoryVT(), MMO: Store->getMemOperand());
13340}
13341
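// Illustrative sketch for the masked-load lowering below: a v4i32
// masked.load with a non-trivial mask becomes roughly
//   vsetivli zero, 4, e32, m1, ta, mu
//   vle32.v  v8, (a0), v0.t
// with the passthru in v8 and the mask in v0.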
13342SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
13343 SelectionDAG &DAG) const {
13344 SDLoc DL(Op);
13345 MVT VT = Op.getSimpleValueType();
13346
13347 const auto *MemSD = cast<MemSDNode>(Val&: Op);
13348 EVT MemVT = MemSD->getMemoryVT();
13349 MachineMemOperand *MMO = MemSD->getMemOperand();
13350 SDValue Chain = MemSD->getChain();
13351 SDValue BasePtr = MemSD->getBasePtr();
13352
13353 SDValue Mask, PassThru, VL;
13354 bool IsExpandingLoad = false;
13355 if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Val&: Op)) {
13356 Mask = VPLoad->getMask();
13357 PassThru = DAG.getUNDEF(VT);
13358 VL = VPLoad->getVectorLength();
13359 } else {
13360 const auto *MLoad = cast<MaskedLoadSDNode>(Val&: Op);
13361 Mask = MLoad->getMask();
13362 PassThru = MLoad->getPassThru();
13363 IsExpandingLoad = MLoad->isExpandingLoad();
13364 }
13365
13366 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(N: Mask.getNode());
13367
13368 MVT XLenVT = Subtarget.getXLenVT();
13369
13370 MVT ContainerVT = VT;
13371 if (VT.isFixedLengthVector()) {
13372 ContainerVT = getContainerForFixedLengthVector(VT);
13373 PassThru = convertToScalableVector(VT: ContainerVT, V: PassThru, DAG, Subtarget);
13374 if (!IsUnmasked) {
13375 MVT MaskVT = getMaskTypeFor(VecVT: ContainerVT);
13376 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
13377 }
13378 }
13379
13380 if (!VL)
13381 VL = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget).second;
13382
13383 SDValue ExpandingVL;
13384 if (!IsUnmasked && IsExpandingLoad) {
13385 ExpandingVL = VL;
13386 VL =
13387 DAG.getNode(Opcode: RISCVISD::VCPOP_VL, DL, VT: XLenVT, N1: Mask,
13388 N2: getAllOnesMask(VecVT: Mask.getSimpleValueType(), VL, DL, DAG), N3: VL);
13389 }
13390
13391 unsigned IntID = IsUnmasked || IsExpandingLoad ? Intrinsic::riscv_vle
13392 : Intrinsic::riscv_vle_mask;
13393 SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(Val: IntID, DL, VT: XLenVT)};
13394 if (IntID == Intrinsic::riscv_vle)
13395 Ops.push_back(Elt: DAG.getUNDEF(VT: ContainerVT));
13396 else
13397 Ops.push_back(Elt: PassThru);
13398 Ops.push_back(Elt: BasePtr);
13399 if (IntID == Intrinsic::riscv_vle_mask)
13400 Ops.push_back(Elt: Mask);
13401 Ops.push_back(Elt: VL);
13402 if (IntID == Intrinsic::riscv_vle_mask)
13403 Ops.push_back(Elt: DAG.getTargetConstant(Val: RISCVVType::TAIL_AGNOSTIC, DL, VT: XLenVT));
13404
13405 SDVTList VTs = DAG.getVTList(VTs: {ContainerVT, MVT::Other});
13406
13407 SDValue Result =
13408 DAG.getMemIntrinsicNode(Opcode: ISD::INTRINSIC_W_CHAIN, dl: DL, VTList: VTs, Ops, MemVT, MMO);
13409 Chain = Result.getValue(R: 1);
13410 if (ExpandingVL) {
13411 MVT IndexVT = ContainerVT;
13412 if (ContainerVT.isFloatingPoint())
13413 IndexVT = ContainerVT.changeVectorElementTypeToInteger();
13414
13415 MVT IndexEltVT = IndexVT.getVectorElementType();
13416 bool UseVRGATHEREI16 = false;
13417 // If the index vector is an i8 vector and the element count exceeds 256,
13418 // we need to change the element type of the index vector to i16 to avoid
13419 // overflow.
13420 if (IndexEltVT == MVT::i8 && VT.getVectorNumElements() > 256) {
13421 // FIXME: We need to do vector splitting manually for LMUL=8 cases.
13422 assert(getLMUL(IndexVT) != RISCVVType::LMUL_8);
13423 IndexVT = IndexVT.changeVectorElementType(EltVT: MVT::i16);
13424 UseVRGATHEREI16 = true;
13425 }
13426
13427 SDValue Iota =
13428 DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: IndexVT,
13429 N1: DAG.getTargetConstant(Val: Intrinsic::riscv_viota, DL, VT: XLenVT),
13430 N2: DAG.getUNDEF(VT: IndexVT), N3: Mask, N4: ExpandingVL);
13431 Result =
13432 DAG.getNode(Opcode: UseVRGATHEREI16 ? RISCVISD::VRGATHEREI16_VV_VL
13433 : RISCVISD::VRGATHER_VV_VL,
13434 DL, VT: ContainerVT, N1: Result, N2: Iota, N3: PassThru, N4: Mask, N5: ExpandingVL);
13435 }
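// Illustrative sketch of the expanding-load handling above: with mask
// {1,0,1,1}, VL is first reduced to vcpop(mask) = 3 so only three
// contiguous elements {a,b,c} are loaded; viota.m then produces indices
// {0,1,1,2}, and the masked vrgather places them as {a, passthru, b, c}.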
13436
13437 if (VT.isFixedLengthVector())
13438 Result = convertFromScalableVector(VT, V: Result, DAG, Subtarget);
13439
13440 return DAG.getMergeValues(Ops: {Result, Chain}, dl: DL);
13441}
13442
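// Illustrative sketch for the fault-only-first load lowering below: a
// vp.load.ff of v4i32 becomes roughly
//   vsetivli  zero, 4, e32, m1, ta, ma
//   vle32ff.v v8, (a0), v0.t
//   csrr      a1, vl          ; the possibly reduced element count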
13443SDValue RISCVTargetLowering::lowerLoadFF(SDValue Op, SelectionDAG &DAG) const {
13444 SDLoc DL(Op);
13445 MVT VT = Op->getSimpleValueType(ResNo: 0);
13446
13447 const auto *VPLoadFF = cast<VPLoadFFSDNode>(Val&: Op);
13448 EVT MemVT = VPLoadFF->getMemoryVT();
13449 MachineMemOperand *MMO = VPLoadFF->getMemOperand();
13450 SDValue Chain = VPLoadFF->getChain();
13451 SDValue BasePtr = VPLoadFF->getBasePtr();
13452
13453 SDValue Mask = VPLoadFF->getMask();
13454 SDValue VL = VPLoadFF->getVectorLength();
13455
13456 MVT XLenVT = Subtarget.getXLenVT();
13457
13458 MVT ContainerVT = VT;
13459 if (VT.isFixedLengthVector()) {
13460 ContainerVT = getContainerForFixedLengthVector(VT);
13461 MVT MaskVT = getMaskTypeFor(VecVT: ContainerVT);
13462 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
13463 }
13464
13465 unsigned IntID = Intrinsic::riscv_vleff_mask;
13466 SDValue Ops[] = {
13467 Chain,
13468 DAG.getTargetConstant(Val: IntID, DL, VT: XLenVT),
13469 DAG.getUNDEF(VT: ContainerVT),
13470 BasePtr,
13471 Mask,
13472 VL,
13473 DAG.getTargetConstant(Val: RISCVVType::TAIL_AGNOSTIC, DL, VT: XLenVT)};
13474
13475 SDVTList VTs = DAG.getVTList(VTs: {ContainerVT, Op->getValueType(ResNo: 1), MVT::Other});
13476
13477 SDValue Result =
13478 DAG.getMemIntrinsicNode(Opcode: ISD::INTRINSIC_W_CHAIN, dl: DL, VTList: VTs, Ops, MemVT, MMO);
13479 SDValue OutVL = Result.getValue(R: 1);
13480 Chain = Result.getValue(R: 2);
13481
13482 if (VT.isFixedLengthVector())
13483 Result = convertFromScalableVector(VT, V: Result, DAG, Subtarget);
13484
13485 return DAG.getMergeValues(Ops: {Result, OutVL, Chain}, dl: DL);
13486}
13487
13488SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
13489 SelectionDAG &DAG) const {
13490 SDLoc DL(Op);
13491
13492 const auto *MemSD = cast<MemSDNode>(Val&: Op);
13493 EVT MemVT = MemSD->getMemoryVT();
13494 MachineMemOperand *MMO = MemSD->getMemOperand();
13495 SDValue Chain = MemSD->getChain();
13496 SDValue BasePtr = MemSD->getBasePtr();
13497 SDValue Val, Mask, VL;
13498
13499 bool IsCompressingStore = false;
13500 if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Val&: Op)) {
13501 Val = VPStore->getValue();
13502 Mask = VPStore->getMask();
13503 VL = VPStore->getVectorLength();
13504 } else {
13505 const auto *MStore = cast<MaskedStoreSDNode>(Val&: Op);
13506 Val = MStore->getValue();
13507 Mask = MStore->getMask();
13508 IsCompressingStore = MStore->isCompressingStore();
13509 }
13510
13511 bool IsUnmasked =
13512 ISD::isConstantSplatVectorAllOnes(N: Mask.getNode()) || IsCompressingStore;
13513
13514 MVT VT = Val.getSimpleValueType();
13515 MVT XLenVT = Subtarget.getXLenVT();
13516
13517 MVT ContainerVT = VT;
13518 if (VT.isFixedLengthVector()) {
13519 ContainerVT = getContainerForFixedLengthVector(VT);
13520
13521 Val = convertToScalableVector(VT: ContainerVT, V: Val, DAG, Subtarget);
13522 if (!IsUnmasked || IsCompressingStore) {
13523 MVT MaskVT = getMaskTypeFor(VecVT: ContainerVT);
13524 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
13525 }
13526 }
13527
13528 if (!VL)
13529 VL = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget).second;
13530
13531 if (IsCompressingStore) {
13532 Val = DAG.getNode(
13533 Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: ContainerVT,
13534 N1: DAG.getTargetConstant(Val: Intrinsic::riscv_vcompress, DL, VT: XLenVT),
13535 N2: DAG.getUNDEF(VT: ContainerVT), N3: Val, N4: Mask, N5: VL);
13536 VL =
13537 DAG.getNode(Opcode: RISCVISD::VCPOP_VL, DL, VT: XLenVT, N1: Mask,
13538 N2: getAllOnesMask(VecVT: Mask.getSimpleValueType(), VL, DL, DAG), N3: VL);
13539 }
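// Illustrative sketch of the compressing-store handling above: storing
// {a,b,c,d} with mask {1,0,1,1}, vcompress.vm packs the value to
// {a,c,d,...} and VL becomes vcpop(mask) = 3, so the unmasked vse below
// writes three contiguous elements.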
13540
13541 unsigned IntID =
13542 IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
13543 SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(Val: IntID, DL, VT: XLenVT)};
13544 Ops.push_back(Elt: Val);
13545 Ops.push_back(Elt: BasePtr);
13546 if (!IsUnmasked)
13547 Ops.push_back(Elt: Mask);
13548 Ops.push_back(Elt: VL);
13549
13550 return DAG.getMemIntrinsicNode(Opcode: ISD::INTRINSIC_VOID, dl: DL,
13551 VTList: DAG.getVTList(VT: MVT::Other), Ops, MemVT, MMO);
13552}
13553
13554SDValue RISCVTargetLowering::lowerVectorCompress(SDValue Op,
13555 SelectionDAG &DAG) const {
13556 SDLoc DL(Op);
13557 SDValue Val = Op.getOperand(i: 0);
13558 SDValue Mask = Op.getOperand(i: 1);
13559 SDValue Passthru = Op.getOperand(i: 2);
13560
13561 MVT VT = Val.getSimpleValueType();
13562 MVT XLenVT = Subtarget.getXLenVT();
13563 MVT ContainerVT = VT;
13564 if (VT.isFixedLengthVector()) {
13565 ContainerVT = getContainerForFixedLengthVector(VT);
13566 MVT MaskVT = getMaskTypeFor(VecVT: ContainerVT);
13567 Val = convertToScalableVector(VT: ContainerVT, V: Val, DAG, Subtarget);
13568 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
13569 Passthru = convertToScalableVector(VT: ContainerVT, V: Passthru, DAG, Subtarget);
13570 }
13571
13572 SDValue VL = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget).second;
13573 SDValue Res =
13574 DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: ContainerVT,
13575 N1: DAG.getTargetConstant(Val: Intrinsic::riscv_vcompress, DL, VT: XLenVT),
13576 N2: Passthru, N3: Val, N4: Mask, N5: VL);
13577
13578 if (VT.isFixedLengthVector())
13579 Res = convertFromScalableVector(VT, V: Res, DAG, Subtarget);
13580
13581 return Res;
13582}
13583
13584SDValue RISCVTargetLowering::lowerVectorStrictFSetcc(SDValue Op,
13585 SelectionDAG &DAG) const {
13586 unsigned Opc = Op.getOpcode();
13587 SDLoc DL(Op);
13588 SDValue Chain = Op.getOperand(i: 0);
13589 SDValue Op1 = Op.getOperand(i: 1);
13590 SDValue Op2 = Op.getOperand(i: 2);
13591 SDValue CC = Op.getOperand(i: 3);
13592 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val&: CC)->get();
13593 MVT VT = Op.getSimpleValueType();
13594 MVT InVT = Op1.getSimpleValueType();
13595
13596 // RVV VMFEQ/VMFNE do not raise exceptions for qNaN, so we expand
13597 // strict_fsetccs with the OEQ/UNE condition codes.
13598 if (Opc == ISD::STRICT_FSETCCS) {
13599 // Expand strict_fsetccs(x, y, oeq) to
13600 // (and strict_fsetccs(x, y, ole), strict_fsetccs(y, x, ole)).
13601 SDVTList VTList = Op->getVTList();
13602 if (CCVal == ISD::SETEQ || CCVal == ISD::SETOEQ) {
13603 SDValue OLECCVal = DAG.getCondCode(Cond: ISD::SETOLE);
13604 SDValue Tmp1 = DAG.getNode(Opcode: ISD::STRICT_FSETCCS, DL, VTList, N1: Chain, N2: Op1,
13605 N3: Op2, N4: OLECCVal);
13606 SDValue Tmp2 = DAG.getNode(Opcode: ISD::STRICT_FSETCCS, DL, VTList, N1: Chain, N2: Op2,
13607 N3: Op1, N4: OLECCVal);
13608 SDValue OutChain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other,
13609 N1: Tmp1.getValue(R: 1), N2: Tmp2.getValue(R: 1));
13610 // Tmp1 and Tmp2 might be the same node.
13611 if (Tmp1 != Tmp2)
13612 Tmp1 = DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Tmp1, N2: Tmp2);
13613 return DAG.getMergeValues(Ops: {Tmp1, OutChain}, dl: DL);
13614 }
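// Illustrative note: the expansion above relies on the identity
//   (x oeq y) == (x ole y) && (y ole x)
// and, unlike a quiet OEQ compare, both OLE compares signal on qNaN, which
// preserves the strict_fsetccs semantics.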
13615
13616 // Expand (strict_fsetccs x, y, une) to (not (strict_fsetccs x, y, oeq))
13617 if (CCVal == ISD::SETNE || CCVal == ISD::SETUNE) {
13618 SDValue OEQCCVal = DAG.getCondCode(Cond: ISD::SETOEQ);
13619 SDValue OEQ = DAG.getNode(Opcode: ISD::STRICT_FSETCCS, DL, VTList, N1: Chain, N2: Op1,
13620 N3: Op2, N4: OEQCCVal);
13621 SDValue Res = DAG.getNOT(DL, Val: OEQ, VT);
13622 return DAG.getMergeValues(Ops: {Res, OEQ.getValue(R: 1)}, dl: DL);
13623 }
13624 }
13625
13626 MVT ContainerInVT = InVT;
13627 if (InVT.isFixedLengthVector()) {
13628 ContainerInVT = getContainerForFixedLengthVector(VT: InVT);
13629 Op1 = convertToScalableVector(VT: ContainerInVT, V: Op1, DAG, Subtarget);
13630 Op2 = convertToScalableVector(VT: ContainerInVT, V: Op2, DAG, Subtarget);
13631 }
13632 MVT MaskVT = getMaskTypeFor(VecVT: ContainerInVT);
13633
13634 auto [Mask, VL] = getDefaultVLOps(VecVT: InVT, ContainerVT: ContainerInVT, DL, DAG, Subtarget);
13635
13636 SDValue Res;
13637 if (Opc == ISD::STRICT_FSETCC &&
13638 (CCVal == ISD::SETLT || CCVal == ISD::SETOLT || CCVal == ISD::SETLE ||
13639 CCVal == ISD::SETOLE)) {
13640 // VMFLT/VMFLE/VMFGT/VMFGE raise an exception for qNaN. Generate a mask that
13641 // is only active when both input elements are ordered.
13642 SDValue True = getAllOnesMask(VecVT: ContainerInVT, VL, DL, DAG);
13643 SDValue OrderMask1 = DAG.getNode(
13644 Opcode: RISCVISD::STRICT_FSETCC_VL, DL, VTList: DAG.getVTList(VT1: MaskVT, VT2: MVT::Other),
13645 Ops: {Chain, Op1, Op1, DAG.getCondCode(Cond: ISD::SETOEQ), DAG.getUNDEF(VT: MaskVT),
13646 True, VL});
13647 SDValue OrderMask2 = DAG.getNode(
13648 Opcode: RISCVISD::STRICT_FSETCC_VL, DL, VTList: DAG.getVTList(VT1: MaskVT, VT2: MVT::Other),
13649 Ops: {Chain, Op2, Op2, DAG.getCondCode(Cond: ISD::SETOEQ), DAG.getUNDEF(VT: MaskVT),
13650 True, VL});
13651 Mask =
13652 DAG.getNode(Opcode: RISCVISD::VMAND_VL, DL, VT: MaskVT, N1: OrderMask1, N2: OrderMask2, N3: VL);
13653 // Use Mask as the passthru operand to let the result be 0 if either of the
13654 // inputs is unordered.
13655 Res = DAG.getNode(Opcode: RISCVISD::STRICT_FSETCCS_VL, DL,
13656 VTList: DAG.getVTList(VT1: MaskVT, VT2: MVT::Other),
13657 Ops: {Chain, Op1, Op2, CC, Mask, Mask, VL});
13658 } else {
13659 unsigned RVVOpc = Opc == ISD::STRICT_FSETCC ? RISCVISD::STRICT_FSETCC_VL
13660 : RISCVISD::STRICT_FSETCCS_VL;
13661 Res = DAG.getNode(Opcode: RVVOpc, DL, VTList: DAG.getVTList(VT1: MaskVT, VT2: MVT::Other),
13662 Ops: {Chain, Op1, Op2, CC, DAG.getUNDEF(VT: MaskVT), Mask, VL});
13663 }
13664
13665 if (VT.isFixedLengthVector()) {
13666 SDValue SubVec = convertFromScalableVector(VT, V: Res, DAG, Subtarget);
13667 return DAG.getMergeValues(Ops: {SubVec, Res.getValue(R: 1)}, dl: DL);
13668 }
13669 return Res;
13670}
13671
13672// Lower vector ABS to smax(X, sub(0, X)).
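// For example (illustrative): abs({-3, 4}) = smax({-3, 4}, {0, 0} - {-3, 4})
//                                          = smax({-3, 4}, {3, -4}) = {3, 4}.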
13673SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
13674 SDLoc DL(Op);
13675 MVT VT = Op.getSimpleValueType();
13676 SDValue X = Op.getOperand(i: 0);
13677
13678 assert((Op.getOpcode() == ISD::VP_ABS || VT.isFixedLengthVector()) &&
13679 "Unexpected type for ISD::ABS");
13680
13681 MVT ContainerVT = VT;
13682 if (VT.isFixedLengthVector()) {
13683 ContainerVT = getContainerForFixedLengthVector(VT);
13684 X = convertToScalableVector(VT: ContainerVT, V: X, DAG, Subtarget);
13685 }
13686
13687 SDValue Mask, VL;
13688 if (Op->getOpcode() == ISD::VP_ABS) {
13689 Mask = Op->getOperand(Num: 1);
13690 if (VT.isFixedLengthVector())
13691 Mask = convertToScalableVector(VT: getMaskTypeFor(VecVT: ContainerVT), V: Mask, DAG,
13692 Subtarget);
13693 VL = Op->getOperand(Num: 2);
13694 } else
13695 std::tie(args&: Mask, args&: VL) = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
13696
13697 SDValue SplatZero = DAG.getNode(
13698 Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT, N1: DAG.getUNDEF(VT: ContainerVT),
13699 N2: DAG.getConstant(Val: 0, DL, VT: Subtarget.getXLenVT()), N3: VL);
13700 SDValue NegX = DAG.getNode(Opcode: RISCVISD::SUB_VL, DL, VT: ContainerVT, N1: SplatZero, N2: X,
13701 N3: DAG.getUNDEF(VT: ContainerVT), N4: Mask, N5: VL);
13702 SDValue Max = DAG.getNode(Opcode: RISCVISD::SMAX_VL, DL, VT: ContainerVT, N1: X, N2: NegX,
13703 N3: DAG.getUNDEF(VT: ContainerVT), N4: Mask, N5: VL);
13704
13705 if (VT.isFixedLengthVector())
13706 Max = convertFromScalableVector(VT, V: Max, DAG, Subtarget);
13707 return Max;
13708}
13709
13710SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op,
13711 SelectionDAG &DAG) const {
13712 const auto &TSInfo =
13713 static_cast<const RISCVSelectionDAGInfo &>(DAG.getSelectionDAGInfo());
13714
13715 unsigned NewOpc = getRISCVVLOp(Op);
13716 bool HasPassthruOp = TSInfo.hasPassthruOp(Opcode: NewOpc);
13717 bool HasMask = TSInfo.hasMaskOp(Opcode: NewOpc);
13718
13719 MVT VT = Op.getSimpleValueType();
13720 MVT ContainerVT = getContainerForFixedLengthVector(VT);
13721
13722 // Create list of operands by converting existing ones to scalable types.
13723 SmallVector<SDValue, 6> Ops;
13724 for (const SDValue &V : Op->op_values()) {
13725 assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
13726
13727 // Pass through non-vector operands.
13728 if (!V.getValueType().isVector()) {
13729 Ops.push_back(Elt: V);
13730 continue;
13731 }
13732
13733 // "cast" fixed length vector to a scalable vector.
13734 assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
13735 "Only fixed length vectors are supported!");
13736 MVT VContainerVT = ContainerVT.changeVectorElementType(
13737 EltVT: V.getSimpleValueType().getVectorElementType());
13738 Ops.push_back(Elt: convertToScalableVector(VT: VContainerVT, V, DAG, Subtarget));
13739 }
13740
13741 SDLoc DL(Op);
13742 auto [Mask, VL] = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget);
13743 if (HasPassthruOp)
13744 Ops.push_back(Elt: DAG.getUNDEF(VT: ContainerVT));
13745 if (HasMask)
13746 Ops.push_back(Elt: Mask);
13747 Ops.push_back(Elt: VL);
13748
13749 // StrictFP operations have two result values. Their lowered result should
13750 // have the same result count.
13751 if (Op->isStrictFPOpcode()) {
13752 SDValue ScalableRes =
13753 DAG.getNode(Opcode: NewOpc, DL, VTList: DAG.getVTList(VT1: ContainerVT, VT2: MVT::Other), Ops,
13754 Flags: Op->getFlags());
13755 SDValue SubVec = convertFromScalableVector(VT, V: ScalableRes, DAG, Subtarget);
13756 return DAG.getMergeValues(Ops: {SubVec, ScalableRes.getValue(R: 1)}, dl: DL);
13757 }
13758
13759 SDValue ScalableRes =
13760 DAG.getNode(Opcode: NewOpc, DL, VT: ContainerVT, Ops, Flags: Op->getFlags());
13761 return convertFromScalableVector(VT, V: ScalableRes, DAG, Subtarget);
13762}
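// Illustrative sketch of lowerToScalableOp (assuming a 128-bit minimum
// VLEN): a fixed-length ISD::ADD on v4i32 is rewritten as RISCVISD::ADD_VL
// on its nxv2i32 container with an all-ones mask and VL = 4, then the
// result is converted back to v4i32.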
13763
13764// Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
13765// * Operands of each node are assumed to be in the same order.
13766// * The EVL operand is promoted from i32 to i64 on RV64.
13767// * Fixed-length vectors are converted to their scalable-vector container
13768// types.
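// Illustrative sketch: a call such as
//   llvm.vp.add.v4i32(%x, %y, %m, i32 %evl)
// becomes roughly ADD_VL(%x', %y', undef passthru, %m', %evl) on the
// scalable container type, where the primed values denote the converted
// fixed-length operands.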
13769SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG) const {
13770 const auto &TSInfo =
13771 static_cast<const RISCVSelectionDAGInfo &>(DAG.getSelectionDAGInfo());
13772
13773 unsigned RISCVISDOpc = getRISCVVLOp(Op);
13774 bool HasPassthruOp = TSInfo.hasPassthruOp(Opcode: RISCVISDOpc);
13775
13776 SDLoc DL(Op);
13777 MVT VT = Op.getSimpleValueType();
13778 SmallVector<SDValue, 4> Ops;
13779
13780 MVT ContainerVT = VT;
13781 if (VT.isFixedLengthVector())
13782 ContainerVT = getContainerForFixedLengthVector(VT);
13783
13784 for (const auto &OpIdx : enumerate(First: Op->ops())) {
13785 SDValue V = OpIdx.value();
13786 assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
13787 // Add a dummy passthru value before the mask, or before the EVL if there
13788 // isn't a mask.
13789 if (HasPassthruOp) {
13790 auto MaskIdx = ISD::getVPMaskIdx(Opcode: Op.getOpcode());
13791 if (MaskIdx) {
13792 if (*MaskIdx == OpIdx.index())
13793 Ops.push_back(Elt: DAG.getUNDEF(VT: ContainerVT));
13794 } else if (ISD::getVPExplicitVectorLengthIdx(Opcode: Op.getOpcode()) ==
13795 OpIdx.index()) {
13796 if (Op.getOpcode() == ISD::VP_MERGE) {
13797 // For VP_MERGE, copy the false operand instead of an undef value.
13798 Ops.push_back(Elt: Ops.back());
13799 } else {
13800 assert(Op.getOpcode() == ISD::VP_SELECT);
13801 // For VP_SELECT, add an undef value.
13802 Ops.push_back(Elt: DAG.getUNDEF(VT: ContainerVT));
13803 }
13804 }
13805 }
13806 // VFCVT_RM_X_F_VL requires a rounding mode to be injected before the VL.
13807 if (RISCVISDOpc == RISCVISD::VFCVT_RM_X_F_VL &&
13808 ISD::getVPExplicitVectorLengthIdx(Opcode: Op.getOpcode()) == OpIdx.index())
13809 Ops.push_back(Elt: DAG.getTargetConstant(Val: RISCVFPRndMode::DYN, DL,
13810 VT: Subtarget.getXLenVT()));
13811 // Pass through operands which aren't fixed-length vectors.
13812 if (!V.getValueType().isFixedLengthVector()) {
13813 Ops.push_back(Elt: V);
13814 continue;
13815 }
13816 // "cast" fixed length vector to a scalable vector.
13817 MVT OpVT = V.getSimpleValueType();
13818 MVT ContainerVT = getContainerForFixedLengthVector(VT: OpVT);
13819 assert(useRVVForFixedLengthVectorVT(OpVT) &&
13820 "Only fixed length vectors are supported!");
13821 Ops.push_back(Elt: convertToScalableVector(VT: ContainerVT, V, DAG, Subtarget));
13822 }
13823
13824 if (!VT.isFixedLengthVector())
13825 return DAG.getNode(Opcode: RISCVISDOpc, DL, VT, Ops, Flags: Op->getFlags());
13826
13827 SDValue VPOp = DAG.getNode(Opcode: RISCVISDOpc, DL, VT: ContainerVT, Ops, Flags: Op->getFlags());
13828
13829 return convertFromScalableVector(VT, V: VPOp, DAG, Subtarget);
13830}
13831
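// Illustrative sketch for the mask-extension lowering below: vp.zext and
// vp.sext of an i1 vector select between integer splats, roughly
//   zext(m) -> vmerge(m, splat(1),  splat(0))
//   sext(m) -> vmerge(m, splat(-1), splat(0))
// evaluated under the given EVL.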
13832SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op,
13833 SelectionDAG &DAG) const {
13834 SDLoc DL(Op);
13835 MVT VT = Op.getSimpleValueType();
13836
13837 SDValue Src = Op.getOperand(i: 0);
13838 // NOTE: Mask is dropped.
13839 SDValue VL = Op.getOperand(i: 2);
13840
13841 MVT ContainerVT = VT;
13842 if (VT.isFixedLengthVector()) {
13843 ContainerVT = getContainerForFixedLengthVector(VT);
13844 MVT SrcVT = MVT::getVectorVT(VT: MVT::i1, EC: ContainerVT.getVectorElementCount());
13845 Src = convertToScalableVector(VT: SrcVT, V: Src, DAG, Subtarget);
13846 }
13847
13848 MVT XLenVT = Subtarget.getXLenVT();
13849 SDValue Zero = DAG.getConstant(Val: 0, DL, VT: XLenVT);
13850 SDValue ZeroSplat = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT,
13851 N1: DAG.getUNDEF(VT: ContainerVT), N2: Zero, N3: VL);
13852
13853 SDValue SplatValue = DAG.getSignedConstant(
13854 Val: Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 1 : -1, DL, VT: XLenVT);
13855 SDValue Splat = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT,
13856 N1: DAG.getUNDEF(VT: ContainerVT), N2: SplatValue, N3: VL);
13857
13858 SDValue Result = DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: ContainerVT, N1: Src, N2: Splat,
13859 N3: ZeroSplat, N4: DAG.getUNDEF(VT: ContainerVT), N5: VL);
13860 if (!VT.isFixedLengthVector())
13861 return Result;
13862 return convertFromScalableVector(VT, V: Result, DAG, Subtarget);
13863}
13864
13865SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
13866 SelectionDAG &DAG) const {
13867 SDLoc DL(Op);
13868 MVT VT = Op.getSimpleValueType();
13869
13870 SDValue Op1 = Op.getOperand(i: 0);
13871 SDValue Op2 = Op.getOperand(i: 1);
13872 ISD::CondCode Condition = cast<CondCodeSDNode>(Val: Op.getOperand(i: 2))->get();
13873 // NOTE: Mask is dropped.
13874 SDValue VL = Op.getOperand(i: 4);
13875
13876 MVT ContainerVT = VT;
13877 if (VT.isFixedLengthVector()) {
13878 ContainerVT = getContainerForFixedLengthVector(VT);
13879 Op1 = convertToScalableVector(VT: ContainerVT, V: Op1, DAG, Subtarget);
13880 Op2 = convertToScalableVector(VT: ContainerVT, V: Op2, DAG, Subtarget);
13881 }
13882
13883 SDValue Result;
13884 SDValue AllOneMask = DAG.getNode(Opcode: RISCVISD::VMSET_VL, DL, VT: ContainerVT, Operand: VL);
13885
13886 switch (Condition) {
13887 default:
13888 break;
13889 // X != Y --> (X^Y)
13890 case ISD::SETNE:
13891 Result = DAG.getNode(Opcode: RISCVISD::VMXOR_VL, DL, VT: ContainerVT, N1: Op1, N2: Op2, N3: VL);
13892 break;
13893 // X == Y --> ~(X^Y)
13894 case ISD::SETEQ: {
13895 SDValue Temp =
13896 DAG.getNode(Opcode: RISCVISD::VMXOR_VL, DL, VT: ContainerVT, N1: Op1, N2: Op2, N3: VL);
13897 Result =
13898 DAG.getNode(Opcode: RISCVISD::VMXOR_VL, DL, VT: ContainerVT, N1: Temp, N2: AllOneMask, N3: VL);
13899 break;
13900 }
13901 // X >s Y --> X == 0 & Y == 1 --> ~X & Y
13902 // X <u Y --> X == 0 & Y == 1 --> ~X & Y
13903 case ISD::SETGT:
13904 case ISD::SETULT: {
13905 SDValue Temp =
13906 DAG.getNode(Opcode: RISCVISD::VMXOR_VL, DL, VT: ContainerVT, N1: Op1, N2: AllOneMask, N3: VL);
13907 Result = DAG.getNode(Opcode: RISCVISD::VMAND_VL, DL, VT: ContainerVT, N1: Temp, N2: Op2, N3: VL);
13908 break;
13909 }
13910 // X <s Y --> X == 1 & Y == 0 --> ~Y & X
13911 // X >u Y --> X == 1 & Y == 0 --> ~Y & X
13912 case ISD::SETLT:
13913 case ISD::SETUGT: {
13914 SDValue Temp =
13915 DAG.getNode(Opcode: RISCVISD::VMXOR_VL, DL, VT: ContainerVT, N1: Op2, N2: AllOneMask, N3: VL);
13916 Result = DAG.getNode(Opcode: RISCVISD::VMAND_VL, DL, VT: ContainerVT, N1: Op1, N2: Temp, N3: VL);
13917 break;
13918 }
13919 // X >=s Y --> X == 0 | Y == 1 --> ~X | Y
13920 // X <=u Y --> X == 0 | Y == 1 --> ~X | Y
13921 case ISD::SETGE:
13922 case ISD::SETULE: {
13923 SDValue Temp =
13924 DAG.getNode(Opcode: RISCVISD::VMXOR_VL, DL, VT: ContainerVT, N1: Op1, N2: AllOneMask, N3: VL);
13925 Result = DAG.getNode(Opcode: RISCVISD::VMXOR_VL, DL, VT: ContainerVT, N1: Temp, N2: Op2, N3: VL);
13926 break;
13927 }
13928 // X <=s Y --> X == 1 | Y == 0 --> ~Y | X
13929 // X >=u Y --> X == 1 | Y == 0 --> ~Y | X
13930 case ISD::SETLE:
13931 case ISD::SETUGE: {
13932 SDValue Temp =
13933 DAG.getNode(Opcode: RISCVISD::VMXOR_VL, DL, VT: ContainerVT, N1: Op2, N2: AllOneMask, N3: VL);
13934 Result = DAG.getNode(Opcode: RISCVISD::VMXOR_VL, DL, VT: ContainerVT, N1: Temp, N2: Op1, N3: VL);
13935 break;
13936 }
13937 }
13938
13939 if (!VT.isFixedLengthVector())
13940 return Result;
13941 return convertFromScalableVector(VT, V: Result, DAG, Subtarget);
13942}
13943
13944// Lower Floating-Point/Integer Type-Convert VP SDNodes
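// Illustrative sketch: a vp.fptosi from nxv2f64 to nxv2i8 takes the
// narrowing path below, first converting with a narrowing fcvt to nxv2i32
// and then truncating nxv2i32 -> nxv2i16 -> nxv2i8 one halving step at a
// time.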
13945SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op,
13946 SelectionDAG &DAG) const {
13947 SDLoc DL(Op);
13948
13949 SDValue Src = Op.getOperand(i: 0);
13950 SDValue Mask = Op.getOperand(i: 1);
13951 SDValue VL = Op.getOperand(i: 2);
13952 unsigned RISCVISDOpc = getRISCVVLOp(Op);
13953
13954 MVT DstVT = Op.getSimpleValueType();
13955 MVT SrcVT = Src.getSimpleValueType();
13956 if (DstVT.isFixedLengthVector()) {
13957 DstVT = getContainerForFixedLengthVector(VT: DstVT);
13958 SrcVT = getContainerForFixedLengthVector(VT: SrcVT);
13959 Src = convertToScalableVector(VT: SrcVT, V: Src, DAG, Subtarget);
13960 MVT MaskVT = getMaskTypeFor(VecVT: DstVT);
13961 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
13962 }
13963
13964 unsigned DstEltSize = DstVT.getScalarSizeInBits();
13965 unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
13966
13967 SDValue Result;
13968 if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion.
13969 if (SrcVT.isInteger()) {
13970 assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
13971
13972 unsigned RISCVISDExtOpc = RISCVISDOpc == RISCVISD::SINT_TO_FP_VL
13973 ? RISCVISD::VSEXT_VL
13974 : RISCVISD::VZEXT_VL;
13975
13976 // Do we need to do any pre-widening before converting?
13977 if (SrcEltSize == 1) {
13978 MVT IntVT = DstVT.changeVectorElementTypeToInteger();
13979 MVT XLenVT = Subtarget.getXLenVT();
13980 SDValue Zero = DAG.getConstant(Val: 0, DL, VT: XLenVT);
13981 SDValue ZeroSplat = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: IntVT,
13982 N1: DAG.getUNDEF(VT: IntVT), N2: Zero, N3: VL);
13983 SDValue One = DAG.getSignedConstant(
13984 Val: RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, VT: XLenVT);
13985 SDValue OneSplat = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: IntVT,
13986 N1: DAG.getUNDEF(VT: IntVT), N2: One, N3: VL);
13987 Src = DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: IntVT, N1: Src, N2: OneSplat,
13988 N3: ZeroSplat, N4: DAG.getUNDEF(VT: IntVT), N5: VL);
13989 } else if (DstEltSize > (2 * SrcEltSize)) {
13990 // Widen before converting.
13991 MVT IntVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: DstEltSize / 2),
13992 EC: DstVT.getVectorElementCount());
13993 Src = DAG.getNode(Opcode: RISCVISDExtOpc, DL, VT: IntVT, N1: Src, N2: Mask, N3: VL);
13994 }
13995
13996 Result = DAG.getNode(Opcode: RISCVISDOpc, DL, VT: DstVT, N1: Src, N2: Mask, N3: VL);
13997 } else {
13998 assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
13999 "Wrong input/output vector types");
14000
14001 // Convert f16 to f32 then convert f32 to i64.
14002 if (DstEltSize > (2 * SrcEltSize)) {
14003 assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
14004 MVT InterimFVT =
14005 MVT::getVectorVT(VT: MVT::f32, EC: DstVT.getVectorElementCount());
14006 Src =
14007 DAG.getNode(Opcode: RISCVISD::FP_EXTEND_VL, DL, VT: InterimFVT, N1: Src, N2: Mask, N3: VL);
14008 }
14009
14010 Result = DAG.getNode(Opcode: RISCVISDOpc, DL, VT: DstVT, N1: Src, N2: Mask, N3: VL);
14011 }
14012 } else { // Narrowing + Conversion
14013 if (SrcVT.isInteger()) {
14014 assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
14015 // First do a narrowing conversion to an FP type half the size, then round
14016 // the result to a smaller FP type if needed.
14017
14018 MVT InterimFVT = DstVT;
14019 if (SrcEltSize > (2 * DstEltSize)) {
14020 assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
14021 assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
14022 InterimFVT = MVT::getVectorVT(VT: MVT::f32, EC: DstVT.getVectorElementCount());
14023 }
14024
14025 Result = DAG.getNode(Opcode: RISCVISDOpc, DL, VT: InterimFVT, N1: Src, N2: Mask, N3: VL);
14026
14027 if (InterimFVT != DstVT) {
14028 Src = Result;
14029 Result = DAG.getNode(Opcode: RISCVISD::FP_ROUND_VL, DL, VT: DstVT, N1: Src, N2: Mask, N3: VL);
14030 }
14031 } else {
14032 assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
14033 "Wrong input/output vector types");
14034 // First do a narrowing conversion to an integer half the size, then
14035 // truncate if needed.
14036
14037 if (DstEltSize == 1) {
14038 // First convert to an integer of the same size, then convert to a mask
14039 // using setcc.
14040 assert(SrcEltSize >= 16 && "Unexpected FP type!");
14041 MVT InterimIVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: SrcEltSize),
14042 EC: DstVT.getVectorElementCount());
14043 Result = DAG.getNode(Opcode: RISCVISDOpc, DL, VT: InterimIVT, N1: Src, N2: Mask, N3: VL);
14044
14045 // Compare the integer result to 0. The integer should be 0 or 1/-1,
14046 // otherwise the conversion was undefined.
14047 MVT XLenVT = Subtarget.getXLenVT();
14048 SDValue SplatZero = DAG.getConstant(Val: 0, DL, VT: XLenVT);
14049 SplatZero = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: InterimIVT,
14050 N1: DAG.getUNDEF(VT: InterimIVT), N2: SplatZero, N3: VL);
14051 Result = DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: DstVT,
14052 Ops: {Result, SplatZero, DAG.getCondCode(Cond: ISD::SETNE),
14053 DAG.getUNDEF(VT: DstVT), Mask, VL});
14054 } else {
14055 MVT InterimIVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: SrcEltSize / 2),
14056 EC: DstVT.getVectorElementCount());
14057
14058 Result = DAG.getNode(Opcode: RISCVISDOpc, DL, VT: InterimIVT, N1: Src, N2: Mask, N3: VL);
14059
14060 while (InterimIVT != DstVT) {
14061 SrcEltSize /= 2;
14062 Src = Result;
14063 InterimIVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: SrcEltSize / 2),
14064 EC: DstVT.getVectorElementCount());
14065 Result = DAG.getNode(Opcode: RISCVISD::TRUNCATE_VECTOR_VL, DL, VT: InterimIVT,
14066 N1: Src, N2: Mask, N3: VL);
14067 }
14068 }
14069 }
14070 }
14071
14072 MVT VT = Op.getSimpleValueType();
14073 if (!VT.isFixedLengthVector())
14074 return Result;
14075 return convertFromScalableVector(VT, V: Result, DAG, Subtarget);
14076}
14077
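// Illustrative sketch for the mask vp.merge lowering below: the i1 operands
// are promoted to i8 vectors of 0/1, a vmerge selects between them under
// the mask operand, and a final compare against 0 (vmsne) turns the i8
// result back into a mask.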
14078SDValue RISCVTargetLowering::lowerVPMergeMask(SDValue Op,
14079 SelectionDAG &DAG) const {
14080 SDLoc DL(Op);
14081 MVT VT = Op.getSimpleValueType();
14082 MVT XLenVT = Subtarget.getXLenVT();
14083
14084 SDValue Mask = Op.getOperand(i: 0);
14085 SDValue TrueVal = Op.getOperand(i: 1);
14086 SDValue FalseVal = Op.getOperand(i: 2);
14087 SDValue VL = Op.getOperand(i: 3);
14088
14089 // Use default legalization if a vector of EVL type would be legal.
14090 EVT EVLVecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: VL.getValueType(),
14091 EC: VT.getVectorElementCount());
14092 if (isTypeLegal(VT: EVLVecVT))
14093 return SDValue();
14094
14095 MVT ContainerVT = VT;
14096 if (VT.isFixedLengthVector()) {
14097 ContainerVT = getContainerForFixedLengthVector(VT);
14098 Mask = convertToScalableVector(VT: ContainerVT, V: Mask, DAG, Subtarget);
14099 TrueVal = convertToScalableVector(VT: ContainerVT, V: TrueVal, DAG, Subtarget);
14100 FalseVal = convertToScalableVector(VT: ContainerVT, V: FalseVal, DAG, Subtarget);
14101 }
14102
14103 // Promote to a vector of i8.
14104 MVT PromotedVT = ContainerVT.changeVectorElementType(EltVT: MVT::i8);
14105
14106 // Promote TrueVal and FalseVal using VLMax.
14107 // FIXME: Is there a better way to do this?
14108 SDValue VLMax = DAG.getRegister(Reg: RISCV::X0, VT: XLenVT);
14109 SDValue SplatOne = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: PromotedVT,
14110 N1: DAG.getUNDEF(VT: PromotedVT),
14111 N2: DAG.getConstant(Val: 1, DL, VT: XLenVT), N3: VLMax);
14112 SDValue SplatZero = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: PromotedVT,
14113 N1: DAG.getUNDEF(VT: PromotedVT),
14114 N2: DAG.getConstant(Val: 0, DL, VT: XLenVT), N3: VLMax);
14115 TrueVal = DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: PromotedVT, N1: TrueVal, N2: SplatOne,
14116 N3: SplatZero, N4: DAG.getUNDEF(VT: PromotedVT), N5: VL);
14117 // Any element past VL uses FalseVal, so use VLMax when promoting it.
14118 FalseVal = DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: PromotedVT, N1: FalseVal,
14119 N2: SplatOne, N3: SplatZero, N4: DAG.getUNDEF(VT: PromotedVT), N5: VLMax);
14120
14121 // VP_MERGE the two promoted values.
14122 SDValue VPMerge = DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: PromotedVT, N1: Mask,
14123 N2: TrueVal, N3: FalseVal, N4: FalseVal, N5: VL);
14124
14125 // Convert back to mask.
14126 SDValue TrueMask = DAG.getNode(Opcode: RISCVISD::VMSET_VL, DL, VT: ContainerVT, Operand: VL);
14127 SDValue Result = DAG.getNode(
14128 Opcode: RISCVISD::SETCC_VL, DL, VT: ContainerVT,
14129 Ops: {VPMerge, DAG.getConstant(Val: 0, DL, VT: PromotedVT), DAG.getCondCode(Cond: ISD::SETNE),
14130 DAG.getUNDEF(VT: getMaskTypeFor(VecVT: ContainerVT)), TrueMask, VLMax});
14131
14132 if (VT.isFixedLengthVector())
14133 Result = convertFromScalableVector(VT, V: Result, DAG, Subtarget);
14134 return Result;
14135}
14136
14137SDValue
14138RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
14139 SelectionDAG &DAG) const {
14140 using namespace SDPatternMatch;
14141
14142 SDLoc DL(Op);
14143
14144 SDValue Op1 = Op.getOperand(i: 0);
14145 SDValue Op2 = Op.getOperand(i: 1);
14146 SDValue Offset = Op.getOperand(i: 2);
14147 SDValue Mask = Op.getOperand(i: 3);
14148 SDValue EVL1 = Op.getOperand(i: 4);
14149 SDValue EVL2 = Op.getOperand(i: 5);
14150
14151 const MVT XLenVT = Subtarget.getXLenVT();
14152 MVT VT = Op.getSimpleValueType();
14153 MVT ContainerVT = VT;
14154 if (VT.isFixedLengthVector()) {
14155 ContainerVT = getContainerForFixedLengthVector(VT);
14156 Op1 = convertToScalableVector(VT: ContainerVT, V: Op1, DAG, Subtarget);
14157 Op2 = convertToScalableVector(VT: ContainerVT, V: Op2, DAG, Subtarget);
14158 MVT MaskVT = getMaskTypeFor(VecVT: ContainerVT);
14159 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
14160 }
14161
14162 bool IsMaskVector = VT.getVectorElementType() == MVT::i1;
14163 if (IsMaskVector) {
14164 ContainerVT = ContainerVT.changeVectorElementType(EltVT: MVT::i8);
14165
14166 // Expand input operands
14167 SDValue SplatOneOp1 = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT,
14168 N1: DAG.getUNDEF(VT: ContainerVT),
14169 N2: DAG.getConstant(Val: 1, DL, VT: XLenVT), N3: EVL1);
14170 SDValue SplatZeroOp1 = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT,
14171 N1: DAG.getUNDEF(VT: ContainerVT),
14172 N2: DAG.getConstant(Val: 0, DL, VT: XLenVT), N3: EVL1);
14173 Op1 = DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: ContainerVT, N1: Op1, N2: SplatOneOp1,
14174 N3: SplatZeroOp1, N4: DAG.getUNDEF(VT: ContainerVT), N5: EVL1);
14175
14176 SDValue SplatOneOp2 = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT,
14177 N1: DAG.getUNDEF(VT: ContainerVT),
14178 N2: DAG.getConstant(Val: 1, DL, VT: XLenVT), N3: EVL2);
14179 SDValue SplatZeroOp2 = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT,
14180 N1: DAG.getUNDEF(VT: ContainerVT),
14181 N2: DAG.getConstant(Val: 0, DL, VT: XLenVT), N3: EVL2);
14182 Op2 = DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: ContainerVT, N1: Op2, N2: SplatOneOp2,
14183 N3: SplatZeroOp2, N4: DAG.getUNDEF(VT: ContainerVT), N5: EVL2);
14184 }
14185
14186 auto getVectorFirstEle = [](SDValue Vec) {
14187 SDValue FirstEle;
14188 if (sd_match(N: Vec, P: m_InsertElt(Vec: m_Value(), Val: m_Value(N&: FirstEle), Idx: m_Zero())))
14189 return FirstEle;
14190
14191 if (Vec.getOpcode() == ISD::SPLAT_VECTOR ||
14192 Vec.getOpcode() == ISD::BUILD_VECTOR)
14193 return Vec.getOperand(i: 0);
14194
14195 return SDValue();
14196 };
14197
14198 if (!IsMaskVector && isNullConstant(V: Offset) && isOneConstant(V: EVL1))
14199 if (auto FirstEle = getVectorFirstEle(Op->getOperand(Num: 0))) {
14200 MVT EltVT = ContainerVT.getVectorElementType();
14201 SDValue Result;
14202 if ((EltVT == MVT::f16 && !Subtarget.hasVInstructionsF16()) ||
14203 EltVT == MVT::bf16) {
14204 EltVT = EltVT.changeTypeToInteger();
14205 ContainerVT = ContainerVT.changeVectorElementType(EltVT);
14206 Op2 = DAG.getBitcast(VT: ContainerVT, V: Op2);
14207 FirstEle =
14208 DAG.getAnyExtOrTrunc(Op: DAG.getBitcast(VT: EltVT, V: FirstEle), DL, VT: XLenVT);
14209 }
14210 Result = DAG.getNode(Opcode: EltVT.isFloatingPoint() ? RISCVISD::VFSLIDE1UP_VL
14211 : RISCVISD::VSLIDE1UP_VL,
14212 DL, VT: ContainerVT, N1: DAG.getUNDEF(VT: ContainerVT), N2: Op2,
14213 N3: FirstEle, N4: Mask, N5: EVL2);
14214 Result = DAG.getBitcast(
14215 VT: ContainerVT.changeVectorElementType(EltVT: VT.getVectorElementType()),
14216 V: Result);
14217 return VT.isFixedLengthVector()
14218 ? convertFromScalableVector(VT, V: Result, DAG, Subtarget)
14219 : Result;
14220 }
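// Illustrative sketch of the fast path above: with Offset = 0 and EVL1 = 1,
// splicing a single scalar s in front of Op2 is emitted as one slide,
// roughly
//   vslide1up.vx v8, v9, a0    ; a0 = s
// (or vfslide1up.vf for FP element types).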
14221
14222 int64_t ImmValue = cast<ConstantSDNode>(Val&: Offset)->getSExtValue();
14223 SDValue DownOffset, UpOffset;
14224 if (ImmValue >= 0) {
14225 // The operand is a TargetConstant, we need to rebuild it as a regular
14226 // constant.
14227 DownOffset = DAG.getConstant(Val: ImmValue, DL, VT: XLenVT);
14228 UpOffset = DAG.getNode(Opcode: ISD::SUB, DL, VT: XLenVT, N1: EVL1, N2: DownOffset);
14229 } else {
14230 // The operand is a TargetConstant, we need to rebuild it as a regular
14231 // constant rather than negating the original operand.
14232 UpOffset = DAG.getConstant(Val: -ImmValue, DL, VT: XLenVT);
14233 DownOffset = DAG.getNode(Opcode: ISD::SUB, DL, VT: XLenVT, N1: EVL1, N2: UpOffset);
14234 }
14235
14236 if (ImmValue != 0)
14237 Op1 = getVSlidedown(DAG, Subtarget, DL, VT: ContainerVT,
14238 Passthru: DAG.getUNDEF(VT: ContainerVT), Op: Op1, Offset: DownOffset, Mask,
14239 VL: Subtarget.hasVLDependentLatency() ? UpOffset : EVL2);
14240 SDValue Result = getVSlideup(DAG, Subtarget, DL, VT: ContainerVT, Passthru: Op1, Op: Op2,
14241 Offset: UpOffset, Mask, VL: EVL2, Policy: RISCVVType::TAIL_AGNOSTIC);
14242
14243 if (IsMaskVector) {
14244 // Truncate Result back to a mask vector (Result has the same EVL as Op2).
14245 Result = DAG.getNode(
14246 Opcode: RISCVISD::SETCC_VL, DL, VT: ContainerVT.changeVectorElementType(EltVT: MVT::i1),
14247 Ops: {Result, DAG.getConstant(Val: 0, DL, VT: ContainerVT),
14248 DAG.getCondCode(Cond: ISD::SETNE), DAG.getUNDEF(VT: getMaskTypeFor(VecVT: ContainerVT)),
14249 Mask, EVL2});
14250 }
14251
14252 if (!VT.isFixedLengthVector())
14253 return Result;
14254 return convertFromScalableVector(VT, V: Result, DAG, Subtarget);
14255}
14256
14257SDValue
14258RISCVTargetLowering::lowerVPReverseExperimental(SDValue Op,
14259 SelectionDAG &DAG) const {
14260 SDLoc DL(Op);
14261 MVT VT = Op.getSimpleValueType();
14262 MVT XLenVT = Subtarget.getXLenVT();
14263
14264 SDValue Op1 = Op.getOperand(i: 0);
14265 SDValue Mask = Op.getOperand(i: 1);
14266 SDValue EVL = Op.getOperand(i: 2);
14267
14268 MVT ContainerVT = VT;
14269 if (VT.isFixedLengthVector()) {
14270 ContainerVT = getContainerForFixedLengthVector(VT);
14271 Op1 = convertToScalableVector(VT: ContainerVT, V: Op1, DAG, Subtarget);
14272 MVT MaskVT = getMaskTypeFor(VecVT: ContainerVT);
14273 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
14274 }
14275
14276 MVT GatherVT = ContainerVT;
14277 MVT IndicesVT = ContainerVT.changeVectorElementTypeToInteger();
14278 // Check if we are working with mask vectors
14279 bool IsMaskVector = ContainerVT.getVectorElementType() == MVT::i1;
14280 if (IsMaskVector) {
14281 GatherVT = IndicesVT = ContainerVT.changeVectorElementType(EltVT: MVT::i8);
14282
14283 // Expand input operand
14284 SDValue SplatOne = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: IndicesVT,
14285 N1: DAG.getUNDEF(VT: IndicesVT),
14286 N2: DAG.getConstant(Val: 1, DL, VT: XLenVT), N3: EVL);
14287 SDValue SplatZero = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: IndicesVT,
14288 N1: DAG.getUNDEF(VT: IndicesVT),
14289 N2: DAG.getConstant(Val: 0, DL, VT: XLenVT), N3: EVL);
14290 Op1 = DAG.getNode(Opcode: RISCVISD::VMERGE_VL, DL, VT: IndicesVT, N1: Op1, N2: SplatOne,
14291 N3: SplatZero, N4: DAG.getUNDEF(VT: IndicesVT), N5: EVL);
14292 }
14293
14294 unsigned EltSize = GatherVT.getScalarSizeInBits();
14295 unsigned MinSize = GatherVT.getSizeInBits().getKnownMinValue();
14296 unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
14297 unsigned MaxVLMAX =
14298 RISCVTargetLowering::computeVLMAX(VectorBits: VectorBitsMax, EltSize, MinSize);
14299
14300 unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
14301 // If this is SEW=8 and VLMAX is unknown or more than 256, we need
14302 // to use vrgatherei16.vv.
14303 // TODO: It's also possible to use vrgatherei16.vv for other types to
14304 // decrease register width for the index calculation.
14305 // NOTE: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
14306 if (MaxVLMAX > 256 && EltSize == 8) {
14307 // If this is LMUL=8, we have to split before using vrgatherei16.vv.
14308 // Split the vector in half and reverse each half using a full register
14309 // reverse.
14310 // Swap the halves and concatenate them.
14311 // Slide the concatenated result by (VLMax - VL).
14312 if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
14313 auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VT: GatherVT);
14314 auto [Lo, Hi] = DAG.SplitVector(N: Op1, DL);
14315
14316 SDValue LoRev = DAG.getNode(Opcode: ISD::VECTOR_REVERSE, DL, VT: LoVT, Operand: Lo);
14317 SDValue HiRev = DAG.getNode(Opcode: ISD::VECTOR_REVERSE, DL, VT: HiVT, Operand: Hi);
14318
14319 // Reassemble the low and high pieces reversed.
14320 // NOTE: This Result is unmasked (because we do not need masks for
14321 // shuffles). If in the future this has to change, we can use a SELECT_VL
14322 // between Result and UNDEF using the mask originally passed to VP_REVERSE.
14323 SDValue Result =
14324 DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: GatherVT, N1: HiRev, N2: LoRev);
14325
14326 // Slide off any elements from past EVL that were reversed into the low
14327 // elements.
14328 SDValue VLMax =
14329 DAG.getElementCount(DL, VT: XLenVT, EC: GatherVT.getVectorElementCount());
14330 SDValue Diff = DAG.getNode(Opcode: ISD::SUB, DL, VT: XLenVT, N1: VLMax, N2: EVL);
14331
14332 Result = getVSlidedown(DAG, Subtarget, DL, VT: GatherVT,
14333 Passthru: DAG.getUNDEF(VT: GatherVT), Op: Result, Offset: Diff, Mask, VL: EVL);
14334
14335 if (IsMaskVector) {
14336 // Truncate Result back to a mask vector
14337 Result =
14338 DAG.getNode(Opcode: RISCVISD::SETCC_VL, DL, VT: ContainerVT,
14339 Ops: {Result, DAG.getConstant(Val: 0, DL, VT: GatherVT),
14340 DAG.getCondCode(Cond: ISD::SETNE),
14341 DAG.getUNDEF(VT: getMaskTypeFor(VecVT: ContainerVT)), Mask, EVL});
14342 }
14343
14344 if (!VT.isFixedLengthVector())
14345 return Result;
14346 return convertFromScalableVector(VT, V: Result, DAG, Subtarget);
14347 }
14348
14349 // Just promote the int type to i16 which will double the LMUL.
14350 IndicesVT = MVT::getVectorVT(VT: MVT::i16, EC: IndicesVT.getVectorElementCount());
14351 GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
14352 }
14353
14354 SDValue VID = DAG.getNode(Opcode: RISCVISD::VID_VL, DL, VT: IndicesVT, N1: Mask, N2: EVL);
14355 SDValue VecLen =
14356 DAG.getNode(Opcode: ISD::SUB, DL, VT: XLenVT, N1: EVL, N2: DAG.getConstant(Val: 1, DL, VT: XLenVT));
14357 SDValue VecLenSplat = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: IndicesVT,
14358 N1: DAG.getUNDEF(VT: IndicesVT), N2: VecLen, N3: EVL);
14359 SDValue VRSUB = DAG.getNode(Opcode: RISCVISD::SUB_VL, DL, VT: IndicesVT, N1: VecLenSplat, N2: VID,
14360 N3: DAG.getUNDEF(VT: IndicesVT), N4: Mask, N5: EVL);
14361 SDValue Result = DAG.getNode(Opcode: GatherOpc, DL, VT: GatherVT, N1: Op1, N2: VRSUB,
14362 N3: DAG.getUNDEF(VT: GatherVT), N4: Mask, N5: EVL);
14363
14364 if (IsMaskVector) {
14365 // Truncate Result back to a mask vector
14366 Result = DAG.getNode(
14367 Opcode: RISCVISD::SETCC_VL, DL, VT: ContainerVT,
14368 Ops: {Result, DAG.getConstant(Val: 0, DL, VT: GatherVT), DAG.getCondCode(Cond: ISD::SETNE),
14369 DAG.getUNDEF(VT: getMaskTypeFor(VecVT: ContainerVT)), Mask, EVL});
14370 }
14371
14372 if (!VT.isFixedLengthVector())
14373 return Result;
14374 return convertFromScalableVector(VT, V: Result, DAG, Subtarget);
14375}
14376
14377SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op,
14378 SelectionDAG &DAG) const {
14379 MVT VT = Op.getSimpleValueType();
14380 if (VT.getVectorElementType() != MVT::i1)
14381 return lowerVPOp(Op, DAG);
14382
14383 // It is safe to drop the mask parameter as masked-off elements are undef.
14384 SDValue Op1 = Op->getOperand(Num: 0);
14385 SDValue Op2 = Op->getOperand(Num: 1);
14386 SDValue VL = Op->getOperand(Num: 3);
14387
14388 MVT ContainerVT = VT;
14389 const bool IsFixed = VT.isFixedLengthVector();
14390 if (IsFixed) {
14391 ContainerVT = getContainerForFixedLengthVector(VT);
14392 Op1 = convertToScalableVector(VT: ContainerVT, V: Op1, DAG, Subtarget);
14393 Op2 = convertToScalableVector(VT: ContainerVT, V: Op2, DAG, Subtarget);
14394 }
14395
14396 SDLoc DL(Op);
14397 SDValue Val = DAG.getNode(Opcode: getRISCVVLOp(Op), DL, VT: ContainerVT, N1: Op1, N2: Op2, N3: VL);
14398 if (!IsFixed)
14399 return Val;
14400 return convertFromScalableVector(VT, V: Val, DAG, Subtarget);
14401}
14402
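// Illustrative sketch for the strided-load lowering below: an unmasked
// vp.strided.load of i32 elements with the byte stride in a1 becomes
// roughly
//   vsetvli  zero, a2, e32, m1, ta, ma   ; a2 = EVL
//   vlse32.v v8, (a0), a1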
14403SDValue RISCVTargetLowering::lowerVPStridedLoad(SDValue Op,
14404 SelectionDAG &DAG) const {
14405 SDLoc DL(Op);
14406 MVT XLenVT = Subtarget.getXLenVT();
14407 MVT VT = Op.getSimpleValueType();
14408 MVT ContainerVT = VT;
14409 if (VT.isFixedLengthVector())
14410 ContainerVT = getContainerForFixedLengthVector(VT);
14411
14412 SDVTList VTs = DAG.getVTList(VTs: {ContainerVT, MVT::Other});
14413
14414 auto *VPNode = cast<VPStridedLoadSDNode>(Val&: Op);
14415 // Check if the mask is known to be all ones
14416 SDValue Mask = VPNode->getMask();
14417 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(N: Mask.getNode());
14418
14419 SDValue IntID = DAG.getTargetConstant(Val: IsUnmasked ? Intrinsic::riscv_vlse
14420 : Intrinsic::riscv_vlse_mask,
14421 DL, VT: XLenVT);
14422 SmallVector<SDValue, 8> Ops{VPNode->getChain(), IntID,
14423 DAG.getUNDEF(VT: ContainerVT), VPNode->getBasePtr(),
14424 VPNode->getStride()};
14425 if (!IsUnmasked) {
14426 if (VT.isFixedLengthVector()) {
14427 MVT MaskVT = ContainerVT.changeVectorElementType(EltVT: MVT::i1);
14428 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
14429 }
14430 Ops.push_back(Elt: Mask);
14431 }
14432 Ops.push_back(Elt: VPNode->getVectorLength());
14433 if (!IsUnmasked) {
14434 SDValue Policy =
14435 DAG.getTargetConstant(Val: RISCVVType::TAIL_AGNOSTIC, DL, VT: XLenVT);
14436 Ops.push_back(Elt: Policy);
14437 }
14438
14439 SDValue Result =
14440 DAG.getMemIntrinsicNode(Opcode: ISD::INTRINSIC_W_CHAIN, dl: DL, VTList: VTs, Ops,
14441 MemVT: VPNode->getMemoryVT(), MMO: VPNode->getMemOperand());
14442 SDValue Chain = Result.getValue(R: 1);
14443
14444 if (VT.isFixedLengthVector())
14445 Result = convertFromScalableVector(VT, V: Result, DAG, Subtarget);
14446
14447 return DAG.getMergeValues(Ops: {Result, Chain}, dl: DL);
14448}
14449
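// Lower VP_STRIDED_STORE by emitting the riscv_vsse (or riscv_vsse_mask)
// intrinsic directly, converting a fixed-length store value to its scalable
// container type first.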
14450SDValue RISCVTargetLowering::lowerVPStridedStore(SDValue Op,
14451 SelectionDAG &DAG) const {
14452 SDLoc DL(Op);
14453 MVT XLenVT = Subtarget.getXLenVT();
14454
14455 auto *VPNode = cast<VPStridedStoreSDNode>(Val&: Op);
14456 SDValue StoreVal = VPNode->getValue();
14457 MVT VT = StoreVal.getSimpleValueType();
14458 MVT ContainerVT = VT;
14459 if (VT.isFixedLengthVector()) {
14460 ContainerVT = getContainerForFixedLengthVector(VT);
14461 StoreVal = convertToScalableVector(VT: ContainerVT, V: StoreVal, DAG, Subtarget);
14462 }
14463
14464 // Check if the mask is known to be all ones
14465 SDValue Mask = VPNode->getMask();
14466 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(N: Mask.getNode());
14467
14468 SDValue IntID = DAG.getTargetConstant(Val: IsUnmasked ? Intrinsic::riscv_vsse
14469 : Intrinsic::riscv_vsse_mask,
14470 DL, VT: XLenVT);
14471 SmallVector<SDValue, 8> Ops{VPNode->getChain(), IntID, StoreVal,
14472 VPNode->getBasePtr(), VPNode->getStride()};
14473 if (!IsUnmasked) {
14474 if (VT.isFixedLengthVector()) {
14475 MVT MaskVT = ContainerVT.changeVectorElementType(EltVT: MVT::i1);
14476 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
14477 }
14478 Ops.push_back(Elt: Mask);
14479 }
14480 Ops.push_back(Elt: VPNode->getVectorLength());
14481
14482 return DAG.getMemIntrinsicNode(Opcode: ISD::INTRINSIC_VOID, dl: DL, VTList: VPNode->getVTList(),
14483 Ops, MemVT: VPNode->getMemoryVT(),
14484 MMO: VPNode->getMemOperand());
14485}
14486
// Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
// matched to an RVV indexed load. The RVV indexed load instructions only
// support the "unsigned unscaled" addressing mode; indices are implicitly
// zero-extended or truncated to XLEN and are treated as byte offsets. Any
// signed or scaled indexing is extended to the XLEN value type and scaled
// accordingly.
14493SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
14494 SelectionDAG &DAG) const {
14495 SDLoc DL(Op);
14496 MVT VT = Op.getSimpleValueType();
14497
14498 const auto *MemSD = cast<MemSDNode>(Val: Op.getNode());
14499 EVT MemVT = MemSD->getMemoryVT();
14500 MachineMemOperand *MMO = MemSD->getMemOperand();
14501 SDValue Chain = MemSD->getChain();
14502 SDValue BasePtr = MemSD->getBasePtr();
14503
14504 [[maybe_unused]] ISD::LoadExtType LoadExtType;
14505 SDValue Index, Mask, PassThru, VL;
14506
14507 if (auto *VPGN = dyn_cast<VPGatherSDNode>(Val: Op.getNode())) {
14508 Index = VPGN->getIndex();
14509 Mask = VPGN->getMask();
14510 PassThru = DAG.getUNDEF(VT);
14511 VL = VPGN->getVectorLength();
14512 // VP doesn't support extending loads.
14513 LoadExtType = ISD::NON_EXTLOAD;
14514 } else {
    // Otherwise it must be an MGATHER.
14516 auto *MGN = cast<MaskedGatherSDNode>(Val: Op.getNode());
14517 Index = MGN->getIndex();
14518 Mask = MGN->getMask();
14519 PassThru = MGN->getPassThru();
14520 LoadExtType = MGN->getExtensionType();
14521 }
14522
14523 MVT IndexVT = Index.getSimpleValueType();
14524 MVT XLenVT = Subtarget.getXLenVT();
14525
14526 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
14527 "Unexpected VTs!");
14528 assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads.
14530 assert(LoadExtType == ISD::NON_EXTLOAD &&
14531 "Unexpected extending MGATHER/VP_GATHER");
14532
14533 // If the mask is known to be all ones, optimize to an unmasked intrinsic;
14534 // the selection of the masked intrinsics doesn't do this for us.
14535 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(N: Mask.getNode());
14536
14537 MVT ContainerVT = VT;
14538 if (VT.isFixedLengthVector()) {
14539 ContainerVT = getContainerForFixedLengthVector(VT);
14540 IndexVT = MVT::getVectorVT(VT: IndexVT.getVectorElementType(),
14541 EC: ContainerVT.getVectorElementCount());
14542
14543 Index = convertToScalableVector(VT: IndexVT, V: Index, DAG, Subtarget);
14544
14545 if (!IsUnmasked) {
14546 MVT MaskVT = getMaskTypeFor(VecVT: ContainerVT);
14547 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
14548 PassThru = convertToScalableVector(VT: ContainerVT, V: PassThru, DAG, Subtarget);
14549 }
14550 }
14551
14552 if (!VL)
14553 VL = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget).second;
14554
14555 if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(VT: XLenVT)) {
14556 IndexVT = IndexVT.changeVectorElementType(EltVT: XLenVT);
14557 Index = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: IndexVT, Operand: Index);
14558 }
14559
14560 unsigned IntID =
14561 IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
14562 SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(Val: IntID, DL, VT: XLenVT)};
14563 if (IsUnmasked)
14564 Ops.push_back(Elt: DAG.getUNDEF(VT: ContainerVT));
14565 else
14566 Ops.push_back(Elt: PassThru);
14567 Ops.push_back(Elt: BasePtr);
14568 Ops.push_back(Elt: Index);
14569 if (!IsUnmasked)
14570 Ops.push_back(Elt: Mask);
14571 Ops.push_back(Elt: VL);
14572 if (!IsUnmasked)
14573 Ops.push_back(Elt: DAG.getTargetConstant(Val: RISCVVType::TAIL_AGNOSTIC, DL, VT: XLenVT));
14574
14575 SDVTList VTs = DAG.getVTList(VTs: {ContainerVT, MVT::Other});
14576 SDValue Result =
14577 DAG.getMemIntrinsicNode(Opcode: ISD::INTRINSIC_W_CHAIN, dl: DL, VTList: VTs, Ops, MemVT, MMO);
14578 Chain = Result.getValue(R: 1);
14579
14580 if (VT.isFixedLengthVector())
14581 Result = convertFromScalableVector(VT, V: Result, DAG, Subtarget);
14582
14583 return DAG.getMergeValues(Ops: {Result, Chain}, dl: DL);
14584}
14585
// Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then
// be matched to an RVV indexed store. The RVV indexed store instructions only
// support the "unsigned unscaled" addressing mode; indices are implicitly
// zero-extended or truncated to XLEN and are treated as byte offsets. Any
// signed or scaled indexing is extended to the XLEN value type and scaled
// accordingly.
14592SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
14593 SelectionDAG &DAG) const {
14594 SDLoc DL(Op);
14595 const auto *MemSD = cast<MemSDNode>(Val: Op.getNode());
14596 EVT MemVT = MemSD->getMemoryVT();
14597 MachineMemOperand *MMO = MemSD->getMemOperand();
14598 SDValue Chain = MemSD->getChain();
14599 SDValue BasePtr = MemSD->getBasePtr();
14600
14601 [[maybe_unused]] bool IsTruncatingStore = false;
14602 SDValue Index, Mask, Val, VL;
14603
14604 if (auto *VPSN = dyn_cast<VPScatterSDNode>(Val: Op.getNode())) {
14605 Index = VPSN->getIndex();
14606 Mask = VPSN->getMask();
14607 Val = VPSN->getValue();
14608 VL = VPSN->getVectorLength();
14609 // VP doesn't support truncating stores.
14610 IsTruncatingStore = false;
14611 } else {
    // Otherwise it must be an MSCATTER.
14613 auto *MSN = cast<MaskedScatterSDNode>(Val: Op.getNode());
14614 Index = MSN->getIndex();
14615 Mask = MSN->getMask();
14616 Val = MSN->getValue();
14617 IsTruncatingStore = MSN->isTruncatingStore();
14618 }
14619
14620 MVT VT = Val.getSimpleValueType();
14621 MVT IndexVT = Index.getSimpleValueType();
14622 MVT XLenVT = Subtarget.getXLenVT();
14623
14624 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
14625 "Unexpected VTs!");
14626 assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads and
  // truncating vector stores.
14629 assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
14630
14631 // If the mask is known to be all ones, optimize to an unmasked intrinsic;
14632 // the selection of the masked intrinsics doesn't do this for us.
14633 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(N: Mask.getNode());
14634
14635 MVT ContainerVT = VT;
14636 if (VT.isFixedLengthVector()) {
14637 ContainerVT = getContainerForFixedLengthVector(VT);
14638 IndexVT = MVT::getVectorVT(VT: IndexVT.getVectorElementType(),
14639 EC: ContainerVT.getVectorElementCount());
14640
14641 Index = convertToScalableVector(VT: IndexVT, V: Index, DAG, Subtarget);
14642 Val = convertToScalableVector(VT: ContainerVT, V: Val, DAG, Subtarget);
14643
14644 if (!IsUnmasked) {
14645 MVT MaskVT = getMaskTypeFor(VecVT: ContainerVT);
14646 Mask = convertToScalableVector(VT: MaskVT, V: Mask, DAG, Subtarget);
14647 }
14648 }
14649
14650 if (!VL)
14651 VL = getDefaultVLOps(VecVT: VT, ContainerVT, DL, DAG, Subtarget).second;
14652
14653 if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(VT: XLenVT)) {
14654 IndexVT = IndexVT.changeVectorElementType(EltVT: XLenVT);
14655 Index = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: IndexVT, Operand: Index);
14656 }
14657
14658 unsigned IntID =
14659 IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
14660 SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(Val: IntID, DL, VT: XLenVT)};
14661 Ops.push_back(Elt: Val);
14662 Ops.push_back(Elt: BasePtr);
14663 Ops.push_back(Elt: Index);
14664 if (!IsUnmasked)
14665 Ops.push_back(Elt: Mask);
14666 Ops.push_back(Elt: VL);
14667
14668 return DAG.getMemIntrinsicNode(Opcode: ISD::INTRINSIC_VOID, dl: DL,
14669 VTList: DAG.getVTList(VT: MVT::Other), Ops, MemVT, MMO);
14670}
14671
14672SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
14673 SelectionDAG &DAG) const {
14674 const MVT XLenVT = Subtarget.getXLenVT();
14675 SDLoc DL(Op);
14676 SDValue Chain = Op->getOperand(Num: 0);
14677 SDValue SysRegNo = DAG.getTargetConstant(Val: RISCVSysReg::frm, DL, VT: XLenVT);
14678 SDVTList VTs = DAG.getVTList(VT1: XLenVT, VT2: MVT::Other);
14679 SDValue RM = DAG.getNode(Opcode: RISCVISD::READ_CSR, DL, VTList: VTs, N1: Chain, N2: SysRegNo);
14680
  // The encoding used for the rounding mode in RISC-V differs from that used
  // by FLT_ROUNDS. To convert between them, the RISC-V rounding mode is used
  // as an index into a table consisting of a sequence of 4-bit fields, each
  // representing the corresponding FLT_ROUNDS mode.
14685 static const int Table =
14686 (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
14687 (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
14688 (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
14689 (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
14690 (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
14691
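  // Multiply the RISC-V rounding mode by 4 (shift left by 2) to get the bit
  // offset of its field, then shift the table down and mask out that 4-bit
  // entry. For example, an frm value of RTZ selects the field at bit offset
  // 4 * RISCVFPRndMode::RTZ, which holds RoundingMode::TowardZero.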
14692 SDValue Shift =
14693 DAG.getNode(Opcode: ISD::SHL, DL, VT: XLenVT, N1: RM, N2: DAG.getConstant(Val: 2, DL, VT: XLenVT));
14694 SDValue Shifted = DAG.getNode(Opcode: ISD::SRL, DL, VT: XLenVT,
14695 N1: DAG.getConstant(Val: Table, DL, VT: XLenVT), N2: Shift);
14696 SDValue Masked = DAG.getNode(Opcode: ISD::AND, DL, VT: XLenVT, N1: Shifted,
14697 N2: DAG.getConstant(Val: 7, DL, VT: XLenVT));
14698
14699 return DAG.getMergeValues(Ops: {Masked, Chain}, dl: DL);
14700}
14701
14702SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
14703 SelectionDAG &DAG) const {
14704 const MVT XLenVT = Subtarget.getXLenVT();
14705 SDLoc DL(Op);
14706 SDValue Chain = Op->getOperand(Num: 0);
14707 SDValue RMValue = Op->getOperand(Num: 1);
14708 SDValue SysRegNo = DAG.getTargetConstant(Val: RISCVSysReg::frm, DL, VT: XLenVT);
14709
  // The encoding used for the rounding mode in RISC-V differs from that used
  // by FLT_ROUNDS. To convert between them, the C rounding mode is used as an
  // index into a table consisting of a sequence of 4-bit fields, each
  // representing the corresponding RISC-V mode.
14714 static const unsigned Table =
14715 (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
14716 (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
14717 (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
14718 (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
14719 (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
14720
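  // Use the incoming FLT_ROUNDS value (zero-extended to XLenVT) the same way:
  // shift it left by 2 to form a bit offset and extract the corresponding
  // 4-bit RISC-V rounding mode from the table before writing it to frm.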
14721 RMValue = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: XLenVT, Operand: RMValue);
14722
14723 SDValue Shift = DAG.getNode(Opcode: ISD::SHL, DL, VT: XLenVT, N1: RMValue,
14724 N2: DAG.getConstant(Val: 2, DL, VT: XLenVT));
14725 SDValue Shifted = DAG.getNode(Opcode: ISD::SRL, DL, VT: XLenVT,
14726 N1: DAG.getConstant(Val: Table, DL, VT: XLenVT), N2: Shift);
14727 RMValue = DAG.getNode(Opcode: ISD::AND, DL, VT: XLenVT, N1: Shifted,
14728 N2: DAG.getConstant(Val: 0x7, DL, VT: XLenVT));
14729 return DAG.getNode(Opcode: RISCVISD::WRITE_CSR, DL, VT: MVT::Other, N1: Chain, N2: SysRegNo,
14730 N3: RMValue);
14731}
14732
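// The floating-point environment is modeled as the fcsr register, so reading
// it is a plain CSR read.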
14733SDValue RISCVTargetLowering::lowerGET_FPENV(SDValue Op,
14734 SelectionDAG &DAG) const {
14735 const MVT XLenVT = Subtarget.getXLenVT();
14736 SDLoc DL(Op);
14737 SDValue Chain = Op->getOperand(Num: 0);
14738 SDValue SysRegNo = DAG.getTargetConstant(Val: RISCVSysReg::fcsr, DL, VT: XLenVT);
14739 SDVTList VTs = DAG.getVTList(VT1: XLenVT, VT2: MVT::Other);
14740 return DAG.getNode(Opcode: RISCVISD::READ_CSR, DL, VTList: VTs, N1: Chain, N2: SysRegNo);
14741}
14742
14743SDValue RISCVTargetLowering::lowerSET_FPENV(SDValue Op,
14744 SelectionDAG &DAG) const {
14745 const MVT XLenVT = Subtarget.getXLenVT();
14746 SDLoc DL(Op);
14747 SDValue Chain = Op->getOperand(Num: 0);
14748 SDValue EnvValue = Op->getOperand(Num: 1);
14749 SDValue SysRegNo = DAG.getTargetConstant(Val: RISCVSysReg::fcsr, DL, VT: XLenVT);
14750
14751 EnvValue = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: XLenVT, Operand: EnvValue);
14752 return DAG.getNode(Opcode: RISCVISD::WRITE_CSR, DL, VT: MVT::Other, N1: Chain, N2: SysRegNo,
14753 N3: EnvValue);
14754}
14755
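// Resetting the FP environment writes X0 (all zeroes, i.e. the default RNE
// rounding mode with no accrued exception flags) to fcsr.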
14756SDValue RISCVTargetLowering::lowerRESET_FPENV(SDValue Op,
14757 SelectionDAG &DAG) const {
14758 const MVT XLenVT = Subtarget.getXLenVT();
14759 SDLoc DL(Op);
14760 SDValue Chain = Op->getOperand(Num: 0);
14761 SDValue EnvValue = DAG.getRegister(Reg: RISCV::X0, VT: XLenVT);
14762 SDValue SysRegNo = DAG.getTargetConstant(Val: RISCVSysReg::fcsr, DL, VT: XLenVT);
14763
14764 return DAG.getNode(Opcode: RISCVISD::WRITE_CSR, DL, VT: MVT::Other, N1: Chain, N2: SysRegNo,
14765 N3: EnvValue);
14766}
14767
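// fcsr holds both the dynamic rounding mode and the accrued exception flags.
// The masks below cover everything except the exception flag bits; they are
// used when writing the FP mode so that the accrued flags are left untouched.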
14768const uint64_t ModeMask64 = ~RISCVExceptFlags::ALL;
14769const uint32_t ModeMask32 = ~RISCVExceptFlags::ALL;
14770
14771SDValue RISCVTargetLowering::lowerGET_FPMODE(SDValue Op,
14772 SelectionDAG &DAG) const {
14773 const MVT XLenVT = Subtarget.getXLenVT();
14774 SDLoc DL(Op);
14775 SDValue Chain = Op->getOperand(Num: 0);
14776 SDValue SysRegNo = DAG.getTargetConstant(Val: RISCVSysReg::fcsr, DL, VT: XLenVT);
14777 SDVTList VTs = DAG.getVTList(VT1: XLenVT, VT2: MVT::Other);
14778 SDValue Result = DAG.getNode(Opcode: RISCVISD::READ_CSR, DL, VTList: VTs, N1: Chain, N2: SysRegNo);
14779 Chain = Result.getValue(R: 1);
14780 return DAG.getMergeValues(Ops: {Result, Chain}, dl: DL);
14781}
14782
14783SDValue RISCVTargetLowering::lowerSET_FPMODE(SDValue Op,
14784 SelectionDAG &DAG) const {
14785 const MVT XLenVT = Subtarget.getXLenVT();
14786 const uint64_t ModeMaskValue = Subtarget.is64Bit() ? ModeMask64 : ModeMask32;
14787 SDLoc DL(Op);
14788 SDValue Chain = Op->getOperand(Num: 0);
14789 SDValue EnvValue = Op->getOperand(Num: 1);
14790 SDValue SysRegNo = DAG.getTargetConstant(Val: RISCVSysReg::fcsr, DL, VT: XLenVT);
14791 SDValue ModeMask = DAG.getConstant(Val: ModeMaskValue, DL, VT: XLenVT);
14792
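  // Clear the whole mode portion of fcsr first, then set the requested mode
  // bits. The new value has the exception flag bits masked out so the accrued
  // flags are not modified.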
14793 EnvValue = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: XLenVT, Operand: EnvValue);
14794 EnvValue = DAG.getNode(Opcode: ISD::AND, DL, VT: XLenVT, N1: EnvValue, N2: ModeMask);
14795 Chain = DAG.getNode(Opcode: RISCVISD::CLEAR_CSR, DL, VT: MVT::Other, N1: Chain, N2: SysRegNo,
14796 N3: ModeMask);
14797 return DAG.getNode(Opcode: RISCVISD::SET_CSR, DL, VT: MVT::Other, N1: Chain, N2: SysRegNo,
14798 N3: EnvValue);
14799}
14800
14801SDValue RISCVTargetLowering::lowerRESET_FPMODE(SDValue Op,
14802 SelectionDAG &DAG) const {
14803 const MVT XLenVT = Subtarget.getXLenVT();
14804 const uint64_t ModeMaskValue = Subtarget.is64Bit() ? ModeMask64 : ModeMask32;
14805 SDLoc DL(Op);
14806 SDValue Chain = Op->getOperand(Num: 0);
14807 SDValue SysRegNo = DAG.getTargetConstant(Val: RISCVSysReg::fcsr, DL, VT: XLenVT);
14808 SDValue ModeMask = DAG.getConstant(Val: ModeMaskValue, DL, VT: XLenVT);
14809
14810 return DAG.getNode(Opcode: RISCVISD::CLEAR_CSR, DL, VT: MVT::Other, N1: Chain, N2: SysRegNo,
14811 N3: ModeMask);
14812}
14813
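// Lower EH_DWARF_CFA by creating a fixed frame object at offset 0 from the
// incoming stack pointer; its frame index resolves to the incoming stack
// pointer, which serves as the DWARF canonical frame address.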
14814SDValue RISCVTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
14815 SelectionDAG &DAG) const {
14816 MachineFunction &MF = DAG.getMachineFunction();
14817
14818 bool isRISCV64 = Subtarget.is64Bit();
14819 EVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
14820
14821 int FI = MF.getFrameInfo().CreateFixedObject(Size: isRISCV64 ? 8 : 4, SPOffset: 0, IsImmutable: false);
14822 return DAG.getFrameIndex(FI, VT: PtrVT);
14823}
14824
14825// Returns the opcode of the target-specific SDNode that implements the 32-bit
14826// form of the given Opcode.
14827static unsigned getRISCVWOpcode(unsigned Opcode) {
14828 switch (Opcode) {
14829 default:
14830 llvm_unreachable("Unexpected opcode");
14831 case ISD::SHL:
14832 return RISCVISD::SLLW;
14833 case ISD::SRA:
14834 return RISCVISD::SRAW;
14835 case ISD::SRL:
14836 return RISCVISD::SRLW;
14837 case ISD::SDIV:
14838 return RISCVISD::DIVW;
14839 case ISD::UDIV:
14840 return RISCVISD::DIVUW;
14841 case ISD::UREM:
14842 return RISCVISD::REMUW;
14843 case ISD::ROTL:
14844 return RISCVISD::ROLW;
14845 case ISD::ROTR:
14846 return RISCVISD::RORW;
14847 }
14848}
14849
// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
// node. Because i8/i16/i32 aren't legal types on RV64, these operations would
// otherwise be promoted to i64, making it difficult to select the
// SLLW/DIVUW/.../*W instructions later because the fact that the operation was
// originally of type i8/i16/i32 is lost.
14855static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
14856 unsigned ExtOpc = ISD::ANY_EXTEND) {
14857 SDLoc DL(N);
14858 unsigned WOpcode = getRISCVWOpcode(Opcode: N->getOpcode());
14859 SDValue NewOp0 = DAG.getNode(Opcode: ExtOpc, DL, VT: MVT::i64, Operand: N->getOperand(Num: 0));
14860 SDValue NewOp1 = DAG.getNode(Opcode: ExtOpc, DL, VT: MVT::i64, Operand: N->getOperand(Num: 1));
14861 SDValue NewRes = DAG.getNode(Opcode: WOpcode, DL, VT: MVT::i64, N1: NewOp0, N2: NewOp1);
14862 // ReplaceNodeResults requires we maintain the same type for the return value.
14863 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: N->getValueType(ResNo: 0), Operand: NewRes);
14864}
14865
// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics to reduce the number of sign-extension instructions.
14868static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
14869 SDLoc DL(N);
14870 SDValue NewOp0 = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 0));
14871 SDValue NewOp1 = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 1));
14872 SDValue NewWOp = DAG.getNode(Opcode: N->getOpcode(), DL, VT: MVT::i64, N1: NewOp0, N2: NewOp1);
14873 SDValue NewRes = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT: MVT::i64, N1: NewWOp,
14874 N2: DAG.getValueType(MVT::i32));
14875 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: NewRes);
14876}
14877
14878void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
14879 SmallVectorImpl<SDValue> &Results,
14880 SelectionDAG &DAG) const {
14881 SDLoc DL(N);
14882 switch (N->getOpcode()) {
14883 default:
14884 llvm_unreachable("Don't know how to custom type legalize this operation!");
14885 case ISD::STRICT_FP_TO_SINT:
14886 case ISD::STRICT_FP_TO_UINT:
14887 case ISD::FP_TO_SINT:
14888 case ISD::FP_TO_UINT: {
14889 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
14890 "Unexpected custom legalisation");
14891 bool IsStrict = N->isStrictFPOpcode();
14892 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
14893 N->getOpcode() == ISD::STRICT_FP_TO_SINT;
14894 SDValue Op0 = IsStrict ? N->getOperand(Num: 1) : N->getOperand(Num: 0);
14895 if (getTypeAction(Context&: *DAG.getContext(), VT: Op0.getValueType()) !=
14896 TargetLowering::TypeSoftenFloat) {
14897 if (!isTypeLegal(VT: Op0.getValueType()))
14898 return;
14899 if (IsStrict) {
14900 SDValue Chain = N->getOperand(Num: 0);
        // In the absence of Zfh, promote f16 to f32, then convert.
14902 if (Op0.getValueType() == MVT::f16 &&
14903 !Subtarget.hasStdExtZfhOrZhinx()) {
14904 Op0 = DAG.getNode(Opcode: ISD::STRICT_FP_EXTEND, DL, ResultTys: {MVT::f32, MVT::Other},
14905 Ops: {Chain, Op0});
14906 Chain = Op0.getValue(R: 1);
14907 }
14908 unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
14909 : RISCVISD::STRICT_FCVT_WU_RV64;
14910 SDVTList VTs = DAG.getVTList(VT1: MVT::i64, VT2: MVT::Other);
14911 SDValue Res = DAG.getNode(
14912 Opcode: Opc, DL, VTList: VTs, N1: Chain, N2: Op0,
14913 N3: DAG.getTargetConstant(Val: RISCVFPRndMode::RTZ, DL, VT: MVT::i64));
14914 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
14915 Results.push_back(Elt: Res.getValue(R: 1));
14916 return;
14917 }
      // For bf16, or for f16 in the absence of Zfh, promote [b]f16 to f32 and
      // then convert.
14920 if ((Op0.getValueType() == MVT::f16 &&
14921 !Subtarget.hasStdExtZfhOrZhinx()) ||
14922 Op0.getValueType() == MVT::bf16)
14923 Op0 = DAG.getNode(Opcode: ISD::FP_EXTEND, DL, VT: MVT::f32, Operand: Op0);
14924
14925 unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
14926 SDValue Res =
14927 DAG.getNode(Opcode: Opc, DL, VT: MVT::i64, N1: Op0,
14928 N2: DAG.getTargetConstant(Val: RISCVFPRndMode::RTZ, DL, VT: MVT::i64));
14929 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
14930 return;
14931 }
    // If the FP type needs to be softened, emit a library call using the 'si'
    // version. If we left it to default legalization, we'd end up with 'di'.
    // If the FP type doesn't need to be softened, just let generic type
    // legalization promote the result type.
14936 RTLIB::Libcall LC;
14937 if (IsSigned)
14938 LC = RTLIB::getFPTOSINT(OpVT: Op0.getValueType(), RetVT: N->getValueType(ResNo: 0));
14939 else
14940 LC = RTLIB::getFPTOUINT(OpVT: Op0.getValueType(), RetVT: N->getValueType(ResNo: 0));
14941 MakeLibCallOptions CallOptions;
14942 EVT OpVT = Op0.getValueType();
14943 CallOptions.setTypeListBeforeSoften(OpsVT: OpVT, RetVT: N->getValueType(ResNo: 0));
14944 SDValue Chain = IsStrict ? N->getOperand(Num: 0) : SDValue();
14945 SDValue Result;
14946 std::tie(args&: Result, args&: Chain) =
14947 makeLibCall(DAG, LC, RetVT: N->getValueType(ResNo: 0), Ops: Op0, CallOptions, dl: DL, Chain);
14948 Results.push_back(Elt: Result);
14949 if (IsStrict)
14950 Results.push_back(Elt: Chain);
14951 break;
14952 }
14953 case ISD::LROUND: {
14954 SDValue Op0 = N->getOperand(Num: 0);
14955 EVT Op0VT = Op0.getValueType();
14956 if (getTypeAction(Context&: *DAG.getContext(), VT: Op0.getValueType()) !=
14957 TargetLowering::TypeSoftenFloat) {
14958 if (!isTypeLegal(VT: Op0VT))
14959 return;
14960
      // In the absence of Zfh, promote f16 to f32, then convert.
14962 if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx())
14963 Op0 = DAG.getNode(Opcode: ISD::FP_EXTEND, DL, VT: MVT::f32, Operand: Op0);
14964
14965 SDValue Res =
14966 DAG.getNode(Opcode: RISCVISD::FCVT_W_RV64, DL, VT: MVT::i64, N1: Op0,
14967 N2: DAG.getTargetConstant(Val: RISCVFPRndMode::RMM, DL, VT: MVT::i64));
14968 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
14969 return;
14970 }
    // If the FP type needs to be softened, emit a library call to lround. We
    // will need to truncate the result. We assume any result that doesn't fit
    // in i32 is allowed to be an unspecified value.
14974 RTLIB::Libcall LC =
14975 Op0.getValueType() == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;
14976 MakeLibCallOptions CallOptions;
14977 EVT OpVT = Op0.getValueType();
14978 CallOptions.setTypeListBeforeSoften(OpsVT: OpVT, RetVT: MVT::i64);
14979 SDValue Result = makeLibCall(DAG, LC, RetVT: MVT::i64, Ops: Op0, CallOptions, dl: DL).first;
14980 Result = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Result);
14981 Results.push_back(Elt: Result);
14982 break;
14983 }
14984 case ISD::READCYCLECOUNTER:
14985 case ISD::READSTEADYCOUNTER: {
14986 assert(!Subtarget.is64Bit() && "READCYCLECOUNTER/READSTEADYCOUNTER only "
14987 "has custom type legalization on riscv32");
14988
14989 SDValue LoCounter, HiCounter;
14990 MVT XLenVT = Subtarget.getXLenVT();
14991 if (N->getOpcode() == ISD::READCYCLECOUNTER) {
14992 LoCounter = DAG.getTargetConstant(Val: RISCVSysReg::cycle, DL, VT: XLenVT);
14993 HiCounter = DAG.getTargetConstant(Val: RISCVSysReg::cycleh, DL, VT: XLenVT);
14994 } else {
14995 LoCounter = DAG.getTargetConstant(Val: RISCVSysReg::time, DL, VT: XLenVT);
14996 HiCounter = DAG.getTargetConstant(Val: RISCVSysReg::timeh, DL, VT: XLenVT);
14997 }
14998 SDVTList VTs = DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32, VT3: MVT::Other);
14999 SDValue RCW = DAG.getNode(Opcode: RISCVISD::READ_COUNTER_WIDE, DL, VTList: VTs,
15000 N1: N->getOperand(Num: 0), N2: LoCounter, N3: HiCounter);
15001
15002 Results.push_back(
15003 Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: RCW, N2: RCW.getValue(R: 1)));
15004 Results.push_back(Elt: RCW.getValue(R: 2));
15005 break;
15006 }
15007 case ISD::LOAD: {
15008 if (!ISD::isNON_EXTLoad(N))
15009 return;
15010
15011 // Use a SEXTLOAD instead of the default EXTLOAD. Similar to the
15012 // sext_inreg we emit for ADD/SUB/MUL/SLLI.
15013 LoadSDNode *Ld = cast<LoadSDNode>(Val: N);
15014
15015 if (N->getValueType(ResNo: 0) == MVT::i64) {
15016 assert(Subtarget.hasStdExtZilsd() && !Subtarget.is64Bit() &&
15017 "Unexpected custom legalisation");
15018
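      // With Zilsd on RV32, lower a sufficiently aligned i64 load to a single
      // LD_RV32 node producing the low and high i32 halves; otherwise leave
      // it to the default legalization.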
15019 if (Ld->getAlign() < Subtarget.getZilsdAlign())
15020 return;
15021
15022 SDLoc DL(N);
15023 SDValue Result = DAG.getMemIntrinsicNode(
15024 Opcode: RISCVISD::LD_RV32, dl: DL,
15025 VTList: DAG.getVTList(VTs: {MVT::i32, MVT::i32, MVT::Other}),
15026 Ops: {Ld->getChain(), Ld->getBasePtr()}, MemVT: MVT::i64, MMO: Ld->getMemOperand());
15027 SDValue Lo = Result.getValue(R: 0);
15028 SDValue Hi = Result.getValue(R: 1);
15029 SDValue Pair = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: Lo, N2: Hi);
15030 Results.append(IL: {Pair, Result.getValue(R: 2)});
15031 return;
15032 }
15033
15034 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
15035 "Unexpected custom legalisation");
15036
15037 SDLoc dl(N);
15038 SDValue Res = DAG.getExtLoad(ExtType: ISD::SEXTLOAD, dl, VT: MVT::i64, Chain: Ld->getChain(),
15039 Ptr: Ld->getBasePtr(), MemVT: Ld->getMemoryVT(),
15040 MMO: Ld->getMemOperand());
15041 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::i32, Operand: Res));
15042 Results.push_back(Elt: Res.getValue(R: 1));
15043 return;
15044 }
15045 case ISD::MUL: {
15046 unsigned Size = N->getSimpleValueType(ResNo: 0).getSizeInBits();
15047 unsigned XLen = Subtarget.getXLen();
    // This multiply needs to be expanded; try to use MULHSU+MUL if possible.
15049 if (Size > XLen) {
15050 assert(Size == (XLen * 2) && "Unexpected custom legalisation");
15051 SDValue LHS = N->getOperand(Num: 0);
15052 SDValue RHS = N->getOperand(Num: 1);
15053 APInt HighMask = APInt::getHighBitsSet(numBits: Size, hiBitsSet: XLen);
15054
15055 bool LHSIsU = DAG.MaskedValueIsZero(Op: LHS, Mask: HighMask);
15056 bool RHSIsU = DAG.MaskedValueIsZero(Op: RHS, Mask: HighMask);
15057 // We need exactly one side to be unsigned.
15058 if (LHSIsU == RHSIsU)
15059 return;
15060
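      // Build the double-width product from an XLenVT MUL (low half) and
      // MULHSU (high half). S is the operand whose high half is a sign
      // extension of its low half; U is the operand whose high half is known
      // to be zero.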
15061 auto MakeMULPair = [&](SDValue S, SDValue U) {
15062 MVT XLenVT = Subtarget.getXLenVT();
15063 S = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: XLenVT, Operand: S);
15064 U = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: XLenVT, Operand: U);
15065 SDValue Lo = DAG.getNode(Opcode: ISD::MUL, DL, VT: XLenVT, N1: S, N2: U);
15066 SDValue Hi = DAG.getNode(Opcode: RISCVISD::MULHSU, DL, VT: XLenVT, N1: S, N2: U);
15067 return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: N->getValueType(ResNo: 0), N1: Lo, N2: Hi);
15068 };
15069
15070 bool LHSIsS = DAG.ComputeNumSignBits(Op: LHS) > XLen;
15071 bool RHSIsS = DAG.ComputeNumSignBits(Op: RHS) > XLen;
15072
15073 // The other operand should be signed, but still prefer MULH when
15074 // possible.
15075 if (RHSIsU && LHSIsS && !RHSIsS)
15076 Results.push_back(Elt: MakeMULPair(LHS, RHS));
15077 else if (LHSIsU && RHSIsS && !LHSIsS)
15078 Results.push_back(Elt: MakeMULPair(RHS, LHS));
15079
15080 return;
15081 }
15082 [[fallthrough]];
15083 }
15084 case ISD::ADD:
15085 case ISD::SUB:
15086 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
15087 "Unexpected custom legalisation");
15088 Results.push_back(Elt: customLegalizeToWOpWithSExt(N, DAG));
15089 break;
15090 case ISD::SHL:
15091 case ISD::SRA:
15092 case ISD::SRL: {
15093 EVT VT = N->getValueType(ResNo: 0);
15094 if (VT.isFixedLengthVector() && Subtarget.enablePExtSIMDCodeGen()) {
15095 assert(Subtarget.is64Bit() && (VT == MVT::v2i16 || VT == MVT::v4i8) &&
15096 "Unexpected vector type for P-extension shift");
15097
      // If the shift amount is a splat, don't scalarize; let normal widening
      // and SIMD patterns handle it (pslli.h, psrli.h, etc.).
15100 SDValue ShiftAmt = N->getOperand(Num: 1);
15101 if (DAG.isSplatValue(V: ShiftAmt, /*AllowUndefs=*/true))
15102 break;
15103
15104 EVT WidenVT = getTypeToTransformTo(Context&: *DAG.getContext(), VT);
15105 unsigned WidenNumElts = WidenVT.getVectorNumElements();
      // Unroll with OrigNumElts operations, padding the result to WidenNumElts.
15107 SDValue Res = DAG.UnrollVectorOp(N, ResNE: WidenNumElts);
15108 Results.push_back(Elt: Res);
15109 break;
15110 }
15111
15112 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
15113 "Unexpected custom legalisation");
15114 if (N->getOperand(Num: 1).getOpcode() != ISD::Constant) {
15115 // If we can use a BSET instruction, allow default promotion to apply.
15116 if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
15117 isOneConstant(V: N->getOperand(Num: 0)))
15118 break;
15119 Results.push_back(Elt: customLegalizeToWOp(N, DAG));
15120 break;
15121 }
15122
15123 // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
15124 // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
15125 // shift amount.
15126 if (N->getOpcode() == ISD::SHL) {
15127 SDLoc DL(N);
15128 SDValue NewOp0 =
15129 DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 0));
15130 SDValue NewOp1 =
15131 DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 1));
15132 SDValue NewWOp = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i64, N1: NewOp0, N2: NewOp1);
15133 SDValue NewRes = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT: MVT::i64, N1: NewWOp,
15134 N2: DAG.getValueType(MVT::i32));
15135 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: NewRes));
15136 }
15137
15138 break;
15139 }
15140 case ISD::ROTL:
15141 case ISD::ROTR:
15142 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
15143 "Unexpected custom legalisation");
15144 assert((Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
15145 Subtarget.hasVendorXTHeadBb()) &&
15146 "Unexpected custom legalization");
15147 if (!isa<ConstantSDNode>(Val: N->getOperand(Num: 1)) &&
15148 !(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()))
15149 return;
15150 Results.push_back(Elt: customLegalizeToWOp(N, DAG));
15151 break;
15152 case ISD::CTTZ:
15153 case ISD::CTTZ_ZERO_UNDEF:
15154 case ISD::CTLZ:
15155 case ISD::CTLZ_ZERO_UNDEF:
15156 case ISD::CTLS: {
15157 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
15158 "Unexpected custom legalisation");
15159
15160 SDValue NewOp0 =
15161 DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 0));
15162 unsigned Opc;
15163 switch (N->getOpcode()) {
15164 default: llvm_unreachable("Unexpected opcode");
15165 case ISD::CTTZ:
15166 case ISD::CTTZ_ZERO_UNDEF:
15167 Opc = RISCVISD::CTZW;
15168 break;
15169 case ISD::CTLZ:
15170 case ISD::CTLZ_ZERO_UNDEF:
15171 Opc = RISCVISD::CLZW;
15172 break;
15173 case ISD::CTLS:
15174 Opc = RISCVISD::CLSW;
15175 break;
15176 }
15177
15178 SDValue Res = DAG.getNode(Opcode: Opc, DL, VT: MVT::i64, Operand: NewOp0);
15179 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
15180 return;
15181 }
15182 case ISD::SDIV:
15183 case ISD::UDIV:
15184 case ISD::UREM: {
15185 MVT VT = N->getSimpleValueType(ResNo: 0);
15186 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
15187 Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
15188 "Unexpected custom legalisation");
    // Don't promote division/remainder by a constant since we should expand
    // those to a multiply by a magic constant.
15191 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
15192 if (N->getOperand(Num: 1).getOpcode() == ISD::Constant &&
15193 !isIntDivCheap(VT: N->getValueType(ResNo: 0), Attr))
15194 return;
15195
15196 // If the input is i32, use ANY_EXTEND since the W instructions don't read
15197 // the upper 32 bits. For other types we need to sign or zero extend
15198 // based on the opcode.
15199 unsigned ExtOpc = ISD::ANY_EXTEND;
15200 if (VT != MVT::i32)
15201 ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
15202 : ISD::ZERO_EXTEND;
15203
15204 Results.push_back(Elt: customLegalizeToWOp(N, DAG, ExtOpc));
15205 break;
15206 }
15207 case ISD::SADDO:
15208 case ISD::SSUBO: {
15209 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
15210 "Unexpected custom legalisation");
15211
15212 // This is similar to the default legalization, but we return the
15213 // sext_inreg instead of the add/sub.
15214 bool IsAdd = N->getOpcode() == ISD::SADDO;
15215 SDValue LHS = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 0));
15216 SDValue RHS = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 1));
15217 SDValue Op =
15218 DAG.getNode(Opcode: IsAdd ? ISD::ADD : ISD::SUB, DL, VT: MVT::i64, N1: LHS, N2: RHS);
15219 SDValue Res = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT: MVT::i64, N1: Op,
15220 N2: DAG.getValueType(MVT::i32));
15221
15222 SDValue Overflow;
15223
15224 // If the RHS is a constant, we can simplify ConditionRHS below. Otherwise
15225 // use the default legalization.
15226 if (IsAdd && isa<ConstantSDNode>(Val: N->getOperand(Num: 1))) {
15227 SDValue Zero = DAG.getConstant(Val: 0, DL, VT: MVT::i64);
15228
      // For an addition, the result should be less than one of the operands
      // (LHS) if and only if the other operand (RHS) is negative; otherwise
      // there will be overflow.
15232 EVT OType = N->getValueType(ResNo: 1);
15233 SDValue ResultLowerThanLHS =
15234 DAG.getSetCC(DL, VT: OType, LHS: Res, RHS: LHS, Cond: ISD::SETLT);
15235 SDValue ConditionRHS = DAG.getSetCC(DL, VT: OType, LHS: RHS, RHS: Zero, Cond: ISD::SETLT);
15236
15237 Overflow =
15238 DAG.getNode(Opcode: ISD::XOR, DL, VT: OType, N1: ConditionRHS, N2: ResultLowerThanLHS);
15239 } else {
15240 Overflow = DAG.getSetCC(DL, VT: N->getValueType(ResNo: 1), LHS: Res, RHS: Op, Cond: ISD::SETNE);
15241 }
15242
15243 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
15244 Results.push_back(Elt: Overflow);
15245 return;
15246 }
15247 case ISD::UADDO:
15248 case ISD::USUBO: {
15249 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
15250 "Unexpected custom legalisation");
15251 bool IsAdd = N->getOpcode() == ISD::UADDO;
15252 // Create an ADDW or SUBW.
15253 SDValue LHS = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 0));
15254 SDValue RHS = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 1));
15255 SDValue Res =
15256 DAG.getNode(Opcode: IsAdd ? ISD::ADD : ISD::SUB, DL, VT: MVT::i64, N1: LHS, N2: RHS);
15257 Res = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT: MVT::i64, N1: Res,
15258 N2: DAG.getValueType(MVT::i32));
15259
15260 SDValue Overflow;
15261 if (IsAdd && isOneConstant(V: RHS)) {
      // Special case: uaddo X, 1 overflowed if the addition result is 0.
      // The general case (X + C) < C is not necessarily beneficial. Although
      // we reduce the live range of X, we may introduce the materialization
      // of constant C, especially when the setcc result is used by a branch.
      // We have no compare-with-constant-and-branch instructions.
15267 Overflow = DAG.getSetCC(DL, VT: N->getValueType(ResNo: 1), LHS: Res,
15268 RHS: DAG.getConstant(Val: 0, DL, VT: MVT::i64), Cond: ISD::SETEQ);
15269 } else if (IsAdd && isAllOnesConstant(V: RHS)) {
      // Special case: uaddo X, -1 overflowed if X != 0.
15271 Overflow = DAG.getSetCC(DL, VT: N->getValueType(ResNo: 1), LHS: N->getOperand(Num: 0),
15272 RHS: DAG.getConstant(Val: 0, DL, VT: MVT::i32), Cond: ISD::SETNE);
15273 } else {
15274 // Sign extend the LHS and perform an unsigned compare with the ADDW
15275 // result. Since the inputs are sign extended from i32, this is equivalent
15276 // to comparing the lower 32 bits.
15277 LHS = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 0));
15278 Overflow = DAG.getSetCC(DL, VT: N->getValueType(ResNo: 1), LHS: Res, RHS: LHS,
15279 Cond: IsAdd ? ISD::SETULT : ISD::SETUGT);
15280 }
15281
15282 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
15283 Results.push_back(Elt: Overflow);
15284 return;
15285 }
15286 case ISD::UADDSAT:
15287 case ISD::USUBSAT: {
15288 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
15289 !Subtarget.hasStdExtZbb() && "Unexpected custom legalisation");
15290 // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
15291 // promotion for UADDO/USUBO.
15292 Results.push_back(Elt: expandAddSubSat(Node: N, DAG));
15293 return;
15294 }
15295 case ISD::SADDSAT:
15296 case ISD::SSUBSAT: {
15297 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
15298 "Unexpected custom legalisation");
15299 Results.push_back(Elt: expandAddSubSat(Node: N, DAG));
15300 return;
15301 }
15302 case ISD::ABS: {
15303 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
15304 "Unexpected custom legalisation");
15305
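    // With the P extension, use ABSW on the any-extended source and truncate
    // the result back to i32.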
15306 if (Subtarget.hasStdExtP()) {
15307 SDValue Src =
15308 DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 0));
15309 SDValue Abs = DAG.getNode(Opcode: RISCVISD::ABSW, DL, VT: MVT::i64, Operand: Src);
15310 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Abs));
15311 return;
15312 }
15313
15314 if (Subtarget.hasStdExtZbb()) {
15315 // Emit a special node that will be expanded to NEGW+MAX at isel.
15316 // This allows us to remember that the result is sign extended. Expanding
15317 // to NEGW+MAX here requires a Freeze which breaks ComputeNumSignBits.
15318 SDValue Src = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT: MVT::i64,
15319 Operand: N->getOperand(Num: 0));
15320 SDValue Abs = DAG.getNode(Opcode: RISCVISD::NEGW_MAX, DL, VT: MVT::i64, Operand: Src);
15321 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Abs));
15322 return;
15323 }
15324
15325 // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)
15326 SDValue Src = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 0));
15327
    // Freeze the source so we can increase its use count.
15329 Src = DAG.getFreeze(V: Src);
15330
15331 // Copy sign bit to all bits using the sraiw pattern.
15332 SDValue SignFill = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT: MVT::i64, N1: Src,
15333 N2: DAG.getValueType(MVT::i32));
15334 SignFill = DAG.getNode(Opcode: ISD::SRA, DL, VT: MVT::i64, N1: SignFill,
15335 N2: DAG.getConstant(Val: 31, DL, VT: MVT::i64));
15336
15337 SDValue NewRes = DAG.getNode(Opcode: ISD::XOR, DL, VT: MVT::i64, N1: Src, N2: SignFill);
15338 NewRes = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i64, N1: NewRes, N2: SignFill);
15339
15340 // NOTE: The result is only required to be anyextended, but sext is
15341 // consistent with type legalization of sub.
15342 NewRes = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT: MVT::i64, N1: NewRes,
15343 N2: DAG.getValueType(MVT::i32));
15344 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: NewRes));
15345 return;
15346 }
15347 case ISD::BITCAST: {
15348 EVT VT = N->getValueType(ResNo: 0);
15349 assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
15350 SDValue Op0 = N->getOperand(Num: 0);
15351 EVT Op0VT = Op0.getValueType();
15352 MVT XLenVT = Subtarget.getXLenVT();
15353 if (VT == MVT::i16 &&
15354 ((Op0VT == MVT::f16 && Subtarget.hasStdExtZfhminOrZhinxmin()) ||
15355 (Op0VT == MVT::bf16 && Subtarget.hasStdExtZfbfmin()))) {
15356 SDValue FPConv = DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTH, DL, VT: XLenVT, Operand: Op0);
15357 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i16, Operand: FPConv));
15358 } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
15359 Subtarget.hasStdExtFOrZfinx()) {
15360 SDValue FPConv =
15361 DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTW_RV64, DL, VT: MVT::i64, Operand: Op0);
15362 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: FPConv));
15363 } else if (VT == MVT::i64 && Op0VT == MVT::f64 && !Subtarget.is64Bit() &&
15364 Subtarget.hasStdExtDOrZdinx()) {
15365 SDValue NewReg = DAG.getNode(Opcode: RISCVISD::SplitF64, DL,
15366 VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Op0);
15367 SDValue Lo = NewReg.getValue(R: 0);
15368 SDValue Hi = NewReg.getValue(R: 1);
15369 // For big-endian, swap the order when building the i64 pair.
15370 if (!Subtarget.isLittleEndian())
15371 std::swap(a&: Lo, b&: Hi);
15372 SDValue RetReg = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: Lo, N2: Hi);
15373 Results.push_back(Elt: RetReg);
15374 } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
15375 isTypeLegal(VT: Op0VT)) {
15376 // Custom-legalize bitcasts from fixed-length vector types to illegal
15377 // scalar types in order to improve codegen. Bitcast the vector to a
15378 // one-element vector type whose element type is the same as the result
15379 // type, and extract the first element.
15380 EVT BVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT, NumElements: 1);
15381 if (isTypeLegal(VT: BVT)) {
15382 SDValue BVec = DAG.getBitcast(VT: BVT, V: Op0);
15383 Results.push_back(Elt: DAG.getExtractVectorElt(DL, VT, Vec: BVec, Idx: 0));
15384 }
15385 }
15386 break;
15387 }
15388 case ISD::BITREVERSE: {
15389 assert(N->getValueType(0) == MVT::i8 && Subtarget.hasStdExtZbkb() &&
15390 "Unexpected custom legalisation");
15391 MVT XLenVT = Subtarget.getXLenVT();
15392 SDValue NewOp = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: XLenVT, Operand: N->getOperand(Num: 0));
15393 SDValue NewRes = DAG.getNode(Opcode: RISCVISD::BREV8, DL, VT: XLenVT, Operand: NewOp);
15394 // ReplaceNodeResults requires we maintain the same type for the return
15395 // value.
15396 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i8, Operand: NewRes));
15397 break;
15398 }
15399 case RISCVISD::BREV8:
15400 case RISCVISD::ORC_B: {
15401 MVT VT = N->getSimpleValueType(ResNo: 0);
15402 MVT XLenVT = Subtarget.getXLenVT();
15403 assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
15404 "Unexpected custom legalisation");
15405 assert(((N->getOpcode() == RISCVISD::BREV8 && Subtarget.hasStdExtZbkb()) ||
15406 (N->getOpcode() == RISCVISD::ORC_B && Subtarget.hasStdExtZbb())) &&
15407 "Unexpected extension");
15408 SDValue NewOp = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: XLenVT, Operand: N->getOperand(Num: 0));
15409 SDValue NewRes = DAG.getNode(Opcode: N->getOpcode(), DL, VT: XLenVT, Operand: NewOp);
15410 // ReplaceNodeResults requires we maintain the same type for the return
15411 // value.
15412 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT, Operand: NewRes));
15413 break;
15414 }
15415 case RISCVISD::PASUB:
15416 case RISCVISD::PASUBU:
15417 case RISCVISD::PMULHSU:
15418 case RISCVISD::PMULHR:
15419 case RISCVISD::PMULHRU:
15420 case RISCVISD::PMULHRSU: {
15421 MVT VT = N->getSimpleValueType(ResNo: 0);
15422 SDValue Op0 = N->getOperand(Num: 0);
15423 SDValue Op1 = N->getOperand(Num: 1);
15424 unsigned Opcode = N->getOpcode();
15425 // PMULH* variants don't support i8
15426 [[maybe_unused]] bool IsMulH =
15427 Opcode == RISCVISD::PMULHSU || Opcode == RISCVISD::PMULHR ||
15428 Opcode == RISCVISD::PMULHRU || Opcode == RISCVISD::PMULHRSU;
15429 assert(VT == MVT::v2i16 || (!IsMulH && VT == MVT::v4i8));
15430 MVT NewVT = MVT::v4i16;
15431 if (VT == MVT::v4i8)
15432 NewVT = MVT::v8i8;
15433 SDValue Undef = DAG.getUNDEF(VT);
15434 Op0 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: NewVT, Ops: {Op0, Undef});
15435 Op1 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: NewVT, Ops: {Op1, Undef});
15436 Results.push_back(Elt: DAG.getNode(Opcode, DL, VT: NewVT, Ops: {Op0, Op1}));
15437 return;
15438 }
15439 case ISD::EXTRACT_VECTOR_ELT: {
    // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN < SEW, as the SEW
    // element type is illegal (currently only vXi64 on RV32).
    // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
    // transferred to the destination register. We issue two of these from the
    // upper and lower halves of the SEW-bit vector element, slid down to the
    // first element.
15446 SDValue Vec = N->getOperand(Num: 0);
15447 SDValue Idx = N->getOperand(Num: 1);
15448
    // The vector type hasn't been legalized yet, so we can't issue
    // target-specific nodes if it needs legalization.
    // FIXME: We would manually legalize if it's important.
15452 if (!isTypeLegal(VT: Vec.getValueType()))
15453 return;
15454
15455 MVT VecVT = Vec.getSimpleValueType();
15456
15457 assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
15458 VecVT.getVectorElementType() == MVT::i64 &&
15459 "Unexpected EXTRACT_VECTOR_ELT legalization");
15460
15461 // If this is a fixed vector, we need to convert it to a scalable vector.
15462 MVT ContainerVT = VecVT;
15463 if (VecVT.isFixedLengthVector()) {
15464 ContainerVT = getContainerForFixedLengthVector(VT: VecVT);
15465 Vec = convertToScalableVector(VT: ContainerVT, V: Vec, DAG, Subtarget);
15466 }
15467
15468 MVT XLenVT = Subtarget.getXLenVT();
15469
15470 // Use a VL of 1 to avoid processing more elements than we need.
15471 auto [Mask, VL] = getDefaultVLOps(NumElts: 1, ContainerVT, DL, DAG, Subtarget);
15472
15473 // Unless the index is known to be 0, we must slide the vector down to get
15474 // the desired element into index 0.
15475 if (!isNullConstant(V: Idx)) {
15476 Vec = getVSlidedown(DAG, Subtarget, DL, VT: ContainerVT,
15477 Passthru: DAG.getUNDEF(VT: ContainerVT), Op: Vec, Offset: Idx, Mask, VL);
15478 }
15479
15480 // Extract the lower XLEN bits of the correct vector element.
15481 SDValue EltLo = DAG.getNode(Opcode: RISCVISD::VMV_X_S, DL, VT: XLenVT, Operand: Vec);
15482
15483 // To extract the upper XLEN bits of the vector element, shift the first
15484 // element right by 32 bits and re-extract the lower XLEN bits.
15485 SDValue ThirtyTwoV = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: ContainerVT,
15486 N1: DAG.getUNDEF(VT: ContainerVT),
15487 N2: DAG.getConstant(Val: 32, DL, VT: XLenVT), N3: VL);
15488 SDValue LShr32 =
15489 DAG.getNode(Opcode: RISCVISD::SRL_VL, DL, VT: ContainerVT, N1: Vec, N2: ThirtyTwoV,
15490 N3: DAG.getUNDEF(VT: ContainerVT), N4: Mask, N5: VL);
15491
15492 SDValue EltHi = DAG.getNode(Opcode: RISCVISD::VMV_X_S, DL, VT: XLenVT, Operand: LShr32);
15493
15494 Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: EltLo, N2: EltHi));
15495 break;
15496 }
15497 case ISD::INTRINSIC_WO_CHAIN: {
15498 unsigned IntNo = N->getConstantOperandVal(Num: 0);
15499 switch (IntNo) {
15500 default:
15501 llvm_unreachable(
15502 "Don't know how to custom type legalize this intrinsic!");
15503 case Intrinsic::experimental_get_vector_length: {
15504 SDValue Res = lowerGetVectorLength(N, DAG, Subtarget);
15505 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
15506 return;
15507 }
15508 case Intrinsic::experimental_cttz_elts: {
15509 SDValue Res = lowerCttzElts(N, DAG, Subtarget);
15510 Results.push_back(Elt: DAG.getZExtOrTrunc(Op: Res, DL, VT: N->getValueType(ResNo: 0)));
15511 return;
15512 }
15513 case Intrinsic::riscv_orc_b:
15514 case Intrinsic::riscv_brev8:
15515 case Intrinsic::riscv_sha256sig0:
15516 case Intrinsic::riscv_sha256sig1:
15517 case Intrinsic::riscv_sha256sum0:
15518 case Intrinsic::riscv_sha256sum1:
15519 case Intrinsic::riscv_sm3p0:
15520 case Intrinsic::riscv_sm3p1: {
15521 if (!Subtarget.is64Bit() || N->getValueType(ResNo: 0) != MVT::i32)
15522 return;
15523 unsigned Opc;
15524 switch (IntNo) {
15525 case Intrinsic::riscv_orc_b: Opc = RISCVISD::ORC_B; break;
15526 case Intrinsic::riscv_brev8: Opc = RISCVISD::BREV8; break;
15527 case Intrinsic::riscv_sha256sig0: Opc = RISCVISD::SHA256SIG0; break;
15528 case Intrinsic::riscv_sha256sig1: Opc = RISCVISD::SHA256SIG1; break;
15529 case Intrinsic::riscv_sha256sum0: Opc = RISCVISD::SHA256SUM0; break;
15530 case Intrinsic::riscv_sha256sum1: Opc = RISCVISD::SHA256SUM1; break;
15531 case Intrinsic::riscv_sm3p0: Opc = RISCVISD::SM3P0; break;
15532 case Intrinsic::riscv_sm3p1: Opc = RISCVISD::SM3P1; break;
15533 }
15534
15535 SDValue NewOp =
15536 DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 1));
15537 SDValue Res = DAG.getNode(Opcode: Opc, DL, VT: MVT::i64, Operand: NewOp);
15538 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
15539 return;
15540 }
15541 case Intrinsic::riscv_sm4ks:
15542 case Intrinsic::riscv_sm4ed: {
15543 unsigned Opc =
15544 IntNo == Intrinsic::riscv_sm4ks ? RISCVISD::SM4KS : RISCVISD::SM4ED;
15545 SDValue NewOp0 =
15546 DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 1));
15547 SDValue NewOp1 =
15548 DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 2));
15549 SDValue Res =
15550 DAG.getNode(Opcode: Opc, DL, VT: MVT::i64, N1: NewOp0, N2: NewOp1, N3: N->getOperand(Num: 3));
15551 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
15552 return;
15553 }
15554 case Intrinsic::riscv_mopr: {
15555 if (!Subtarget.is64Bit() || N->getValueType(ResNo: 0) != MVT::i32)
15556 return;
15557 SDValue NewOp =
15558 DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 1));
15559 SDValue Res = DAG.getNode(
15560 Opcode: RISCVISD::MOP_R, DL, VT: MVT::i64, N1: NewOp,
15561 N2: DAG.getTargetConstant(Val: N->getConstantOperandVal(Num: 2), DL, VT: MVT::i64));
15562 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
15563 return;
15564 }
15565 case Intrinsic::riscv_moprr: {
15566 if (!Subtarget.is64Bit() || N->getValueType(ResNo: 0) != MVT::i32)
15567 return;
15568 SDValue NewOp0 =
15569 DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 1));
15570 SDValue NewOp1 =
15571 DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 2));
15572 SDValue Res = DAG.getNode(
15573 Opcode: RISCVISD::MOP_RR, DL, VT: MVT::i64, N1: NewOp0, N2: NewOp1,
15574 N3: DAG.getTargetConstant(Val: N->getConstantOperandVal(Num: 3), DL, VT: MVT::i64));
15575 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
15576 return;
15577 }
15578 case Intrinsic::riscv_clmulh:
15579 case Intrinsic::riscv_clmulr: {
15580 if (!Subtarget.is64Bit() || N->getValueType(ResNo: 0) != MVT::i32)
15581 return;
15582
      // Extend the inputs to XLen and shift each left by 32. This adds 64
      // trailing zeros to the full 128-bit clmul result of multiplying two
      // XLen values. Perform clmulr or clmulh on the shifted values. Finally,
      // extract the upper 32 bits.
      //
      // The alternative is to mask the inputs to 32 bits and use clmul, but
      // that requires two shifts to mask each input without zext.w.
      // FIXME: If the inputs are known zero extended or could be freely
      // zero extended, the mask form would be better.
15592 SDValue NewOp0 =
15593 DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 1));
15594 SDValue NewOp1 =
15595 DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N->getOperand(Num: 2));
15596 NewOp0 = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i64, N1: NewOp0,
15597 N2: DAG.getConstant(Val: 32, DL, VT: MVT::i64));
15598 NewOp1 = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i64, N1: NewOp1,
15599 N2: DAG.getConstant(Val: 32, DL, VT: MVT::i64));
15600 unsigned Opc =
15601 IntNo == Intrinsic::riscv_clmulh ? ISD::CLMULH : ISD::CLMULR;
15602 SDValue Res = DAG.getNode(Opcode: Opc, DL, VT: MVT::i64, N1: NewOp0, N2: NewOp1);
15603 Res = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i64, N1: Res,
15604 N2: DAG.getConstant(Val: 32, DL, VT: MVT::i64));
15605 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Res));
15606 return;
15607 }
15608 case Intrinsic::riscv_vmv_x_s: {
15609 EVT VT = N->getValueType(ResNo: 0);
15610 MVT XLenVT = Subtarget.getXLenVT();
15611 if (VT.bitsLT(VT: XLenVT)) {
        // Simple case: just extract using vmv.x.s and truncate.
15613 SDValue Extract = DAG.getNode(Opcode: RISCVISD::VMV_X_S, DL,
15614 VT: Subtarget.getXLenVT(), Operand: N->getOperand(Num: 1));
15615 Results.push_back(Elt: DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT, Operand: Extract));
15616 return;
15617 }
15618
15619 assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
15620 "Unexpected custom legalization");
15621
15622 // We need to do the move in two steps.
15623 SDValue Vec = N->getOperand(Num: 1);
15624 MVT VecVT = Vec.getSimpleValueType();
15625
15626 // First extract the lower XLEN bits of the element.
15627 SDValue EltLo = DAG.getNode(Opcode: RISCVISD::VMV_X_S, DL, VT: XLenVT, Operand: Vec);
15628
15629 // To extract the upper XLEN bits of the vector element, shift the first
15630 // element right by 32 bits and re-extract the lower XLEN bits.
15631 auto [Mask, VL] = getDefaultVLOps(NumElts: 1, ContainerVT: VecVT, DL, DAG, Subtarget);
15632
15633 SDValue ThirtyTwoV =
15634 DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: VecVT, N1: DAG.getUNDEF(VT: VecVT),
15635 N2: DAG.getConstant(Val: 32, DL, VT: XLenVT), N3: VL);
15636 SDValue LShr32 = DAG.getNode(Opcode: RISCVISD::SRL_VL, DL, VT: VecVT, N1: Vec, N2: ThirtyTwoV,
15637 N3: DAG.getUNDEF(VT: VecVT), N4: Mask, N5: VL);
15638 SDValue EltHi = DAG.getNode(Opcode: RISCVISD::VMV_X_S, DL, VT: XLenVT, Operand: LShr32);
15639
15640 Results.push_back(
15641 Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: EltLo, N2: EltHi));
15642 break;
15643 }
15644 }
15645 break;
15646 }
15647 case ISD::VECREDUCE_ADD:
15648 case ISD::VECREDUCE_AND:
15649 case ISD::VECREDUCE_OR:
15650 case ISD::VECREDUCE_XOR:
15651 case ISD::VECREDUCE_SMAX:
15652 case ISD::VECREDUCE_UMAX:
15653 case ISD::VECREDUCE_SMIN:
15654 case ISD::VECREDUCE_UMIN:
15655 if (SDValue V = lowerVECREDUCE(Op: SDValue(N, 0), DAG))
15656 Results.push_back(Elt: V);
15657 break;
15658 case ISD::VP_REDUCE_ADD:
15659 case ISD::VP_REDUCE_AND:
15660 case ISD::VP_REDUCE_OR:
15661 case ISD::VP_REDUCE_XOR:
15662 case ISD::VP_REDUCE_SMAX:
15663 case ISD::VP_REDUCE_UMAX:
15664 case ISD::VP_REDUCE_SMIN:
15665 case ISD::VP_REDUCE_UMIN:
15666 if (SDValue V = lowerVPREDUCE(Op: SDValue(N, 0), DAG))
15667 Results.push_back(Elt: V);
15668 break;
15669 case ISD::GET_ROUNDING: {
15670 SDVTList VTs = DAG.getVTList(VT1: Subtarget.getXLenVT(), VT2: MVT::Other);
15671 SDValue Res = DAG.getNode(Opcode: ISD::GET_ROUNDING, DL, VTList: VTs, N: N->getOperand(Num: 0));
15672 Results.push_back(Elt: Res.getValue(R: 0));
15673 Results.push_back(Elt: Res.getValue(R: 1));
15674 break;
15675 }
15676 }
15677}
15678
15679/// Given a binary operator, return the *associative* generic ISD::VECREDUCE_OP
15680/// which corresponds to it.
15681static unsigned getVecReduceOpcode(unsigned Opc) {
15682 switch (Opc) {
15683 default:
15684 llvm_unreachable("Unhandled binary to transform reduction");
15685 case ISD::ADD:
15686 return ISD::VECREDUCE_ADD;
15687 case ISD::UMAX:
15688 return ISD::VECREDUCE_UMAX;
15689 case ISD::SMAX:
15690 return ISD::VECREDUCE_SMAX;
15691 case ISD::UMIN:
15692 return ISD::VECREDUCE_UMIN;
15693 case ISD::SMIN:
15694 return ISD::VECREDUCE_SMIN;
15695 case ISD::AND:
15696 return ISD::VECREDUCE_AND;
15697 case ISD::OR:
15698 return ISD::VECREDUCE_OR;
15699 case ISD::XOR:
15700 return ISD::VECREDUCE_XOR;
15701 case ISD::FADD:
15702 // Note: This is the associative form of the generic reduction opcode.
15703 return ISD::VECREDUCE_FADD;
15704 case ISD::FMAXNUM:
15705 return ISD::VECREDUCE_FMAX;
15706 case ISD::FMINNUM:
15707 return ISD::VECREDUCE_FMIN;
15708 }
15709}
15710
15711/// Perform two related transforms whose purpose is to incrementally recognize
15712/// an explode_vector followed by scalar reduction as a vector reduction node.
15713/// This exists to recover from a deficiency in SLP which can't handle
15714/// forests with multiple roots sharing common nodes. In some cases, one
15715/// of the trees will be vectorized, and the other will remain (unprofitably)
15716/// scalarized.
15717static SDValue
15718combineBinOpOfExtractToReduceTree(SDNode *N, SelectionDAG &DAG,
15719 const RISCVSubtarget &Subtarget) {
15720
15721  // This transform needs to run before all integer types have been legalized
15722  // to i64 (so that the vector element type matches the add type), and while
15723  // it's still safe to introduce odd-sized vector types.
15724 if (DAG.NewNodesMustHaveLegalTypes)
15725 return SDValue();
15726
15727 // Without V, this transform isn't useful. We could form the (illegal)
15728 // operations and let them be scalarized again, but there's really no point.
15729 if (!Subtarget.hasVInstructions())
15730 return SDValue();
15731
15732 const SDLoc DL(N);
15733 const EVT VT = N->getValueType(ResNo: 0);
15734 const unsigned Opc = N->getOpcode();
15735
15736 if (!VT.isInteger()) {
15737 switch (Opc) {
15738 default:
15739 return SDValue();
15740 case ISD::FADD:
15741 // For FADD, we only handle the case with reassociation allowed. We
15742 // could handle strict reduction order, but at the moment, there's no
15743 // known reason to, and the complexity isn't worth it.
15744 if (!N->getFlags().hasAllowReassociation())
15745 return SDValue();
15746 break;
15747 case ISD::FMAXNUM:
15748 case ISD::FMINNUM:
15749 break;
15750 }
15751 }
15752
15753 const unsigned ReduceOpc = getVecReduceOpcode(Opc);
15754 assert(Opc == ISD::getVecReduceBaseOpcode(ReduceOpc) &&
15755 "Inconsistent mappings");
15756 SDValue LHS = N->getOperand(Num: 0);
15757 SDValue RHS = N->getOperand(Num: 1);
15758
15759 if (!LHS.hasOneUse() || !RHS.hasOneUse())
15760 return SDValue();
15761
15762 if (RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15763 std::swap(a&: LHS, b&: RHS);
15764
15765 if (RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
15766 !isa<ConstantSDNode>(Val: RHS.getOperand(i: 1)))
15767 return SDValue();
15768
15769 uint64_t RHSIdx = cast<ConstantSDNode>(Val: RHS.getOperand(i: 1))->getLimitedValue();
15770 SDValue SrcVec = RHS.getOperand(i: 0);
15771 EVT SrcVecVT = SrcVec.getValueType();
15772 assert(SrcVecVT.getVectorElementType() == VT);
15773 if (SrcVecVT.isScalableVector())
15774 return SDValue();
15775
15776 if (SrcVecVT.getScalarSizeInBits() > Subtarget.getELen())
15777 return SDValue();
15778
15779  // Match binop (extract_vector_elt V, 0), (extract_vector_elt V, 1) to
15780  // reduce_op (extract_subvector [2 x VT] from V). This will form the
15781  // root of our reduction tree. TODO: We could extend this to any two
15782  // adjacent aligned constant indices if desired.
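  // For example (illustrative): with V : v4i32,
  //   (add (extract_vector_elt V, 0), (extract_vector_elt V, 1))
  // becomes
  //   (vecreduce_add (extract_subvector v2i32 V, 0))
  // which the second transform below can then grow to cover more of V.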
15783 if (LHS.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
15784 LHS.getOperand(i: 0) == SrcVec && isa<ConstantSDNode>(Val: LHS.getOperand(i: 1))) {
15785 uint64_t LHSIdx =
15786 cast<ConstantSDNode>(Val: LHS.getOperand(i: 1))->getLimitedValue();
15787 if (0 == std::min(a: LHSIdx, b: RHSIdx) && 1 == std::max(a: LHSIdx, b: RHSIdx)) {
15788 EVT ReduceVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT, NumElements: 2);
15789 SDValue Vec = DAG.getExtractSubvector(DL, VT: ReduceVT, Vec: SrcVec, Idx: 0);
15790 return DAG.getNode(Opcode: ReduceOpc, DL, VT, Operand: Vec, Flags: N->getFlags());
15791 }
15792 }
15793
15794  // Match (binop (reduce (extract_subvector V, 0)),
15795  //              (extract_vector_elt V, sizeof(SubVec)))
15796  // into a reduction of one more element from the original vector V.
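  // For example (illustrative): with V : v4i32,
  //   (add (vecreduce_add (extract_subvector v2i32 V, 0)),
  //        (extract_vector_elt V, 2))
  // becomes
  //   (vecreduce_add (extract_subvector v3i32 V, 0))
  // The odd-sized v3i32 is expected to be widened by a later iteration of this
  // combine or by type legalization, as noted below.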
15797 if (LHS.getOpcode() != ReduceOpc)
15798 return SDValue();
15799
15800 SDValue ReduceVec = LHS.getOperand(i: 0);
15801 if (ReduceVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
15802 ReduceVec.hasOneUse() && ReduceVec.getOperand(i: 0) == RHS.getOperand(i: 0) &&
15803 isNullConstant(V: ReduceVec.getOperand(i: 1)) &&
15804 ReduceVec.getValueType().getVectorNumElements() == RHSIdx) {
15805 // For illegal types (e.g. 3xi32), most will be combined again into a
15806 // wider (hopefully legal) type. If this is a terminal state, we are
15807 // relying on type legalization here to produce something reasonable
15808 // and this lowering quality could probably be improved. (TODO)
15809 EVT ReduceVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT, NumElements: RHSIdx + 1);
15810 SDValue Vec = DAG.getExtractSubvector(DL, VT: ReduceVT, Vec: SrcVec, Idx: 0);
15811 return DAG.getNode(Opcode: ReduceOpc, DL, VT, Operand: Vec,
15812 Flags: ReduceVec->getFlags() & N->getFlags());
15813 }
15814
15815 return SDValue();
15816}
15817
15818
15819// Try to fold (<bop> x, (reduction.<bop> vec, start)) into the reduction by
15820// reusing x as the start value.
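// For example (illustrative): if the reduction's start value is the neutral
// element (0 for add),
//   (add x, (extractelt (<add reduction of vec with start 0>), 0))
// can instead perform the reduction with x as its start value, removing the
// separate scalar add.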
15820static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
15821 const RISCVSubtarget &Subtarget) {
15822 auto BinOpToRVVReduce = [](unsigned Opc) {
15823 switch (Opc) {
15824 default:
15825 llvm_unreachable("Unhandled binary to transform reduction");
15826 case ISD::ADD:
15827 return RISCVISD::VECREDUCE_ADD_VL;
15828 case ISD::UMAX:
15829 return RISCVISD::VECREDUCE_UMAX_VL;
15830 case ISD::SMAX:
15831 return RISCVISD::VECREDUCE_SMAX_VL;
15832 case ISD::UMIN:
15833 return RISCVISD::VECREDUCE_UMIN_VL;
15834 case ISD::SMIN:
15835 return RISCVISD::VECREDUCE_SMIN_VL;
15836 case ISD::AND:
15837 return RISCVISD::VECREDUCE_AND_VL;
15838 case ISD::OR:
15839 return RISCVISD::VECREDUCE_OR_VL;
15840 case ISD::XOR:
15841 return RISCVISD::VECREDUCE_XOR_VL;
15842 case ISD::FADD:
15843 return RISCVISD::VECREDUCE_FADD_VL;
15844 case ISD::FMAXNUM:
15845 return RISCVISD::VECREDUCE_FMAX_VL;
15846 case ISD::FMINNUM:
15847 return RISCVISD::VECREDUCE_FMIN_VL;
15848 }
15849 };
15850
15851 auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) {
15852 return V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
15853 isNullConstant(V: V.getOperand(i: 1)) &&
15854 V.getOperand(i: 0).getOpcode() == BinOpToRVVReduce(Opc);
15855 };
15856
15857 unsigned Opc = N->getOpcode();
15858 unsigned ReduceIdx;
15859 if (IsReduction(N->getOperand(Num: 0), Opc))
15860 ReduceIdx = 0;
15861 else if (IsReduction(N->getOperand(Num: 1), Opc))
15862 ReduceIdx = 1;
15863 else
15864 return SDValue();
15865
15866  // Skip if FADD disallows reassociation, since this combine requires it.
15867 if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation())
15868 return SDValue();
15869
15870 SDValue Extract = N->getOperand(Num: ReduceIdx);
15871 SDValue Reduce = Extract.getOperand(i: 0);
15872 if (!Extract.hasOneUse() || !Reduce.hasOneUse())
15873 return SDValue();
15874
15875 SDValue ScalarV = Reduce.getOperand(i: 2);
15876 EVT ScalarVT = ScalarV.getValueType();
15877 if (ScalarV.getOpcode() == ISD::INSERT_SUBVECTOR &&
15878 ScalarV.getOperand(i: 0)->isUndef() &&
15879 isNullConstant(V: ScalarV.getOperand(i: 2)))
15880 ScalarV = ScalarV.getOperand(i: 1);
15881
15882 // Make sure that ScalarV is a splat with VL=1.
15883 if (ScalarV.getOpcode() != RISCVISD::VFMV_S_F_VL &&
15884 ScalarV.getOpcode() != RISCVISD::VMV_S_X_VL &&
15885 ScalarV.getOpcode() != RISCVISD::VMV_V_X_VL)
15886 return SDValue();
15887
15888 if (!isNonZeroAVL(AVL: ScalarV.getOperand(i: 2)))
15889 return SDValue();
15890
15891  // Check that the scalar operand of ScalarV is the neutral element.
15892 // TODO: Deal with value other than neutral element.
15893 if (!isNeutralConstant(Opc: N->getOpcode(), Flags: N->getFlags(), V: ScalarV.getOperand(i: 1),
15894 OperandNo: 0))
15895 return SDValue();
15896
15897 // If the AVL is zero, operand 0 will be returned. So it's not safe to fold.
15898 // FIXME: We might be able to improve this if operand 0 is undef.
15899 if (!isNonZeroAVL(AVL: Reduce.getOperand(i: 5)))
15900 return SDValue();
15901
15902 SDValue NewStart = N->getOperand(Num: 1 - ReduceIdx);
15903
15904 SDLoc DL(N);
15905 SDValue NewScalarV =
15906 lowerScalarInsert(Scalar: NewStart, VL: ScalarV.getOperand(i: 2),
15907 VT: ScalarV.getSimpleValueType(), DL, DAG, Subtarget);
15908
15909 // If we looked through an INSERT_SUBVECTOR we need to restore it.
15910 if (ScalarVT != ScalarV.getValueType())
15911 NewScalarV =
15912 DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT: ScalarVT), SubVec: NewScalarV, Idx: 0);
15913
15914 SDValue Ops[] = {Reduce.getOperand(i: 0), Reduce.getOperand(i: 1),
15915 NewScalarV, Reduce.getOperand(i: 3),
15916 Reduce.getOperand(i: 4), Reduce.getOperand(i: 5)};
15917 SDValue NewReduce =
15918 DAG.getNode(Opcode: Reduce.getOpcode(), DL, VT: Reduce.getValueType(), Ops);
15919 return DAG.getNode(Opcode: Extract.getOpcode(), DL, VT: Extract.getValueType(), N1: NewReduce,
15920 N2: Extract.getOperand(i: 1));
15921}
15922
15923// Optimize (add (shl x, c0), (shl y, c1)) ->
15924//          (SLLI (SH*ADD x, y), c0), if c1-c0 is 1, 2, or 3.
15925// or
15926//          (SLLI (QC.SHLADD x, y, c1 - c0), c0), if 4 <= (c1-c0) <= 31.
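// For example (illustrative, with Zba): c0 = 1 and c1 = 3 gives
//   (add (shl x, 1), (shl y, 3)) -> (SLLI (SH2ADD y, x), 1)
// i.e. ((y << 2) + x) << 1, where the shift-amount difference (2) selects
// SH2ADD and the smaller shift amount (1) becomes the outer SLLI.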
15927static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
15928 const RISCVSubtarget &Subtarget) {
15929 // Perform this optimization only in the zba/xandesperf/xqciac/xtheadba
15930 // extension.
15931 if (!Subtarget.hasShlAdd(ShAmt: 3))
15932 return SDValue();
15933
15934 // Skip for vector types and larger types.
15935 EVT VT = N->getValueType(ResNo: 0);
15936 if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
15937 return SDValue();
15938
15939 // The two operand nodes must be SHL and have no other use.
15940 SDValue N0 = N->getOperand(Num: 0);
15941 SDValue N1 = N->getOperand(Num: 1);
15942 if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
15943 !N0->hasOneUse() || !N1->hasOneUse())
15944 return SDValue();
15945
15946 // Check c0 and c1.
15947 auto *N0C = dyn_cast<ConstantSDNode>(Val: N0->getOperand(Num: 1));
15948 auto *N1C = dyn_cast<ConstantSDNode>(Val: N1->getOperand(Num: 1));
15949 if (!N0C || !N1C)
15950 return SDValue();
15951 int64_t C0 = N0C->getSExtValue();
15952 int64_t C1 = N1C->getSExtValue();
15953 if (C0 <= 0 || C1 <= 0)
15954 return SDValue();
15955
15956 int64_t Diff = std::abs(i: C0 - C1);
15957 if (!Subtarget.hasShlAdd(ShAmt: Diff))
15958 return SDValue();
15959
15960 // Build nodes.
15961 SDLoc DL(N);
15962 int64_t Bits = std::min(a: C0, b: C1);
15963 SDValue NS = (C0 < C1) ? N0->getOperand(Num: 0) : N1->getOperand(Num: 0);
15964 SDValue NL = (C0 > C1) ? N0->getOperand(Num: 0) : N1->getOperand(Num: 0);
15965 SDValue SHADD = DAG.getNode(Opcode: RISCVISD::SHL_ADD, DL, VT, N1: NL,
15966 N2: DAG.getTargetConstant(Val: Diff, DL, VT), N3: NS);
15967 return DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: SHADD, N2: DAG.getConstant(Val: Bits, DL, VT));
15968}
15969
15970// Check if AddI is an add of a constant whose other operand is a shift left
15971// by 1, 2, or 3; if so, reassociate it with Other into SH*ADD plus an ADDI.
15972static SDValue combineShlAddIAddImpl(SDNode *N, SDValue AddI, SDValue Other,
15973 SelectionDAG &DAG) {
15974 using namespace llvm::SDPatternMatch;
15975
15976 // Looking for a reg-reg add and not an addi.
15977 if (isa<ConstantSDNode>(Val: N->getOperand(Num: 1)))
15978 return SDValue();
15979
15980 // Based on testing it seems that performance degrades if the ADDI has
15981 // more than 2 uses.
15982 if (AddI->use_size() > 2)
15983 return SDValue();
15984
15985 APInt AddVal;
15986 SDValue SHLVal;
15987 if (!sd_match(N: AddI, P: m_Add(L: m_Value(N&: SHLVal), R: m_ConstInt(V&: AddVal))))
15988 return SDValue();
15989
15990 APInt VShift;
15991 if (!sd_match(N: SHLVal, P: m_OneUse(P: m_Shl(L: m_Value(), R: m_ConstInt(V&: VShift)))))
15992 return SDValue();
15993
15994 if (VShift.slt(RHS: 1) || VShift.sgt(RHS: 3))
15995 return SDValue();
15996
15997 SDLoc DL(N);
15998 EVT VT = N->getValueType(ResNo: 0);
15999 // The shift must be positive but the add can be signed.
16000 uint64_t ShlConst = VShift.getZExtValue();
16001 int64_t AddConst = AddVal.getSExtValue();
16002
16003 SDValue SHADD = DAG.getNode(Opcode: RISCVISD::SHL_ADD, DL, VT, N1: SHLVal->getOperand(Num: 0),
16004 N2: DAG.getTargetConstant(Val: ShlConst, DL, VT), N3: Other);
16005 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: SHADD,
16006 N2: DAG.getSignedConstant(Val: AddConst, DL, VT));
16007}
16008
16009// Optimize (add (add (shl x, c0), c1), y) ->
16010//          (ADDI (SH*ADD y, x), c1), if c0 is 1, 2, or 3.
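// For example (illustrative): with c0 = 2 and c1 = 5,
//   (add (add (shl x, 2), 5), y) -> (ADDI (SH2ADD x, y), 5)
// so the reg-reg add can fuse with the shift and the constant moves into the
// trailing ADDI.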
16011static SDValue combineShlAddIAdd(SDNode *N, SelectionDAG &DAG,
16012 const RISCVSubtarget &Subtarget) {
16013 // Perform this optimization only in the zba extension.
16014 if (!ReassocShlAddiAdd || !Subtarget.hasShlAdd(ShAmt: 3))
16015 return SDValue();
16016
16017 // Skip for vector types and larger types.
16018 EVT VT = N->getValueType(ResNo: 0);
16019 if (VT != Subtarget.getXLenVT())
16020 return SDValue();
16021
16022 SDValue AddI = N->getOperand(Num: 0);
16023 SDValue Other = N->getOperand(Num: 1);
16024 if (SDValue V = combineShlAddIAddImpl(N, AddI, Other, DAG))
16025 return V;
16026 if (SDValue V = combineShlAddIAddImpl(N, AddI: Other, Other: AddI, DAG))
16027 return V;
16028 return SDValue();
16029}
16030
16031// Combine a constant select operand into its use:
16032//
16033// (and (select cond, -1, c), x)
16034// -> (select cond, x, (and x, c)) [AllOnes=1]
16035// (or (select cond, 0, c), x)
16036// -> (select cond, x, (or x, c)) [AllOnes=0]
16037// (xor (select cond, 0, c), x)
16038// -> (select cond, x, (xor x, c)) [AllOnes=0]
16039// (add (select cond, 0, c), x)
16040// -> (select cond, x, (add x, c)) [AllOnes=0]
16041// (sub x, (select cond, 0, c))
16042// -> (select cond, x, (sub x, c)) [AllOnes=0]
16043static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
16044 SelectionDAG &DAG, bool AllOnes,
16045 const RISCVSubtarget &Subtarget) {
16046 EVT VT = N->getValueType(ResNo: 0);
16047
16048 // Skip vectors.
16049 if (VT.isVector())
16050 return SDValue();
16051
16052 if (!Subtarget.hasConditionalMoveFusion()) {
16053 // (select cond, x, (and x, c)) has custom lowering with Zicond.
16054 if (!Subtarget.hasCZEROLike() || N->getOpcode() != ISD::AND)
16055 return SDValue();
16056
16057    // Maybe harmful when the condition has multiple uses.
16058 if (Slct.getOpcode() == ISD::SELECT && !Slct.getOperand(i: 0).hasOneUse())
16059 return SDValue();
16060
16061 // Maybe harmful when VT is wider than XLen.
16062 if (VT.getSizeInBits() > Subtarget.getXLen())
16063 return SDValue();
16064 }
16065
16066 if ((Slct.getOpcode() != ISD::SELECT &&
16067 Slct.getOpcode() != RISCVISD::SELECT_CC) ||
16068 !Slct.hasOneUse())
16069 return SDValue();
16070
16071 auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
16072 return AllOnes ? isAllOnesConstant(V: N) : isNullConstant(V: N);
16073 };
16074
16075 bool SwapSelectOps;
16076 unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
16077 SDValue TrueVal = Slct.getOperand(i: 1 + OpOffset);
16078 SDValue FalseVal = Slct.getOperand(i: 2 + OpOffset);
16079 SDValue NonConstantVal;
16080 if (isZeroOrAllOnes(TrueVal, AllOnes)) {
16081 SwapSelectOps = false;
16082 NonConstantVal = FalseVal;
16083 } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
16084 SwapSelectOps = true;
16085 NonConstantVal = TrueVal;
16086 } else
16087 return SDValue();
16088
16089  // Slct is now known to be the desired identity constant when CC is true.
16090 TrueVal = OtherOp;
16091 FalseVal = DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT, N1: OtherOp, N2: NonConstantVal);
16092 // Unless SwapSelectOps says the condition should be false.
16093 if (SwapSelectOps)
16094 std::swap(a&: TrueVal, b&: FalseVal);
16095
16096 if (Slct.getOpcode() == RISCVISD::SELECT_CC)
16097 return DAG.getNode(Opcode: RISCVISD::SELECT_CC, DL: SDLoc(N), VT,
16098 Ops: {Slct.getOperand(i: 0), Slct.getOperand(i: 1),
16099 Slct.getOperand(i: 2), TrueVal, FalseVal});
16100
16101 return DAG.getNode(Opcode: ISD::SELECT, DL: SDLoc(N), VT,
16102 Ops: {Slct.getOperand(i: 0), TrueVal, FalseVal});
16103}
16104
16105// Attempt combineSelectAndUse on each operand of a commutative operator N.
16106static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
16107 bool AllOnes,
16108 const RISCVSubtarget &Subtarget) {
16109 SDValue N0 = N->getOperand(Num: 0);
16110 SDValue N1 = N->getOperand(Num: 1);
16111 if (SDValue Result = combineSelectAndUse(N, Slct: N0, OtherOp: N1, DAG, AllOnes, Subtarget))
16112 return Result;
16113 if (SDValue Result = combineSelectAndUse(N, Slct: N1, OtherOp: N0, DAG, AllOnes, Subtarget))
16114 return Result;
16115 return SDValue();
16116}
16117
16118// Transform (add (mul x, c0), c1) ->
16119// (add (mul (add x, c1/c0), c0), c1%c0).
16120// if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
16121// that should be excluded is when c0*(c1/c0) is simm12, which will lead
16122// to an infinite loop in DAGCombine if transformed.
16123// Or transform (add (mul x, c0), c1) ->
16124// (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
16125// if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
16126// case that should be excluded is when c0*(c1/c0+1) is simm12, which will
16127// lead to an infinite loop in DAGCombine if transformed.
16128// Or transform (add (mul x, c0), c1) ->
16129// (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
16130// if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
16131// case that should be excluded is when c0*(c1/c0-1) is simm12, which will
16132// lead to an infinite loop in DAGCombine if transformed.
16133// Or transform (add (mul x, c0), c1) ->
16134// (mul (add x, c1/c0), c0).
16135// if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
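// For example (illustrative): c0 = 100 and c1 = 4097 (not simm12) gives
//   (add (mul x, 100), 4097) -> (add (mul (add x, 40), 100), 97)
// since c1/c0 = 40 and c1%c0 = 97 are both simm12, while c0*(c1/c0) = 4000 is
// not, so no infinite loop with the reverse combine is possible.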
16136static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
16137 const RISCVSubtarget &Subtarget) {
16138 // Skip for vector types and larger types.
16139 EVT VT = N->getValueType(ResNo: 0);
16140 if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
16141 return SDValue();
16142 // The first operand node must be a MUL and has no other use.
16143 SDValue N0 = N->getOperand(Num: 0);
16144 if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
16145 return SDValue();
16146 // Check if c0 and c1 match above conditions.
16147 auto *N0C = dyn_cast<ConstantSDNode>(Val: N0->getOperand(Num: 1));
16148 auto *N1C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1));
16149 if (!N0C || !N1C)
16150 return SDValue();
16151 // If N0C has multiple uses it's possible one of the cases in
16152 // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
16153 // in an infinite loop.
16154 if (!N0C->hasOneUse())
16155 return SDValue();
16156 int64_t C0 = N0C->getSExtValue();
16157 int64_t C1 = N1C->getSExtValue();
16158 int64_t CA, CB;
16159 if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(x: C1))
16160 return SDValue();
16161 // Search for proper CA (non-zero) and CB that both are simm12.
16162 if ((C1 / C0) != 0 && isInt<12>(x: C1 / C0) && isInt<12>(x: C1 % C0) &&
16163 !isInt<12>(x: C0 * (C1 / C0))) {
16164 CA = C1 / C0;
16165 CB = C1 % C0;
16166 } else if ((C1 / C0 + 1) != 0 && isInt<12>(x: C1 / C0 + 1) &&
16167 isInt<12>(x: C1 % C0 - C0) && !isInt<12>(x: C0 * (C1 / C0 + 1))) {
16168 CA = C1 / C0 + 1;
16169 CB = C1 % C0 - C0;
16170 } else if ((C1 / C0 - 1) != 0 && isInt<12>(x: C1 / C0 - 1) &&
16171 isInt<12>(x: C1 % C0 + C0) && !isInt<12>(x: C0 * (C1 / C0 - 1))) {
16172 CA = C1 / C0 - 1;
16173 CB = C1 % C0 + C0;
16174 } else
16175 return SDValue();
16176 // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
16177 SDLoc DL(N);
16178 SDValue New0 = DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: N0->getOperand(Num: 0),
16179 N2: DAG.getSignedConstant(Val: CA, DL, VT));
16180 SDValue New1 =
16181 DAG.getNode(Opcode: ISD::MUL, DL, VT, N1: New0, N2: DAG.getSignedConstant(Val: C0, DL, VT));
16182 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: New1, N2: DAG.getSignedConstant(Val: CB, DL, VT));
16183}
16184
16185// add (zext, zext) -> zext (add (zext, zext))
16186// sub (zext, zext) -> sext (sub (zext, zext))
16187// mul (zext, zext) -> zext (mul (zext, zext))
16188// sdiv (zext, zext) -> zext (sdiv (zext, zext))
16189// udiv (zext, zext) -> zext (udiv (zext, zext))
16190// srem (zext, zext) -> zext (srem (zext, zext))
16191// urem (zext, zext) -> zext (urem (zext, zext))
16192//
16193// where the sum of the extend widths match, and the range of the bin op
16194// fits inside the width of the narrower bin op. (For profitability on rvv, we
16195// use a power of two for both the inner and outer extend.)
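// For example (illustrative):
//   (add (zext v4i8 a to v4i32), (zext v4i8 b to v4i32))
// -> (zext (add (zext a to v4i16), (zext b to v4i16)) to v4i32)
// so the add is done at half the element width before the final extend.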
16196static SDValue combineBinOpOfZExt(SDNode *N, SelectionDAG &DAG) {
16197
16198 EVT VT = N->getValueType(ResNo: 0);
16199 if (!VT.isVector() || !DAG.getTargetLoweringInfo().isTypeLegal(VT))
16200 return SDValue();
16201
16202 SDValue N0 = N->getOperand(Num: 0);
16203 SDValue N1 = N->getOperand(Num: 1);
16204 if (N0.getOpcode() != ISD::ZERO_EXTEND || N1.getOpcode() != ISD::ZERO_EXTEND)
16205 return SDValue();
16206 if (!N0.hasOneUse() || !N1.hasOneUse())
16207 return SDValue();
16208
16209 SDValue Src0 = N0.getOperand(i: 0);
16210 SDValue Src1 = N1.getOperand(i: 0);
16211 EVT SrcVT = Src0.getValueType();
16212 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT: SrcVT) ||
16213 SrcVT != Src1.getValueType() || SrcVT.getScalarSizeInBits() < 8 ||
16214 SrcVT.getScalarSizeInBits() >= VT.getScalarSizeInBits() / 2)
16215 return SDValue();
16216
16217 LLVMContext &C = *DAG.getContext();
16218 EVT ElemVT = VT.getVectorElementType().getHalfSizedIntegerVT(Context&: C);
16219 EVT NarrowVT = EVT::getVectorVT(Context&: C, VT: ElemVT, EC: VT.getVectorElementCount());
16220
16221 Src0 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: SDLoc(Src0), VT: NarrowVT, Operand: Src0);
16222 Src1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: SDLoc(Src1), VT: NarrowVT, Operand: Src1);
16223
16224 // Src0 and Src1 are zero extended, so they're always positive if signed.
16225 //
16226 // sub can produce a negative from two positive operands, so it needs sign
16227 // extended. Other nodes produce a positive from two positive operands, so
16228 // zero extend instead.
16229 unsigned OuterExtend =
16230 N->getOpcode() == ISD::SUB ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
16231
16232 return DAG.getNode(
16233 Opcode: OuterExtend, DL: SDLoc(N), VT,
16234 Operand: DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT: NarrowVT, N1: Src0, N2: Src1));
16235}
16236
16237// Try to turn (add (xor bool, 1), -1) into (neg bool).
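// This works because for a 0/1 value b, (xor b, 1) == 1 - b, and therefore
// (add (xor b, 1), -1) == (1 - b) - 1 == -b.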
16238static SDValue combineAddOfBooleanXor(SDNode *N, SelectionDAG &DAG) {
16239 SDValue N0 = N->getOperand(Num: 0);
16240 SDValue N1 = N->getOperand(Num: 1);
16241 EVT VT = N->getValueType(ResNo: 0);
16242 SDLoc DL(N);
16243
16244 // RHS should be -1.
16245 if (!isAllOnesConstant(V: N1))
16246 return SDValue();
16247
16248 // Look for (xor X, 1).
16249 if (N0.getOpcode() != ISD::XOR || !isOneConstant(V: N0.getOperand(i: 1)))
16250 return SDValue();
16251
16252 // First xor input should be 0 or 1.
16253 APInt Mask = APInt::getBitsSetFrom(numBits: VT.getSizeInBits(), loBit: 1);
16254 if (!DAG.MaskedValueIsZero(Op: N0.getOperand(i: 0), Mask))
16255 return SDValue();
16256
16257 // Emit a negate of the setcc.
16258 return DAG.getNegative(Val: N0.getOperand(i: 0), DL, VT);
16259}
16260
16261static SDValue performADDCombine(SDNode *N,
16262 TargetLowering::DAGCombinerInfo &DCI,
16263 const RISCVSubtarget &Subtarget) {
16264 SelectionDAG &DAG = DCI.DAG;
16265 if (SDValue V = combineAddOfBooleanXor(N, DAG))
16266 return V;
16267 if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
16268 return V;
16269 if (!DCI.isBeforeLegalize() && !DCI.isCalledByLegalizer()) {
16270 if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
16271 return V;
16272 if (SDValue V = combineShlAddIAdd(N, DAG, Subtarget))
16273 return V;
16274 }
16275 if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
16276 return V;
16277 if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
16278 return V;
16279 if (SDValue V = combineBinOpOfZExt(N, DAG))
16280 return V;
16281
16282 // fold (add (select lhs, rhs, cc, 0, y), x) ->
16283 // (select lhs, rhs, cc, x, (add x, y))
16284 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
16285}
16286
16287// Try to turn a sub boolean RHS and constant LHS into an addi.
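// For example (illustrative):
//   (sub 4, (setcc x, y, eq)) -> (add (setcc x, y, ne), 3)
// since inverting the condition turns 4 - b into (1 - b) + 3.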
16288static SDValue combineSubOfBoolean(SDNode *N, SelectionDAG &DAG) {
16289 SDValue N0 = N->getOperand(Num: 0);
16290 SDValue N1 = N->getOperand(Num: 1);
16291 EVT VT = N->getValueType(ResNo: 0);
16292 SDLoc DL(N);
16293
16294 // Require a constant LHS.
16295 auto *N0C = dyn_cast<ConstantSDNode>(Val&: N0);
16296 if (!N0C)
16297 return SDValue();
16298
16299 // All our optimizations involve subtracting 1 from the immediate and forming
16300 // an ADDI. Make sure the new immediate is valid for an ADDI.
16301 APInt ImmValMinus1 = N0C->getAPIntValue() - 1;
16302 if (!ImmValMinus1.isSignedIntN(N: 12))
16303 return SDValue();
16304
16305 SDValue NewLHS;
16306 if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse()) {
16307 // (sub constant, (setcc x, y, eq/neq)) ->
16308 // (add (setcc x, y, neq/eq), constant - 1)
16309 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val: N1.getOperand(i: 2))->get();
16310 EVT SetCCOpVT = N1.getOperand(i: 0).getValueType();
16311 if (!isIntEqualitySetCC(Code: CCVal) || !SetCCOpVT.isInteger())
16312 return SDValue();
16313 CCVal = ISD::getSetCCInverse(Operation: CCVal, Type: SetCCOpVT);
16314 NewLHS =
16315 DAG.getSetCC(DL: SDLoc(N1), VT, LHS: N1.getOperand(i: 0), RHS: N1.getOperand(i: 1), Cond: CCVal);
16316 } else if (N1.getOpcode() == ISD::XOR && isOneConstant(V: N1.getOperand(i: 1)) &&
16317 N1.getOperand(i: 0).getOpcode() == ISD::SETCC) {
16318 // (sub C, (xor (setcc), 1)) -> (add (setcc), C-1).
16319 // Since setcc returns a bool the xor is equivalent to 1-setcc.
16320 NewLHS = N1.getOperand(i: 0);
16321 } else
16322 return SDValue();
16323
16324 SDValue NewRHS = DAG.getConstant(Val: ImmValMinus1, DL, VT);
16325 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: NewLHS, N2: NewRHS);
16326}
16327
16328// Looks for (sub (shl X, 8-Y), (shr X, Y)) where the Y-th bit in each byte is
16329// potentially set. It is fine for Y to be 0, meaning that (sub (shl X, 8), X)
16330// is also valid. Replace with (orc.b X). For example, 0b0000_1000_0000_1000 is
16331// valid with Y=3, while 0b0000_1000_0000_0100 is not.
16332static SDValue combineSubShiftToOrcB(SDNode *N, SelectionDAG &DAG,
16333 const RISCVSubtarget &Subtarget) {
16334 if (!Subtarget.hasStdExtZbb())
16335 return SDValue();
16336
16337 EVT VT = N->getValueType(ResNo: 0);
16338
16339 if (VT != Subtarget.getXLenVT() && VT != MVT::i32 && VT != MVT::i16)
16340 return SDValue();
16341
16342 SDValue N0 = N->getOperand(Num: 0);
16343 SDValue N1 = N->getOperand(Num: 1);
16344
16345 if (N0->getOpcode() != ISD::SHL)
16346 return SDValue();
16347
16348 auto *ShAmtCLeft = dyn_cast<ConstantSDNode>(Val: N0.getOperand(i: 1));
16349 if (!ShAmtCLeft)
16350 return SDValue();
16351 unsigned ShiftedAmount = 8 - ShAmtCLeft->getZExtValue();
16352
16353 if (ShiftedAmount >= 8)
16354 return SDValue();
16355
16356 SDValue LeftShiftOperand = N0->getOperand(Num: 0);
16357 SDValue RightShiftOperand = N1;
16358
16359 if (ShiftedAmount != 0) { // Right operand must be a right shift.
16360 if (N1->getOpcode() != ISD::SRL)
16361 return SDValue();
16362 auto *ShAmtCRight = dyn_cast<ConstantSDNode>(Val: N1.getOperand(i: 1));
16363 if (!ShAmtCRight || ShAmtCRight->getZExtValue() != ShiftedAmount)
16364 return SDValue();
16365 RightShiftOperand = N1.getOperand(i: 0);
16366 }
16367
16368 // At least one shift should have a single use.
16369 if (!N0.hasOneUse() && (ShiftedAmount == 0 || !N1.hasOneUse()))
16370 return SDValue();
16371
16372 if (LeftShiftOperand != RightShiftOperand)
16373 return SDValue();
16374
16375 APInt Mask = APInt::getSplat(NewLen: VT.getSizeInBits(), V: APInt(8, 0x1));
16376 Mask <<= ShiftedAmount;
16377 // Check that X has indeed the right shape (only the Y-th bit can be set in
16378 // every byte).
16379 if (!DAG.MaskedValueIsZero(Op: LeftShiftOperand, Mask: ~Mask))
16380 return SDValue();
16381
16382 return DAG.getNode(Opcode: RISCVISD::ORC_B, DL: SDLoc(N), VT, Operand: LeftShiftOperand);
16383}
16384
16385static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
16386 const RISCVSubtarget &Subtarget) {
16387 if (SDValue V = combineSubOfBoolean(N, DAG))
16388 return V;
16389
16390 EVT VT = N->getValueType(ResNo: 0);
16391 SDValue N0 = N->getOperand(Num: 0);
16392 SDValue N1 = N->getOperand(Num: 1);
16393 // fold (sub 0, (setcc x, 0, setlt)) -> (sra x, xlen - 1)
16394 if (isNullConstant(V: N0) && N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
16395 isNullConstant(V: N1.getOperand(i: 1)) &&
16396 N1.getValueType() == N1.getOperand(i: 0).getValueType()) {
16397 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val: N1.getOperand(i: 2))->get();
16398 if (CCVal == ISD::SETLT) {
16399 SDLoc DL(N);
16400 unsigned ShAmt = N0.getValueSizeInBits() - 1;
16401 return DAG.getNode(Opcode: ISD::SRA, DL, VT, N1: N1.getOperand(i: 0),
16402 N2: DAG.getConstant(Val: ShAmt, DL, VT));
16403 }
16404 }
16405
16406 if (SDValue V = combineBinOpOfZExt(N, DAG))
16407 return V;
16408 if (SDValue V = combineSubShiftToOrcB(N, DAG, Subtarget))
16409 return V;
16410
16411 // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
16412 // (select lhs, rhs, cc, x, (sub x, y))
16413 return combineSelectAndUse(N, Slct: N1, OtherOp: N0, DAG, /*AllOnes*/ false, Subtarget);
16414}
16415
16416// Apply DeMorgan's law to (and/or (xor X, 1), (xor Y, 1)) if X and Y are 0/1.
16417// Legalizing setcc can introduce xors like this. Doing this transform reduces
16418// the number of xors and may allow the xor to fold into a branch condition.
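// For example (illustrative): if a and b are known to be 0/1,
//   (and (xor a, 1), (xor b, 1)) -> (xor (or a, b), 1)
//   (or  (xor a, 1), (xor b, 1)) -> (xor (and a, b), 1)
// leaving a single xor with 1.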
16419static SDValue combineDeMorganOfBoolean(SDNode *N, SelectionDAG &DAG) {
16420 SDValue N0 = N->getOperand(Num: 0);
16421 SDValue N1 = N->getOperand(Num: 1);
16422 bool IsAnd = N->getOpcode() == ISD::AND;
16423
16424 if (N0.getOpcode() != ISD::XOR || N1.getOpcode() != ISD::XOR)
16425 return SDValue();
16426
16427 if (!N0.hasOneUse() || !N1.hasOneUse())
16428 return SDValue();
16429
16430 SDValue N01 = N0.getOperand(i: 1);
16431 SDValue N11 = N1.getOperand(i: 1);
16432
16433 // For AND, SimplifyDemandedBits may have turned one of the (xor X, 1) into
16434 // (xor X, -1) based on the upper bits of the other operand being 0. If the
16435 // operation is And, allow one of the Xors to use -1.
16436 if (isOneConstant(V: N01)) {
16437 if (!isOneConstant(V: N11) && !(IsAnd && isAllOnesConstant(V: N11)))
16438 return SDValue();
16439 } else if (isOneConstant(V: N11)) {
16440 // N01 and N11 being 1 was already handled. Handle N11==1 and N01==-1.
16441 if (!(IsAnd && isAllOnesConstant(V: N01)))
16442 return SDValue();
16443 } else
16444 return SDValue();
16445
16446 EVT VT = N->getValueType(ResNo: 0);
16447
16448 SDValue N00 = N0.getOperand(i: 0);
16449 SDValue N10 = N1.getOperand(i: 0);
16450
16451 // The LHS of the xors needs to be 0/1.
16452 APInt Mask = APInt::getBitsSetFrom(numBits: VT.getSizeInBits(), loBit: 1);
16453 if (!DAG.MaskedValueIsZero(Op: N00, Mask) || !DAG.MaskedValueIsZero(Op: N10, Mask))
16454 return SDValue();
16455
16456 // Invert the opcode and insert a new xor.
16457 SDLoc DL(N);
16458 unsigned Opc = IsAnd ? ISD::OR : ISD::AND;
16459 SDValue Logic = DAG.getNode(Opcode: Opc, DL, VT, N1: N00, N2: N10);
16460 return DAG.getNode(Opcode: ISD::XOR, DL, VT, N1: Logic, N2: DAG.getConstant(Val: 1, DL, VT));
16461}
16462
16463// Fold (vXi8 (trunc (vselect (setltu, X, 256), X, (sext (setgt X, 0))))) to
16464// (vXi8 (trunc (smin (smax X, 0), 255))). This represents saturating a signed
16465// value to an unsigned value. This will be lowered to vmax and a series of
16466// vnclipu instructions later. This can be extended to truncated types other
16467// than i8 by replacing 256 and 255 with the equivalent constants for the
16468// type.
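// For instance (illustrative), a vXi16 result would match the same pattern
// with 65536 and 65535, since the code below uses (1 << ScalarBits) and
// (1 << ScalarBits) - 1 for the destination element width.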
16469static SDValue combineTruncSelectToSMaxUSat(SDNode *N, SelectionDAG &DAG) {
16470 EVT VT = N->getValueType(ResNo: 0);
16471 SDValue N0 = N->getOperand(Num: 0);
16472 EVT SrcVT = N0.getValueType();
16473
16474 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16475 if (!VT.isVector() || !TLI.isTypeLegal(VT) || !TLI.isTypeLegal(VT: SrcVT))
16476 return SDValue();
16477
16478 if (N0.getOpcode() != ISD::VSELECT || !N0.hasOneUse())
16479 return SDValue();
16480
16481 SDValue Cond = N0.getOperand(i: 0);
16482 SDValue True = N0.getOperand(i: 1);
16483 SDValue False = N0.getOperand(i: 2);
16484
16485 if (Cond.getOpcode() != ISD::SETCC)
16486 return SDValue();
16487
16488 // FIXME: Support the version of this pattern with the select operands
16489 // swapped.
16490 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val: Cond.getOperand(i: 2))->get();
16491 if (CCVal != ISD::SETULT)
16492 return SDValue();
16493
16494 SDValue CondLHS = Cond.getOperand(i: 0);
16495 SDValue CondRHS = Cond.getOperand(i: 1);
16496
16497 if (CondLHS != True)
16498 return SDValue();
16499
16500 unsigned ScalarBits = VT.getScalarSizeInBits();
16501
16502 // FIXME: Support other constants.
16503 ConstantSDNode *CondRHSC = isConstOrConstSplat(N: CondRHS);
16504 if (!CondRHSC || CondRHSC->getAPIntValue() != (1ULL << ScalarBits))
16505 return SDValue();
16506
16507 if (False.getOpcode() != ISD::SIGN_EXTEND)
16508 return SDValue();
16509
16510 False = False.getOperand(i: 0);
16511
16512 if (False.getOpcode() != ISD::SETCC || False.getOperand(i: 0) != True)
16513 return SDValue();
16514
16515 ConstantSDNode *FalseRHSC = isConstOrConstSplat(N: False.getOperand(i: 1));
16516 if (!FalseRHSC || !FalseRHSC->isZero())
16517 return SDValue();
16518
16519 ISD::CondCode CCVal2 = cast<CondCodeSDNode>(Val: False.getOperand(i: 2))->get();
16520 if (CCVal2 != ISD::SETGT)
16521 return SDValue();
16522
16523 // Emit the signed to unsigned saturation pattern.
16524 SDLoc DL(N);
16525 SDValue Max =
16526 DAG.getNode(Opcode: ISD::SMAX, DL, VT: SrcVT, N1: True, N2: DAG.getConstant(Val: 0, DL, VT: SrcVT));
16527 SDValue Min =
16528 DAG.getNode(Opcode: ISD::SMIN, DL, VT: SrcVT, N1: Max,
16529 N2: DAG.getConstant(Val: (1ULL << ScalarBits) - 1, DL, VT: SrcVT));
16530 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT, Operand: Min);
16531}
16532
16533// Handle P extension truncate patterns:
16534// PASUB/PASUBU: (trunc (srl (sub ([s|z]ext a), ([s|z]ext b)), 1))
16535// PMULHSU: (trunc (srl (mul (sext a), (zext b)), EltBits))
16536// PMULHR*: (trunc (srl (add (mul (sext a), (zext b)), round_const), EltBits))
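// For example (illustrative): with a, b : v4i16,
//   (trunc (srl (sub (sext a to v4i32), (sext b to v4i32)), splat 1))
// matches the PASUB form, and the same pattern with zero extends matches
// PASUBU.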
16537static SDValue combinePExtTruncate(SDNode *N, SelectionDAG &DAG,
16538 const RISCVSubtarget &Subtarget) {
16539 SDValue N0 = N->getOperand(Num: 0);
16540 EVT VT = N->getValueType(ResNo: 0);
16541 if (N0.getOpcode() != ISD::SRL)
16542 return SDValue();
16543
16544 MVT VecVT = VT.getSimpleVT();
16545 if (VecVT != MVT::v4i16 && VecVT != MVT::v2i16 && VecVT != MVT::v8i8 &&
16546 VecVT != MVT::v4i8 && VecVT != MVT::v2i32)
16547 return SDValue();
16548
16549  // Check if the shift amount is a splat constant.
16550 SDValue ShAmt = N0.getOperand(i: 1);
16551 if (ShAmt.getOpcode() != ISD::BUILD_VECTOR)
16552 return SDValue();
16553
16554 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Val: ShAmt.getNode());
16555 if (!BV)
16556 return SDValue();
16557 SDValue Splat = BV->getSplatValue();
16558 if (!Splat)
16559 return SDValue();
16560 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Splat);
16561 if (!C)
16562 return SDValue();
16563
16564 SDValue Op = N0.getOperand(i: 0);
16565 unsigned ShAmtVal = C->getZExtValue();
16566 unsigned EltBits = VecVT.getScalarSizeInBits();
16567
16568 // Check for rounding pattern: (add (mul ...), round_const)
16569 bool IsRounding = false;
16570 if (Op.getOpcode() == ISD::ADD && (EltBits == 16 || EltBits == 32)) {
16571 SDValue AddRHS = Op.getOperand(i: 1);
16572 if (auto *RndBV = dyn_cast<BuildVectorSDNode>(Val: AddRHS.getNode())) {
16573 if (auto *RndC =
16574 dyn_cast_or_null<ConstantSDNode>(Val: RndBV->getSplatValue())) {
16575 uint64_t ExpectedRnd = 1ULL << (EltBits - 1);
16576 if (RndC->getZExtValue() == ExpectedRnd &&
16577 Op.getOperand(i: 0).getOpcode() == ISD::MUL) {
16578 Op = Op.getOperand(i: 0);
16579 IsRounding = true;
16580 }
16581 }
16582 }
16583 }
16584
16585 SDValue LHS = Op.getOperand(i: 0);
16586 SDValue RHS = Op.getOperand(i: 1);
16587
16588 bool LHSIsSExt = LHS.getOpcode() == ISD::SIGN_EXTEND;
16589 bool LHSIsZExt = LHS.getOpcode() == ISD::ZERO_EXTEND;
16590 bool RHSIsSExt = RHS.getOpcode() == ISD::SIGN_EXTEND;
16591 bool RHSIsZExt = RHS.getOpcode() == ISD::ZERO_EXTEND;
16592
16593 if (!(LHSIsSExt || LHSIsZExt) || !(RHSIsSExt || RHSIsZExt))
16594 return SDValue();
16595
16596 SDValue A = LHS.getOperand(i: 0);
16597 SDValue B = RHS.getOperand(i: 0);
16598
16599 if (A.getValueType() != VT || B.getValueType() != VT)
16600 return SDValue();
16601
16602 unsigned Opc;
16603 switch (Op.getOpcode()) {
16604 default:
16605 return SDValue();
16606 case ISD::SUB:
16607 // PASUB/PASUBU: shift amount must be 1
16608 if (ShAmtVal != 1)
16609 return SDValue();
16610 if (LHSIsSExt && RHSIsSExt)
16611 Opc = RISCVISD::PASUB;
16612 else if (LHSIsZExt && RHSIsZExt)
16613 Opc = RISCVISD::PASUBU;
16614 else
16615 return SDValue();
16616 break;
16617 case ISD::MUL:
16618 // PMULH*/PMULHR*: shift amount must be element size, only for i16/i32
16619 if (ShAmtVal != EltBits || (EltBits != 16 && EltBits != 32))
16620 return SDValue();
16621 if (IsRounding) {
16622 if (LHSIsSExt && RHSIsSExt) {
16623 Opc = RISCVISD::PMULHR;
16624 } else if (LHSIsZExt && RHSIsZExt) {
16625 Opc = RISCVISD::PMULHRU;
16626 } else if ((LHSIsSExt && RHSIsZExt) || (LHSIsZExt && RHSIsSExt)) {
16627 Opc = RISCVISD::PMULHRSU;
16628 // commuted case
16629 if (LHSIsZExt && RHSIsSExt)
16630 std::swap(a&: A, b&: B);
16631 } else {
16632 return SDValue();
16633 }
16634 } else {
16635 if ((LHSIsSExt && RHSIsZExt) || (LHSIsZExt && RHSIsSExt)) {
16636 Opc = RISCVISD::PMULHSU;
16637 // commuted case
16638 if (LHSIsZExt && RHSIsSExt)
16639 std::swap(a&: A, b&: B);
16640 } else
16641 return SDValue();
16642 }
16643 break;
16644 }
16645
16646 return DAG.getNode(Opcode: Opc, DL: SDLoc(N), VT, Ops: {A, B});
16647}
16648
16649static SDValue performTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
16650 const RISCVSubtarget &Subtarget) {
16651 SDValue N0 = N->getOperand(Num: 0);
16652 EVT VT = N->getValueType(ResNo: 0);
16653
16654 if (VT.isFixedLengthVector() && Subtarget.enablePExtSIMDCodeGen())
16655 return combinePExtTruncate(N, DAG, Subtarget);
16656
16657 // Pre-promote (i1 (truncate (srl X, Y))) on RV64 with Zbs without zero
16658 // extending X. This is safe since we only need the LSB after the shift and
16659 // shift amounts larger than 31 would produce poison. If we wait until
16660 // type legalization, we'll create RISCVISD::SRLW and we can't recover it
16661 // to use a BEXT instruction.
16662 if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() && VT == MVT::i1 &&
16663 N0.getValueType() == MVT::i32 && N0.getOpcode() == ISD::SRL &&
16664 !isa<ConstantSDNode>(Val: N0.getOperand(i: 1)) && N0.hasOneUse()) {
16665 SDLoc DL(N0);
16666 SDValue Op0 = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N0.getOperand(i: 0));
16667 SDValue Op1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: MVT::i64, Operand: N0.getOperand(i: 1));
16668 SDValue Srl = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i64, N1: Op0, N2: Op1);
16669 return DAG.getNode(Opcode: ISD::TRUNCATE, DL: SDLoc(N), VT, Operand: Srl);
16670 }
16671
16672 return combineTruncSelectToSMaxUSat(N, DAG);
16673}
16674
16675// InstCombinerImpl::transformZExtICmp will narrow a zext of an icmp with a
16676// truncation. But RVV has no single instruction that truncates by more than a
16677// factor of two, so wider truncates expand into a chain of vnsrl.wi.
16678//
16679// E.g. trunc <vscale x 1 x i64> %x to <vscale x 1 x i8> will generate:
16680//
16681// vsetvli a0, zero, e32, m2, ta, ma
16682// vnsrl.wi v12, v8, 0
16683// vsetvli zero, zero, e16, m1, ta, ma
16684// vnsrl.wi v8, v12, 0
16685// vsetvli zero, zero, e8, mf2, ta, ma
16686// vnsrl.wi v8, v8, 0
16687//
16688// So reverse the combine so we generate a vmseq/vmsne again:
16689//
16690// and (lshr (trunc X), ShAmt), 1
16691// -->
16692// zext (icmp ne (and X, (1 << ShAmt)), 0)
16693//
16694// and (lshr (not (trunc X)), ShAmt), 1
16695// -->
16696// zext (icmp eq (and X, (1 << ShAmt)), 0)
16697static SDValue reverseZExtICmpCombine(SDNode *N, SelectionDAG &DAG,
16698 const RISCVSubtarget &Subtarget) {
16699 using namespace SDPatternMatch;
16700 SDLoc DL(N);
16701
16702 if (!Subtarget.hasVInstructions())
16703 return SDValue();
16704
16705 EVT VT = N->getValueType(ResNo: 0);
16706 if (!VT.isVector())
16707 return SDValue();
16708
16709 APInt ShAmt;
16710 SDValue Inner;
16711 if (!sd_match(N, P: m_And(L: m_OneUse(P: m_Srl(L: m_Value(N&: Inner), R: m_ConstInt(V&: ShAmt))),
16712 R: m_One())))
16713 return SDValue();
16714
16715 SDValue X;
16716 bool IsNot;
16717 if (sd_match(N: Inner, P: m_Not(V: m_Trunc(Op: m_Value(N&: X)))))
16718 IsNot = true;
16719 else if (sd_match(N: Inner, P: m_Trunc(Op: m_Value(N&: X))))
16720 IsNot = false;
16721 else
16722 return SDValue();
16723
16724 EVT WideVT = X.getValueType();
16725 if (VT.getScalarSizeInBits() >= WideVT.getScalarSizeInBits() / 2)
16726 return SDValue();
16727
16728 SDValue Res =
16729 DAG.getNode(Opcode: ISD::AND, DL, VT: WideVT, N1: X,
16730 N2: DAG.getConstant(Val: 1ULL << ShAmt.getZExtValue(), DL, VT: WideVT));
16731 Res = DAG.getSetCC(DL,
16732 VT: EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i1,
16733 EC: WideVT.getVectorElementCount()),
16734 LHS: Res, RHS: DAG.getConstant(Val: 0, DL, VT: WideVT),
16735 Cond: IsNot ? ISD::SETEQ : ISD::SETNE);
16736 return DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT, Operand: Res);
16737}
16738
16739// (and (i1) f, (setcc c, 0, ne)) -> (czero.nez f, c)
16740// (and (i1) f, (setcc c, 0, eq)) -> (czero.eqz f, c)
16741// (and (setcc c, 0, ne), (i1) g) -> (czero.nez g, c)
16742// (and (setcc c, 0, eq), (i1) g) -> (czero.eqz g, c)
16743static SDValue combineANDOfSETCCToCZERO(SDNode *N, SelectionDAG &DAG,
16744 const RISCVSubtarget &Subtarget) {
16745 if (!Subtarget.hasCZEROLike())
16746 return SDValue();
16747
16748 SDValue N0 = N->getOperand(Num: 0);
16749 SDValue N1 = N->getOperand(Num: 1);
16750
16751 auto IsEqualCompZero = [](SDValue &V) -> bool {
16752 if (V.getOpcode() == ISD::SETCC && isNullConstant(V: V.getOperand(i: 1))) {
16753 ISD::CondCode CC = cast<CondCodeSDNode>(Val: V.getOperand(i: 2))->get();
16754 if (ISD::isIntEqualitySetCC(Code: CC))
16755 return true;
16756 }
16757 return false;
16758 };
16759
16760 if (!IsEqualCompZero(N0) || !N0.hasOneUse())
16761 std::swap(a&: N0, b&: N1);
16762 if (!IsEqualCompZero(N0) || !N0.hasOneUse())
16763 return SDValue();
16764
16765 KnownBits Known = DAG.computeKnownBits(Op: N1);
16766 if (Known.getMaxValue().ugt(RHS: 1))
16767 return SDValue();
16768
16769 unsigned CzeroOpcode =
16770 (cast<CondCodeSDNode>(Val: N0.getOperand(i: 2))->get() == ISD::SETNE)
16771 ? RISCVISD::CZERO_EQZ
16772 : RISCVISD::CZERO_NEZ;
16773
16774 EVT VT = N->getValueType(ResNo: 0);
16775 SDLoc DL(N);
16776 return DAG.getNode(Opcode: CzeroOpcode, DL, VT, N1, N2: N0.getOperand(i: 0));
16777}
16778
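// Fold (and (atomic_load X), mask) -> zero-extending atomic load when the
// mask covers exactly the loaded memory width and the load's ordering is
// monotonic or weaker, so the extension folds into the load itself.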
16779static SDValue reduceANDOfAtomicLoad(SDNode *N,
16780 TargetLowering::DAGCombinerInfo &DCI) {
16781 SelectionDAG &DAG = DCI.DAG;
16782 if (N->getOpcode() != ISD::AND)
16783 return SDValue();
16784
16785 SDValue N0 = N->getOperand(Num: 0);
16786 if (N0.getOpcode() != ISD::ATOMIC_LOAD)
16787 return SDValue();
16788 if (!N0.hasOneUse())
16789 return SDValue();
16790
16791 AtomicSDNode *ALoad = cast<AtomicSDNode>(Val: N0.getNode());
16792 if (isStrongerThanMonotonic(AO: ALoad->getSuccessOrdering()))
16793 return SDValue();
16794
16795 EVT LoadedVT = ALoad->getMemoryVT();
16796 ConstantSDNode *MaskConst = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1));
16797 if (!MaskConst)
16798 return SDValue();
16799 uint64_t Mask = MaskConst->getZExtValue();
16800 uint64_t ExpectedMask = maskTrailingOnes<uint64_t>(N: LoadedVT.getSizeInBits());
16801 if (Mask != ExpectedMask)
16802 return SDValue();
16803
16804 SDValue ZextLoad = DAG.getAtomicLoad(
16805 ExtType: ISD::ZEXTLOAD, dl: SDLoc(N), MemVT: ALoad->getMemoryVT(), VT: N->getValueType(ResNo: 0),
16806 Chain: ALoad->getChain(), Ptr: ALoad->getBasePtr(), MMO: ALoad->getMemOperand());
16807 DCI.CombineTo(N, Res: ZextLoad);
16808 DAG.ReplaceAllUsesOfValueWith(From: SDValue(N0.getNode(), 1), To: ZextLoad.getValue(R: 1));
16809 DCI.recursivelyDeleteUnusedNodes(N: N0.getNode());
16810 return SDValue(N, 0);
16811}
16812
16813// Sometimes a mask is applied after a shift. If that shift was fed by a
16814// load, there may be an opportunity to narrow the load, but it is hidden by
16815// the intermediate shift. Detect that case and commute the shift/and in
16816// order to enable load narrowing.
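// For example (illustrative): with an i32 load,
//   (and (shl (load X), 8), 0xff00) -> (shl (and (load X), 0xff), 8)
// where the inner 0xff mask exposes that only one loaded byte is needed.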
16817static SDValue combineNarrowableShiftedLoad(SDNode *N, SelectionDAG &DAG) {
16818 EVT VT = N->getValueType(ResNo: 0);
16819 if (!VT.isScalarInteger())
16820 return SDValue();
16821
16822 using namespace SDPatternMatch;
16823 SDValue LoadNode;
16824 APInt MaskVal, ShiftVal;
16825 // (and (shl (load ...), ShiftAmt), Mask)
16826 if (!sd_match(
16827 N, P: m_And(L: m_OneUse(P: m_Shl(L: m_AllOf(preds: m_Opc(Opcode: ISD::LOAD), preds: m_Value(N&: LoadNode)),
16828 R: m_ConstInt(V&: ShiftVal))),
16829 R: m_ConstInt(V&: MaskVal)))) {
16830 return SDValue();
16831 }
16832
16833 uint64_t ShiftAmt = ShiftVal.getZExtValue();
16834
16835 if (ShiftAmt >= VT.getSizeInBits())
16836 return SDValue();
16837
16838 // Calculate the appropriate mask if it were applied before the shift.
16839 APInt InnerMask = MaskVal.lshr(shiftAmt: ShiftAmt);
16840 bool IsNarrowable =
16841 InnerMask == 0xff || InnerMask == 0xffff || InnerMask == 0xffffffff;
16842
16843 if (!IsNarrowable)
16844 return SDValue();
16845
16846 // AND the loaded value and change the shift appropriately, allowing
16847 // the load to be narrowed.
16848 SDLoc DL(N);
16849 SDValue InnerAnd = DAG.getNode(Opcode: ISD::AND, DL, VT, N1: LoadNode,
16850 N2: DAG.getConstant(Val: InnerMask, DL, VT));
16851 return DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: InnerAnd,
16852 N2: DAG.getShiftAmountConstant(Val: ShiftAmt, VT, DL));
16853}
16854
16855// Combines two comparison operations and a logic operation into one selection
16856// operation (min, max) and a logic operation. Returns the newly constructed
16857// node if the conditions for the optimization are satisfied.
16858static SDValue performANDCombine(SDNode *N,
16859 TargetLowering::DAGCombinerInfo &DCI,
16860 const RISCVSubtarget &Subtarget) {
16861 SelectionDAG &DAG = DCI.DAG;
16862 SDValue N0 = N->getOperand(Num: 0);
16863
16864 // Pre-promote (i32 (and (srl X, Y), 1)) on RV64 with Zbs without zero
16865 // extending X. This is safe since we only need the LSB after the shift and
16866 // shift amounts larger than 31 would produce poison. If we wait until
16867 // type legalization, we'll create RISCVISD::SRLW and we can't recover it
16868 // to use a BEXT instruction.
16869 if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
16870 N->getValueType(ResNo: 0) == MVT::i32 && isOneConstant(V: N->getOperand(Num: 1)) &&
16871 N0.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(Val: N0.getOperand(i: 1)) &&
16872 N0.hasOneUse()) {
16873 SDLoc DL(N);
16874 SDValue Op0 = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N0.getOperand(i: 0));
16875 SDValue Op1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: MVT::i64, Operand: N0.getOperand(i: 1));
16876 SDValue Srl = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i64, N1: Op0, N2: Op1);
16877 SDValue And = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i64, N1: Srl,
16878 N2: DAG.getConstant(Val: 1, DL, VT: MVT::i64));
16879 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: And);
16880 }
16881
16882 if (SDValue V = combineNarrowableShiftedLoad(N, DAG))
16883 return V;
16884 if (SDValue V = reverseZExtICmpCombine(N, DAG, Subtarget))
16885 return V;
16886 if (DCI.isAfterLegalizeDAG())
16887 if (SDValue V = combineANDOfSETCCToCZERO(N, DAG, Subtarget))
16888 return V;
16889 if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
16890 return V;
16891 if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
16892 return V;
16893 if (SDValue V = reduceANDOfAtomicLoad(N, DCI))
16894 return V;
16895
16896 if (DCI.isAfterLegalizeDAG())
16897 if (SDValue V = combineDeMorganOfBoolean(N, DAG))
16898 return V;
16899
16900 // fold (and (select lhs, rhs, cc, -1, y), x) ->
16901 // (select lhs, rhs, cc, x, (and x, y))
16902 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true, Subtarget);
16903}
16904
16905// Try to pull an xor with 1 through a select idiom that uses czero_eqz/nez.
16906// FIXME: Generalize to other binary operators with same operand.
16907static SDValue combineOrOfCZERO(SDNode *N, SDValue N0, SDValue N1,
16908 SelectionDAG &DAG) {
16909 assert(N->getOpcode() == ISD::OR && "Unexpected opcode");
16910
16911 if (N0.getOpcode() != RISCVISD::CZERO_EQZ ||
16912 N1.getOpcode() != RISCVISD::CZERO_NEZ ||
16913 !N0.hasOneUse() || !N1.hasOneUse())
16914 return SDValue();
16915
16916 // Should have the same condition.
16917 SDValue Cond = N0.getOperand(i: 1);
16918 if (Cond != N1.getOperand(i: 1))
16919 return SDValue();
16920
16921 SDValue TrueV = N0.getOperand(i: 0);
16922 SDValue FalseV = N1.getOperand(i: 0);
16923
16924 if (TrueV.getOpcode() != ISD::XOR || FalseV.getOpcode() != ISD::XOR ||
16925 TrueV.getOperand(i: 1) != FalseV.getOperand(i: 1) ||
16926 !isOneConstant(V: TrueV.getOperand(i: 1)) ||
16927 !TrueV.hasOneUse() || !FalseV.hasOneUse())
16928 return SDValue();
16929
16930 EVT VT = N->getValueType(ResNo: 0);
16931 SDLoc DL(N);
16932
16933 SDValue NewN0 = DAG.getNode(Opcode: RISCVISD::CZERO_EQZ, DL, VT, N1: TrueV.getOperand(i: 0),
16934 N2: Cond);
16935 SDValue NewN1 =
16936 DAG.getNode(Opcode: RISCVISD::CZERO_NEZ, DL, VT, N1: FalseV.getOperand(i: 0), N2: Cond);
16937 SDValue NewOr =
16938 DAG.getNode(Opcode: ISD::OR, DL, VT, N1: NewN0, N2: NewN1, Flags: SDNodeFlags::Disjoint);
16939 return DAG.getNode(Opcode: ISD::XOR, DL, VT, N1: NewOr, N2: TrueV.getOperand(i: 1));
16940}
16941
16942// (xor X, (xor (and X, C2), Y))
16943// ->(qc_insb X, (sra Y, ShAmt), Width, ShAmt)
16944// where C2 is a shifted mask with width = Width and shift = ShAmt
16945// qc_insb might become qc.insb or qc.insbi depending on the operands.
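// For example (illustrative): with C2 = 0xff0 (Width = 8, ShAmt = 4) and Y
// known to be zero outside C2,
//   (xor X, (xor (and X, 0xff0), Y)) -> (qc_insb X, (sra Y, 4), 8, 4)
// i.e. bits [11:4] of X are replaced by the corresponding bits of Y.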
16946static SDValue combineXorToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
16947 const RISCVSubtarget &Subtarget) {
16948 if (!Subtarget.hasVendorXqcibm())
16949 return SDValue();
16950
16951 using namespace SDPatternMatch;
16952 SDValue Base, Inserted;
16953 APInt CMask;
16954 if (!sd_match(N, P: m_Xor(L: m_Value(N&: Base),
16955 R: m_OneUse(P: m_Xor(L: m_OneUse(P: m_And(L: m_Deferred(V&: Base),
16956 R: m_ConstInt(V&: CMask))),
16957 R: m_Value(N&: Inserted))))))
16958 return SDValue();
16959
16960 if (N->getValueType(ResNo: 0) != MVT::i32)
16961 return SDValue();
16962 unsigned Width, ShAmt;
16963 if (!CMask.isShiftedMask(MaskIdx&: ShAmt, MaskLen&: Width))
16964 return SDValue();
16965
16966 // Check if all zero bits in CMask are also zero in Inserted
16967 if (!DAG.MaskedValueIsZero(Op: Inserted, Mask: ~CMask))
16968 return SDValue();
16969
16970 SDLoc DL(N);
16971
16972 // `Inserted` needs to be right shifted before it is put into the
16973 // instruction.
16974 Inserted = DAG.getNode(Opcode: ISD::SRA, DL, VT: MVT::i32, N1: Inserted,
16975 N2: DAG.getShiftAmountConstant(Val: ShAmt, VT: MVT::i32, DL));
16976
16977 SDValue Ops[] = {Base, Inserted, DAG.getConstant(Val: Width, DL, VT: MVT::i32),
16978 DAG.getConstant(Val: ShAmt, DL, VT: MVT::i32)};
16979 return DAG.getNode(Opcode: RISCVISD::QC_INSB, DL, VT: MVT::i32, Ops);
16980}
16981
16982static SDValue combineOrToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
16983 const RISCVSubtarget &Subtarget) {
16984 if (!Subtarget.hasVendorXqcibm())
16985 return SDValue();
16986
16987 using namespace SDPatternMatch;
16988
16989 SDValue X;
16990 APInt MaskImm;
16991 if (!sd_match(N, P: m_Or(L: m_OneUse(P: m_Value(N&: X)), R: m_ConstInt(V&: MaskImm))))
16992 return SDValue();
16993
16994 unsigned ShAmt, Width;
16995 if (!MaskImm.isShiftedMask(MaskIdx&: ShAmt, MaskLen&: Width) || MaskImm.isSignedIntN(N: 12))
16996 return SDValue();
16997
16998 if (N->getValueType(ResNo: 0) != MVT::i32)
16999 return SDValue();
17000
17001  // If Zbs is enabled and the mask is a single set bit, we can use BSETI,
17002  // which can be compressed to C_BSETI when Xqcibm is enabled.
17003 if (Width == 1 && Subtarget.hasStdExtZbs())
17004 return SDValue();
17005
17006 // If C1 is a shifted mask (but can't be formed as an ORI),
17007 // use a bitfield insert of -1.
17008 // Transform (or x, C1)
17009 // -> (qc.insbi x, -1, width, shift)
17010 SDLoc DL(N);
17011
17012 SDValue Ops[] = {X, DAG.getSignedConstant(Val: -1, DL, VT: MVT::i32),
17013 DAG.getConstant(Val: Width, DL, VT: MVT::i32),
17014 DAG.getConstant(Val: ShAmt, DL, VT: MVT::i32)};
17015 return DAG.getNode(Opcode: RISCVISD::QC_INSB, DL, VT: MVT::i32, Ops);
17016}
17017
17018// Generate a QC_INSB/QC_INSBI from 'or (and X, MaskImm), OrImm' iff the value
17019// being inserted only sets known zero bits.
17020static SDValue combineOrAndToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
17021 const RISCVSubtarget &Subtarget) {
17022 // Supported only in Xqcibm for now.
17023 if (!Subtarget.hasVendorXqcibm())
17024 return SDValue();
17025
17026 using namespace SDPatternMatch;
17027
17028 SDValue Inserted;
17029 APInt MaskImm, OrImm;
17030 if (!sd_match(
17031 N, P: m_SpecificVT(RefVT: MVT::i32, P: m_Or(L: m_OneUse(P: m_And(L: m_Value(N&: Inserted),
17032 R: m_ConstInt(V&: MaskImm))),
17033 R: m_ConstInt(V&: OrImm)))))
17034 return SDValue();
17035
17036  // Compute the known zero bits of the AND, as this catches more general
17037  // cases than just looking for an AND with an immediate mask.
17038 KnownBits Known = DAG.computeKnownBits(Op: N->getOperand(Num: 0));
17039
17040 // The bits being inserted must only set those bits that are known to be
17041 // zero.
17042 if (!OrImm.isSubsetOf(RHS: Known.Zero)) {
17043 // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
17044 // currently handle this case.
17045 return SDValue();
17046 }
17047
17048 unsigned ShAmt, Width;
17049 // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
17050 if (!Known.Zero.isShiftedMask(MaskIdx&: ShAmt, MaskLen&: Width))
17051 return SDValue();
17052
17053 // QC_INSB(I) dst, src, #width, #shamt.
17054 SDLoc DL(N);
17055
17056 SDValue ImmNode =
17057 DAG.getSignedConstant(Val: OrImm.getSExtValue() >> ShAmt, DL, VT: MVT::i32);
17058
17059 SDValue Ops[] = {Inserted, ImmNode, DAG.getConstant(Val: Width, DL, VT: MVT::i32),
17060 DAG.getConstant(Val: ShAmt, DL, VT: MVT::i32)};
17061 return DAG.getNode(Opcode: RISCVISD::QC_INSB, DL, VT: MVT::i32, Ops);
17062}
17063
17064static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
17065 const RISCVSubtarget &Subtarget) {
17066 SelectionDAG &DAG = DCI.DAG;
17067
17068 if (SDValue V = combineOrAndToBitfieldInsert(N, DAG, Subtarget))
17069 return V;
17070 if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
17071 return V;
17072 if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
17073 return V;
17074
17075 if (DCI.isAfterLegalizeDAG()) {
17076 if (SDValue V = combineOrToBitfieldInsert(N, DAG, Subtarget))
17077 return V;
17078 if (SDValue V = combineDeMorganOfBoolean(N, DAG))
17079 return V;
17080 }
17081
17082 // Look for Or of CZERO_EQZ/NEZ with the same condition (the select idiom).
17083 // We may be able to pull a common operation out of the true and false value.
17084 SDValue N0 = N->getOperand(Num: 0);
17085 SDValue N1 = N->getOperand(Num: 1);
17086 if (SDValue V = combineOrOfCZERO(N, N0, N1, DAG))
17087 return V;
17088 if (SDValue V = combineOrOfCZERO(N, N0: N1, N1: N0, DAG))
17089 return V;
17090
17091 // fold (or (select cond, 0, y), x) ->
17092 // (select cond, x, (or x, y))
17093 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
17094}
17095
17096static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
17097 const RISCVSubtarget &Subtarget) {
17098 SDValue N0 = N->getOperand(Num: 0);
17099 SDValue N1 = N->getOperand(Num: 1);
17100
17101 // Pre-promote (i32 (xor (shl -1, X), ~0)) on RV64 with Zbs so we can use
17102 // (ADDI (BSET X0, X), -1). If we wait until type legalization, we'll create
17103 // RISCVISD::SLLW and we can't recover it to use a BSET instruction.
17104 if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
17105 N->getValueType(ResNo: 0) == MVT::i32 && isAllOnesConstant(V: N1) &&
17106 N0.getOpcode() == ISD::SHL && isAllOnesConstant(V: N0.getOperand(i: 0)) &&
17107 !isa<ConstantSDNode>(Val: N0.getOperand(i: 1)) && N0.hasOneUse()) {
17108 SDLoc DL(N);
17109 SDValue Op0 = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i64, Operand: N0.getOperand(i: 0));
17110 SDValue Op1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT: MVT::i64, Operand: N0.getOperand(i: 1));
17111 SDValue Shl = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i64, N1: Op0, N2: Op1);
17112 SDValue Not = DAG.getNOT(DL, Val: Shl, VT: MVT::i64);
17113 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i32, Operand: Not);
17114 }
17115
17116 // fold (xor (sllw 1, x), -1) -> (rolw ~1, x)
17117 // NOTE: Assumes ROL being legal means ROLW is legal.
17118 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17119 if (N0.getOpcode() == RISCVISD::SLLW &&
17120 isAllOnesConstant(V: N1) && isOneConstant(V: N0.getOperand(i: 0)) &&
17121 TLI.isOperationLegal(Op: ISD::ROTL, VT: MVT::i64)) {
17122 SDLoc DL(N);
17123 return DAG.getNode(Opcode: RISCVISD::ROLW, DL, VT: MVT::i64,
17124 N1: DAG.getConstant(Val: ~1, DL, VT: MVT::i64), N2: N0.getOperand(i: 1));
17125 }
17126
17127 // Fold (xor (setcc constant, y, setlt), 1) -> (setcc y, constant + 1, setlt)
17128 if (N0.getOpcode() == ISD::SETCC && isOneConstant(V: N1) && N0.hasOneUse()) {
17129 auto *ConstN00 = dyn_cast<ConstantSDNode>(Val: N0.getOperand(i: 0));
17130 ISD::CondCode CC = cast<CondCodeSDNode>(Val: N0.getOperand(i: 2))->get();
17131 if (ConstN00 && CC == ISD::SETLT) {
17132 EVT VT = N0.getValueType();
17133 SDLoc DL(N0);
17134 const APInt &Imm = ConstN00->getAPIntValue();
17135 if ((Imm + 1).isSignedIntN(N: 12))
17136 return DAG.getSetCC(DL, VT, LHS: N0.getOperand(i: 1),
17137 RHS: DAG.getConstant(Val: Imm + 1, DL, VT), Cond: CC);
17138 }
17139 }
17140
17141 if (SDValue V = combineXorToBitfieldInsert(N, DAG, Subtarget))
17142 return V;
17143
17144 if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
17145 return V;
17146 if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
17147 return V;
17148
17149 // fold (xor (select cond, 0, y), x) ->
17150 // (select cond, x, (xor x, y))
17151 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
17152}
17153
17154// Try to expand a multiply to a sequence of shifts and adds/subs
17155// for a machine without a native mul instruction.
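// For example, MulAmt = 7 (0b111) has the non-adjacent form 8 - 1, so the
// loop below builds (0 - (X << 0)) + (X << 3) = 7 * X.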
17156static SDValue expandMulToNAFSequence(SDNode *N, SelectionDAG &DAG,
17157 uint64_t MulAmt) {
17158 SDLoc DL(N);
17159 EVT VT = N->getValueType(ResNo: 0);
17160 const uint64_t BitWidth = VT.getFixedSizeInBits();
17161
17162 SDValue Result = DAG.getConstant(Val: 0, DL, VT: N->getValueType(ResNo: 0));
17163 SDValue N0 = N->getOperand(Num: 0);
17164
17165 // Find the Non-adjacent form of the multiplier.
17166 for (uint64_t E = MulAmt, I = 0; E && I < BitWidth; ++I, E >>= 1) {
17167 if (E & 1) {
17168 bool IsAdd = (E & 3) == 1;
17169 E -= IsAdd ? 1 : -1;
17170 SDValue ShiftVal = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: N0,
17171 N2: DAG.getShiftAmountConstant(Val: I, VT, DL));
17172 ISD::NodeType AddSubOp = IsAdd ? ISD::ADD : ISD::SUB;
17173 Result = DAG.getNode(Opcode: AddSubOp, DL, VT, N1: Result, N2: ShiftVal);
17174 }
17175 }
17176
17177 return Result;
17178}
17179
17180// X * (2^N +/- 2^M) -> (add/sub (shl X, C1), (shl X, C2))
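// For example, MulAmt = 20 = 16 + 4 becomes (add (shl X, 4), (shl X, 2)) and
// MulAmt = 28 = 32 - 4 becomes (sub (shl X, 5), (shl X, 2)).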
17181static SDValue expandMulToAddOrSubOfShl(SDNode *N, SelectionDAG &DAG,
17182 uint64_t MulAmt) {
17183 uint64_t MulAmtLowBit = MulAmt & (-MulAmt);
17184 SDValue X = N->getOperand(Num: 0);
17185 ISD::NodeType Op;
17186 uint64_t ShiftAmt1;
17187 bool CanSub = isPowerOf2_64(Value: MulAmt + MulAmtLowBit);
17188 auto PreferSub = [X, MulAmtLowBit]() {
17189 // For MulAmt == 3 << M, both (X << (M + 2)) - (X << M)
17190 // and (X << (M + 1)) + (X << M) are valid expansions.
17191 // Prefer SUB if we can get (X << (M + 2)) for free:
17192 // if X is an exact (Y >> (M + 2)), that left shift folds back to Y.
17193 uint64_t ShAmt = Log2_64(Value: MulAmtLowBit) + 2;
17194 using namespace SDPatternMatch;
17195 return sd_match(N: X, P: m_ExactSr(L: m_Value(), R: m_SpecificInt(V: ShAmt)));
17196 };
17197 if (isPowerOf2_64(Value: MulAmt - MulAmtLowBit) && !(CanSub && PreferSub())) {
17198 Op = ISD::ADD;
17199 ShiftAmt1 = MulAmt - MulAmtLowBit;
17200 } else if (CanSub) {
17201 Op = ISD::SUB;
17202 ShiftAmt1 = MulAmt + MulAmtLowBit;
17203 } else {
17204 return SDValue();
17205 }
17206 EVT VT = N->getValueType(ResNo: 0);
17207 SDLoc DL(N);
17208 SDValue Shift1 = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: X,
17209 N2: DAG.getConstant(Val: Log2_64(Value: ShiftAmt1), DL, VT));
17210 SDValue Shift2 = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: X,
17211 N2: DAG.getConstant(Val: Log2_64(Value: MulAmtLowBit), DL, VT));
17212 return DAG.getNode(Opcode: Op, DL, VT, N1: Shift1, N2: Shift2);
17213}
17214
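// Build one or two SHL_ADD (shXadd) nodes multiplying X by (2^ShY + 1), then
// optionally by (2^ShX + 1) (AddX == false) or by 2^ShX with X added back in
// (AddX == true), with the final result shifted left by Shift.
// E.g. ShY = 1, ShX = 2, AddX = false, Shift = 0 gives
// (sh2add (sh1add X, X), (sh1add X, X)) = 5 * 3 * X = 15 * X.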
17215static SDValue getShlAddShlAdd(SDNode *N, SelectionDAG &DAG, unsigned ShX,
17216 unsigned ShY, bool AddX, unsigned Shift) {
17217 SDLoc DL(N);
17218 EVT VT = N->getValueType(ResNo: 0);
17219 SDValue X = N->getOperand(Num: 0);
17220 // Put the shift first if we can fold:
17221 // a. a zext into the shift forming a slli.uw
17222 // b. an exact shift right forming one shorter shift or no shift at all
17223 using namespace SDPatternMatch;
17224 if (Shift != 0 &&
17225 sd_match(N: X, P: m_AnyOf(preds: m_And(L: m_Value(), R: m_SpecificInt(UINT64_C(0xffffffff))),
17226 preds: m_ExactSr(L: m_Value(), R: m_ConstInt())))) {
17227 X = DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: X, N2: DAG.getConstant(Val: Shift, DL, VT));
17228 Shift = 0;
17229 }
17230 SDValue ShlAdd = DAG.getNode(Opcode: RISCVISD::SHL_ADD, DL, VT, N1: X,
17231 N2: DAG.getTargetConstant(Val: ShY, DL, VT), N3: X);
17232 if (ShX != 0)
17233 ShlAdd = DAG.getNode(Opcode: RISCVISD::SHL_ADD, DL, VT, N1: ShlAdd,
17234 N2: DAG.getTargetConstant(Val: ShX, DL, VT), N3: AddX ? X : ShlAdd);
17235 if (Shift == 0)
17236 return ShlAdd;
17237 // Otherwise, put the shl last so that it can fold with following instructions
17238 // (e.g. sext or add).
17239 return DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: ShlAdd, N2: DAG.getConstant(Val: Shift, DL, VT));
17240}
17241
17242static SDValue expandMulToShlAddShlAdd(SDNode *N, SelectionDAG &DAG,
17243 uint64_t MulAmt, unsigned Shift) {
17244 switch (MulAmt) {
17245 // 3/5/9 -> (shYadd X, X)
17246 case 3:
17247 return getShlAddShlAdd(N, DAG, ShX: 0, ShY: 1, /*AddX=*/false, Shift);
17248 case 5:
17249 return getShlAddShlAdd(N, DAG, ShX: 0, ShY: 2, /*AddX=*/false, Shift);
17250 case 9:
17251 return getShlAddShlAdd(N, DAG, ShX: 0, ShY: 3, /*AddX=*/false, Shift);
17252 // 3/5/9 * 3/5/9 -> (shXadd (shYadd X, X), (shYadd X, X))
17253 case 5 * 3:
17254 return getShlAddShlAdd(N, DAG, ShX: 2, ShY: 1, /*AddX=*/false, Shift);
17255 case 9 * 3:
17256 return getShlAddShlAdd(N, DAG, ShX: 3, ShY: 1, /*AddX=*/false, Shift);
17257 case 5 * 5:
17258 return getShlAddShlAdd(N, DAG, ShX: 2, ShY: 2, /*AddX=*/false, Shift);
17259 case 9 * 5:
17260 return getShlAddShlAdd(N, DAG, ShX: 3, ShY: 2, /*AddX=*/false, Shift);
17261 case 9 * 9:
17262 return getShlAddShlAdd(N, DAG, ShX: 3, ShY: 3, /*AddX=*/false, Shift);
17263 default:
17264 break;
17265 }
17266
17267 int ShX;
17268 if (int ShY = isShifted359(Value: MulAmt - 1, Shift&: ShX)) {
17269 assert(ShX != 0 && "MulAmt=4,6,10 handled before");
17270 // 2/4/8 * 3/5/9 + 1 -> (shXadd (shYadd X, X), X)
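// E.g. MulAmt = 13 = 4 * 3 + 1 becomes (sh2add (sh1add X, X), X).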
17271 if (ShX <= 3)
17272 return getShlAddShlAdd(N, DAG, ShX, ShY, /*AddX=*/true, Shift);
17273 // 2^N * 3/5/9 + 1 -> (add (shYadd (shl X, N), (shl X, N)), X)
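// E.g. MulAmt = 49 = 16 * 3 + 1 becomes
// (add (sh1add (shl X, 4), (shl X, 4)), X).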
17274 if (Shift == 0) {
17275 SDLoc DL(N);
17276 EVT VT = N->getValueType(ResNo: 0);
17277 SDValue X = N->getOperand(Num: 0);
17278 SDValue Shl =
17279 DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: X, N2: DAG.getConstant(Val: ShX, DL, VT));
17280 SDValue ShlAdd = DAG.getNode(Opcode: RISCVISD::SHL_ADD, DL, VT, N1: Shl,
17281 N2: DAG.getTargetConstant(Val: ShY, DL, VT), N3: Shl);
17282 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: ShlAdd, N2: X);
17283 }
17284 }
17285 return SDValue();
17286}
17287
17288// Try to expand a scalar multiply to a faster sequence.
17289static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
17290 TargetLowering::DAGCombinerInfo &DCI,
17291 const RISCVSubtarget &Subtarget) {
17292
17293 EVT VT = N->getValueType(ResNo: 0);
17294
17295 // LI + MUL is usually smaller than the alternative sequence.
17296 if (DAG.getMachineFunction().getFunction().hasMinSize())
17297 return SDValue();
17298
17299 if (VT != Subtarget.getXLenVT())
17300 return SDValue();
17301
17302 bool ShouldExpandMul =
17303 (!DCI.isBeforeLegalize() && !DCI.isCalledByLegalizer()) ||
17304 !Subtarget.hasStdExtZmmul();
17305 if (!ShouldExpandMul)
17306 return SDValue();
17307
17308 ConstantSDNode *CNode = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1));
17309 if (!CNode)
17310 return SDValue();
17311 uint64_t MulAmt = CNode->getZExtValue();
17312
17313 // Don't do this if the Xqciac extension is enabled and the MulAmt fits in a simm12.
17314 if (Subtarget.hasVendorXqciac() && isInt<12>(x: CNode->getSExtValue()))
17315 return SDValue();
17316
17317 // WARNING: The code below is knowingly incorrect with regards to undef
17318 // semantics. We're adding additional uses of X here, and in principle, we
17319 // should be freezing X before doing so. However, adding freeze here causes
17320 // real regressions, and no other target properly freezes X in these cases
17321 // either.
17322 if (Subtarget.hasShlAdd(ShAmt: 3)) {
17323 // 3/5/9 * 2^N -> (shl (shXadd X, X), N)
17324 // 3/5/9 * 3/5/9 * 2^N - In particular, this covers multiples
17325 // of 25 which happen to be quite common.
17326 // (2/4/8 * 3/5/9 + 1) * 2^N
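// E.g. MulAmt = 200 = 25 * 8 becomes
// (shl (sh2add (sh2add X, X), (sh2add X, X)), 3).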
17327 unsigned Shift = llvm::countr_zero(Val: MulAmt);
17328 if (SDValue V = expandMulToShlAddShlAdd(N, DAG, MulAmt: MulAmt >> Shift, Shift))
17329 return V;
17330
17331 // If this is a power of 2 plus 2/4/8, we can use a shift followed by a
17332 // single shXadd. First check that this is a sum of two powers of 2 because
17333 // that's easy. Then count the trailing zeros to find the higher set bit.
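// E.g. MulAmt = 34 = 32 + 2 becomes (sh1add X, (shl X, 5)).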
17334 SDValue X = N->getOperand(Num: 0);
17335 if (Shift >= 1 && Shift <= 3 && isPowerOf2_64(Value: MulAmt & (MulAmt - 1))) {
17336 unsigned ShiftAmt = llvm::countr_zero(Val: (MulAmt & (MulAmt - 1)));
17337 SDLoc DL(N);
17338 SDValue Shift1 =
17339 DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: X, N2: DAG.getConstant(Val: ShiftAmt, DL, VT));
17340 return DAG.getNode(Opcode: RISCVISD::SHL_ADD, DL, VT, N1: X,
17341 N2: DAG.getTargetConstant(Val: Shift, DL, VT), N3: Shift1);
17342 }
17343
17344 // TODO: 2^(C1>3) * 3/5/9 - 1
17345
17346 // 2^n + 2/4/8 + 1 -> (add (shl X, C1), (shXadd X, X))
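// E.g. MulAmt = 35 = 32 + 2 + 1 becomes (add (shl X, 5), (sh1add X, X)).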
17347 if (MulAmt > 2 && isPowerOf2_64(Value: (MulAmt - 1) & (MulAmt - 2))) {
17348 unsigned ScaleShift = llvm::countr_zero(Val: MulAmt - 1);
17349 if (ScaleShift >= 1 && ScaleShift < 4) {
17350 unsigned ShiftAmt = llvm::countr_zero(Val: (MulAmt - 1) & (MulAmt - 2));
17351 SDLoc DL(N);
17352 SDValue Shift1 =
17353 DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: X, N2: DAG.getConstant(Val: ShiftAmt, DL, VT));
17354 return DAG.getNode(
17355 Opcode: ISD::ADD, DL, VT, N1: Shift1,
17356 N2: DAG.getNode(Opcode: RISCVISD::SHL_ADD, DL, VT, N1: X,
17357 N2: DAG.getTargetConstant(Val: ScaleShift, DL, VT), N3: X));
17358 }
17359 }
17360
17361 // 2^N - 3/5/9 --> (sub (shl X, C1), (shXadd X, X))
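// E.g. MulAmt = 29 = 32 - 3 becomes (sub (shl X, 5), (sh1add X, X)).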
17362 for (uint64_t Offset : {3, 5, 9}) {
17363 if (isPowerOf2_64(Value: MulAmt + Offset)) {
17364 unsigned ShAmt = llvm::countr_zero(Val: MulAmt + Offset);
17365 if (ShAmt >= VT.getSizeInBits())
17366 continue;
17367 SDLoc DL(N);
17368 SDValue Shift1 =
17369 DAG.getNode(Opcode: ISD::SHL, DL, VT, N1: X, N2: DAG.getConstant(Val: ShAmt, DL, VT));
17370 SDValue Mul359 =
17371 DAG.getNode(Opcode: RISCVISD::SHL_ADD, DL, VT, N1: X,
17372 N2: DAG.getTargetConstant(Val: Log2_64(Value: Offset - 1), DL, VT), N3: X);
17373 return DAG.getNode(Opcode: ISD::SUB, DL, VT, N1: Shift1, N2: Mul359);
17374 }
17375 }
17376 }
17377
17378 if (SDValue V = expandMulToAddOrSubOfShl(N, DAG, MulAmt))
17379 return V;
17380
17381 if (!Subtarget.hasStdExtZmmul())
17382 return expandMulToNAFSequence(N, DAG, MulAmt);
17383
17384 return SDValue();
17385}
17386
17387// Combine vXi32 (mul (and (lshr X, 15), 0x10001), 0xffff) ->
17388// (bitcast (sra (v2Xi16 (bitcast X)), 15))
17389// Same for other equivalent types with other equivalent constants.
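// Each wide lane holds two narrow lanes; the lshr/and extracts each narrow
// lane's sign bit, and the multiply by the low-half mask smears that bit
// across its half, which is exactly an arithmetic shift right by
// HalfSize - 1 on the reinterpreted narrow vector.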
17390static SDValue combineVectorMulToSraBitcast(SDNode *N, SelectionDAG &DAG) {
17391 EVT VT = N->getValueType(ResNo: 0);
17392 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17393
17394 // Do this for legal vectors unless they are i1 or i8 vectors.
17395 if (!VT.isVector() || !TLI.isTypeLegal(VT) || VT.getScalarSizeInBits() < 16)
17396 return SDValue();
17397
17398 if (N->getOperand(Num: 0).getOpcode() != ISD::AND ||
17399 N->getOperand(Num: 0).getOperand(i: 0).getOpcode() != ISD::SRL)
17400 return SDValue();
17401
17402 SDValue And = N->getOperand(Num: 0);
17403 SDValue Srl = And.getOperand(i: 0);
17404
17405 APInt V1, V2, V3;
17406 if (!ISD::isConstantSplatVector(N: N->getOperand(Num: 1).getNode(), SplatValue&: V1) ||
17407 !ISD::isConstantSplatVector(N: And.getOperand(i: 1).getNode(), SplatValue&: V2) ||
17408 !ISD::isConstantSplatVector(N: Srl.getOperand(i: 1).getNode(), SplatValue&: V3))
17409 return SDValue();
17410
17411 unsigned HalfSize = VT.getScalarSizeInBits() / 2;
17412 if (!V1.isMask(numBits: HalfSize) || V2 != (1ULL | 1ULL << HalfSize) ||
17413 V3 != (HalfSize - 1))
17414 return SDValue();
17415
17416 EVT HalfVT = EVT::getVectorVT(Context&: *DAG.getContext(),
17417 VT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: HalfSize),
17418 EC: VT.getVectorElementCount() * 2);
17419 SDLoc DL(N);
17420 SDValue Cast = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: HalfVT, Operand: Srl.getOperand(i: 0));
17421 SDValue Sra = DAG.getNode(Opcode: ISD::SRA, DL, VT: HalfVT, N1: Cast,
17422 N2: DAG.getConstant(Val: HalfSize - 1, DL, VT: HalfVT));
17423 return DAG.getNode(Opcode: ISD::BITCAST, DL, VT, Operand: Sra);
17424}
17425
17426static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG,
17427 TargetLowering::DAGCombinerInfo &DCI,
17428 const RISCVSubtarget &Subtarget) {
17429 EVT VT = N->getValueType(ResNo: 0);
17430 if (!VT.isVector())
17431 return expandMul(N, DAG, DCI, Subtarget);
17432
17433 SDLoc DL(N);
17434 SDValue N0 = N->getOperand(Num: 0);
17435 SDValue N1 = N->getOperand(Num: 1);
17436 SDValue MulOper;
17437 unsigned AddSubOpc;
17438
17439 // vmadd: (mul (add x, 1), y) -> (add (mul x, y), y)
17440 // (mul x, add (y, 1)) -> (add x, (mul x, y))
17441 // vnmsub: (mul (sub 1, x), y) -> (sub y, (mul x, y))
17442 // (mul x, (sub 1, y)) -> (sub x, (mul x, y))
17443 auto IsAddSubWith1 = [&](SDValue V) -> bool {
17444 AddSubOpc = V->getOpcode();
17445 if ((AddSubOpc == ISD::ADD || AddSubOpc == ISD::SUB) && V->hasOneUse()) {
17446 SDValue Opnd = V->getOperand(Num: 1);
17447 MulOper = V->getOperand(Num: 0);
17448 if (AddSubOpc == ISD::SUB)
17449 std::swap(a&: Opnd, b&: MulOper);
17450 if (isOneOrOneSplat(V: Opnd))
17451 return true;
17452 }
17453 return false;
17454 };
17455
17456 if (IsAddSubWith1(N0)) {
17457 SDValue MulVal = DAG.getNode(Opcode: ISD::MUL, DL, VT, N1, N2: MulOper);
17458 return DAG.getNode(Opcode: AddSubOpc, DL, VT, N1, N2: MulVal);
17459 }
17460
17461 if (IsAddSubWith1(N1)) {
17462 SDValue MulVal = DAG.getNode(Opcode: ISD::MUL, DL, VT, N1: N0, N2: MulOper);
17463 return DAG.getNode(Opcode: AddSubOpc, DL, VT, N1: N0, N2: MulVal);
17464 }
17465
17466 if (SDValue V = combineBinOpOfZExt(N, DAG))
17467 return V;
17468
17469 if (SDValue V = combineVectorMulToSraBitcast(N, DAG))
17470 return V;
17471
17472 return SDValue();
17473}
17474
17475/// Since indexed load/store instructions zero-extend their indices, try to
17476/// narrow the type of the index operand.
17477static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &DAG) {
17478 if (isIndexTypeSigned(IndexType))
17479 return false;
17480
17481 if (!N->hasOneUse())
17482 return false;
17483
17484 EVT VT = N.getValueType();
17485 SDLoc DL(N);
17486
17487 // In general, what we're doing here is seeing if we can sink a truncate to
17488 // a smaller element type into the expression tree building our index.
17489 // TODO: We can generalize this and handle a bunch more cases if useful.
17490
17491 // Narrow a buildvector to the narrowest element type. This requires less
17492 // work and less register pressure at high LMUL, and creates smaller constants
17493 // which may be cheaper to materialize.
17494 if (ISD::isBuildVectorOfConstantSDNodes(N: N.getNode())) {
17495 KnownBits Known = DAG.computeKnownBits(Op: N);
17496 unsigned ActiveBits = std::max(a: 8u, b: Known.countMaxActiveBits());
17497 LLVMContext &C = *DAG.getContext();
17498 EVT ResultVT = EVT::getIntegerVT(Context&: C, BitWidth: ActiveBits).getRoundIntegerType(Context&: C);
17499 if (ResultVT.bitsLT(VT: VT.getVectorElementType())) {
17500 N = DAG.getNode(Opcode: ISD::TRUNCATE, DL,
17501 VT: VT.changeVectorElementType(Context&: C, EltVT: ResultVT), Operand: N);
17502 return true;
17503 }
17504 }
17505
17506 // Handle the pattern (shl (zext x to ty), C) and bits(x) + C < bits(ty).
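// E.g. (shl (zext nxv1i8 X to nxv1i64), splat 2) needs at most 10 bits, so
// the index can be rebuilt as (shl (zext nxv1i8 X to nxv1i16), splat 2).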
17507 if (N.getOpcode() != ISD::SHL)
17508 return false;
17509
17510 SDValue N0 = N.getOperand(i: 0);
17511 if (N0.getOpcode() != ISD::ZERO_EXTEND &&
17512 N0.getOpcode() != RISCVISD::VZEXT_VL)
17513 return false;
17514 if (!N0->hasOneUse())
17515 return false;
17516
17517 APInt ShAmt;
17518 SDValue N1 = N.getOperand(i: 1);
17519 if (!ISD::isConstantSplatVector(N: N1.getNode(), SplatValue&: ShAmt))
17520 return false;
17521
17522 SDValue Src = N0.getOperand(i: 0);
17523 EVT SrcVT = Src.getValueType();
17524 unsigned SrcElen = SrcVT.getScalarSizeInBits();
17525 unsigned ShAmtV = ShAmt.getZExtValue();
17526 unsigned NewElen = PowerOf2Ceil(A: SrcElen + ShAmtV);
17527 NewElen = std::max(a: NewElen, b: 8U);
17528
17529 // Skip if NewElen is not narrower than the original extended type.
17530 if (NewElen >= N0.getValueType().getScalarSizeInBits())
17531 return false;
17532
17533 EVT NewEltVT = EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: NewElen);
17534 EVT NewVT = SrcVT.changeVectorElementType(Context&: *DAG.getContext(), EltVT: NewEltVT);
17535
17536 SDValue NewExt = DAG.getNode(Opcode: N0->getOpcode(), DL, VT: NewVT, Ops: N0->ops());
17537 SDValue NewShAmtVec = DAG.getConstant(Val: ShAmtV, DL, VT: NewVT);
17538 N = DAG.getNode(Opcode: ISD::SHL, DL, VT: NewVT, N1: NewExt, N2: NewShAmtVec);
17539 return true;
17540}
17541
17542/// Try to map an integer comparison with size > XLEN to vector instructions
17543/// before type legalization splits it up into chunks.
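/// E.g. on RV64 with vector support, an i128 equality compare can be lowered
/// to a v16i8 VP_SETCC (setne) whose i1 results are OR-reduced and then
/// compared against zero with the original condition code.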
17544static SDValue
17545combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y, ISD::CondCode CC,
17546 const SDLoc &DL, SelectionDAG &DAG,
17547 const RISCVSubtarget &Subtarget) {
17548 assert(ISD::isIntEqualitySetCC(CC) && "Bad comparison predicate");
17549
17550 if (!Subtarget.hasVInstructions())
17551 return SDValue();
17552
17553 MVT XLenVT = Subtarget.getXLenVT();
17554 EVT OpVT = X.getValueType();
17555 // We're looking for an oversized integer equality comparison.
17556 if (!OpVT.isScalarInteger())
17557 return SDValue();
17558
17559 unsigned OpSize = OpVT.getSizeInBits();
17560 // The size should be larger than XLen and smaller than the maximum vector
17561 // size.
17562 if (OpSize <= Subtarget.getXLen() ||
17563 OpSize > Subtarget.getRealMinVLen() *
17564 Subtarget.getMaxLMULForFixedLengthVectors())
17565 return SDValue();
17566
17567 // Don't perform this combine if constructing the vector will be expensive.
17568 auto IsVectorBitCastCheap = [](SDValue X) {
17569 X = peekThroughBitcasts(V: X);
17570 return isa<ConstantSDNode>(Val: X) || X.getValueType().isVector() ||
17571 X.getOpcode() == ISD::LOAD;
17572 };
17573 if (!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y))
17574 return SDValue();
17575
17576 if (DAG.getMachineFunction().getFunction().hasFnAttribute(
17577 Kind: Attribute::NoImplicitFloat))
17578 return SDValue();
17579
17580 // Bail out for non-byte-sized types.
17581 if (!OpVT.isByteSized())
17582 return SDValue();
17583
17584 unsigned VecSize = OpSize / 8;
17585 EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i8, NumElements: VecSize);
17586 EVT CmpVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i1, NumElements: VecSize);
17587
17588 SDValue VecX = DAG.getBitcast(VT: VecVT, V: X);
17589 SDValue VecY = DAG.getBitcast(VT: VecVT, V: Y);
17590 SDValue Mask = DAG.getAllOnesConstant(DL, VT: CmpVT);
17591 SDValue VL = DAG.getConstant(Val: VecSize, DL, VT: XLenVT);
17592
17593 SDValue Cmp = DAG.getNode(Opcode: ISD::VP_SETCC, DL, VT: CmpVT, N1: VecX, N2: VecY,
17594 N3: DAG.getCondCode(Cond: ISD::SETNE), N4: Mask, N5: VL);
17595 return DAG.getSetCC(DL, VT,
17596 LHS: DAG.getNode(Opcode: ISD::VP_REDUCE_OR, DL, VT: XLenVT,
17597 N1: DAG.getConstant(Val: 0, DL, VT: XLenVT), N2: Cmp, N3: Mask,
17598 N4: VL),
17599 RHS: DAG.getConstant(Val: 0, DL, VT: XLenVT), Cond: CC);
17600}
17601
17602static SDValue performSETCCCombine(SDNode *N,
17603 TargetLowering::DAGCombinerInfo &DCI,
17604 const RISCVSubtarget &Subtarget) {
17605 SelectionDAG &DAG = DCI.DAG;
17606 SDLoc dl(N);
17607 SDValue N0 = N->getOperand(Num: 0);
17608 SDValue N1 = N->getOperand(Num: 1);
17609 EVT VT = N->getValueType(ResNo: 0);
17610 EVT OpVT = N0.getValueType();
17611
17612 ISD::CondCode Cond = cast<CondCodeSDNode>(Val: N->getOperand(Num: 2))->get();
17613 // Looking for an equality compare.
17614 if (!isIntEqualitySetCC(Code: Cond))
17615 return SDValue();
17616
17617 if (SDValue V =
17618 combineVectorSizedSetCCEquality(VT, X: N0, Y: N1, CC: Cond, DL: dl, DAG, Subtarget))
17619 return V;
17620
17621 if (DCI.isAfterLegalizeDAG() && isa<ConstantSDNode>(Val: N1) &&
17622 N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
17623 isa<ConstantSDNode>(Val: N0.getOperand(i: 1))) {
17624 const APInt &AndRHSC = N0.getConstantOperandAPInt(i: 1);
17625 // (X & -(1 << C)) == 0 -> (X >> C) == 0 if the AND constant can't use ANDI.
17626 if (isNullConstant(V: N1) && !isInt<12>(x: AndRHSC.getSExtValue()) &&
17627 AndRHSC.isNegatedPowerOf2()) {
17628 unsigned ShiftBits = AndRHSC.countr_zero();
17629 SDValue Shift = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: OpVT, N1: N0.getOperand(i: 0),
17630 N2: DAG.getConstant(Val: ShiftBits, DL: dl, VT: OpVT));
17631 return DAG.getSetCC(DL: dl, VT, LHS: Shift, RHS: N1, Cond);
17632 }
17633
17634 // Similar to above but handling the lower 32 bits by using sraiw. Allow
17635 // comparing with constants other than 0 if the constant can be folded into
17636 // addi or xori after shifting.
17637 uint64_t N1Int = cast<ConstantSDNode>(Val&: N1)->getZExtValue();
17638 uint64_t AndRHSInt = AndRHSC.getZExtValue();
17639 if (OpVT == MVT::i64 && isUInt<32>(x: AndRHSInt) &&
17640 isPowerOf2_32(Value: -uint32_t(AndRHSInt)) && (N1Int & AndRHSInt) == N1Int) {
17641 unsigned ShiftBits = llvm::countr_zero(Val: AndRHSInt);
17642 int64_t NewC = SignExtend64<32>(x: N1Int) >> ShiftBits;
17643 if (NewC >= -2048 && NewC <= 2048) {
17644 SDValue SExt =
17645 DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL: dl, VT: OpVT, N1: N0.getOperand(i: 0),
17646 N2: DAG.getValueType(MVT::i32));
17647 SDValue Shift = DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: OpVT, N1: SExt,
17648 N2: DAG.getConstant(Val: ShiftBits, DL: dl, VT: OpVT));
17649 return DAG.getSetCC(DL: dl, VT, LHS: Shift,
17650 RHS: DAG.getSignedConstant(Val: NewC, DL: dl, VT: OpVT), Cond);
17651 }
17652 }
17653 }
17654
17655 // Replace (seteq (i64 (and X, 0xffffffff)), C1) with
17656 // (seteq (i64 (sext_inreg (X, i32)), C1')) where C1' is C1 sign extended from
17657 // bit 31. Same for setne. C1' may be cheaper to materialize and the
17658 // sext_inreg can become a sext.w instead of a shift pair.
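// E.g. (seteq (and X, 0xffffffff), 0x80000000) becomes
// (seteq (sext_inreg X, i32), 0xffffffff80000000).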
17659 if (OpVT != MVT::i64 || !Subtarget.is64Bit())
17660 return SDValue();
17661
17662 // RHS needs to be a constant.
17663 auto *N1C = dyn_cast<ConstantSDNode>(Val&: N1);
17664 if (!N1C)
17665 return SDValue();
17666
17667 // LHS needs to be (and X, 0xffffffff).
17668 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
17669 !isa<ConstantSDNode>(Val: N0.getOperand(i: 1)) ||
17670 N0.getConstantOperandVal(i: 1) != UINT64_C(0xffffffff))
17671 return SDValue();
17672
17673 // Don't do this if the sign bit is provably zero; it will be turned back into
17674 // an AND.
17675 APInt SignMask = APInt::getOneBitSet(numBits: 64, BitNo: 31);
17676 if (DAG.MaskedValueIsZero(Op: N0.getOperand(i: 0), Mask: SignMask))
17677 return SDValue();
17678
17679 const APInt &C1 = N1C->getAPIntValue();
17680
17681 // If the constant is larger than 2^32 - 1 it is impossible for both sides
17682 // to be equal.
17683 if (C1.getActiveBits() > 32)
17684 return DAG.getBoolConstant(V: Cond == ISD::SETNE, DL: dl, VT, OpVT);
17685
17686 SDValue SExtOp = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL: N, VT: OpVT,
17687 N1: N0.getOperand(i: 0), N2: DAG.getValueType(MVT::i32));
17688 return DAG.getSetCC(DL: dl, VT, LHS: SExtOp, RHS: DAG.getConstant(Val: C1.trunc(width: 32).sext(width: 64),
17689 DL: dl, VT: OpVT), Cond);
17690}
17691
17692static SDValue
17693performSIGN_EXTEND_INREGCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
17694 const RISCVSubtarget &Subtarget) {
17695 SelectionDAG &DAG = DCI.DAG;
17696 SDValue Src = N->getOperand(Num: 0);
17697 EVT VT = N->getValueType(ResNo: 0);
17698 EVT SrcVT = cast<VTSDNode>(Val: N->getOperand(Num: 1))->getVT();
17699 unsigned Opc = Src.getOpcode();
17700 SDLoc DL(N);
17701
17702 // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
17703 // Don't do this with Zhinx. We need to explicitly sign extend the GPR.
17704 if (Opc == RISCVISD::FMV_X_ANYEXTH && SrcVT.bitsGE(VT: MVT::i16) &&
17705 Subtarget.hasStdExtZfhmin())
17706 return DAG.getNode(Opcode: RISCVISD::FMV_X_SIGNEXTH, DL, VT, Operand: Src.getOperand(i: 0));
17707
17708 // Fold (sext_inreg (shl X, Y), i32) -> (sllw X, Y) iff Y u< 32
17709 if (Opc == ISD::SHL && Subtarget.is64Bit() && SrcVT == MVT::i32 &&
17710 VT == MVT::i64 && !isa<ConstantSDNode>(Val: Src.getOperand(i: 1)) &&
17711 DAG.computeKnownBits(Op: Src.getOperand(i: 1)).countMaxActiveBits() <= 5)
17712 return DAG.getNode(Opcode: RISCVISD::SLLW, DL, VT, N1: Src.getOperand(i: 0),
17713 N2: Src.getOperand(i: 1));
17714
17715 // Fold (sext_inreg (setcc), i1) -> (sub 0, (setcc))
17716 if (Opc == ISD::SETCC && SrcVT == MVT::i1 && DCI.isAfterLegalizeDAG())
17717 return DAG.getNegative(Val: Src, DL, VT);
17718
17719 // Fold (sext_inreg (xor (setcc), -1), i1) -> (add (setcc), -1)
17720 if (Opc == ISD::XOR && SrcVT == MVT::i1 &&
17721 isAllOnesConstant(V: Src.getOperand(i: 1)) &&
17722 Src.getOperand(i: 0).getOpcode() == ISD::SETCC && DCI.isAfterLegalizeDAG())
17723 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: Src.getOperand(i: 0),
17724 N2: DAG.getAllOnesConstant(DL, VT));
17725
17726 return SDValue();
17727}
17728
17729namespace {
17730// Forward declaration of the structure holding the necessary information to
17731// apply a combine.
17732struct CombineResult;
17733
17734enum ExtKind : uint8_t {
17735 ZExt = 1 << 0,
17736 SExt = 1 << 1,
17737 FPExt = 1 << 2,
17738 BF16Ext = 1 << 3
17739};
17740/// Helper class for folding sign/zero extensions.
17741/// In particular, this class is used for the following combines:
17742/// add | add_vl | or disjoint -> vwadd(u) | vwadd(u)_w
17743/// sub | sub_vl -> vwsub(u) | vwsub(u)_w
17744/// mul | mul_vl -> vwmul(u) | vwmul_su
17745/// shl | shl_vl -> vwsll
17746/// fadd -> vfwadd | vfwadd_w
17747/// fsub -> vfwsub | vfwsub_w
17748/// fmul -> vfwmul
17749/// An object of this class represents an operand of the operation we want to
17750/// combine.
17751/// E.g., when trying to combine `mul_vl a, b`, we will have one instance of
17752/// NodeExtensionHelper for `a` and one for `b`.
17753///
17754/// This class abstracts away how the extension is materialized and
17755/// how its number of users affect the combines.
17756///
17757/// In particular:
17758/// - VWADD_W is conceptually == add(op0, sext(op1))
17759/// - VWADDU_W == add(op0, zext(op1))
17760/// - VWSUB_W == sub(op0, sext(op1))
17761/// - VWSUBU_W == sub(op0, zext(op1))
17762/// - VFWADD_W == fadd(op0, fpext(op1))
17763/// - VFWSUB_W == fsub(op0, fpext(op1))
17764/// And VMV_V_X_VL, depending on the value, is conceptually equivalent to
17765/// zext|sext(smaller_value).
17766struct NodeExtensionHelper {
17767 /// Records if this operand is like being zero extended.
17768 bool SupportsZExt;
17769 /// Records if this operand is like being sign extended.
17770 /// Note: SupportsZExt and SupportsSExt are not mutually exclusive. For
17771 /// instance, a splat constant (e.g., 3), would support being both sign and
17772 /// zero extended.
17773 bool SupportsSExt;
17774 /// Records if this operand is like being floating point extended.
17775 bool SupportsFPExt;
17776 /// Records if this operand is extended from bf16.
17777 bool SupportsBF16Ext;
17778 /// This boolean captures whether we care if this operand would still be
17779 /// around after the folding happens.
17780 bool EnforceOneUse;
17781 /// Original value that this NodeExtensionHelper represents.
17782 SDValue OrigOperand;
17783
17784 /// Get the value feeding the extension or the value itself.
17785 /// E.g., for zext(a), this would return a.
17786 SDValue getSource() const {
17787 switch (OrigOperand.getOpcode()) {
17788 case ISD::ZERO_EXTEND:
17789 case ISD::SIGN_EXTEND:
17790 case RISCVISD::VSEXT_VL:
17791 case RISCVISD::VZEXT_VL:
17792 case RISCVISD::FP_EXTEND_VL:
17793 return OrigOperand.getOperand(i: 0);
17794 default:
17795 return OrigOperand;
17796 }
17797 }
17798
17799 /// Check if this instance represents a splat.
17800 bool isSplat() const {
17801 return OrigOperand.getOpcode() == RISCVISD::VMV_V_X_VL ||
17802 OrigOperand.getOpcode() == ISD::SPLAT_VECTOR;
17803 }
17804
17805 /// Get the extended opcode.
17806 unsigned getExtOpc(ExtKind SupportsExt) const {
17807 switch (SupportsExt) {
17808 case ExtKind::SExt:
17809 return RISCVISD::VSEXT_VL;
17810 case ExtKind::ZExt:
17811 return RISCVISD::VZEXT_VL;
17812 case ExtKind::FPExt:
17813 case ExtKind::BF16Ext:
17814 return RISCVISD::FP_EXTEND_VL;
17815 }
17816 llvm_unreachable("Unknown ExtKind enum");
17817 }
17818
17819 /// Get or create a value that can feed \p Root with the given extension \p
17820 /// SupportsExt. If \p SupportsExt is std::nullopt, this returns the source
17821 /// of this operand. \see ::getSource().
17822 SDValue getOrCreateExtendedOp(SDNode *Root, SelectionDAG &DAG,
17823 const RISCVSubtarget &Subtarget,
17824 std::optional<ExtKind> SupportsExt) const {
17825 if (!SupportsExt.has_value())
17826 return OrigOperand;
17827
17828 MVT NarrowVT = getNarrowType(Root, SupportsExt: *SupportsExt);
17829
17830 SDValue Source = getSource();
17831 assert(Subtarget.getTargetLowering()->isTypeLegal(Source.getValueType()));
17832 if (Source.getValueType() == NarrowVT)
17833 return Source;
17834
17835 unsigned ExtOpc = getExtOpc(SupportsExt: *SupportsExt);
17836
17837 // If we need an extension, we should be changing the type.
17838 SDLoc DL(OrigOperand);
17839 auto [Mask, VL] = getMaskAndVL(Root, DAG, Subtarget);
17840 switch (OrigOperand.getOpcode()) {
17841 case ISD::ZERO_EXTEND:
17842 case ISD::SIGN_EXTEND:
17843 case RISCVISD::VSEXT_VL:
17844 case RISCVISD::VZEXT_VL:
17845 case RISCVISD::FP_EXTEND_VL:
17846 return DAG.getNode(Opcode: ExtOpc, DL, VT: NarrowVT, N1: Source, N2: Mask, N3: VL);
17847 case ISD::SPLAT_VECTOR:
17848 return DAG.getSplat(VT: NarrowVT, DL, Op: Source.getOperand(i: 0));
17849 case RISCVISD::VMV_V_X_VL:
17850 return DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT: NarrowVT,
17851 N1: DAG.getUNDEF(VT: NarrowVT), N2: Source.getOperand(i: 1), N3: VL);
17852 case RISCVISD::VFMV_V_F_VL:
17853 Source = Source.getOperand(i: 1);
17854 assert(Source.getOpcode() == ISD::FP_EXTEND && "Unexpected source");
17855 Source = Source.getOperand(i: 0);
17856 assert(Source.getValueType() == NarrowVT.getVectorElementType());
17857 return DAG.getNode(Opcode: RISCVISD::VFMV_V_F_VL, DL, VT: NarrowVT,
17858 N1: DAG.getUNDEF(VT: NarrowVT), N2: Source, N3: VL);
17859 default:
17860 // Other opcodes can only come from the original LHS of VW(ADD|SUB)_W_VL
17861 // and that operand should already have the right NarrowVT so no
17862 // extension should be required at this point.
17863 llvm_unreachable("Unsupported opcode");
17864 }
17865 }
17866
17867 /// Helper function to get the narrow type for \p Root.
17868 /// The narrow type is the type of \p Root where we divided the size of each
17869 /// element by 2. E.g., if Root's type <2xi16> -> narrow type <2xi8>.
17870 /// \pre Both the narrow type and the original type should be legal.
17871 static MVT getNarrowType(const SDNode *Root, ExtKind SupportsExt) {
17872 MVT VT = Root->getSimpleValueType(ResNo: 0);
17873
17874 // Determine the narrow size.
17875 unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
17876
17877 MVT EltVT = SupportsExt == ExtKind::BF16Ext ? MVT::bf16
17878 : SupportsExt == ExtKind::FPExt
17879 ? MVT::getFloatingPointVT(BitWidth: NarrowSize)
17880 : MVT::getIntegerVT(BitWidth: NarrowSize);
17881
17882 assert((int)NarrowSize >= (SupportsExt == ExtKind::FPExt ? 16 : 8) &&
17883 "Trying to extend something we can't represent");
17884 MVT NarrowVT = MVT::getVectorVT(VT: EltVT, EC: VT.getVectorElementCount());
17885 return NarrowVT;
17886 }
17887
17888 /// Get the opcode to materialize:
17889 /// Opcode(sext(a), sext(b)) -> newOpcode(a, b)
17890 static unsigned getSExtOpcode(unsigned Opcode) {
17891 switch (Opcode) {
17892 case ISD::ADD:
17893 case RISCVISD::ADD_VL:
17894 case RISCVISD::VWADD_W_VL:
17895 case RISCVISD::VWADDU_W_VL:
17896 case ISD::OR:
17897 case RISCVISD::OR_VL:
17898 return RISCVISD::VWADD_VL;
17899 case ISD::SUB:
17900 case RISCVISD::SUB_VL:
17901 case RISCVISD::VWSUB_W_VL:
17902 case RISCVISD::VWSUBU_W_VL:
17903 return RISCVISD::VWSUB_VL;
17904 case ISD::MUL:
17905 case RISCVISD::MUL_VL:
17906 return RISCVISD::VWMUL_VL;
17907 default:
17908 llvm_unreachable("Unexpected opcode");
17909 }
17910 }
17911
17912 /// Get the opcode to materialize:
17913 /// Opcode(zext(a), zext(b)) -> newOpcode(a, b)
17914 static unsigned getZExtOpcode(unsigned Opcode) {
17915 switch (Opcode) {
17916 case ISD::ADD:
17917 case RISCVISD::ADD_VL:
17918 case RISCVISD::VWADD_W_VL:
17919 case RISCVISD::VWADDU_W_VL:
17920 case ISD::OR:
17921 case RISCVISD::OR_VL:
17922 return RISCVISD::VWADDU_VL;
17923 case ISD::SUB:
17924 case RISCVISD::SUB_VL:
17925 case RISCVISD::VWSUB_W_VL:
17926 case RISCVISD::VWSUBU_W_VL:
17927 return RISCVISD::VWSUBU_VL;
17928 case ISD::MUL:
17929 case RISCVISD::MUL_VL:
17930 return RISCVISD::VWMULU_VL;
17931 case ISD::SHL:
17932 case RISCVISD::SHL_VL:
17933 return RISCVISD::VWSLL_VL;
17934 default:
17935 llvm_unreachable("Unexpected opcode");
17936 }
17937 }
17938
17939 /// Get the opcode to materialize:
17940 /// Opcode(fpext(a), fpext(b)) -> newOpcode(a, b)
17941 static unsigned getFPExtOpcode(unsigned Opcode) {
17942 switch (Opcode) {
17943 case RISCVISD::FADD_VL:
17944 case RISCVISD::VFWADD_W_VL:
17945 return RISCVISD::VFWADD_VL;
17946 case RISCVISD::FSUB_VL:
17947 case RISCVISD::VFWSUB_W_VL:
17948 return RISCVISD::VFWSUB_VL;
17949 case RISCVISD::FMUL_VL:
17950 return RISCVISD::VFWMUL_VL;
17951 case RISCVISD::VFMADD_VL:
17952 return RISCVISD::VFWMADD_VL;
17953 case RISCVISD::VFMSUB_VL:
17954 return RISCVISD::VFWMSUB_VL;
17955 case RISCVISD::VFNMADD_VL:
17956 return RISCVISD::VFWNMADD_VL;
17957 case RISCVISD::VFNMSUB_VL:
17958 return RISCVISD::VFWNMSUB_VL;
17959 default:
17960 llvm_unreachable("Unexpected opcode");
17961 }
17962 }
17963
17964 /// Get the opcode to materialize \p Opcode(sext(a), zext(b)) ->
17965 /// newOpcode(a, b).
17966 static unsigned getSUOpcode(unsigned Opcode) {
17967 assert((Opcode == RISCVISD::MUL_VL || Opcode == ISD::MUL) &&
17968 "SU is only supported for MUL");
17969 return RISCVISD::VWMULSU_VL;
17970 }
17971
17972 /// Get the opcode to materialize
17973 /// \p Opcode(a, s|z|fpext(b)) -> newOpcode(a, b).
17974 static unsigned getWOpcode(unsigned Opcode, ExtKind SupportsExt) {
17975 switch (Opcode) {
17976 case ISD::ADD:
17977 case RISCVISD::ADD_VL:
17978 case ISD::OR:
17979 case RISCVISD::OR_VL:
17980 return SupportsExt == ExtKind::SExt ? RISCVISD::VWADD_W_VL
17981 : RISCVISD::VWADDU_W_VL;
17982 case ISD::SUB:
17983 case RISCVISD::SUB_VL:
17984 return SupportsExt == ExtKind::SExt ? RISCVISD::VWSUB_W_VL
17985 : RISCVISD::VWSUBU_W_VL;
17986 case RISCVISD::FADD_VL:
17987 return RISCVISD::VFWADD_W_VL;
17988 case RISCVISD::FSUB_VL:
17989 return RISCVISD::VFWSUB_W_VL;
17990 default:
17991 llvm_unreachable("Unexpected opcode");
17992 }
17993 }
17994
17995 using CombineToTry = std::function<std::optional<CombineResult>(
17996 SDNode * /*Root*/, const NodeExtensionHelper & /*LHS*/,
17997 const NodeExtensionHelper & /*RHS*/, SelectionDAG &,
17998 const RISCVSubtarget &)>;
17999
18000 /// Check if this node needs to be fully folded or extended for all users.
18001 bool needToPromoteOtherUsers() const { return EnforceOneUse; }
18002
18003 void fillUpExtensionSupportForSplat(SDNode *Root, SelectionDAG &DAG,
18004 const RISCVSubtarget &Subtarget) {
18005 unsigned Opc = OrigOperand.getOpcode();
18006 MVT VT = OrigOperand.getSimpleValueType();
18007
18008 assert((Opc == ISD::SPLAT_VECTOR || Opc == RISCVISD::VMV_V_X_VL) &&
18009 "Unexpected Opcode");
18010
18011 // The passthru must be undef for tail agnostic.
18012 if (Opc == RISCVISD::VMV_V_X_VL && !OrigOperand.getOperand(i: 0).isUndef())
18013 return;
18014
18015 // Get the scalar value.
18016 SDValue Op = Opc == ISD::SPLAT_VECTOR ? OrigOperand.getOperand(i: 0)
18017 : OrigOperand.getOperand(i: 1);
18018
18019 // See if we have enough sign bits or zero bits in the scalar to use a
18020 // widening opcode by splatting to smaller element size.
18021 unsigned EltBits = VT.getScalarSizeInBits();
18022 unsigned ScalarBits = Op.getValueSizeInBits();
18023 // If we're not getting all bits from the element, we need special handling.
18024 if (ScalarBits < EltBits) {
18025 // This should only occur on RV32.
18026 assert(Opc == RISCVISD::VMV_V_X_VL && EltBits == 64 && ScalarBits == 32 &&
18027 !Subtarget.is64Bit() && "Unexpected splat");
18028 // vmv.v.x sign extends narrow inputs.
18029 SupportsSExt = true;
18030
18031 // If the input is positive, then sign extend is also zero extend.
18032 if (DAG.SignBitIsZero(Op))
18033 SupportsZExt = true;
18034
18035 EnforceOneUse = false;
18036 return;
18037 }
18038
18039 unsigned NarrowSize = EltBits / 2;
18040 // If the narrow type cannot be expressed with a legal VMV,
18041 // this is not a valid candidate.
18042 if (NarrowSize < 8)
18043 return;
18044
18045 if (DAG.ComputeMaxSignificantBits(Op) <= NarrowSize)
18046 SupportsSExt = true;
18047
18048 if (DAG.MaskedValueIsZero(Op,
18049 Mask: APInt::getBitsSetFrom(numBits: ScalarBits, loBit: NarrowSize)))
18050 SupportsZExt = true;
18051
18052 EnforceOneUse = false;
18053 }
18054
18055 bool isSupportedFPExtend(MVT NarrowEltVT, const RISCVSubtarget &Subtarget) {
18056 return (NarrowEltVT == MVT::f32 ||
18057 (NarrowEltVT == MVT::f16 && Subtarget.hasVInstructionsF16()));
18058 }
18059
18060 bool isSupportedBF16Extend(MVT NarrowEltVT, const RISCVSubtarget &Subtarget) {
18061 return NarrowEltVT == MVT::bf16 &&
18062 (Subtarget.hasStdExtZvfbfwma() || Subtarget.hasVInstructionsBF16());
18063 }
18064
18065 /// Helper method to set the various fields of this struct based on the
18066 /// type of \p Root.
18067 void fillUpExtensionSupport(SDNode *Root, SelectionDAG &DAG,
18068 const RISCVSubtarget &Subtarget) {
18069 SupportsZExt = false;
18070 SupportsSExt = false;
18071 SupportsFPExt = false;
18072 SupportsBF16Ext = false;
18073 EnforceOneUse = true;
18074 unsigned Opc = OrigOperand.getOpcode();
18075 // For the nodes we handle below, we end up using their inputs directly: see
18076 // getSource(). However, since they either don't have a passthru or we check
18077 // that their passthru is undef, we can safely ignore their mask and VL.
18078 switch (Opc) {
18079 case ISD::ZERO_EXTEND:
18080 case ISD::SIGN_EXTEND: {
18081 MVT VT = OrigOperand.getSimpleValueType();
18082 if (!VT.isVector())
18083 break;
18084
18085 SDValue NarrowElt = OrigOperand.getOperand(i: 0);
18086 MVT NarrowVT = NarrowElt.getSimpleValueType();
18087 // i1 types are legal but we can't select V{S,Z}EXT_VLs with them.
18088 if (NarrowVT.getVectorElementType() == MVT::i1)
18089 break;
18090
18091 SupportsZExt = Opc == ISD::ZERO_EXTEND;
18092 SupportsSExt = Opc == ISD::SIGN_EXTEND;
18093 break;
18094 }
18095 case RISCVISD::VZEXT_VL:
18096 SupportsZExt = true;
18097 break;
18098 case RISCVISD::VSEXT_VL:
18099 SupportsSExt = true;
18100 break;
18101 case RISCVISD::FP_EXTEND_VL: {
18102 MVT NarrowEltVT =
18103 OrigOperand.getOperand(i: 0).getSimpleValueType().getVectorElementType();
18104 if (isSupportedFPExtend(NarrowEltVT, Subtarget))
18105 SupportsFPExt = true;
18106 if (isSupportedBF16Extend(NarrowEltVT, Subtarget))
18107 SupportsBF16Ext = true;
18108
18109 break;
18110 }
18111 case ISD::SPLAT_VECTOR:
18112 case RISCVISD::VMV_V_X_VL:
18113 fillUpExtensionSupportForSplat(Root, DAG, Subtarget);
18114 break;
18115 case RISCVISD::VFMV_V_F_VL: {
18116 MVT VT = OrigOperand.getSimpleValueType();
18117
18118 if (!OrigOperand.getOperand(i: 0).isUndef())
18119 break;
18120
18121 SDValue Op = OrigOperand.getOperand(i: 1);
18122 if (Op.getOpcode() != ISD::FP_EXTEND)
18123 break;
18124
18125 unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
18126 unsigned ScalarBits = Op.getOperand(i: 0).getValueSizeInBits();
18127 if (NarrowSize != ScalarBits)
18128 break;
18129
18130 if (isSupportedFPExtend(NarrowEltVT: Op.getOperand(i: 0).getSimpleValueType(), Subtarget))
18131 SupportsFPExt = true;
18132 if (isSupportedBF16Extend(NarrowEltVT: Op.getOperand(i: 0).getSimpleValueType(),
18133 Subtarget))
18134 SupportsBF16Ext = true;
18135 break;
18136 }
18137 default:
18138 break;
18139 }
18140 }
18141
18142 /// Check if \p Root supports any extension folding combines.
18143 static bool isSupportedRoot(const SDNode *Root,
18144 const RISCVSubtarget &Subtarget) {
18145 switch (Root->getOpcode()) {
18146 case ISD::ADD:
18147 case ISD::SUB:
18148 case ISD::MUL: {
18149 return Root->getValueType(ResNo: 0).isScalableVector();
18150 }
18151 case ISD::OR: {
18152 return Root->getValueType(ResNo: 0).isScalableVector() &&
18153 Root->getFlags().hasDisjoint();
18154 }
18155 // Vector Widening Integer Add/Sub/Mul Instructions
18156 case RISCVISD::ADD_VL:
18157 case RISCVISD::MUL_VL:
18158 case RISCVISD::VWADD_W_VL:
18159 case RISCVISD::VWADDU_W_VL:
18160 case RISCVISD::SUB_VL:
18161 case RISCVISD::VWSUB_W_VL:
18162 case RISCVISD::VWSUBU_W_VL:
18163 // Vector Widening Floating-Point Add/Sub/Mul Instructions
18164 case RISCVISD::FADD_VL:
18165 case RISCVISD::FSUB_VL:
18166 case RISCVISD::FMUL_VL:
18167 case RISCVISD::VFWADD_W_VL:
18168 case RISCVISD::VFWSUB_W_VL:
18169 return true;
18170 case RISCVISD::OR_VL:
18171 return Root->getFlags().hasDisjoint();
18172 case ISD::SHL:
18173 return Root->getValueType(ResNo: 0).isScalableVector() &&
18174 Subtarget.hasStdExtZvbb();
18175 case RISCVISD::SHL_VL:
18176 return Subtarget.hasStdExtZvbb();
18177 case RISCVISD::VFMADD_VL:
18178 case RISCVISD::VFNMSUB_VL:
18179 case RISCVISD::VFNMADD_VL:
18180 case RISCVISD::VFMSUB_VL:
18181 return true;
18182 default:
18183 return false;
18184 }
18185 }
18186
18187 /// Build a NodeExtensionHelper for \p Root.getOperand(\p OperandIdx).
18188 NodeExtensionHelper(SDNode *Root, unsigned OperandIdx, SelectionDAG &DAG,
18189 const RISCVSubtarget &Subtarget) {
18190 assert(isSupportedRoot(Root, Subtarget) &&
18191 "Trying to build a helper with an "
18192 "unsupported root");
18193 assert(OperandIdx < 2 && "Requesting something else than LHS or RHS");
18194 assert(DAG.getTargetLoweringInfo().isTypeLegal(Root->getValueType(0)));
18195 OrigOperand = Root->getOperand(Num: OperandIdx);
18196
18197 unsigned Opc = Root->getOpcode();
18198 switch (Opc) {
18199 // We consider
18200 // VW<ADD|SUB>_W(LHS, RHS) -> <ADD|SUB>(LHS, SEXT(RHS))
18201 // VW<ADD|SUB>U_W(LHS, RHS) -> <ADD|SUB>(LHS, ZEXT(RHS))
18202 // VFW<ADD|SUB>_W(LHS, RHS) -> F<ADD|SUB>(LHS, FPEXT(RHS))
18203 case RISCVISD::VWADD_W_VL:
18204 case RISCVISD::VWADDU_W_VL:
18205 case RISCVISD::VWSUB_W_VL:
18206 case RISCVISD::VWSUBU_W_VL:
18207 case RISCVISD::VFWADD_W_VL:
18208 case RISCVISD::VFWSUB_W_VL:
18209 // Operand 1 can't be changed.
18210 if (OperandIdx == 1)
18211 break;
18212 [[fallthrough]];
18213 default:
18214 fillUpExtensionSupport(Root, DAG, Subtarget);
18215 break;
18216 }
18217 }
18218
18219 /// Helper function to get the Mask and VL from \p Root.
18220 static std::pair<SDValue, SDValue>
18221 getMaskAndVL(const SDNode *Root, SelectionDAG &DAG,
18222 const RISCVSubtarget &Subtarget) {
18223 assert(isSupportedRoot(Root, Subtarget) && "Unexpected root");
18224 switch (Root->getOpcode()) {
18225 case ISD::ADD:
18226 case ISD::SUB:
18227 case ISD::MUL:
18228 case ISD::OR:
18229 case ISD::SHL: {
18230 SDLoc DL(Root);
18231 MVT VT = Root->getSimpleValueType(ResNo: 0);
18232 return getDefaultScalableVLOps(VecVT: VT, DL, DAG, Subtarget);
18233 }
18234 default:
18235 return std::make_pair(x: Root->getOperand(Num: 3), y: Root->getOperand(Num: 4));
18236 }
18237 }
18238
18239 /// Helper function to check if \p N is commutative with respect to the
18240 /// foldings that are supported by this class.
18241 static bool isCommutative(const SDNode *N) {
18242 switch (N->getOpcode()) {
18243 case ISD::ADD:
18244 case ISD::MUL:
18245 case ISD::OR:
18246 case RISCVISD::ADD_VL:
18247 case RISCVISD::MUL_VL:
18248 case RISCVISD::OR_VL:
18249 case RISCVISD::FADD_VL:
18250 case RISCVISD::FMUL_VL:
18251 case RISCVISD::VFMADD_VL:
18252 case RISCVISD::VFNMSUB_VL:
18253 case RISCVISD::VFNMADD_VL:
18254 case RISCVISD::VFMSUB_VL:
18255 return true;
18256 case RISCVISD::VWADD_W_VL:
18257 case RISCVISD::VWADDU_W_VL:
18258 case ISD::SUB:
18259 case RISCVISD::SUB_VL:
18260 case RISCVISD::VWSUB_W_VL:
18261 case RISCVISD::VWSUBU_W_VL:
18262 case RISCVISD::VFWADD_W_VL:
18263 case RISCVISD::FSUB_VL:
18264 case RISCVISD::VFWSUB_W_VL:
18265 case ISD::SHL:
18266 case RISCVISD::SHL_VL:
18267 return false;
18268 default:
18269 llvm_unreachable("Unexpected opcode");
18270 }
18271 }
18272
18273 /// Get a list of combine to try for folding extensions in \p Root.
18274 /// Note that each returned CombineToTry function doesn't actually modify
18275 /// anything. Instead they produce an optional CombineResult that, if not
18276 /// None, needs to be materialized for the combine to be applied.
18277 /// \see CombineResult::materialize.
18278 /// If the related CombineToTry function returns std::nullopt, that means the
18279 /// combine didn't match.
18280 static SmallVector<CombineToTry>
18281 getSupportedFoldings(const SDNode *Root, const RISCVSubtarget &Subtarget);
18282};
18283
18284/// Helper structure that holds all the necessary information to materialize a
18285/// combine that does some extension folding.
18286struct CombineResult {
18287 /// Opcode to be generated when materializing the combine.
18288 unsigned TargetOpcode;
18289 // No value means no extension is needed.
18290 std::optional<ExtKind> LHSExt;
18291 std::optional<ExtKind> RHSExt;
18292 /// Root of the combine.
18293 SDNode *Root;
18294 /// LHS of the TargetOpcode.
18295 NodeExtensionHelper LHS;
18296 /// RHS of the TargetOpcode.
18297 NodeExtensionHelper RHS;
18298
18299 CombineResult(unsigned TargetOpcode, SDNode *Root,
18300 const NodeExtensionHelper &LHS, std::optional<ExtKind> LHSExt,
18301 const NodeExtensionHelper &RHS, std::optional<ExtKind> RHSExt)
18302 : TargetOpcode(TargetOpcode), LHSExt(LHSExt), RHSExt(RHSExt), Root(Root),
18303 LHS(LHS), RHS(RHS) {}
18304
18305 /// Return a value that uses TargetOpcode and that can be used to replace
18306 /// Root.
18307 /// The actual replacement is *not* done in that method.
18308 SDValue materialize(SelectionDAG &DAG,
18309 const RISCVSubtarget &Subtarget) const {
18310 SDValue Mask, VL, Passthru;
18311 std::tie(args&: Mask, args&: VL) =
18312 NodeExtensionHelper::getMaskAndVL(Root, DAG, Subtarget);
18313 switch (Root->getOpcode()) {
18314 default:
18315 Passthru = Root->getOperand(Num: 2);
18316 break;
18317 case ISD::ADD:
18318 case ISD::SUB:
18319 case ISD::MUL:
18320 case ISD::OR:
18321 case ISD::SHL:
18322 Passthru = DAG.getUNDEF(VT: Root->getValueType(ResNo: 0));
18323 break;
18324 }
18325 return DAG.getNode(Opcode: TargetOpcode, DL: SDLoc(Root), VT: Root->getValueType(ResNo: 0),
18326 N1: LHS.getOrCreateExtendedOp(Root, DAG, Subtarget, SupportsExt: LHSExt),
18327 N2: RHS.getOrCreateExtendedOp(Root, DAG, Subtarget, SupportsExt: RHSExt),
18328 N3: Passthru, N4: Mask, N5: VL);
18329 }
18330};
18331
18332/// Check if \p Root follows a pattern Root(ext(LHS), ext(RHS))
18333/// where `ext` is the same for both LHS and RHS (i.e., both are sext or both
18334/// are zext) and LHS and RHS can be folded into Root.
18335/// AllowExtMask defines which form `ext` can take in this pattern.
18336///
18337/// \note If the pattern can match with both zext and sext, the returned
18338/// CombineResult will feature the zext result.
18339///
18340/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
18341/// can be used to apply the pattern.
18342static std::optional<CombineResult>
18343canFoldToVWWithSameExtensionImpl(SDNode *Root, const NodeExtensionHelper &LHS,
18344 const NodeExtensionHelper &RHS,
18345 uint8_t AllowExtMask, SelectionDAG &DAG,
18346 const RISCVSubtarget &Subtarget) {
18347 if ((AllowExtMask & ExtKind::ZExt) && LHS.SupportsZExt && RHS.SupportsZExt)
18348 return CombineResult(NodeExtensionHelper::getZExtOpcode(Opcode: Root->getOpcode()),
18349 Root, LHS, /*LHSExt=*/{ExtKind::ZExt}, RHS,
18350 /*RHSExt=*/{ExtKind::ZExt});
18351 if ((AllowExtMask & ExtKind::SExt) && LHS.SupportsSExt && RHS.SupportsSExt)
18352 return CombineResult(NodeExtensionHelper::getSExtOpcode(Opcode: Root->getOpcode()),
18353 Root, LHS, /*LHSExt=*/{ExtKind::SExt}, RHS,
18354 /*RHSExt=*/{ExtKind::SExt});
18355 if ((AllowExtMask & ExtKind::FPExt) && LHS.SupportsFPExt && RHS.SupportsFPExt)
18356 return CombineResult(NodeExtensionHelper::getFPExtOpcode(Opcode: Root->getOpcode()),
18357 Root, LHS, /*LHSExt=*/{ExtKind::FPExt}, RHS,
18358 /*RHSExt=*/{ExtKind::FPExt});
18359 if ((AllowExtMask & ExtKind::BF16Ext) && LHS.SupportsBF16Ext &&
18360 RHS.SupportsBF16Ext)
18361 return CombineResult(NodeExtensionHelper::getFPExtOpcode(Opcode: Root->getOpcode()),
18362 Root, LHS, /*LHSExt=*/{ExtKind::BF16Ext}, RHS,
18363 /*RHSExt=*/{ExtKind::BF16Ext});
18364 return std::nullopt;
18365}
18366
18367/// Check if \p Root follows a pattern Root(ext(LHS), ext(RHS))
18368/// where `ext` is the same for both LHS and RHS (i.e., both are sext or both
18369/// are zext) and LHS and RHS can be folded into Root.
18370///
18371/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
18372/// can be used to apply the pattern.
18373static std::optional<CombineResult>
18374canFoldToVWWithSameExtension(SDNode *Root, const NodeExtensionHelper &LHS,
18375 const NodeExtensionHelper &RHS, SelectionDAG &DAG,
18376 const RISCVSubtarget &Subtarget) {
18377 return canFoldToVWWithSameExtensionImpl(
18378 Root, LHS, RHS, AllowExtMask: ExtKind::ZExt | ExtKind::SExt | ExtKind::FPExt, DAG,
18379 Subtarget);
18380}
18381
18382/// Check if \p Root follows a pattern Root(zext(LHS), zext(RHS))
18383///
18384/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
18385/// can be used to apply the pattern.
18386static std::optional<CombineResult>
18387canFoldToVWWithSameExtZEXT(SDNode *Root, const NodeExtensionHelper &LHS,
18388 const NodeExtensionHelper &RHS, SelectionDAG &DAG,
18389 const RISCVSubtarget &Subtarget) {
18390 return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, AllowExtMask: ExtKind::ZExt, DAG,
18391 Subtarget);
18392}
18393
18394/// Check if \p Root follows a pattern Root(bf16ext(LHS), bf16ext(RHS))
18395///
18396/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
18397/// can be used to apply the pattern.
18398static std::optional<CombineResult>
18399canFoldToVWWithSameExtBF16(SDNode *Root, const NodeExtensionHelper &LHS,
18400 const NodeExtensionHelper &RHS, SelectionDAG &DAG,
18401 const RISCVSubtarget &Subtarget) {
18402 return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, AllowExtMask: ExtKind::BF16Ext, DAG,
18403 Subtarget);
18404}
18405
18406/// Check if \p Root follows a pattern Root(LHS, ext(RHS))
18407///
18408/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
18409/// can be used to apply the pattern.
18410static std::optional<CombineResult>
18411canFoldToVW_W(SDNode *Root, const NodeExtensionHelper &LHS,
18412 const NodeExtensionHelper &RHS, SelectionDAG &DAG,
18413 const RISCVSubtarget &Subtarget) {
18414 if (RHS.SupportsFPExt)
18415 return CombineResult(
18416 NodeExtensionHelper::getWOpcode(Opcode: Root->getOpcode(), SupportsExt: ExtKind::FPExt),
18417 Root, LHS, /*LHSExt=*/std::nullopt, RHS, /*RHSExt=*/{ExtKind::FPExt});
18418
18419 // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
18420 // sext/zext?
18421 // Control this behavior behind an option (AllowSplatInVW_W) for testing
18422 // purposes.
18423 if (RHS.SupportsZExt && (!RHS.isSplat() || AllowSplatInVW_W))
18424 return CombineResult(
18425 NodeExtensionHelper::getWOpcode(Opcode: Root->getOpcode(), SupportsExt: ExtKind::ZExt), Root,
18426 LHS, /*LHSExt=*/std::nullopt, RHS, /*RHSExt=*/{ExtKind::ZExt});
18427 if (RHS.SupportsSExt && (!RHS.isSplat() || AllowSplatInVW_W))
18428 return CombineResult(
18429 NodeExtensionHelper::getWOpcode(Opcode: Root->getOpcode(), SupportsExt: ExtKind::SExt), Root,
18430 LHS, /*LHSExt=*/std::nullopt, RHS, /*RHSExt=*/{ExtKind::SExt});
18431 return std::nullopt;
18432}
18433
18434/// Check if \p Root follows a pattern Root(sext(LHS), RHS)
18435///
18436/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
18437/// can be used to apply the pattern.
18438static std::optional<CombineResult>
18439canFoldToVWWithSEXT(SDNode *Root, const NodeExtensionHelper &LHS,
18440 const NodeExtensionHelper &RHS, SelectionDAG &DAG,
18441 const RISCVSubtarget &Subtarget) {
18442 if (LHS.SupportsSExt)
18443 return CombineResult(NodeExtensionHelper::getSExtOpcode(Opcode: Root->getOpcode()),
18444 Root, LHS, /*LHSExt=*/{ExtKind::SExt}, RHS,
18445 /*RHSExt=*/std::nullopt);
18446 return std::nullopt;
18447}
18448
18449/// Check if \p Root follows a pattern Root(zext(LHS), RHS)
18450///
18451/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
18452/// can be used to apply the pattern.
18453static std::optional<CombineResult>
18454canFoldToVWWithZEXT(SDNode *Root, const NodeExtensionHelper &LHS,
18455 const NodeExtensionHelper &RHS, SelectionDAG &DAG,
18456 const RISCVSubtarget &Subtarget) {
18457 if (LHS.SupportsZExt)
18458 return CombineResult(NodeExtensionHelper::getZExtOpcode(Opcode: Root->getOpcode()),
18459 Root, LHS, /*LHSExt=*/{ExtKind::ZExt}, RHS,
18460 /*RHSExt=*/std::nullopt);
18461 return std::nullopt;
18462}
18463
18464/// Check if \p Root follows a pattern Root(fpext(LHS), RHS)
18465///
18466/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
18467/// can be used to apply the pattern.
18468static std::optional<CombineResult>
18469canFoldToVWWithFPEXT(SDNode *Root, const NodeExtensionHelper &LHS,
18470 const NodeExtensionHelper &RHS, SelectionDAG &DAG,
18471 const RISCVSubtarget &Subtarget) {
18472 if (LHS.SupportsFPExt)
18473 return CombineResult(NodeExtensionHelper::getFPExtOpcode(Opcode: Root->getOpcode()),
18474 Root, LHS, /*LHSExt=*/{ExtKind::FPExt}, RHS,
18475 /*RHSExt=*/std::nullopt);
18476 return std::nullopt;
18477}
18478
18479/// Check if \p Root follows a pattern Root(sext(LHS), zext(RHS))
18480///
18481/// \returns std::nullopt if the pattern doesn't match or a CombineResult that
18482/// can be used to apply the pattern.
18483static std::optional<CombineResult>
18484canFoldToVW_SU(SDNode *Root, const NodeExtensionHelper &LHS,
18485 const NodeExtensionHelper &RHS, SelectionDAG &DAG,
18486 const RISCVSubtarget &Subtarget) {
18487
18488 if (!LHS.SupportsSExt || !RHS.SupportsZExt)
18489 return std::nullopt;
18490 return CombineResult(NodeExtensionHelper::getSUOpcode(Opcode: Root->getOpcode()),
18491 Root, LHS, /*LHSExt=*/{ExtKind::SExt}, RHS,
18492 /*RHSExt=*/{ExtKind::ZExt});
18493}
18494
18495SmallVector<NodeExtensionHelper::CombineToTry>
18496NodeExtensionHelper::getSupportedFoldings(const SDNode *Root,
18497 const RISCVSubtarget &Subtarget) {
18498 SmallVector<CombineToTry> Strategies;
18499 switch (Root->getOpcode()) {
18500 case ISD::ADD:
18501 case ISD::SUB:
18502 case ISD::OR:
18503 case RISCVISD::ADD_VL:
18504 case RISCVISD::SUB_VL:
18505 case RISCVISD::OR_VL:
18506 case RISCVISD::FADD_VL:
18507 case RISCVISD::FSUB_VL:
18508    // add|sub|fadd|fsub -> vwadd(u)|vwsub(u)|vfwadd|vfwsub
18509 Strategies.push_back(Elt: canFoldToVWWithSameExtension);
18510    // add|sub|fadd|fsub -> vwadd(u)_w|vwsub(u)_w|vfwadd_w|vfwsub_w
18511 Strategies.push_back(Elt: canFoldToVW_W);
18512 break;
18513 case RISCVISD::FMUL_VL:
18514 case RISCVISD::VFMADD_VL:
18515 case RISCVISD::VFMSUB_VL:
18516 case RISCVISD::VFNMADD_VL:
18517 case RISCVISD::VFNMSUB_VL:
18518 Strategies.push_back(Elt: canFoldToVWWithSameExtension);
18519 if (Subtarget.hasStdExtZvfbfa() && Root->getOpcode() != RISCVISD::FMUL_VL)
18520      // TODO: Once other widening operations are supported, we can merge
18521 // canFoldToVWWithSameExtension and canFoldToVWWithSameExtBF16.
18522 Strategies.push_back(Elt: canFoldToVWWithSameExtBF16);
18523 else if (Subtarget.hasStdExtZvfbfwma() &&
18524 Root->getOpcode() == RISCVISD::VFMADD_VL)
18525 Strategies.push_back(Elt: canFoldToVWWithSameExtBF16);
18526 break;
18527 case ISD::MUL:
18528 case RISCVISD::MUL_VL:
18529 // mul -> vwmul(u)
18530 Strategies.push_back(Elt: canFoldToVWWithSameExtension);
18531 // mul -> vwmulsu
18532 Strategies.push_back(Elt: canFoldToVW_SU);
18533 break;
18534 case ISD::SHL:
18535 case RISCVISD::SHL_VL:
18536 // shl -> vwsll
18537 Strategies.push_back(Elt: canFoldToVWWithSameExtZEXT);
18538 break;
18539 case RISCVISD::VWADD_W_VL:
18540 case RISCVISD::VWSUB_W_VL:
18541 // vwadd_w|vwsub_w -> vwadd|vwsub
18542 Strategies.push_back(Elt: canFoldToVWWithSEXT);
18543 break;
18544 case RISCVISD::VWADDU_W_VL:
18545 case RISCVISD::VWSUBU_W_VL:
18546 // vwaddu_w|vwsubu_w -> vwaddu|vwsubu
18547 Strategies.push_back(Elt: canFoldToVWWithZEXT);
18548 break;
18549 case RISCVISD::VFWADD_W_VL:
18550 case RISCVISD::VFWSUB_W_VL:
18551 // vfwadd_w|vfwsub_w -> vfwadd|vfwsub
18552 Strategies.push_back(Elt: canFoldToVWWithFPEXT);
18553 break;
18554 default:
18555 llvm_unreachable("Unexpected opcode");
18556 }
18557 return Strategies;
18558}
18559} // End anonymous namespace.
18560
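// Fold (add_vl X, 0) -> X when the passthru is undef, looking through an
// insert_subvector that wraps a fixed-length zero splat into a scalable type.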
18561static SDValue simplifyOp_VL(SDNode *N) {
18562 // TODO: Extend this to other binops using generic identity logic
18563 assert(N->getOpcode() == RISCVISD::ADD_VL);
18564 SDValue A = N->getOperand(Num: 0);
18565 SDValue B = N->getOperand(Num: 1);
18566 SDValue Passthru = N->getOperand(Num: 2);
18567 if (!Passthru.isUndef())
18568    // TODO: This could be a vmerge instead.
18569 return SDValue();
18571 if (ISD::isConstantSplatVectorAllZeros(N: B.getNode()))
18572 return A;
18573 // Peek through fixed to scalable
18574 if (B.getOpcode() == ISD::INSERT_SUBVECTOR && B.getOperand(i: 0).isUndef() &&
18575 ISD::isConstantSplatVectorAllZeros(N: B.getOperand(i: 1).getNode()))
18576 return A;
18577 return SDValue();
18578}
18579
18580/// Combine a binary or FMA operation to its equivalent VW or VW_W form.
18581/// The supported combines are:
18582/// add | add_vl | or disjoint | or_vl disjoint -> vwadd(u) | vwadd(u)_w
18583/// sub | sub_vl -> vwsub(u) | vwsub(u)_w
18584/// mul | mul_vl -> vwmul(u) | vwmul_su
18585/// shl | shl_vl -> vwsll
18586/// fadd_vl -> vfwadd | vfwadd_w
18587/// fsub_vl -> vfwsub | vfwsub_w
18588/// fmul_vl -> vfwmul
18589/// vwadd(u)_w -> vwadd(u)
18590/// vwsub(u)_w -> vwsub(u)
18591/// vfwadd_w -> vfwadd
18592/// vfwsub_w -> vfwsub
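/// For example (illustrative), (add_vl (sext X), (sext Y)) can be rewritten as
/// (vwadd X, Y), performing the addition at the narrower source element width.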
18593static SDValue combineOp_VLToVWOp_VL(SDNode *N,
18594 TargetLowering::DAGCombinerInfo &DCI,
18595 const RISCVSubtarget &Subtarget) {
18596 SelectionDAG &DAG = DCI.DAG;
18597 if (DCI.isBeforeLegalize())
18598 return SDValue();
18599
18600 if (!NodeExtensionHelper::isSupportedRoot(Root: N, Subtarget))
18601 return SDValue();
18602
18603 SmallVector<SDNode *> Worklist;
18604 SmallPtrSet<SDNode *, 8> Inserted;
18605 SmallPtrSet<SDNode *, 8> ExtensionsToRemove;
18606 Worklist.push_back(Elt: N);
18607 Inserted.insert(Ptr: N);
18608 SmallVector<CombineResult> CombinesToApply;
18609
18610 while (!Worklist.empty()) {
18611 SDNode *Root = Worklist.pop_back_val();
18612
18613 NodeExtensionHelper LHS(Root, 0, DAG, Subtarget);
18614 NodeExtensionHelper RHS(Root, 1, DAG, Subtarget);
18615 auto AppendUsersIfNeeded =
18616 [&Worklist, &Subtarget, &Inserted,
18617 &ExtensionsToRemove](const NodeExtensionHelper &Op) {
18618 if (Op.needToPromoteOtherUsers()) {
18619 // Remember that we're supposed to remove this extension.
18620 ExtensionsToRemove.insert(Ptr: Op.OrigOperand.getNode());
18621 for (SDUse &Use : Op.OrigOperand->uses()) {
18622 SDNode *TheUser = Use.getUser();
18623 if (!NodeExtensionHelper::isSupportedRoot(Root: TheUser, Subtarget))
18624 return false;
18625 // We only support the first 2 operands of FMA.
18626 if (Use.getOperandNo() >= 2)
18627 return false;
18628 if (Inserted.insert(Ptr: TheUser).second)
18629 Worklist.push_back(Elt: TheUser);
18630 }
18631 }
18632 return true;
18633 };
18634
18635    // Control the compile time by limiting the number of nodes we look at in
18636 // total.
18637 if (Inserted.size() > ExtensionMaxWebSize)
18638 return SDValue();
18639
18640 SmallVector<NodeExtensionHelper::CombineToTry> FoldingStrategies =
18641 NodeExtensionHelper::getSupportedFoldings(Root, Subtarget);
18642
18643 assert(!FoldingStrategies.empty() && "Nothing to be folded");
18644 bool Matched = false;
18645 for (int Attempt = 0;
18646 (Attempt != 1 + NodeExtensionHelper::isCommutative(N: Root)) && !Matched;
18647 ++Attempt) {
18648
18649 for (NodeExtensionHelper::CombineToTry FoldingStrategy :
18650 FoldingStrategies) {
18651 std::optional<CombineResult> Res =
18652 FoldingStrategy(Root, LHS, RHS, DAG, Subtarget);
18653 if (Res) {
18654 // If this strategy wouldn't remove an extension we're supposed to
18655 // remove, reject it.
18656 if (!Res->LHSExt.has_value() &&
18657 ExtensionsToRemove.contains(Ptr: LHS.OrigOperand.getNode()))
18658 continue;
18659 if (!Res->RHSExt.has_value() &&
18660 ExtensionsToRemove.contains(Ptr: RHS.OrigOperand.getNode()))
18661 continue;
18662
18663 Matched = true;
18664 CombinesToApply.push_back(Elt: *Res);
18665 // All the inputs that are extended need to be folded, otherwise
18666          // we would be leaving the old input (since it may still be used),
18667 // and the new one.
18668 if (Res->LHSExt.has_value())
18669 if (!AppendUsersIfNeeded(LHS))
18670 return SDValue();
18671 if (Res->RHSExt.has_value())
18672 if (!AppendUsersIfNeeded(RHS))
18673 return SDValue();
18674 break;
18675 }
18676 }
18677 std::swap(a&: LHS, b&: RHS);
18678 }
18679 // Right now we do an all or nothing approach.
18680 if (!Matched)
18681 return SDValue();
18682 }
18683 // Store the value for the replacement of the input node separately.
18684 SDValue InputRootReplacement;
18685 // We do the RAUW after we materialize all the combines, because some replaced
18686 // nodes may be feeding some of the yet-to-be-replaced nodes. Put differently,
18687 // some of these nodes may appear in the NodeExtensionHelpers of some of the
18688 // yet-to-be-visited CombinesToApply roots.
18689 SmallVector<std::pair<SDValue, SDValue>> ValuesToReplace;
18690 ValuesToReplace.reserve(N: CombinesToApply.size());
18691 for (CombineResult Res : CombinesToApply) {
18692 SDValue NewValue = Res.materialize(DAG, Subtarget);
18693 if (!InputRootReplacement) {
18694 assert(Res.Root == N &&
18695 "First element is expected to be the current node");
18696 InputRootReplacement = NewValue;
18697 } else {
18698 ValuesToReplace.emplace_back(Args: SDValue(Res.Root, 0), Args&: NewValue);
18699 }
18700 }
18701 for (std::pair<SDValue, SDValue> OldNewValues : ValuesToReplace) {
18702 DCI.CombineTo(N: OldNewValues.first.getNode(), Res: OldNewValues.second);
18703 }
18704 return InputRootReplacement;
18705}
18706
18707// Fold (vwadd(u).wv y, (vmerge cond, x, 0)) -> vwadd(u).wv y, x, y, cond
18708// (vwsub(u).wv y, (vmerge cond, x, 0)) -> vwsub(u).wv y, x, y, cond
18709// y will be the Passthru and cond will be the Mask.
18710static SDValue combineVWADDSUBWSelect(SDNode *N, SelectionDAG &DAG) {
18711 unsigned Opc = N->getOpcode();
18712 assert(Opc == RISCVISD::VWADD_W_VL || Opc == RISCVISD::VWADDU_W_VL ||
18713 Opc == RISCVISD::VWSUB_W_VL || Opc == RISCVISD::VWSUBU_W_VL);
18714
18715 SDValue Y = N->getOperand(Num: 0);
18716 SDValue MergeOp = N->getOperand(Num: 1);
18717 unsigned MergeOpc = MergeOp.getOpcode();
18718
18719 if (MergeOpc != RISCVISD::VMERGE_VL && MergeOpc != ISD::VSELECT)
18720 return SDValue();
18721
18722 SDValue X = MergeOp->getOperand(Num: 1);
18723
18724 if (!MergeOp.hasOneUse())
18725 return SDValue();
18726
18727 // Passthru should be undef
18728 SDValue Passthru = N->getOperand(Num: 2);
18729 if (!Passthru.isUndef())
18730 return SDValue();
18731
18732 // Mask should be all ones
18733 SDValue Mask = N->getOperand(Num: 3);
18734 if (Mask.getOpcode() != RISCVISD::VMSET_VL)
18735 return SDValue();
18736
18737 // False value of MergeOp should be all zeros
18738 SDValue Z = MergeOp->getOperand(Num: 2);
18739
18740 if (Z.getOpcode() == ISD::INSERT_SUBVECTOR &&
18741 (isNullOrNullSplat(V: Z.getOperand(i: 0)) || Z.getOperand(i: 0).isUndef()))
18742 Z = Z.getOperand(i: 1);
18743
18744 if (!ISD::isConstantSplatVectorAllZeros(N: Z.getNode()))
18745 return SDValue();
18746
18747 return DAG.getNode(Opcode: Opc, DL: SDLoc(N), VT: N->getValueType(ResNo: 0),
18748 Ops: {Y, X, Y, MergeOp->getOperand(Num: 0), N->getOperand(Num: 4)},
18749 Flags: N->getFlags());
18750}
18751
18752static SDValue performVWADDSUBW_VLCombine(SDNode *N,
18753 TargetLowering::DAGCombinerInfo &DCI,
18754 const RISCVSubtarget &Subtarget) {
18755 [[maybe_unused]] unsigned Opc = N->getOpcode();
18756 assert(Opc == RISCVISD::VWADD_W_VL || Opc == RISCVISD::VWADDU_W_VL ||
18757 Opc == RISCVISD::VWSUB_W_VL || Opc == RISCVISD::VWSUBU_W_VL);
18758
18759 if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
18760 return V;
18761
18762 return combineVWADDSUBWSelect(N, DAG&: DCI.DAG);
18763}
18764
18765// Helper function for performMemPairCombine.
18766// Try to combine the memory loads/stores LSNode1 and LSNode2
18767// into a single memory pair operation.
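// For example (illustrative), two adjacent i32 loads at Base+8 and Base+12 can
// be replaced with a single TH_LWD node that produces both loaded values and
// the chain.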
18768static SDValue tryMemPairCombine(SelectionDAG &DAG, LSBaseSDNode *LSNode1,
18769 LSBaseSDNode *LSNode2, SDValue BasePtr,
18770 uint64_t Imm) {
18771 SmallPtrSet<const SDNode *, 32> Visited;
18772 SmallVector<const SDNode *, 8> Worklist = {LSNode1, LSNode2};
18773
18774 if (SDNode::hasPredecessorHelper(N: LSNode1, Visited, Worklist) ||
18775 SDNode::hasPredecessorHelper(N: LSNode2, Visited, Worklist))
18776 return SDValue();
18777
18778 MachineFunction &MF = DAG.getMachineFunction();
18779 const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
18780
18781 // The new operation has twice the width.
18782 MVT XLenVT = Subtarget.getXLenVT();
18783 EVT MemVT = LSNode1->getMemoryVT();
18784 EVT NewMemVT = (MemVT == MVT::i32) ? MVT::i64 : MVT::i128;
18785 MachineMemOperand *MMO = LSNode1->getMemOperand();
18786 MachineMemOperand *NewMMO = MF.getMachineMemOperand(
18787 MMO, PtrInfo: MMO->getPointerInfo(), Size: MemVT == MVT::i32 ? 8 : 16);
18788
18789 if (LSNode1->getOpcode() == ISD::LOAD) {
18790 auto Ext = cast<LoadSDNode>(Val: LSNode1)->getExtensionType();
18791 unsigned Opcode;
18792 if (MemVT == MVT::i32)
18793 Opcode = (Ext == ISD::ZEXTLOAD) ? RISCVISD::TH_LWUD : RISCVISD::TH_LWD;
18794 else
18795 Opcode = RISCVISD::TH_LDD;
18796
18797 SDValue Res = DAG.getMemIntrinsicNode(
18798 Opcode, dl: SDLoc(LSNode1), VTList: DAG.getVTList(VTs: {XLenVT, XLenVT, MVT::Other}),
18799 Ops: {LSNode1->getChain(), BasePtr,
18800 DAG.getConstant(Val: Imm, DL: SDLoc(LSNode1), VT: XLenVT)},
18801 MemVT: NewMemVT, MMO: NewMMO);
18802
18803 SDValue Node1 =
18804 DAG.getMergeValues(Ops: {Res.getValue(R: 0), Res.getValue(R: 2)}, dl: SDLoc(LSNode1));
18805 SDValue Node2 =
18806 DAG.getMergeValues(Ops: {Res.getValue(R: 1), Res.getValue(R: 2)}, dl: SDLoc(LSNode2));
18807
18808 DAG.ReplaceAllUsesWith(From: LSNode2, To: Node2.getNode());
18809 return Node1;
18810 } else {
18811 unsigned Opcode = (MemVT == MVT::i32) ? RISCVISD::TH_SWD : RISCVISD::TH_SDD;
18812
18813 SDValue Res = DAG.getMemIntrinsicNode(
18814 Opcode, dl: SDLoc(LSNode1), VTList: DAG.getVTList(VT: MVT::Other),
18815 Ops: {LSNode1->getChain(), LSNode1->getOperand(Num: 1), LSNode2->getOperand(Num: 1),
18816 BasePtr, DAG.getConstant(Val: Imm, DL: SDLoc(LSNode1), VT: XLenVT)},
18817 MemVT: NewMemVT, MMO: NewMMO);
18818
18819 DAG.ReplaceAllUsesWith(From: LSNode2, To: Res.getNode());
18820 return Res;
18821 }
18822}
18823
18824// Try to combine two adjacent loads/stores to a single pair instruction from
18825// the XTHeadMemPair vendor extension.
18826static SDValue performMemPairCombine(SDNode *N,
18827 TargetLowering::DAGCombinerInfo &DCI) {
18828 SelectionDAG &DAG = DCI.DAG;
18829 MachineFunction &MF = DAG.getMachineFunction();
18830 const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
18831
18832 // Target does not support load/store pair.
18833 if (!Subtarget.hasVendorXTHeadMemPair())
18834 return SDValue();
18835
18836 LSBaseSDNode *LSNode1 = cast<LSBaseSDNode>(Val: N);
18837 EVT MemVT = LSNode1->getMemoryVT();
18838 unsigned OpNum = LSNode1->getOpcode() == ISD::LOAD ? 1 : 2;
18839
18840 // No volatile, indexed or atomic loads/stores.
18841 if (!LSNode1->isSimple() || LSNode1->isIndexed())
18842 return SDValue();
18843
18844 // Function to get a base + constant representation from a memory value.
18845 auto ExtractBaseAndOffset = [](SDValue Ptr) -> std::pair<SDValue, uint64_t> {
18846 if (Ptr->getOpcode() == ISD::ADD)
18847 if (auto *C1 = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1)))
18848 return {Ptr->getOperand(Num: 0), C1->getZExtValue()};
18849 return {Ptr, 0};
18850 };
18851
18852 auto [Base1, Offset1] = ExtractBaseAndOffset(LSNode1->getOperand(Num: OpNum));
18853
18854 SDValue Chain = N->getOperand(Num: 0);
18855 for (SDUse &Use : Chain->uses()) {
18856 if (Use.getUser() != N && Use.getResNo() == 0 &&
18857 Use.getUser()->getOpcode() == N->getOpcode()) {
18858 LSBaseSDNode *LSNode2 = cast<LSBaseSDNode>(Val: Use.getUser());
18859
18860 // No volatile, indexed or atomic loads/stores.
18861 if (!LSNode2->isSimple() || LSNode2->isIndexed())
18862 continue;
18863
18864 // Check if LSNode1 and LSNode2 have the same type and extension.
18865 if (LSNode1->getOpcode() == ISD::LOAD)
18866 if (cast<LoadSDNode>(Val: LSNode2)->getExtensionType() !=
18867 cast<LoadSDNode>(Val: LSNode1)->getExtensionType())
18868 continue;
18869
18870 if (LSNode1->getMemoryVT() != LSNode2->getMemoryVT())
18871 continue;
18872
18873 auto [Base2, Offset2] = ExtractBaseAndOffset(LSNode2->getOperand(Num: OpNum));
18874
18875      // Check if the base pointer is the same for both instructions.
18876 if (Base1 != Base2)
18877 continue;
18878
18879 // Check if the offsets match the XTHeadMemPair encoding constraints.
18880 bool Valid = false;
18881 if (MemVT == MVT::i32) {
18882 // Check for adjacent i32 values and a 2-bit index.
18883 if ((Offset1 + 4 == Offset2) && isShiftedUInt<2, 3>(x: Offset1))
18884 Valid = true;
18885 } else if (MemVT == MVT::i64) {
18886 // Check for adjacent i64 values and a 2-bit index.
18887 if ((Offset1 + 8 == Offset2) && isShiftedUInt<2, 4>(x: Offset1))
18888 Valid = true;
18889 }
18890
18891 if (!Valid)
18892 continue;
18893
18894 // Try to combine.
18895 if (SDValue Res =
18896 tryMemPairCombine(DAG, LSNode1, LSNode2, BasePtr: Base1, Imm: Offset1))
18897 return Res;
18898 }
18899 }
18900
18901 return SDValue();
18902}
18903
18904// Fold
18905// (fp_to_int (froundeven X)) -> fcvt X, rne
18906// (fp_to_int (ftrunc X)) -> fcvt X, rtz
18907// (fp_to_int (ffloor X)) -> fcvt X, rdn
18908// (fp_to_int (fceil X)) -> fcvt X, rup
18909// (fp_to_int (fround X)) -> fcvt X, rmm
18910// (fp_to_int (frint X)) -> fcvt X
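// (rne, rtz, rdn, rup and rmm are the RISC-V rounding modes: to nearest-even,
// toward zero, down, up and to nearest-max-magnitude, respectively.)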
18911static SDValue performFP_TO_INTCombine(SDNode *N,
18912 TargetLowering::DAGCombinerInfo &DCI,
18913 const RISCVSubtarget &Subtarget) {
18914 SelectionDAG &DAG = DCI.DAG;
18915 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
18916 MVT XLenVT = Subtarget.getXLenVT();
18917
18918 SDValue Src = N->getOperand(Num: 0);
18919
18920 // Don't do this for strict-fp Src.
18921 if (Src->isStrictFPOpcode())
18922 return SDValue();
18923
18924 // Ensure the FP type is legal.
18925 if (!TLI.isTypeLegal(VT: Src.getValueType()))
18926 return SDValue();
18927
18928 // Don't do this for f16 with Zfhmin and not Zfh.
18929 if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
18930 return SDValue();
18931
18932 RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Opc: Src.getOpcode());
18933 // If the result is invalid, we didn't find a foldable instruction.
18934 if (FRM == RISCVFPRndMode::Invalid)
18935 return SDValue();
18936
18937 SDLoc DL(N);
18938 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
18939 EVT VT = N->getValueType(ResNo: 0);
18940
18941 if (VT.isVector() && TLI.isTypeLegal(VT)) {
18942 MVT SrcVT = Src.getSimpleValueType();
18943 MVT SrcContainerVT = SrcVT;
18944 MVT ContainerVT = VT.getSimpleVT();
18945 SDValue XVal = Src.getOperand(i: 0);
18946
18947    // For widening and narrowing conversions, we combine them directly into a
18948    // VFCVT_..._VL node, as there are no specific VFWCVT/VFNCVT VL nodes. They
18949    // end up getting lowered to their appropriate pseudo instructions based on
18950    // their operand types.
18951 if (VT.getScalarSizeInBits() > SrcVT.getScalarSizeInBits() * 2 ||
18952 VT.getScalarSizeInBits() * 2 < SrcVT.getScalarSizeInBits())
18953 return SDValue();
18954
18955 // Make fixed-length vectors scalable first
18956 if (SrcVT.isFixedLengthVector()) {
18957 SrcContainerVT = getContainerForFixedLengthVector(DAG, VT: SrcVT, Subtarget);
18958 XVal = convertToScalableVector(VT: SrcContainerVT, V: XVal, DAG, Subtarget);
18959 ContainerVT =
18960 getContainerForFixedLengthVector(DAG, VT: ContainerVT, Subtarget);
18961 }
18962
18963 auto [Mask, VL] =
18964 getDefaultVLOps(VecVT: SrcVT, ContainerVT: SrcContainerVT, DL, DAG, Subtarget);
18965
18966 SDValue FpToInt;
18967 if (FRM == RISCVFPRndMode::RTZ) {
18968 // Use the dedicated trunc static rounding mode if we're truncating so we
18969 // don't need to generate calls to fsrmi/fsrm
18970 unsigned Opc =
18971 IsSigned ? RISCVISD::VFCVT_RTZ_X_F_VL : RISCVISD::VFCVT_RTZ_XU_F_VL;
18972 FpToInt = DAG.getNode(Opcode: Opc, DL, VT: ContainerVT, N1: XVal, N2: Mask, N3: VL);
18973 } else {
18974 unsigned Opc =
18975 IsSigned ? RISCVISD::VFCVT_RM_X_F_VL : RISCVISD::VFCVT_RM_XU_F_VL;
18976 FpToInt = DAG.getNode(Opcode: Opc, DL, VT: ContainerVT, N1: XVal, N2: Mask,
18977 N3: DAG.getTargetConstant(Val: FRM, DL, VT: XLenVT), N4: VL);
18978 }
18979
18980 // If converted from fixed-length to scalable, convert back
18981 if (VT.isFixedLengthVector())
18982 FpToInt = convertFromScalableVector(VT, V: FpToInt, DAG, Subtarget);
18983
18984 return FpToInt;
18985 }
18986
18987 // Only handle XLen or i32 types. Other types narrower than XLen will
18988 // eventually be legalized to XLenVT.
18989 if (VT != MVT::i32 && VT != XLenVT)
18990 return SDValue();
18991
18992 unsigned Opc;
18993 if (VT == XLenVT)
18994 Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
18995 else
18996 Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
18997
18998 SDValue FpToInt = DAG.getNode(Opcode: Opc, DL, VT: XLenVT, N1: Src.getOperand(i: 0),
18999 N2: DAG.getTargetConstant(Val: FRM, DL, VT: XLenVT));
19000 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT, Operand: FpToInt);
19001}
19002
19003// Fold
19004// (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
19005// (fp_to_int_sat (ftrunc X)) -> (select X == nan, 0, (fcvt X, rtz))
19006// (fp_to_int_sat (ffloor X)) -> (select X == nan, 0, (fcvt X, rdn))
19007// (fp_to_int_sat (fceil X)) -> (select X == nan, 0, (fcvt X, rup))
19008// (fp_to_int_sat (fround X)) -> (select X == nan, 0, (fcvt X, rmm))
19009// (fp_to_int_sat (frint X)) -> (select X == nan, 0, (fcvt X, dyn))
19010static SDValue performFP_TO_INT_SATCombine(SDNode *N,
19011 TargetLowering::DAGCombinerInfo &DCI,
19012 const RISCVSubtarget &Subtarget) {
19013 SelectionDAG &DAG = DCI.DAG;
19014 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19015 MVT XLenVT = Subtarget.getXLenVT();
19016
19017 // Only handle XLen types. Other types narrower than XLen will eventually be
19018 // legalized to XLenVT.
19019 EVT DstVT = N->getValueType(ResNo: 0);
19020 if (DstVT != XLenVT)
19021 return SDValue();
19022
19023 SDValue Src = N->getOperand(Num: 0);
19024
19025 // Don't do this for strict-fp Src.
19026 if (Src->isStrictFPOpcode())
19027 return SDValue();
19028
19029 // Ensure the FP type is also legal.
19030 if (!TLI.isTypeLegal(VT: Src.getValueType()))
19031 return SDValue();
19032
19033 // Don't do this for f16 with Zfhmin and not Zfh.
19034 if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
19035 return SDValue();
19036
19037 EVT SatVT = cast<VTSDNode>(Val: N->getOperand(Num: 1))->getVT();
19038
19039 RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Opc: Src.getOpcode());
19040 if (FRM == RISCVFPRndMode::Invalid)
19041 return SDValue();
19042
19043 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
19044
19045 unsigned Opc;
19046 if (SatVT == DstVT)
19047 Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
19048 else if (DstVT == MVT::i64 && SatVT == MVT::i32)
19049 Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
19050 else
19051 return SDValue();
19052 // FIXME: Support other SatVTs by clamping before or after the conversion.
19053
19054 Src = Src.getOperand(i: 0);
19055
19056 SDLoc DL(N);
19057 SDValue FpToInt = DAG.getNode(Opcode: Opc, DL, VT: XLenVT, N1: Src,
19058 N2: DAG.getTargetConstant(Val: FRM, DL, VT: XLenVT));
19059
19060 // fcvt.wu.* sign extends bit 31 on RV64. FP_TO_UINT_SAT expects to zero
19061 // extend.
19062 if (Opc == RISCVISD::FCVT_WU_RV64)
19063 FpToInt = DAG.getZeroExtendInReg(Op: FpToInt, DL, VT: MVT::i32);
19064
19065 // RISC-V FP-to-int conversions saturate to the destination register size, but
19066 // don't produce 0 for nan.
19067 SDValue ZeroInt = DAG.getConstant(Val: 0, DL, VT: DstVT);
19068 return DAG.getSelectCC(DL, LHS: Src, RHS: Src, True: ZeroInt, False: FpToInt, Cond: ISD::CondCode::SETUO);
19069}
19070
19071// Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
19072// smaller than XLenVT.
19073static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
19074 const RISCVSubtarget &Subtarget) {
19075 assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
19076
19077 SDValue Src = N->getOperand(Num: 0);
19078 if (Src.getOpcode() != ISD::BSWAP)
19079 return SDValue();
19080
19081 EVT VT = N->getValueType(ResNo: 0);
19082 if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
19083 !llvm::has_single_bit<uint32_t>(Value: VT.getSizeInBits()))
19084 return SDValue();
19085
19086 SDLoc DL(N);
19087 return DAG.getNode(Opcode: RISCVISD::BREV8, DL, VT, Operand: Src.getOperand(i: 0));
19088}
19089
19090static SDValue performVP_REVERSECombine(SDNode *N, SelectionDAG &DAG,
19091 const RISCVSubtarget &Subtarget) {
19092 // Fold:
19093 // vp.reverse(vp.load(ADDR, MASK)) -> vp.strided.load(ADDR, -1, MASK)
19094
19095 // Check if its first operand is a vp.load.
19096 auto *VPLoad = dyn_cast<VPLoadSDNode>(Val: N->getOperand(Num: 0));
19097 if (!VPLoad)
19098 return SDValue();
19099
19100 EVT LoadVT = VPLoad->getValueType(ResNo: 0);
19101 // We do not have a strided_load version for masks, and the evl of vp.reverse
19102 // and vp.load should always be the same.
19103 if (!LoadVT.getVectorElementType().isByteSized() ||
19104 N->getOperand(Num: 2) != VPLoad->getVectorLength() ||
19105 !N->getOperand(Num: 0).hasOneUse())
19106 return SDValue();
19107
19108 // Check if the mask of outer vp.reverse are all 1's.
19109 if (!isOneOrOneSplat(V: N->getOperand(Num: 1)))
19110 return SDValue();
19111
19112 SDValue LoadMask = VPLoad->getMask();
19113 // If Mask is all ones, then load is unmasked and can be reversed.
19114 if (!isOneOrOneSplat(V: LoadMask)) {
19115 // If the mask is not all ones, we can reverse the load if the mask was also
19116 // reversed by an unmasked vp.reverse with the same EVL.
19117 if (LoadMask.getOpcode() != ISD::EXPERIMENTAL_VP_REVERSE ||
19118 !isOneOrOneSplat(V: LoadMask.getOperand(i: 1)) ||
19119 LoadMask.getOperand(i: 2) != VPLoad->getVectorLength())
19120 return SDValue();
19121 LoadMask = LoadMask.getOperand(i: 0);
19122 }
19123
19124 // Base = LoadAddr + (NumElem - 1) * ElemWidthByte
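  // Loading from that base with a stride of -ElemWidthByte visits the elements
  // last-to-first, so the strided load yields the reversed vector directly.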
19125 SDLoc DL(N);
19126 MVT XLenVT = Subtarget.getXLenVT();
19127 SDValue NumElem = VPLoad->getVectorLength();
19128 uint64_t ElemWidthByte = VPLoad->getValueType(ResNo: 0).getScalarSizeInBits() / 8;
19129
19130 SDValue Temp1 = DAG.getNode(Opcode: ISD::SUB, DL, VT: XLenVT, N1: NumElem,
19131 N2: DAG.getConstant(Val: 1, DL, VT: XLenVT));
19132 SDValue Temp2 = DAG.getNode(Opcode: ISD::MUL, DL, VT: XLenVT, N1: Temp1,
19133 N2: DAG.getConstant(Val: ElemWidthByte, DL, VT: XLenVT));
19134 SDValue Base = DAG.getNode(Opcode: ISD::ADD, DL, VT: XLenVT, N1: VPLoad->getBasePtr(), N2: Temp2);
19135 SDValue Stride = DAG.getSignedConstant(Val: -ElemWidthByte, DL, VT: XLenVT);
19136
19137 MachineFunction &MF = DAG.getMachineFunction();
19138 MachinePointerInfo PtrInfo(VPLoad->getAddressSpace());
19139 MachineMemOperand *MMO = MF.getMachineMemOperand(
19140 PtrInfo, F: VPLoad->getMemOperand()->getFlags(),
19141 Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: VPLoad->getAlign());
19142
19143 SDValue Ret = DAG.getStridedLoadVP(
19144 VT: LoadVT, DL, Chain: VPLoad->getChain(), Ptr: Base, Stride, Mask: LoadMask,
19145 EVL: VPLoad->getVectorLength(), MMO, IsExpanding: VPLoad->isExpandingLoad());
19146
19147 DAG.ReplaceAllUsesOfValueWith(From: SDValue(VPLoad, 1), To: Ret.getValue(R: 1));
19148
19149 return Ret;
19150}
19151
19152static SDValue performVP_STORECombine(SDNode *N, SelectionDAG &DAG,
19153 const RISCVSubtarget &Subtarget) {
19154 // Fold:
19155 // vp.store(vp.reverse(VAL), ADDR, MASK) -> vp.strided.store(VAL, NEW_ADDR,
19156 // -1, MASK)
19157 auto *VPStore = cast<VPStoreSDNode>(Val: N);
19158
19159 if (VPStore->getValue().getOpcode() != ISD::EXPERIMENTAL_VP_REVERSE)
19160 return SDValue();
19161
19162 SDValue VPReverse = VPStore->getValue();
19163 EVT ReverseVT = VPReverse->getValueType(ResNo: 0);
19164
19165 // We do not have a strided_store version for masks, and the evl of vp.reverse
19166 // and vp.store should always be the same.
19167 if (!ReverseVT.getVectorElementType().isByteSized() ||
19168 VPStore->getVectorLength() != VPReverse.getOperand(i: 2) ||
19169 !VPReverse.hasOneUse())
19170 return SDValue();
19171
19172 SDValue StoreMask = VPStore->getMask();
19173  // If Mask is all ones, then the store is unmasked and can be reversed.
19174 if (!isOneOrOneSplat(V: StoreMask)) {
19175 // If the mask is not all ones, we can reverse the store if the mask was
19176 // also reversed by an unmasked vp.reverse with the same EVL.
19177 if (StoreMask.getOpcode() != ISD::EXPERIMENTAL_VP_REVERSE ||
19178 !isOneOrOneSplat(V: StoreMask.getOperand(i: 1)) ||
19179 StoreMask.getOperand(i: 2) != VPStore->getVectorLength())
19180 return SDValue();
19181 StoreMask = StoreMask.getOperand(i: 0);
19182 }
19183
19184 // Base = StoreAddr + (NumElem - 1) * ElemWidthByte
19185 SDLoc DL(N);
19186 MVT XLenVT = Subtarget.getXLenVT();
19187 SDValue NumElem = VPStore->getVectorLength();
19188 uint64_t ElemWidthByte = VPReverse.getValueType().getScalarSizeInBits() / 8;
19189
19190 SDValue Temp1 = DAG.getNode(Opcode: ISD::SUB, DL, VT: XLenVT, N1: NumElem,
19191 N2: DAG.getConstant(Val: 1, DL, VT: XLenVT));
19192 SDValue Temp2 = DAG.getNode(Opcode: ISD::MUL, DL, VT: XLenVT, N1: Temp1,
19193 N2: DAG.getConstant(Val: ElemWidthByte, DL, VT: XLenVT));
19194 SDValue Base =
19195 DAG.getNode(Opcode: ISD::ADD, DL, VT: XLenVT, N1: VPStore->getBasePtr(), N2: Temp2);
19196 SDValue Stride = DAG.getSignedConstant(Val: -ElemWidthByte, DL, VT: XLenVT);
19197
19198 MachineFunction &MF = DAG.getMachineFunction();
19199 MachinePointerInfo PtrInfo(VPStore->getAddressSpace());
19200 MachineMemOperand *MMO = MF.getMachineMemOperand(
19201 PtrInfo, F: VPStore->getMemOperand()->getFlags(),
19202 Size: LocationSize::beforeOrAfterPointer(), BaseAlignment: VPStore->getAlign());
19203
19204 return DAG.getStridedStoreVP(
19205 Chain: VPStore->getChain(), DL, Val: VPReverse.getOperand(i: 0), Ptr: Base,
19206 Offset: VPStore->getOffset(), Stride, Mask: StoreMask, EVL: VPStore->getVectorLength(),
19207 MemVT: VPStore->getMemoryVT(), MMO, AM: VPStore->getAddressingMode(),
19208 IsTruncating: VPStore->isTruncatingStore(), IsCompressing: VPStore->isCompressingStore());
19209}
19210
19211// Peephole avgceil pattern.
19212// %1 = zext <N x i8> %a to <N x i32>
19213// %2 = zext <N x i8> %b to <N x i32>
19214// %3 = add nuw nsw <N x i32> %1, splat (i32 1)
19215// %4 = add nuw nsw <N x i32> %3, %2
19216// %5 = lshr <N x i32> %4, splat (i32 1)
19217// %6 = trunc <N x i32> %5 to <N x i8>
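// i.e. %6 computes (zext(a) + zext(b) + 1) >> 1, the rounding ("ceil") average
// of a and b, which maps to vaaddu with the RNU rounding mode.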
19218static SDValue performVP_TRUNCATECombine(SDNode *N, SelectionDAG &DAG,
19219 const RISCVSubtarget &Subtarget) {
19220 EVT VT = N->getValueType(ResNo: 0);
19221
19222 // Ignore fixed vectors.
19223 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19224 if (!VT.isScalableVector() || !TLI.isTypeLegal(VT))
19225 return SDValue();
19226
19227 SDValue In = N->getOperand(Num: 0);
19228 SDValue Mask = N->getOperand(Num: 1);
19229 SDValue VL = N->getOperand(Num: 2);
19230
19231 // Input should be a vp_srl with same mask and VL.
19232 if (In.getOpcode() != ISD::VP_SRL || In.getOperand(i: 2) != Mask ||
19233 In.getOperand(i: 3) != VL)
19234 return SDValue();
19235
19236 // Shift amount should be 1.
19237 if (!isOneOrOneSplat(V: In.getOperand(i: 1)))
19238 return SDValue();
19239
19240 // Shifted value should be a vp_add with same mask and VL.
19241 SDValue LHS = In.getOperand(i: 0);
19242 if (LHS.getOpcode() != ISD::VP_ADD || LHS.getOperand(i: 2) != Mask ||
19243 LHS.getOperand(i: 3) != VL)
19244 return SDValue();
19245
19246 SDValue Operands[3];
19247
19248 // Matches another VP_ADD with same VL and Mask.
19249 auto FindAdd = [&](SDValue V, SDValue Other) {
19250 if (V.getOpcode() != ISD::VP_ADD || V.getOperand(i: 2) != Mask ||
19251 V.getOperand(i: 3) != VL)
19252 return false;
19253
19254 Operands[0] = Other;
19255 Operands[1] = V.getOperand(i: 1);
19256 Operands[2] = V.getOperand(i: 0);
19257 return true;
19258 };
19259
19260 // We need to find another VP_ADD in one of the operands.
19261 SDValue LHS0 = LHS.getOperand(i: 0);
19262 SDValue LHS1 = LHS.getOperand(i: 1);
19263 if (!FindAdd(LHS0, LHS1) && !FindAdd(LHS1, LHS0))
19264 return SDValue();
19265
19266 // Now we have three operands of two additions. Check that one of them is a
19267 // constant vector with ones.
19268 auto I = llvm::find_if(Range&: Operands,
19269 P: [](const SDValue &Op) { return isOneOrOneSplat(V: Op); });
19270 if (I == std::end(arr&: Operands))
19271 return SDValue();
19272  // We found a vector with ones; move it to the end of the Operands array.
19273 std::swap(a&: *I, b&: Operands[2]);
19274
19275  // Make sure the other 2 operands are zero extends whose inputs are no wider
  // than the result type.
19276 for (SDValue Op : drop_end(RangeOrContainer&: Operands)) {
19277 if (Op.getOpcode() != ISD::VP_ZERO_EXTEND || Op.getOperand(i: 1) != Mask ||
19278 Op.getOperand(i: 2) != VL)
19279 return SDValue();
19280 // Input must be the same size or smaller than our result.
19281 if (Op.getOperand(i: 0).getScalarValueSizeInBits() > VT.getScalarSizeInBits())
19282 return SDValue();
19283 }
19284
19285 // Pattern is detected.
19286 // Rebuild the zero extends in case the inputs are smaller than our result.
19287 SDValue NewOp0 = DAG.getNode(Opcode: ISD::VP_ZERO_EXTEND, DL: SDLoc(Operands[0]), VT,
19288 N1: Operands[0].getOperand(i: 0), N2: Mask, N3: VL);
19289 SDValue NewOp1 = DAG.getNode(Opcode: ISD::VP_ZERO_EXTEND, DL: SDLoc(Operands[1]), VT,
19290 N1: Operands[1].getOperand(i: 0), N2: Mask, N3: VL);
19291  // Build an AVGCEILU_VL which will be selected as a VAADDU with RNU rounding
19292 // mode.
19293 SDLoc DL(N);
19294 return DAG.getNode(Opcode: RISCVISD::AVGCEILU_VL, DL, VT,
19295 Ops: {NewOp0, NewOp1, DAG.getUNDEF(VT), Mask, VL});
19296}
19297
19298// Convert from one FMA opcode to another based on whether we are negating the
19299// multiply result and/or the accumulator.
19300// NOTE: Only supports RVV operations with VL.
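// For example, negating the product of VFMADD_VL ((a * b) + c) yields
// (-(a * b) + c), i.e. VFNMSUB_VL; additionally negating the accumulator
// yields (-(a * b) - c), i.e. VFNMADD_VL.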
19301static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc) {
19302 // Negating the multiply result changes ADD<->SUB and toggles 'N'.
19303 if (NegMul) {
19304 // clang-format off
19305 switch (Opcode) {
19306 default: llvm_unreachable("Unexpected opcode");
19307 case RISCVISD::VFMADD_VL: Opcode = RISCVISD::VFNMSUB_VL; break;
19308 case RISCVISD::VFNMSUB_VL: Opcode = RISCVISD::VFMADD_VL; break;
19309 case RISCVISD::VFNMADD_VL: Opcode = RISCVISD::VFMSUB_VL; break;
19310 case RISCVISD::VFMSUB_VL: Opcode = RISCVISD::VFNMADD_VL; break;
19311 case RISCVISD::STRICT_VFMADD_VL: Opcode = RISCVISD::STRICT_VFNMSUB_VL; break;
19312 case RISCVISD::STRICT_VFNMSUB_VL: Opcode = RISCVISD::STRICT_VFMADD_VL; break;
19313 case RISCVISD::STRICT_VFNMADD_VL: Opcode = RISCVISD::STRICT_VFMSUB_VL; break;
19314 case RISCVISD::STRICT_VFMSUB_VL: Opcode = RISCVISD::STRICT_VFNMADD_VL; break;
19315 }
19316 // clang-format on
19317 }
19318
19319 // Negating the accumulator changes ADD<->SUB.
19320 if (NegAcc) {
19321 // clang-format off
19322 switch (Opcode) {
19323 default: llvm_unreachable("Unexpected opcode");
19324 case RISCVISD::VFMADD_VL: Opcode = RISCVISD::VFMSUB_VL; break;
19325 case RISCVISD::VFMSUB_VL: Opcode = RISCVISD::VFMADD_VL; break;
19326 case RISCVISD::VFNMADD_VL: Opcode = RISCVISD::VFNMSUB_VL; break;
19327 case RISCVISD::VFNMSUB_VL: Opcode = RISCVISD::VFNMADD_VL; break;
19328 case RISCVISD::STRICT_VFMADD_VL: Opcode = RISCVISD::STRICT_VFMSUB_VL; break;
19329 case RISCVISD::STRICT_VFMSUB_VL: Opcode = RISCVISD::STRICT_VFMADD_VL; break;
19330 case RISCVISD::STRICT_VFNMADD_VL: Opcode = RISCVISD::STRICT_VFNMSUB_VL; break;
19331 case RISCVISD::STRICT_VFNMSUB_VL: Opcode = RISCVISD::STRICT_VFNMADD_VL; break;
19332 }
19333 // clang-format on
19334 }
19335
19336 return Opcode;
19337}
19338
19339static SDValue combineVFMADD_VLWithVFNEG_VL(SDNode *N, SelectionDAG &DAG) {
19340 // Fold FNEG_VL into FMA opcodes.
19341 // The first operand of strict-fp is chain.
19342 bool IsStrict =
19343 DAG.getSelectionDAGInfo().isTargetStrictFPOpcode(Opcode: N->getOpcode());
19344 unsigned Offset = IsStrict ? 1 : 0;
19345 SDValue A = N->getOperand(Num: 0 + Offset);
19346 SDValue B = N->getOperand(Num: 1 + Offset);
19347 SDValue C = N->getOperand(Num: 2 + Offset);
19348 SDValue Mask = N->getOperand(Num: 3 + Offset);
19349 SDValue VL = N->getOperand(Num: 4 + Offset);
19350
19351 auto invertIfNegative = [&Mask, &VL](SDValue &V) {
19352 if (V.getOpcode() == RISCVISD::FNEG_VL && V.getOperand(i: 1) == Mask &&
19353 V.getOperand(i: 2) == VL) {
19354 // Return the negated input.
19355 V = V.getOperand(i: 0);
19356 return true;
19357 }
19358
19359 return false;
19360 };
19361
19362 bool NegA = invertIfNegative(A);
19363 bool NegB = invertIfNegative(B);
19364 bool NegC = invertIfNegative(C);
19365
19366 // If no operands are negated, we're done.
19367 if (!NegA && !NegB && !NegC)
19368 return SDValue();
19369
19370 unsigned NewOpcode = negateFMAOpcode(Opcode: N->getOpcode(), NegMul: NegA != NegB, NegAcc: NegC);
19371 if (IsStrict)
19372 return DAG.getNode(Opcode: NewOpcode, DL: SDLoc(N), VTList: N->getVTList(),
19373 Ops: {N->getOperand(Num: 0), A, B, C, Mask, VL});
19374 return DAG.getNode(Opcode: NewOpcode, DL: SDLoc(N), VT: N->getValueType(ResNo: 0), N1: A, N2: B, N3: C, N4: Mask,
19375 N5: VL);
19376}
19377
19378static SDValue performVFMADD_VLCombine(SDNode *N,
19379 TargetLowering::DAGCombinerInfo &DCI,
19380 const RISCVSubtarget &Subtarget) {
19381 SelectionDAG &DAG = DCI.DAG;
19382
19383 if (SDValue V = combineVFMADD_VLWithVFNEG_VL(N, DAG))
19384 return V;
19385
19386 // FIXME: Ignore strict opcodes for now.
19387 if (DAG.getSelectionDAGInfo().isTargetStrictFPOpcode(Opcode: N->getOpcode()))
19388 return SDValue();
19389
19390 return combineOp_VLToVWOp_VL(N, DCI, Subtarget);
19391}
19392
19393static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
19394 const RISCVSubtarget &Subtarget) {
19395 assert(N->getOpcode() == ISD::SRA && "Unexpected opcode");
19396
19397 EVT VT = N->getValueType(ResNo: 0);
19398
19399 if (VT != Subtarget.getXLenVT())
19400 return SDValue();
19401
19402 if (!isa<ConstantSDNode>(Val: N->getOperand(Num: 1)))
19403 return SDValue();
19404 uint64_t ShAmt = N->getConstantOperandVal(Num: 1);
19405
19406 SDValue N0 = N->getOperand(Num: 0);
19407
19408 // Combine (sra (sext_inreg (shl X, C1), iX), C2) ->
19409 // (sra (shl X, C1+(XLen-iX)), C2+(XLen-iX)) so it gets selected as SLLI+SRAI.
19410 if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && N0.hasOneUse()) {
19411 unsigned ExtSize =
19412 cast<VTSDNode>(Val: N0.getOperand(i: 1))->getVT().getSizeInBits();
19413 if (ShAmt < ExtSize && N0.getOperand(i: 0).getOpcode() == ISD::SHL &&
19414 N0.getOperand(i: 0).hasOneUse() &&
19415 isa<ConstantSDNode>(Val: N0.getOperand(i: 0).getOperand(i: 1))) {
19416 uint64_t LShAmt = N0.getOperand(i: 0).getConstantOperandVal(i: 1);
19417 if (LShAmt < ExtSize) {
19418 unsigned Size = VT.getSizeInBits();
19419 SDLoc ShlDL(N0.getOperand(i: 0));
19420 SDValue Shl =
19421 DAG.getNode(Opcode: ISD::SHL, DL: ShlDL, VT, N1: N0.getOperand(i: 0).getOperand(i: 0),
19422 N2: DAG.getConstant(Val: LShAmt + (Size - ExtSize), DL: ShlDL, VT));
19423 SDLoc DL(N);
19424 return DAG.getNode(Opcode: ISD::SRA, DL, VT, N1: Shl,
19425 N2: DAG.getConstant(Val: ShAmt + (Size - ExtSize), DL, VT));
19426 }
19427 }
19428 }
19429
19430 if (ShAmt > 32 || VT != MVT::i64)
19431 return SDValue();
19432
19433 // Combine (sra (shl X, 32), 32 - C) -> (shl (sext_inreg X, i32), C)
19434 // FIXME: Should this be a generic combine? There's a similar combine on X86.
19435 //
19436 // Also try these folds where an add or sub is in the middle.
19437  // (sra (add (shl X, 32), C1), 32 - C) -> (shl (sext_inreg (add X, C1), i32), C)
19438  // (sra (sub C1, (shl X, 32)), 32 - C) -> (shl (sext_inreg (sub C1, X), i32), C)
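  // For example, with C = 5: (sra (shl X, 32), 27) becomes
  // (shl (sext_inreg X, i32), 5).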
19439 SDValue Shl;
19440 ConstantSDNode *AddC = nullptr;
19441
19442 // We might have an ADD or SUB between the SRA and SHL.
19443 bool IsAdd = N0.getOpcode() == ISD::ADD;
19444 if ((IsAdd || N0.getOpcode() == ISD::SUB)) {
19445 // Other operand needs to be a constant we can modify.
19446 AddC = dyn_cast<ConstantSDNode>(Val: N0.getOperand(i: IsAdd ? 1 : 0));
19447 if (!AddC)
19448 return SDValue();
19449
19450 // AddC needs to have at least 32 trailing zeros.
19451 if (llvm::countr_zero(Val: AddC->getZExtValue()) < 32)
19452 return SDValue();
19453
19454    // All users should be shifts by a constant less than or equal to 32. This
19455 // ensures we'll do this optimization for each of them to produce an
19456 // add/sub+sext_inreg they can all share.
19457 for (SDNode *U : N0->users()) {
19458 if (U->getOpcode() != ISD::SRA ||
19459 !isa<ConstantSDNode>(Val: U->getOperand(Num: 1)) ||
19460 U->getConstantOperandVal(Num: 1) > 32)
19461 return SDValue();
19462 }
19463
19464 Shl = N0.getOperand(i: IsAdd ? 0 : 1);
19465 } else {
19466 // Not an ADD or SUB.
19467 Shl = N0;
19468 }
19469
19470 // Look for a shift left by 32.
19471 if (Shl.getOpcode() != ISD::SHL || !isa<ConstantSDNode>(Val: Shl.getOperand(i: 1)) ||
19472 Shl.getConstantOperandVal(i: 1) != 32)
19473 return SDValue();
19474
19475  // If we didn't look through an add/sub, then the shl should have one use.
19476 // If we did look through an add/sub, the sext_inreg we create is free so
19477 // we're only creating 2 new instructions. It's enough to only remove the
19478 // original sra+add/sub.
19479 if (!AddC && !Shl.hasOneUse())
19480 return SDValue();
19481
19482 SDLoc DL(N);
19483 SDValue In = Shl.getOperand(i: 0);
19484
19485 // If we looked through an ADD or SUB, we need to rebuild it with the shifted
19486 // constant.
19487 if (AddC) {
19488 SDValue ShiftedAddC =
19489 DAG.getConstant(Val: AddC->getZExtValue() >> 32, DL, VT: MVT::i64);
19490 if (IsAdd)
19491 In = DAG.getNode(Opcode: ISD::ADD, DL, VT: MVT::i64, N1: In, N2: ShiftedAddC);
19492 else
19493 In = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i64, N1: ShiftedAddC, N2: In);
19494 }
19495
19496 SDValue SExt = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT: MVT::i64, N1: In,
19497 N2: DAG.getValueType(MVT::i32));
19498 if (ShAmt == 32)
19499 return SExt;
19500
19501 return DAG.getNode(
19502 Opcode: ISD::SHL, DL, VT: MVT::i64, N1: SExt,
19503 N2: DAG.getConstant(Val: 32 - ShAmt, DL, VT: MVT::i64));
19504}
19505
19506// Invert (and/or (setcc X, Y, cc), (xor Z, 1)) to (or/and (setcc X, Y, !cc), Z) if
19507// the result is used as the condition of a br_cc or select_cc we can invert,
19508// inverting the setcc is free, and Z is 0/1. Caller will invert the
19509// br_cc/select_cc.
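// For example (illustrative), a br_cc on (and (setcc eq X, Y), (xor Z, 1)) can
// instead use (or (setcc ne X, Y), Z) with the branch condition inverted, per
// De Morgan's law.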
19510static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
19511 bool IsAnd = Cond.getOpcode() == ISD::AND;
19512 if (!IsAnd && Cond.getOpcode() != ISD::OR)
19513 return SDValue();
19514
19515 if (!Cond.hasOneUse())
19516 return SDValue();
19517
19518 SDValue Setcc = Cond.getOperand(i: 0);
19519 SDValue Xor = Cond.getOperand(i: 1);
19520 // Canonicalize setcc to LHS.
19521 if (Setcc.getOpcode() != ISD::SETCC)
19522 std::swap(a&: Setcc, b&: Xor);
19523 // LHS should be a setcc and RHS should be an xor.
19524 if (Setcc.getOpcode() != ISD::SETCC || !Setcc.hasOneUse() ||
19525 Xor.getOpcode() != ISD::XOR || !Xor.hasOneUse())
19526 return SDValue();
19527
19528 // If the condition is an And, SimplifyDemandedBits may have changed
19529 // (xor Z, 1) to (not Z).
19530 SDValue Xor1 = Xor.getOperand(i: 1);
19531 if (!isOneConstant(V: Xor1) && !(IsAnd && isAllOnesConstant(V: Xor1)))
19532 return SDValue();
19533
19534 EVT VT = Cond.getValueType();
19535 SDValue Xor0 = Xor.getOperand(i: 0);
19536
19537 // The LHS of the xor needs to be 0/1.
19538 APInt Mask = APInt::getBitsSetFrom(numBits: VT.getSizeInBits(), loBit: 1);
19539 if (!DAG.MaskedValueIsZero(Op: Xor0, Mask))
19540 return SDValue();
19541
19542 // We can only invert integer setccs.
19543 EVT SetCCOpVT = Setcc.getOperand(i: 0).getValueType();
19544 if (!SetCCOpVT.isScalarInteger())
19545 return SDValue();
19546
19547 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val: Setcc.getOperand(i: 2))->get();
19548 if (ISD::isIntEqualitySetCC(Code: CCVal)) {
19549 CCVal = ISD::getSetCCInverse(Operation: CCVal, Type: SetCCOpVT);
19550 Setcc = DAG.getSetCC(DL: SDLoc(Setcc), VT, LHS: Setcc.getOperand(i: 0),
19551 RHS: Setcc.getOperand(i: 1), Cond: CCVal);
19552 } else if (CCVal == ISD::SETLT && isNullConstant(V: Setcc.getOperand(i: 0))) {
19553 // Invert (setlt 0, X) by converting to (setlt X, 1).
19554 Setcc = DAG.getSetCC(DL: SDLoc(Setcc), VT, LHS: Setcc.getOperand(i: 1),
19555 RHS: DAG.getConstant(Val: 1, DL: SDLoc(Setcc), VT), Cond: CCVal);
19556 } else if (CCVal == ISD::SETLT && isOneConstant(V: Setcc.getOperand(i: 1))) {
19557    // Invert (setlt X, 1) by converting to (setlt 0, X).
19558 Setcc = DAG.getSetCC(DL: SDLoc(Setcc), VT,
19559 LHS: DAG.getConstant(Val: 0, DL: SDLoc(Setcc), VT),
19560 RHS: Setcc.getOperand(i: 0), Cond: CCVal);
19561 } else
19562 return SDValue();
19563
19564 unsigned Opc = IsAnd ? ISD::OR : ISD::AND;
19565 return DAG.getNode(Opcode: Opc, DL: SDLoc(Cond), VT, N1: Setcc, N2: Xor.getOperand(i: 0));
19566}
19567
19568// Perform common combines for BR_CC and SELECT_CC conditions.
19569static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
19570 SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
19571 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val&: CC)->get();
19572
19573  // Since an arithmetic right shift preserves the sign, the shift can be
19574  // omitted when comparing against zero.
19575 // Fold setlt (sra X, N), 0 -> setlt X, 0 and
19576 // setge (sra X, N), 0 -> setge X, 0
19577 if (isNullConstant(V: RHS) && (CCVal == ISD::SETGE || CCVal == ISD::SETLT) &&
19578 LHS.getOpcode() == ISD::SRA) {
19579 LHS = LHS.getOperand(i: 0);
19580 return true;
19581 }
19582
19583 if (!ISD::isIntEqualitySetCC(Code: CCVal))
19584 return false;
19585
19586 // Fold ((setlt X, Y), 0, ne) -> (X, Y, lt)
19587 // Sometimes the setcc is introduced after br_cc/select_cc has been formed.
19588 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(V: RHS) &&
19589 LHS.getOperand(i: 0).getValueType() == Subtarget.getXLenVT()) {
19590 // If we're looking for eq 0 instead of ne 0, we need to invert the
19591 // condition.
19592 bool Invert = CCVal == ISD::SETEQ;
19593 CCVal = cast<CondCodeSDNode>(Val: LHS.getOperand(i: 2))->get();
19594 if (Invert)
19595 CCVal = ISD::getSetCCInverse(Operation: CCVal, Type: LHS.getValueType());
19596
19597 RHS = LHS.getOperand(i: 1);
19598 LHS = LHS.getOperand(i: 0);
19599 translateSetCCForBranch(DL, LHS, RHS, CC&: CCVal, DAG, Subtarget);
19600
19601 CC = DAG.getCondCode(Cond: CCVal);
19602 return true;
19603 }
19604
19605 // If XOR is reused and has an immediate that will fit in XORI,
19606 // do not fold.
19607 auto isXorImmediate = [](const SDValue &Op) -> bool {
19608 if (const auto *XorCnst = dyn_cast<ConstantSDNode>(Val: Op))
19609 return isInt<12>(x: XorCnst->getSExtValue());
19610 return false;
19611 };
19612 // Fold (X(i1) ^ 1) == 0 -> X != 0
19613 auto singleBitOp = [&DAG](const SDValue &VarOp,
19614 const SDValue &ConstOp) -> bool {
19615 if (const auto *XorCnst = dyn_cast<ConstantSDNode>(Val: ConstOp)) {
19616 const APInt Mask = APInt::getBitsSetFrom(numBits: VarOp.getValueSizeInBits(), loBit: 1);
19617 return (XorCnst->getSExtValue() == 1) &&
19618 DAG.MaskedValueIsZero(Op: VarOp, Mask);
19619 }
19620 return false;
19621 };
19622 auto onlyUsedBySelectOrBR = [](const SDValue &Op) -> bool {
19623 for (const SDNode *UserNode : Op->users()) {
19624 const unsigned Opcode = UserNode->getOpcode();
19625 if (Opcode != RISCVISD::SELECT_CC && Opcode != RISCVISD::BR_CC)
19626 return false;
19627 }
19628 return true;
19629 };
19630 auto isFoldableXorEq = [isXorImmediate, singleBitOp, onlyUsedBySelectOrBR](
19631 const SDValue &LHS, const SDValue &RHS) -> bool {
19632 return LHS.getOpcode() == ISD::XOR && isNullConstant(V: RHS) &&
19633 (!isXorImmediate(LHS.getOperand(i: 1)) ||
19634 singleBitOp(LHS.getOperand(i: 0), LHS.getOperand(i: 1)) ||
19635 onlyUsedBySelectOrBR(LHS));
19636 };
19637 // Fold ((xor X, Y), 0, eq/ne) -> (X, Y, eq/ne)
19638 if (isFoldableXorEq(LHS, RHS)) {
19639 RHS = LHS.getOperand(i: 1);
19640 LHS = LHS.getOperand(i: 0);
19641 return true;
19642 }
19643  // Fold ((sext (xor X, C)), 0, eq/ne) -> ((sext X), C, eq/ne)
19644 if (LHS.getOpcode() == ISD::SIGN_EXTEND_INREG) {
19645 const SDValue LHS0 = LHS.getOperand(i: 0);
19646 if (isFoldableXorEq(LHS0, RHS) && isa<ConstantSDNode>(Val: LHS0.getOperand(i: 1))) {
19647      // SEXT(XOR(X, Y)) -> XOR(SEXT(X), SEXT(Y))
19648 RHS = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT: LHS.getValueType(),
19649 N1: LHS0.getOperand(i: 1), N2: LHS.getOperand(i: 1));
19650 LHS = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT: LHS.getValueType(),
19651 N1: LHS0.getOperand(i: 0), N2: LHS.getOperand(i: 1));
19652 return true;
19653 }
19654 }
19655
19656 // Fold ((srl (and X, 1<<C), C), 0, eq/ne) -> ((shl X, XLen-1-C), 0, ge/lt)
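  // Shifting the tested bit into the sign position turns the equality check
  // into a signed comparison against zero (ge/lt).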
19657 if (isNullConstant(V: RHS) && LHS.getOpcode() == ISD::SRL && LHS.hasOneUse() &&
19658 LHS.getOperand(i: 1).getOpcode() == ISD::Constant) {
19659 SDValue LHS0 = LHS.getOperand(i: 0);
19660 if (LHS0.getOpcode() == ISD::AND &&
19661 LHS0.getOperand(i: 1).getOpcode() == ISD::Constant) {
19662 uint64_t Mask = LHS0.getConstantOperandVal(i: 1);
19663 uint64_t ShAmt = LHS.getConstantOperandVal(i: 1);
19664 if (isPowerOf2_64(Value: Mask) && Log2_64(Value: Mask) == ShAmt) {
19665 // XAndesPerf supports branch on test bit.
19666 if (Subtarget.hasVendorXAndesPerf()) {
19667 LHS =
19668 DAG.getNode(Opcode: ISD::AND, DL, VT: LHS.getValueType(), N1: LHS0.getOperand(i: 0),
19669 N2: DAG.getConstant(Val: Mask, DL, VT: LHS.getValueType()));
19670 return true;
19671 }
19672
19673 CCVal = CCVal == ISD::SETEQ ? ISD::SETGE : ISD::SETLT;
19674 CC = DAG.getCondCode(Cond: CCVal);
19675
19676 ShAmt = LHS.getValueSizeInBits() - 1 - ShAmt;
19677 LHS = LHS0.getOperand(i: 0);
19678 if (ShAmt != 0)
19679 LHS =
19680 DAG.getNode(Opcode: ISD::SHL, DL, VT: LHS.getValueType(), N1: LHS0.getOperand(i: 0),
19681 N2: DAG.getConstant(Val: ShAmt, DL, VT: LHS.getValueType()));
19682 return true;
19683 }
19684 }
19685 }
19686
19687  // (X, 1, setne) -> (X, 0, seteq) if we can prove X is 0/1.
19688 // This can occur when legalizing some floating point comparisons.
19689 APInt Mask = APInt::getBitsSetFrom(numBits: LHS.getValueSizeInBits(), loBit: 1);
19690 if (isOneConstant(V: RHS) && DAG.MaskedValueIsZero(Op: LHS, Mask)) {
19691 CCVal = ISD::getSetCCInverse(Operation: CCVal, Type: LHS.getValueType());
19692 CC = DAG.getCondCode(Cond: CCVal);
19693 RHS = DAG.getConstant(Val: 0, DL, VT: LHS.getValueType());
19694 return true;
19695 }
19696
19697 if (isNullConstant(V: RHS)) {
19698 if (SDValue NewCond = tryDemorganOfBooleanCondition(Cond: LHS, DAG)) {
19699 CCVal = ISD::getSetCCInverse(Operation: CCVal, Type: LHS.getValueType());
19700 CC = DAG.getCondCode(Cond: CCVal);
19701 LHS = NewCond;
19702 return true;
19703 }
19704 }
19705
19706 return false;
19707}
19708
19709// Fold
19710// (select C, (add Y, X), Y) -> (add Y, (select C, X, 0)).
19711// (select C, (sub Y, X), Y) -> (sub Y, (select C, X, 0)).
19712// (select C, (or Y, X), Y) -> (or Y, (select C, X, 0)).
19713// (select C, (xor Y, X), Y) -> (xor Y, (select C, X, 0)).
19714// (select C, (rotl Y, X), Y) -> (rotl Y, (select C, X, 0)).
19715// (select C, (rotr Y, X), Y) -> (rotr Y, (select C, X, 0)).
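// In each case the selected operand collapses to either X or the operation's
// identity value (0 for the non-commutative cases, which always fold on the
// right-hand side), so the binary operation itself becomes unconditional.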
19716static SDValue tryFoldSelectIntoOp(SDNode *N, SelectionDAG &DAG,
19717 SDValue TrueVal, SDValue FalseVal,
19718 bool Swapped) {
19719 bool Commutative = true;
19720 unsigned Opc = TrueVal.getOpcode();
19721 switch (Opc) {
19722 default:
19723 return SDValue();
19724 case ISD::SHL:
19725 case ISD::SRA:
19726 case ISD::SRL:
19727 case ISD::SUB:
19728 case ISD::ROTL:
19729 case ISD::ROTR:
19730 Commutative = false;
19731 break;
19732 case ISD::ADD:
19733 case ISD::OR:
19734 case ISD::XOR:
19735 case ISD::UMIN:
19736 case ISD::UMAX:
19737 break;
19738 }
19739
19740 if (!TrueVal.hasOneUse())
19741 return SDValue();
19742
19743 unsigned OpToFold;
19744 if (FalseVal == TrueVal.getOperand(i: 0))
19745 OpToFold = 0;
19746 else if (Commutative && FalseVal == TrueVal.getOperand(i: 1))
19747 OpToFold = 1;
19748 else
19749 return SDValue();
19750
19751 EVT VT = N->getValueType(ResNo: 0);
19752 SDLoc DL(N);
19753 SDValue OtherOp = TrueVal.getOperand(i: 1 - OpToFold);
19754 EVT OtherOpVT = OtherOp.getValueType();
19755 SDValue IdentityOperand =
19756 DAG.getNeutralElement(Opcode: Opc, DL, VT: OtherOpVT, Flags: N->getFlags());
19757 if (!Commutative)
19758 IdentityOperand = DAG.getConstant(Val: 0, DL, VT: OtherOpVT);
19759 assert(IdentityOperand && "No identity operand!");
19760
19761 if (Swapped)
19762 std::swap(a&: OtherOp, b&: IdentityOperand);
19763 SDValue NewSel =
19764 DAG.getSelect(DL, VT: OtherOpVT, Cond: N->getOperand(Num: 0), LHS: OtherOp, RHS: IdentityOperand);
19765 return DAG.getNode(Opcode: TrueVal.getOpcode(), DL, VT, N1: FalseVal, N2: NewSel);
19766}
19767
19768// This tries to get rid of the `select` and `icmp` that are used to handle
19769// targets that do not support `cttz(0)`/`ctlz(0)`.
19770static SDValue foldSelectOfCTTZOrCTLZ(SDNode *N, SelectionDAG &DAG) {
19771 SDValue Cond = N->getOperand(Num: 0);
19772
19773 // This represents either CTTZ or CTLZ instruction.
19774 SDValue CountZeroes;
19775
19776 SDValue ValOnZero;
19777
19778 if (Cond.getOpcode() != ISD::SETCC)
19779 return SDValue();
19780
19781 if (!isNullConstant(V: Cond->getOperand(Num: 1)))
19782 return SDValue();
19783
19784 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val: Cond->getOperand(Num: 2))->get();
19785 if (CCVal == ISD::CondCode::SETEQ) {
19786 CountZeroes = N->getOperand(Num: 2);
19787 ValOnZero = N->getOperand(Num: 1);
19788 } else if (CCVal == ISD::CondCode::SETNE) {
19789 CountZeroes = N->getOperand(Num: 1);
19790 ValOnZero = N->getOperand(Num: 2);
19791 } else {
19792 return SDValue();
19793 }
19794
19795 if (CountZeroes.getOpcode() == ISD::TRUNCATE ||
19796 CountZeroes.getOpcode() == ISD::ZERO_EXTEND)
19797 CountZeroes = CountZeroes.getOperand(i: 0);
19798
19799 if (CountZeroes.getOpcode() != ISD::CTTZ &&
19800 CountZeroes.getOpcode() != ISD::CTTZ_ZERO_UNDEF &&
19801 CountZeroes.getOpcode() != ISD::CTLZ &&
19802 CountZeroes.getOpcode() != ISD::CTLZ_ZERO_UNDEF)
19803 return SDValue();
19804
19805 if (!isNullConstant(V: ValOnZero))
19806 return SDValue();
19807
19808 SDValue CountZeroesArgument = CountZeroes->getOperand(Num: 0);
19809 if (Cond->getOperand(Num: 0) != CountZeroesArgument)
19810 return SDValue();
19811
19812 unsigned BitWidth = CountZeroes.getValueSizeInBits();
19813 if (!isPowerOf2_32(Value: BitWidth))
19814 return SDValue();
19815
19816 if (CountZeroes.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
19817 CountZeroes = DAG.getNode(Opcode: ISD::CTTZ, DL: SDLoc(CountZeroes),
19818 VT: CountZeroes.getValueType(), Operand: CountZeroesArgument);
19819 } else if (CountZeroes.getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
19820 CountZeroes = DAG.getNode(Opcode: ISD::CTLZ, DL: SDLoc(CountZeroes),
19821 VT: CountZeroes.getValueType(), Operand: CountZeroesArgument);
19822 }
19823
19824 SDValue BitWidthMinusOne =
19825 DAG.getConstant(Val: BitWidth - 1, DL: SDLoc(N), VT: CountZeroes.getValueType());
19826
19827 auto AndNode = DAG.getNode(Opcode: ISD::AND, DL: SDLoc(N), VT: CountZeroes.getValueType(),
19828 N1: CountZeroes, N2: BitWidthMinusOne);
19829 return DAG.getZExtOrTrunc(Op: AndNode, DL: SDLoc(N), VT: N->getValueType(ResNo: 0));
19830}
19831
19832static SDValue useInversedSetcc(SDNode *N, SelectionDAG &DAG,
19833 const RISCVSubtarget &Subtarget) {
19834 SDValue Cond = N->getOperand(Num: 0);
19835 SDValue True = N->getOperand(Num: 1);
19836 SDValue False = N->getOperand(Num: 2);
19837 SDLoc DL(N);
19838 EVT VT = N->getValueType(ResNo: 0);
19839 EVT CondVT = Cond.getValueType();
19840
19841 if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
19842 return SDValue();
19843
19844  // Replace (setcc eq (and x, C)) with (setcc ne (and x, C)) to generate
19845  // BEXTI, where C is a power of 2.
19846 if (Subtarget.hasBEXTILike() && VT.isScalarInteger() &&
19847 (Subtarget.hasCZEROLike() || Subtarget.hasVendorXTHeadCondMov())) {
19848 SDValue LHS = Cond.getOperand(i: 0);
19849 SDValue RHS = Cond.getOperand(i: 1);
19850 ISD::CondCode CC = cast<CondCodeSDNode>(Val: Cond.getOperand(i: 2))->get();
19851 if (CC == ISD::SETEQ && LHS.getOpcode() == ISD::AND &&
19852 isa<ConstantSDNode>(Val: LHS.getOperand(i: 1)) && isNullConstant(V: RHS)) {
19853 const APInt &MaskVal = LHS.getConstantOperandAPInt(i: 1);
19854 if (MaskVal.isPowerOf2() && !MaskVal.isSignedIntN(N: 12))
19855 return DAG.getSelect(DL, VT,
19856 Cond: DAG.getSetCC(DL, VT: CondVT, LHS, RHS, Cond: ISD::SETNE),
19857 LHS: False, RHS: True);
19858 }
19859 }
19860 return SDValue();
19861}
19862
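/// Return true if TrueVal/FalseVal are a one-use (add a, b)/(sub a, b) pair
/// over the same operands (the add may have them in either order). SwapCC is
/// set when the sub is in the true arm and the add in the false arm.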
19863static bool matchSelectAddSub(SDValue TrueVal, SDValue FalseVal, bool &SwapCC) {
19864 if (!TrueVal.hasOneUse() || !FalseVal.hasOneUse())
19865 return false;
19866
19867 SwapCC = false;
19868 if (TrueVal.getOpcode() == ISD::SUB && FalseVal.getOpcode() == ISD::ADD) {
19869 std::swap(a&: TrueVal, b&: FalseVal);
19870 SwapCC = true;
19871 }
19872
19873 if (TrueVal.getOpcode() != ISD::ADD || FalseVal.getOpcode() != ISD::SUB)
19874 return false;
19875
19876 SDValue A = FalseVal.getOperand(i: 0);
19877 SDValue B = FalseVal.getOperand(i: 1);
19878 // Add is commutative, so check both orders
19879 return ((TrueVal.getOperand(i: 0) == A && TrueVal.getOperand(i: 1) == B) ||
19880 (TrueVal.getOperand(i: 1) == A && TrueVal.getOperand(i: 0) == B));
19881}
19882
19883/// Convert vselect CC, (add a, b), (sub a, b) to add a, (vselect CC, -b, b).
19884 /// This allows us to match a vadd.vv fed by a masked vrsub, which reduces
19885 /// register pressure over the add followed by a masked vsub sequence.
19886static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) {
19887 SDLoc DL(N);
19888 EVT VT = N->getValueType(ResNo: 0);
19889 SDValue CC = N->getOperand(Num: 0);
19890 SDValue TrueVal = N->getOperand(Num: 1);
19891 SDValue FalseVal = N->getOperand(Num: 2);
19892
19893 bool SwapCC;
19894 if (!matchSelectAddSub(TrueVal, FalseVal, SwapCC))
19895 return SDValue();
19896
19897 SDValue Sub = SwapCC ? TrueVal : FalseVal;
19898 SDValue A = Sub.getOperand(i: 0);
19899 SDValue B = Sub.getOperand(i: 1);
19900
19901 // Arrange the select such that we can match a masked
19902 // vrsub.vi to perform the conditional negate
19903 SDValue NegB = DAG.getNegative(Val: B, DL, VT);
19904 if (!SwapCC)
19905 CC = DAG.getLogicalNOT(DL, Val: CC, VT: CC->getValueType(ResNo: 0));
19906 SDValue NewB = DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: CC, N2: NegB, N3: B);
19907 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: A, N2: NewB);
19908}
19909
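/// Combine ISD::SELECT: try the select-of-cttz/ctlz fold and the inverted
/// setcc fold first, then attempt to sink the select into one of its binop
/// arms (skipped when the target prefers conditional-move fusion).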
19910static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
19911 const RISCVSubtarget &Subtarget) {
19912 if (SDValue Folded = foldSelectOfCTTZOrCTLZ(N, DAG))
19913 return Folded;
19914
19915 if (SDValue V = useInversedSetcc(N, DAG, Subtarget))
19916 return V;
19917
19918 if (Subtarget.hasConditionalMoveFusion())
19919 return SDValue();
19920
19921 SDValue TrueVal = N->getOperand(Num: 1);
19922 SDValue FalseVal = N->getOperand(Num: 2);
19923 if (SDValue V = tryFoldSelectIntoOp(N, DAG, TrueVal, FalseVal, /*Swapped*/false))
19924 return V;
19925 return tryFoldSelectIntoOp(N, DAG, TrueVal: FalseVal, FalseVal: TrueVal, /*Swapped*/true);
19926}
19927
19928/// If we have a build_vector where each lane is binop X, C, where C
19929/// is a constant (but not necessarily the same constant on all lanes),
19930/// form binop (build_vector x1, x2, ...), (build_vector c1, c2, c3, ..).
19931 /// We assume that materializing a constant build vector will be no more
19932 /// expensive than performing O(n) binops.
19933static SDValue performBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
19934 const RISCVSubtarget &Subtarget,
19935 const RISCVTargetLowering &TLI) {
19936 SDLoc DL(N);
19937 EVT VT = N->getValueType(ResNo: 0);
19938
19939 assert(!VT.isScalableVector() && "unexpected build vector");
19940
19941 if (VT.getVectorNumElements() == 1)
19942 return SDValue();
19943
19944 const unsigned Opcode = N->op_begin()->getNode()->getOpcode();
19945 if (!TLI.isBinOp(Opcode))
19946 return SDValue();
19947
19948 if (!TLI.isOperationLegalOrCustom(Op: Opcode, VT) || !TLI.isTypeLegal(VT))
19949 return SDValue();
19950
19951 // This BUILD_VECTOR involves an implicit truncation, and sinking
19952 // truncates through binops is non-trivial.
19953 if (N->op_begin()->getValueType() != VT.getVectorElementType())
19954 return SDValue();
19955
19956 SmallVector<SDValue> LHSOps;
19957 SmallVector<SDValue> RHSOps;
19958 for (SDValue Op : N->ops()) {
19959 if (Op.isUndef()) {
19960 // We can't form a divide or remainder from undef.
19961 if (!DAG.isSafeToSpeculativelyExecute(Opcode))
19962 return SDValue();
19963
19964 LHSOps.push_back(Elt: Op);
19965 RHSOps.push_back(Elt: Op);
19966 continue;
19967 }
19968
19969 // TODO: We can handle operations which have a neutral rhs value
19970 // (e.g. x + 0, a * 1 or a << 0), but we then have to keep track
19971 // of profit in a more explicit manner.
19972 if (Op.getOpcode() != Opcode || !Op.hasOneUse())
19973 return SDValue();
19974
19975 LHSOps.push_back(Elt: Op.getOperand(i: 0));
19976 if (!isa<ConstantSDNode>(Val: Op.getOperand(i: 1)) &&
19977 !isa<ConstantFPSDNode>(Val: Op.getOperand(i: 1)))
19978 return SDValue();
19979 // FIXME: Return failure if the RHS type doesn't match the LHS. Shifts may
19980 // have different LHS and RHS types.
19981 if (Op.getOperand(i: 0).getValueType() != Op.getOperand(i: 1).getValueType())
19982 return SDValue();
19983
19984 RHSOps.push_back(Elt: Op.getOperand(i: 1));
19985 }
19986
19987 return DAG.getNode(Opcode, DL, VT, N1: DAG.getBuildVector(VT, DL, Ops: LHSOps),
19988 N2: DAG.getBuildVector(VT, DL, Ops: RHSOps));
19989}
19990
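/// Map an i8 vector operand type to the i32 result type used by the vqdot*
/// partial reductions: same total width, a quarter of the element count.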
19991static MVT getQDOTXResultType(MVT OpVT) {
19992 ElementCount OpEC = OpVT.getVectorElementCount();
19993 assert(OpEC.isKnownMultipleOf(4) && OpVT.getVectorElementType() == MVT::i8);
19994 return MVT::getVectorVT(VT: MVT::i32, EC: OpEC.divideCoefficientBy(RHS: 4));
19995}
19996
19997/// Given fixed length vectors A and B with equal element types, but possibly
19998/// different number of elements, return A + B where either A or B is zero
19999/// padded to the larger number of elements.
20000static SDValue getZeroPaddedAdd(const SDLoc &DL, SDValue A, SDValue B,
20001 SelectionDAG &DAG) {
20002 // NOTE: Manually doing the extract/add/insert scheme produces
20003 // significantly better codegen than the naive pad with zeros
20004 // and add scheme.
20005 EVT AVT = A.getValueType();
20006 EVT BVT = B.getValueType();
20007 assert(AVT.getVectorElementType() == BVT.getVectorElementType());
20008 if (AVT.getVectorMinNumElements() > BVT.getVectorMinNumElements()) {
20009 std::swap(a&: A, b&: B);
20010 std::swap(a&: AVT, b&: BVT);
20011 }
20012
20013 SDValue BPart = DAG.getExtractSubvector(DL, VT: AVT, Vec: B, Idx: 0);
20014 SDValue Res = DAG.getNode(Opcode: ISD::ADD, DL, VT: AVT, N1: A, N2: BPart);
20015 return DAG.getInsertSubvector(DL, Vec: B, SubVec: Res, Idx: 0);
20016}
20017
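/// Given an i32 vector feeding an add reduction, try to re-express it with
/// PARTIAL_REDUCE_[S|U|SU]MLA nodes so vqdot* instructions can be used,
/// recursing through add-like trees whose leaves are sign/zero extends of
/// i8 vectors or multiplies of such extends.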
20018static SDValue foldReduceOperandViaVQDOT(SDValue InVec, const SDLoc &DL,
20019 SelectionDAG &DAG,
20020 const RISCVSubtarget &Subtarget,
20021 const RISCVTargetLowering &TLI) {
20022 using namespace SDPatternMatch;
20023 // Note: We intentionally do not check the legality of the reduction type.
20024 // We want to handle the m4/m8 *src* types, and thus need to let illegal
20025 // intermediate types flow through here.
20026 if (InVec.getValueType().getVectorElementType() != MVT::i32 ||
20027 !InVec.getValueType().getVectorElementCount().isKnownMultipleOf(RHS: 4))
20028 return SDValue();
20029
20030 // Recurse through adds/disjoint ors (since generic dag canonicalizes to that
20031 // form).
20032 SDValue A, B;
20033 if (sd_match(N: InVec, P: m_AddLike(L: m_Value(N&: A), R: m_Value(N&: B)))) {
20034 SDValue AOpt = foldReduceOperandViaVQDOT(InVec: A, DL, DAG, Subtarget, TLI);
20035 SDValue BOpt = foldReduceOperandViaVQDOT(InVec: B, DL, DAG, Subtarget, TLI);
20036 if (AOpt || BOpt) {
20037 if (AOpt)
20038 A = AOpt;
20039 if (BOpt)
20040 B = BOpt;
20041 // From here, we're doing A + B with mixed types, implicitly zero
20042 // padded to the wider type. Note that we *don't* need the result
20043 // type to be the original VT, and in fact prefer narrower ones
20044 // if possible.
20045 return getZeroPaddedAdd(DL, A, B, DAG);
20046 }
20047 }
20048
20049 // zext a <--> partial_reduce_umla 0, a, 1
20050 // sext a <--> partial_reduce_smla 0, a, 1
20051 if (InVec.getOpcode() == ISD::ZERO_EXTEND ||
20052 InVec.getOpcode() == ISD::SIGN_EXTEND) {
20053 SDValue A = InVec.getOperand(i: 0);
20054 EVT OpVT = A.getValueType();
20055 if (OpVT.getVectorElementType() != MVT::i8 || !TLI.isTypeLegal(VT: OpVT))
20056 return SDValue();
20057
20058 MVT ResVT = getQDOTXResultType(OpVT: A.getSimpleValueType());
20059 SDValue B = DAG.getConstant(Val: 0x1, DL, VT: OpVT);
20060 bool IsSigned = InVec.getOpcode() == ISD::SIGN_EXTEND;
20061 unsigned Opc =
20062 IsSigned ? ISD::PARTIAL_REDUCE_SMLA : ISD::PARTIAL_REDUCE_UMLA;
20063 return DAG.getNode(Opcode: Opc, DL, VT: ResVT, Ops: {DAG.getConstant(Val: 0, DL, VT: ResVT), A, B});
20064 }
20065
20066 // mul (sext a, sext b) -> partial_reduce_smla 0, a, b
20067 // mul (zext a, zext b) -> partial_reduce_umla 0, a, b
20068 // mul (sext a, zext b) -> partial_reduce_sumla 0, a, b
20069 // mul (zext a, sext b) -> partial_reduce_sumla 0, b, a (swapped)
20070 if (!sd_match(N: InVec, P: m_Mul(L: m_Value(N&: A), R: m_Value(N&: B))))
20071 return SDValue();
20072
20073 if (!ISD::isExtOpcode(Opcode: A.getOpcode()))
20074 return SDValue();
20075
20076 EVT OpVT = A.getOperand(i: 0).getValueType();
20077 if (OpVT.getVectorElementType() != MVT::i8 ||
20078 OpVT != B.getOperand(i: 0).getValueType() ||
20079 !TLI.isTypeLegal(VT: A.getValueType()))
20080 return SDValue();
20081
20082 unsigned Opc;
20083 if (A.getOpcode() == ISD::SIGN_EXTEND && B.getOpcode() == ISD::SIGN_EXTEND)
20084 Opc = ISD::PARTIAL_REDUCE_SMLA;
20085 else if (A.getOpcode() == ISD::ZERO_EXTEND &&
20086 B.getOpcode() == ISD::ZERO_EXTEND)
20087 Opc = ISD::PARTIAL_REDUCE_UMLA;
20088 else if (A.getOpcode() == ISD::SIGN_EXTEND &&
20089 B.getOpcode() == ISD::ZERO_EXTEND)
20090 Opc = ISD::PARTIAL_REDUCE_SUMLA;
20091 else if (A.getOpcode() == ISD::ZERO_EXTEND &&
20092 B.getOpcode() == ISD::SIGN_EXTEND) {
20093 Opc = ISD::PARTIAL_REDUCE_SUMLA;
20094 std::swap(a&: A, b&: B);
20095 } else
20096 return SDValue();
20097
20098 MVT ResVT = getQDOTXResultType(OpVT: OpVT.getSimpleVT());
20099 return DAG.getNode(
20100 Opcode: Opc, DL, VT: ResVT,
20101 Ops: {DAG.getConstant(Val: 0, DL, VT: ResVT), A.getOperand(i: 0), B.getOperand(i: 0)});
20102}
20103
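/// If Zvqdotq is available, try to rewrite the operand of a VECREDUCE_ADD in
/// terms of partial-reduction (vqdot) nodes and reduce the narrower result
/// instead.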
20104static SDValue performVECREDUCECombine(SDNode *N, SelectionDAG &DAG,
20105 const RISCVSubtarget &Subtarget,
20106 const RISCVTargetLowering &TLI) {
20107 if (!Subtarget.hasStdExtZvqdotq())
20108 return SDValue();
20109
20110 SDLoc DL(N);
20111 EVT VT = N->getValueType(ResNo: 0);
20112 SDValue InVec = N->getOperand(Num: 0);
20113 if (SDValue V = foldReduceOperandViaVQDOT(InVec, DL, DAG, Subtarget, TLI))
20114 return DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL, VT, Operand: V);
20115 return SDValue();
20116}
20117
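/// For fixed-length vectors, either sink an INSERT_VECTOR_ELT into the arms
/// of a matching one-use binop (when both RHS values are constant), or push
/// it into the relevant source operand of a CONCAT_VECTORS.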
20118static SDValue performINSERT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
20119 const RISCVSubtarget &Subtarget,
20120 const RISCVTargetLowering &TLI) {
20121 SDValue InVec = N->getOperand(Num: 0);
20122 SDValue InVal = N->getOperand(Num: 1);
20123 SDValue EltNo = N->getOperand(Num: 2);
20124 SDLoc DL(N);
20125
20126 EVT VT = InVec.getValueType();
20127 if (VT.isScalableVector())
20128 return SDValue();
20129
20130 if (!InVec.hasOneUse())
20131 return SDValue();
20132
20133 // Given insert_vector_elt (binop a, VecC), (same_binop b, C2), Elt,
20134 // move the insert_vector_elts into the arms of the binop. Note that
20135 // the new RHS must be a constant.
20136 const unsigned InVecOpcode = InVec->getOpcode();
20137 if (InVecOpcode == InVal->getOpcode() && TLI.isBinOp(Opcode: InVecOpcode) &&
20138 InVal.hasOneUse()) {
20139 SDValue InVecLHS = InVec->getOperand(Num: 0);
20140 SDValue InVecRHS = InVec->getOperand(Num: 1);
20141 SDValue InValLHS = InVal->getOperand(Num: 0);
20142 SDValue InValRHS = InVal->getOperand(Num: 1);
20143
20144 if (!ISD::isBuildVectorOfConstantSDNodes(N: InVecRHS.getNode()))
20145 return SDValue();
20146 if (!isa<ConstantSDNode>(Val: InValRHS) && !isa<ConstantFPSDNode>(Val: InValRHS))
20147 return SDValue();
20148 // FIXME: Return failure if the RHS type doesn't match the LHS. Shifts may
20149 // have different LHS and RHS types.
20150 if (InVec.getOperand(i: 0).getValueType() != InVec.getOperand(i: 1).getValueType())
20151 return SDValue();
20152 SDValue LHS = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL, VT,
20153 N1: InVecLHS, N2: InValLHS, N3: EltNo);
20154 SDValue RHS = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL, VT,
20155 N1: InVecRHS, N2: InValRHS, N3: EltNo);
20156 return DAG.getNode(Opcode: InVecOpcode, DL, VT, N1: LHS, N2: RHS);
20157 }
20158
20159 // Given insert_vector_elt (concat_vectors ...), InVal, Elt
20160 // move the insert_vector_elt to the source operand of the concat_vector.
20161 if (InVec.getOpcode() != ISD::CONCAT_VECTORS)
20162 return SDValue();
20163
20164 auto *IndexC = dyn_cast<ConstantSDNode>(Val&: EltNo);
20165 if (!IndexC)
20166 return SDValue();
20167 unsigned Elt = IndexC->getZExtValue();
20168
20169 EVT ConcatVT = InVec.getOperand(i: 0).getValueType();
20170 if (ConcatVT.getVectorElementType() != InVal.getValueType())
20171 return SDValue();
20172 unsigned ConcatNumElts = ConcatVT.getVectorNumElements();
20173 unsigned NewIdx = Elt % ConcatNumElts;
20174
20175 unsigned ConcatOpIdx = Elt / ConcatNumElts;
20176 SDValue ConcatOp = InVec.getOperand(i: ConcatOpIdx);
20177 ConcatOp = DAG.getInsertVectorElt(DL, Vec: ConcatOp, Elt: InVal, Idx: NewIdx);
20178
20179 SmallVector<SDValue> ConcatOps(InVec->ops());
20180 ConcatOps[ConcatOpIdx] = ConcatOp;
20181 return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT, Ops: ConcatOps);
20182}
20183
20184// If we're concatenating a series of vector loads like
20185// concat_vectors (load v4i8, p+0), (load v4i8, p+n), (load v4i8, p+n*2) ...
20186 // then we can turn this into a strided load by widening the vector elements:
20187 //   vlse32 p, stride=n
20188static SDValue performCONCAT_VECTORSCombine(SDNode *N, SelectionDAG &DAG,
20189 const RISCVSubtarget &Subtarget,
20190 const RISCVTargetLowering &TLI) {
20191 SDLoc DL(N);
20192 EVT VT = N->getValueType(ResNo: 0);
20193
20194 // Only perform this combine on legal MVTs.
20195 if (!TLI.isTypeLegal(VT))
20196 return SDValue();
20197
20198 // TODO: Potentially extend this to scalable vectors
20199 if (VT.isScalableVector())
20200 return SDValue();
20201
20202 auto *BaseLd = dyn_cast<LoadSDNode>(Val: N->getOperand(Num: 0));
20203 if (!BaseLd || !BaseLd->isSimple() || !ISD::isNormalLoad(N: BaseLd) ||
20204 !SDValue(BaseLd, 0).hasOneUse())
20205 return SDValue();
20206
20207 EVT BaseLdVT = BaseLd->getValueType(ResNo: 0);
20208
20209 // Go through the loads and check that they're strided
20210 SmallVector<LoadSDNode *> Lds;
20211 Lds.push_back(Elt: BaseLd);
20212 Align Align = BaseLd->getAlign();
20213 for (SDValue Op : N->ops().drop_front()) {
20214 auto *Ld = dyn_cast<LoadSDNode>(Val&: Op);
20215 if (!Ld || !Ld->isSimple() || !Op.hasOneUse() ||
20216 Ld->getChain() != BaseLd->getChain() || !ISD::isNormalLoad(N: Ld) ||
20217 Ld->getValueType(ResNo: 0) != BaseLdVT)
20218 return SDValue();
20219
20220 Lds.push_back(Elt: Ld);
20221
20222 // The common alignment is the most restrictive (smallest) of all the loads
20223 Align = std::min(a: Align, b: Ld->getAlign());
20224 }
20225
20226 using PtrDiff = std::pair<std::variant<int64_t, SDValue>, bool>;
20227 auto GetPtrDiff = [&DAG](LoadSDNode *Ld1,
20228 LoadSDNode *Ld2) -> std::optional<PtrDiff> {
20229 // If the load ptrs can be decomposed into a common (Base + Index) with a
20230 // common constant stride, then return the constant stride.
20231 BaseIndexOffset BIO1 = BaseIndexOffset::match(N: Ld1, DAG);
20232 BaseIndexOffset BIO2 = BaseIndexOffset::match(N: Ld2, DAG);
20233 if (BIO1.equalBaseIndex(Other: BIO2, DAG))
20234 return {{BIO2.getOffset() - BIO1.getOffset(), false}};
20235
20236 // Otherwise try to match (add LastPtr, Stride) or (add NextPtr, Stride)
20237 SDValue P1 = Ld1->getBasePtr();
20238 SDValue P2 = Ld2->getBasePtr();
20239 if (P2.getOpcode() == ISD::ADD && P2.getOperand(i: 0) == P1)
20240 return {{P2.getOperand(i: 1), false}};
20241 if (P1.getOpcode() == ISD::ADD && P1.getOperand(i: 0) == P2)
20242 return {{P1.getOperand(i: 1), true}};
20243
20244 return std::nullopt;
20245 };
20246
20247 // Get the distance between the first and second loads
20248 auto BaseDiff = GetPtrDiff(Lds[0], Lds[1]);
20249 if (!BaseDiff)
20250 return SDValue();
20251
20252 // Check all the loads are the same distance apart
20253 for (auto *It = Lds.begin() + 1; It != Lds.end() - 1; It++)
20254 if (GetPtrDiff(*It, *std::next(x: It)) != BaseDiff)
20255 return SDValue();
20256
20257 // TODO: At this point, we've successfully matched a generalized gather
20258 // load. Maybe we should emit that, and then move the specialized
20259 // matchers above and below into a DAG combine?
20260
20261 // Get the widened scalar type, e.g. v4i8 -> i64
20262 unsigned WideScalarBitWidth =
20263 BaseLdVT.getScalarSizeInBits() * BaseLdVT.getVectorNumElements();
20264 MVT WideScalarVT = MVT::getIntegerVT(BitWidth: WideScalarBitWidth);
20265
20266 // Get the vector type for the strided load, e.g. 4 x v4i8 -> v4i64
20267 MVT WideVecVT = MVT::getVectorVT(VT: WideScalarVT, NumElements: N->getNumOperands());
20268 if (!TLI.isTypeLegal(VT: WideVecVT))
20269 return SDValue();
20270
20271 // Check that the operation is legal
20272 if (!TLI.isLegalStridedLoadStore(DataType: WideVecVT, Alignment: Align))
20273 return SDValue();
20274
20275 auto [StrideVariant, MustNegateStride] = *BaseDiff;
20276 SDValue Stride =
20277 std::holds_alternative<SDValue>(v: StrideVariant)
20278 ? std::get<SDValue>(v&: StrideVariant)
20279 : DAG.getSignedConstant(Val: std::get<int64_t>(v&: StrideVariant), DL,
20280 VT: Lds[0]->getOffset().getValueType());
20281 if (MustNegateStride)
20282 Stride = DAG.getNegative(Val: Stride, DL, VT: Stride.getValueType());
20283
20284 SDValue AllOneMask =
20285 DAG.getSplat(VT: WideVecVT.changeVectorElementType(EltVT: MVT::i1), DL,
20286 Op: DAG.getConstant(Val: 1, DL, VT: MVT::i1));
20287
20288 uint64_t MemSize;
20289 if (auto *ConstStride = dyn_cast<ConstantSDNode>(Val&: Stride);
20290 ConstStride && ConstStride->getSExtValue() >= 0)
20291 // total size = (elsize * n) + (stride - elsize) * (n-1)
20292 // = elsize + stride * (n-1)
20293 MemSize = WideScalarVT.getSizeInBits() +
20294 ConstStride->getSExtValue() * (N->getNumOperands() - 1);
20295 else
20296 // If Stride isn't constant, then we can't know how much it will load
20297 MemSize = MemoryLocation::UnknownSize;
20298
20299 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
20300 PtrInfo: BaseLd->getPointerInfo(), F: BaseLd->getMemOperand()->getFlags(), Size: MemSize,
20301 BaseAlignment: Align);
20302
20303 SDValue StridedLoad = DAG.getStridedLoadVP(
20304 VT: WideVecVT, DL, Chain: BaseLd->getChain(), Ptr: BaseLd->getBasePtr(), Stride,
20305 Mask: AllOneMask,
20306 EVL: DAG.getConstant(Val: N->getNumOperands(), DL, VT: Subtarget.getXLenVT()), MMO);
20307
20308 for (SDValue Ld : N->ops())
20309 DAG.makeEquivalentMemoryOrdering(OldLoad: cast<LoadSDNode>(Val&: Ld), NewMemOp: StridedLoad);
20310
20311 return DAG.getBitcast(VT: VT.getSimpleVT(), V: StridedLoad);
20312}
20313
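/// Combine VECTOR_SHUFFLE: recognize a select-like shuffle of an add/sub
/// pair and rewrite it as an add with a conditionally negated operand, and
/// split shuffles whose elements are wider than ELEN (e.g. i128/i256) into
/// equivalent shuffles over elements of half the width.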
20314static SDValue performVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG,
20315 const RISCVSubtarget &Subtarget,
20316 const RISCVTargetLowering &TLI) {
20317 SDLoc DL(N);
20318 EVT VT = N->getValueType(ResNo: 0);
20319 const unsigned ElementSize = VT.getScalarSizeInBits();
20320 const unsigned NumElts = VT.getVectorNumElements();
20321 SDValue V1 = N->getOperand(Num: 0);
20322 SDValue V2 = N->getOperand(Num: 1);
20323 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Val: N)->getMask();
20324 MVT XLenVT = Subtarget.getXLenVT();
20325
20326 // Recognize a disguised select of add/sub.
20327 bool SwapCC;
20328 if (ShuffleVectorInst::isSelectMask(Mask, NumSrcElts: NumElts) &&
20329 matchSelectAddSub(TrueVal: V1, FalseVal: V2, SwapCC)) {
20330 SDValue Sub = SwapCC ? V1 : V2;
20331 SDValue A = Sub.getOperand(i: 0);
20332 SDValue B = Sub.getOperand(i: 1);
20333
20334 SmallVector<SDValue> MaskVals;
20335 for (int MaskIndex : Mask) {
20336 bool SelectMaskVal = (MaskIndex < (int)NumElts);
20337 MaskVals.push_back(Elt: DAG.getConstant(Val: SelectMaskVal, DL, VT: XLenVT));
20338 }
20339 assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
20340 EVT MaskVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i1, NumElements: NumElts);
20341 SDValue CC = DAG.getBuildVector(VT: MaskVT, DL, Ops: MaskVals);
20342
20343 // Arrange the select such that we can match a masked
20344 // vrsub.vi to perform the conditional negate
20345 SDValue NegB = DAG.getNegative(Val: B, DL, VT);
20346 if (!SwapCC)
20347 CC = DAG.getLogicalNOT(DL, Val: CC, VT: CC->getValueType(ResNo: 0));
20348 SDValue NewB = DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: CC, N2: NegB, N3: B);
20349 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: A, N2: NewB);
20350 }
20351
20352 // Custom legalize <N x i128> or <N x i256> to <M x ELEN>. This runs
20353 // during the combine phase before type legalization, and relies on
20354 // DAGCombine not undoing the transform if isShuffleMaskLegal returns false
20355 // for the source mask.
20356 if (TLI.isTypeLegal(VT) || ElementSize <= Subtarget.getELen() ||
20357 !isPowerOf2_64(Value: ElementSize) || VT.getVectorNumElements() % 2 != 0 ||
20358 VT.isFloatingPoint() || TLI.isShuffleMaskLegal(M: Mask, VT))
20359 return SDValue();
20360
20361 SmallVector<int, 8> NewMask;
20362 narrowShuffleMaskElts(Scale: 2, Mask, ScaledMask&: NewMask);
20363
20364 LLVMContext &C = *DAG.getContext();
20365 EVT NewEltVT = EVT::getIntegerVT(Context&: C, BitWidth: ElementSize / 2);
20366 EVT NewVT = EVT::getVectorVT(Context&: C, VT: NewEltVT, NumElements: VT.getVectorNumElements() * 2);
20367 SDValue Res = DAG.getVectorShuffle(VT: NewVT, dl: DL, N1: DAG.getBitcast(VT: NewVT, V: V1),
20368 N2: DAG.getBitcast(VT: NewVT, V: V2), Mask: NewMask);
20369 return DAG.getBitcast(VT, V: Res);
20370}
20371
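// Try to fold a scalable-vector ADD/ADD_VL of a widening multiply
// (VWMUL*_VL) into a single widening multiply-accumulate node, provided the
// multiply's passthru is undef and its mask and VL match the add's.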
20372static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG,
20373 const RISCVSubtarget &Subtarget) {
20374 assert(N->getOpcode() == RISCVISD::ADD_VL || N->getOpcode() == ISD::ADD);
20375
20376 if (N->getValueType(ResNo: 0).isFixedLengthVector())
20377 return SDValue();
20378
20379 SDValue Addend = N->getOperand(Num: 0);
20380 SDValue MulOp = N->getOperand(Num: 1);
20381
20382 if (N->getOpcode() == RISCVISD::ADD_VL) {
20383 SDValue AddPassthruOp = N->getOperand(Num: 2);
20384 if (!AddPassthruOp.isUndef())
20385 return SDValue();
20386 }
20387
20388 auto IsVWMulOpc = [](unsigned Opc) {
20389 switch (Opc) {
20390 case RISCVISD::VWMUL_VL:
20391 case RISCVISD::VWMULU_VL:
20392 case RISCVISD::VWMULSU_VL:
20393 return true;
20394 default:
20395 return false;
20396 }
20397 };
20398
20399 if (!IsVWMulOpc(MulOp.getOpcode()))
20400 std::swap(a&: Addend, b&: MulOp);
20401
20402 if (!IsVWMulOpc(MulOp.getOpcode()))
20403 return SDValue();
20404
20405 SDValue MulPassthruOp = MulOp.getOperand(i: 2);
20406
20407 if (!MulPassthruOp.isUndef())
20408 return SDValue();
20409
20410 auto [AddMask, AddVL] = [](SDNode *N, SelectionDAG &DAG,
20411 const RISCVSubtarget &Subtarget) {
20412 if (N->getOpcode() == ISD::ADD) {
20413 SDLoc DL(N);
20414 return getDefaultScalableVLOps(VecVT: N->getSimpleValueType(ResNo: 0), DL, DAG,
20415 Subtarget);
20416 }
20417 return std::make_pair(x: N->getOperand(Num: 3), y: N->getOperand(Num: 4));
20418 }(N, DAG, Subtarget);
20419
20420 SDValue MulMask = MulOp.getOperand(i: 3);
20421 SDValue MulVL = MulOp.getOperand(i: 4);
20422
20423 if (AddMask != MulMask || AddVL != MulVL)
20424 return SDValue();
20425
20426 const auto &TSInfo =
20427 static_cast<const RISCVSelectionDAGInfo &>(DAG.getSelectionDAGInfo());
20428 unsigned Opc = TSInfo.getMAccOpcode(MulOpcode: MulOp.getOpcode());
20429
20430 SDLoc DL(N);
20431 EVT VT = N->getValueType(ResNo: 0);
20432 SDValue Ops[] = {MulOp.getOperand(i: 0), MulOp.getOperand(i: 1), Addend, AddMask,
20433 AddVL};
20434 return DAG.getNode(Opcode: Opc, DL, VT, Ops);
20435}
20436
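// Fold an ADD/ADD_VL of a VQDOT*_VL result into the dot product itself by
// accumulating the other addend into the VQDOT's accumulator operand, when
// the VLs match and the add is unmasked.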
20437static SDValue combineVqdotAccum(SDNode *N, SelectionDAG &DAG,
20438 const RISCVSubtarget &Subtarget) {
20439
20440 assert(N->getOpcode() == RISCVISD::ADD_VL || N->getOpcode() == ISD::ADD);
20441
20442 if (!N->getValueType(ResNo: 0).isVector())
20443 return SDValue();
20444
20445 SDValue Addend = N->getOperand(Num: 0);
20446 SDValue DotOp = N->getOperand(Num: 1);
20447
20448 if (N->getOpcode() == RISCVISD::ADD_VL) {
20449 SDValue AddPassthruOp = N->getOperand(Num: 2);
20450 if (!AddPassthruOp.isUndef())
20451 return SDValue();
20452 }
20453
20454 auto IsVqdotqOpc = [](unsigned Opc) {
20455 switch (Opc) {
20456 case RISCVISD::VQDOT_VL:
20457 case RISCVISD::VQDOTU_VL:
20458 case RISCVISD::VQDOTSU_VL:
20459 return true;
20460 default:
20461 return false;
20462 }
20463 };
20464
20465 if (!IsVqdotqOpc(DotOp.getOpcode()))
20466 std::swap(a&: Addend, b&: DotOp);
20467
20468 if (!IsVqdotqOpc(DotOp.getOpcode()))
20469 return SDValue();
20470
20471 auto [AddMask, AddVL] = [](SDNode *N, SelectionDAG &DAG,
20472 const RISCVSubtarget &Subtarget) {
20473 if (N->getOpcode() == ISD::ADD) {
20474 SDLoc DL(N);
20475 return getDefaultScalableVLOps(VecVT: N->getSimpleValueType(ResNo: 0), DL, DAG,
20476 Subtarget);
20477 }
20478 return std::make_pair(x: N->getOperand(Num: 3), y: N->getOperand(Num: 4));
20479 }(N, DAG, Subtarget);
20480
20481 SDValue MulVL = DotOp.getOperand(i: 4);
20482 if (AddVL != MulVL)
20483 return SDValue();
20484
20485 if (AddMask.getOpcode() != RISCVISD::VMSET_VL ||
20486 AddMask.getOperand(i: 0) != MulVL)
20487 return SDValue();
20488
20489 SDValue AccumOp = DotOp.getOperand(i: 2);
20490 SDLoc DL(N);
20491 EVT VT = N->getValueType(ResNo: 0);
20492 Addend = DAG.getNode(Opcode: RISCVISD::ADD_VL, DL, VT, N1: Addend, N2: AccumOp,
20493 N3: DAG.getUNDEF(VT), N4: AddMask, N5: AddVL);
20494
20495 SDValue Ops[] = {DotOp.getOperand(i: 0), DotOp.getOperand(i: 1), Addend,
20496 DotOp.getOperand(i: 3), DotOp->getOperand(Num: 4)};
20497 return DAG.getNode(Opcode: DotOp->getOpcode(), DL, VT, Ops);
20498}
20499
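// RISC-V indexed accesses only support unsigned indices, so before
// legalization rewrite a signed gather/scatter index as unsigned,
// sign-extending indices narrower than XLEN first so no bits are lost.
// Returns true if Index or IndexType was changed.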
20500static bool
20501legalizeScatterGatherIndexType(SDLoc DL, SDValue &Index,
20502 ISD::MemIndexType &IndexType,
20503 RISCVTargetLowering::DAGCombinerInfo &DCI) {
20504 if (!DCI.isBeforeLegalize())
20505 return false;
20506
20507 SelectionDAG &DAG = DCI.DAG;
20508 const MVT XLenVT =
20509 DAG.getMachineFunction().getSubtarget<RISCVSubtarget>().getXLenVT();
20510
20511 const EVT IndexVT = Index.getValueType();
20512
20513 // RISC-V indexed loads and stores only support the "unsigned unscaled"
20514 // addressing mode, so anything else must be manually legalized.
20515 if (!isIndexTypeSigned(IndexType))
20516 return false;
20517
20518 if (IndexVT.getVectorElementType().bitsLT(VT: XLenVT)) {
20519 // Any index legalization should first promote to XLenVT, so we don't lose
20520 // bits when scaling. This may create an illegal index type so we let
20521 // LLVM's legalization take care of the splitting.
20522 // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
20523 Index = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL,
20524 VT: EVT::getVectorVT(Context&: *DAG.getContext(), VT: XLenVT,
20525 EC: IndexVT.getVectorElementCount()),
20526 Operand: Index);
20527 }
20528 IndexType = ISD::UNSIGNED_SCALED;
20529 return true;
20530}
20531
20532/// Match the index vector of a scatter or gather node as the shuffle mask
20533/// which performs the rearrangement if possible. Will only match if
20534/// all lanes are touched, and thus replacing the scatter or gather with
20535/// a unit strided access and shuffle is legal.
20536static bool matchIndexAsShuffle(EVT VT, SDValue Index, SDValue Mask,
20537 SmallVector<int> &ShuffleMask) {
20538 if (!ISD::isConstantSplatVectorAllOnes(N: Mask.getNode()))
20539 return false;
20540 if (!ISD::isBuildVectorOfConstantSDNodes(N: Index.getNode()))
20541 return false;
20542
20543 const unsigned ElementSize = VT.getScalarStoreSize();
20544 const unsigned NumElems = VT.getVectorNumElements();
20545
20546 // Create the shuffle mask and check all bits active
20547 assert(ShuffleMask.empty());
20548 BitVector ActiveLanes(NumElems);
20549 for (unsigned i = 0; i < Index->getNumOperands(); i++) {
20550 // TODO: We've found an active bit of UB, and could be
20551 // more aggressive here if desired.
20552 if (Index->getOperand(Num: i)->isUndef())
20553 return false;
20554 uint64_t C = Index->getConstantOperandVal(Num: i);
20555 if (C % ElementSize != 0)
20556 return false;
20557 C = C / ElementSize;
20558 if (C >= NumElems)
20559 return false;
20560 ShuffleMask.push_back(Elt: C);
20561 ActiveLanes.set(C);
20562 }
20563 return ActiveLanes.all();
20564}
20565
20566/// Match the index of a gather or scatter operation as an operation
20567/// with twice the element width and half the number of elements. This is
20568/// generally profitable (if legal) because these operations are linear
20569 /// in VL, so even if we cause some extra VTYPE/VL toggles, we still
20570/// come out ahead.
20571static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask,
20572 Align BaseAlign, const RISCVSubtarget &ST) {
20573 if (!ISD::isConstantSplatVectorAllOnes(N: Mask.getNode()))
20574 return false;
20575 if (!ISD::isBuildVectorOfConstantSDNodes(N: Index.getNode()))
20576 return false;
20577
20578 // Attempt a doubling. If we can use an element type 4x or 8x in
20579 // size, this will happen via multiple iterations of the transform.
20580 const unsigned NumElems = VT.getVectorNumElements();
20581 if (NumElems % 2 != 0)
20582 return false;
20583
20584 const unsigned ElementSize = VT.getScalarStoreSize();
20585 const unsigned WiderElementSize = ElementSize * 2;
20586 if (WiderElementSize > ST.getELen()/8)
20587 return false;
20588
20589 if (!ST.enableUnalignedVectorMem() && BaseAlign < WiderElementSize)
20590 return false;
20591
20592 for (unsigned i = 0; i < Index->getNumOperands(); i++) {
20593 // TODO: We've found an active bit of UB, and could be
20594 // more aggressive here if desired.
20595 if (Index->getOperand(Num: i)->isUndef())
20596 return false;
20597 // TODO: This offset check is too strict if we support fully
20598 // misaligned memory operations.
20599 uint64_t C = Index->getConstantOperandVal(Num: i);
20600 if (i % 2 == 0) {
20601 if (C % WiderElementSize != 0)
20602 return false;
20603 continue;
20604 }
20605 uint64_t Last = Index->getConstantOperandVal(Num: i-1);
20606 if (C != Last + ElementSize)
20607 return false;
20608 }
20609 return true;
20610}
20611
20612// trunc (sra sext (X), zext (Y)) -> sra (X, smin (Y, scalarsize(Y) - 1))
20613 // This would benefit the cases where X and Y are both vectors of the same
20614 // low-precision value type. Since the truncate would be lowered into
20615 // n levels of TRUNCATE_VECTOR_VL to satisfy RVV's SEW*2->SEW truncate
20616 // restriction, such a pattern would be expanded into a series of "vsetvli"
20617 // and "vnsrl" instructions later to reach this point.
20618static SDValue combineTruncOfSraSext(SDNode *N, SelectionDAG &DAG) {
20619 SDValue Mask = N->getOperand(Num: 1);
20620 SDValue VL = N->getOperand(Num: 2);
20621
20622 bool IsVLMAX = isAllOnesConstant(V: VL) ||
20623 (isa<RegisterSDNode>(Val: VL) &&
20624 cast<RegisterSDNode>(Val&: VL)->getReg() == RISCV::X0);
20625 if (!IsVLMAX || Mask.getOpcode() != RISCVISD::VMSET_VL ||
20626 Mask.getOperand(i: 0) != VL)
20627 return SDValue();
20628
20629 auto IsTruncNode = [&](SDValue V) {
20630 return V.getOpcode() == RISCVISD::TRUNCATE_VECTOR_VL &&
20631 V.getOperand(i: 1) == Mask && V.getOperand(i: 2) == VL;
20632 };
20633
20634 SDValue Op = N->getOperand(Num: 0);
20635
20636 // We need to first find the innermost TRUNCATE_VECTOR_VL node
20637 // to distinguish such a pattern.
20638 while (IsTruncNode(Op)) {
20639 if (!Op.hasOneUse())
20640 return SDValue();
20641 Op = Op.getOperand(i: 0);
20642 }
20643
20644 if (Op.getOpcode() != ISD::SRA || !Op.hasOneUse())
20645 return SDValue();
20646
20647 SDValue N0 = Op.getOperand(i: 0);
20648 SDValue N1 = Op.getOperand(i: 1);
20649 if (N0.getOpcode() != ISD::SIGN_EXTEND || !N0.hasOneUse() ||
20650 N1.getOpcode() != ISD::ZERO_EXTEND || !N1.hasOneUse())
20651 return SDValue();
20652
20653 SDValue N00 = N0.getOperand(i: 0);
20654 SDValue N10 = N1.getOperand(i: 0);
20655 if (!N00.getValueType().isVector() ||
20656 N00.getValueType() != N10.getValueType() ||
20657 N->getValueType(ResNo: 0) != N10.getValueType())
20658 return SDValue();
20659
20660 unsigned MaxShAmt = N10.getValueType().getScalarSizeInBits() - 1;
20661 SDValue SMin =
20662 DAG.getNode(Opcode: ISD::SMIN, DL: SDLoc(N1), VT: N->getValueType(ResNo: 0), N1: N10,
20663 N2: DAG.getConstant(Val: MaxShAmt, DL: SDLoc(N1), VT: N->getValueType(ResNo: 0)));
20664 return DAG.getNode(Opcode: ISD::SRA, DL: SDLoc(N), VT: N->getValueType(ResNo: 0), N1: N00, N2: SMin);
20665}
20666
20667// Combine (truncate_vector_vl (umin X, C)) -> (vnclipu_vl X) if C is the
20668// maximum value for the truncated type.
20669// Combine (truncate_vector_vl (smin (smax X, C2), C1)) -> (vnclip_vl X) if C1
20670// is the signed maximum value for the truncated type and C2 is the signed
20671// minimum value.
20672static SDValue combineTruncToVnclip(SDNode *N, SelectionDAG &DAG,
20673 const RISCVSubtarget &Subtarget) {
20674 assert(N->getOpcode() == RISCVISD::TRUNCATE_VECTOR_VL);
20675
20676 MVT VT = N->getSimpleValueType(ResNo: 0);
20677
20678 SDValue Mask = N->getOperand(Num: 1);
20679 SDValue VL = N->getOperand(Num: 2);
20680
20681 auto MatchMinMax = [&VL, &Mask](SDValue V, unsigned Opc, unsigned OpcVL,
20682 APInt &SplatVal) {
20683 if (V.getOpcode() != Opc &&
20684 !(V.getOpcode() == OpcVL && V.getOperand(i: 2).isUndef() &&
20685 V.getOperand(i: 3) == Mask && V.getOperand(i: 4) == VL))
20686 return SDValue();
20687
20688 SDValue Op = V.getOperand(i: 1);
20689
20690 // Peek through conversion between fixed and scalable vectors.
20691 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op.getOperand(i: 0).isUndef() &&
20692 isNullConstant(V: Op.getOperand(i: 2)) &&
20693 Op.getOperand(i: 1).getValueType().isFixedLengthVector() &&
20694 Op.getOperand(i: 1).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
20695 Op.getOperand(i: 1).getOperand(i: 0).getValueType() == Op.getValueType() &&
20696 isNullConstant(V: Op.getOperand(i: 1).getOperand(i: 1)))
20697 Op = Op.getOperand(i: 1).getOperand(i: 0);
20698
20699 if (ISD::isConstantSplatVector(N: Op.getNode(), SplatValue&: SplatVal))
20700 return V.getOperand(i: 0);
20701
20702 if (Op.getOpcode() == RISCVISD::VMV_V_X_VL && Op.getOperand(i: 0).isUndef() &&
20703 Op.getOperand(i: 2) == VL) {
20704 if (auto *Op1 = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) {
20705 SplatVal =
20706 Op1->getAPIntValue().sextOrTrunc(width: Op.getScalarValueSizeInBits());
20707 return V.getOperand(i: 0);
20708 }
20709 }
20710
20711 return SDValue();
20712 };
20713
20714 SDLoc DL(N);
20715
20716 auto DetectUSatPattern = [&](SDValue V) {
20717 APInt LoC, HiC;
20718
20719 // Simple case, V is a UMIN.
20720 if (SDValue UMinOp = MatchMinMax(V, ISD::UMIN, RISCVISD::UMIN_VL, HiC))
20721 if (HiC.isMask(numBits: VT.getScalarSizeInBits()))
20722 return UMinOp;
20723
20724 // If we have an SMAX that removes negative numbers first, then we can match
20725 // SMIN instead of UMIN.
20726 if (SDValue SMinOp = MatchMinMax(V, ISD::SMIN, RISCVISD::SMIN_VL, HiC))
20727 if (SDValue SMaxOp =
20728 MatchMinMax(SMinOp, ISD::SMAX, RISCVISD::SMAX_VL, LoC))
20729 if (LoC.isNonNegative() && HiC.isMask(numBits: VT.getScalarSizeInBits()))
20730 return SMinOp;
20731
20732 // If we have an SMIN before an SMAX and the SMAX constant is less than or
20733 // equal to the SMIN constant, we can use vnclipu if we insert a new SMAX
20734 // first.
20735 if (SDValue SMaxOp = MatchMinMax(V, ISD::SMAX, RISCVISD::SMAX_VL, LoC))
20736 if (SDValue SMinOp =
20737 MatchMinMax(SMaxOp, ISD::SMIN, RISCVISD::SMIN_VL, HiC))
20738 if (LoC.isNonNegative() && HiC.isMask(numBits: VT.getScalarSizeInBits()) &&
20739 HiC.uge(RHS: LoC))
20740 return DAG.getNode(Opcode: RISCVISD::SMAX_VL, DL, VT: V.getValueType(), N1: SMinOp,
20741 N2: V.getOperand(i: 1), N3: DAG.getUNDEF(VT: V.getValueType()),
20742 N4: Mask, N5: VL);
20743
20744 return SDValue();
20745 };
20746
20747 auto DetectSSatPattern = [&](SDValue V) {
20748 unsigned NumDstBits = VT.getScalarSizeInBits();
20749 unsigned NumSrcBits = V.getScalarValueSizeInBits();
20750 APInt SignedMax = APInt::getSignedMaxValue(numBits: NumDstBits).sext(width: NumSrcBits);
20751 APInt SignedMin = APInt::getSignedMinValue(numBits: NumDstBits).sext(width: NumSrcBits);
20752
20753 APInt HiC, LoC;
20754 if (SDValue SMinOp = MatchMinMax(V, ISD::SMIN, RISCVISD::SMIN_VL, HiC))
20755 if (SDValue SMaxOp =
20756 MatchMinMax(SMinOp, ISD::SMAX, RISCVISD::SMAX_VL, LoC))
20757 if (HiC == SignedMax && LoC == SignedMin)
20758 return SMaxOp;
20759
20760 if (SDValue SMaxOp = MatchMinMax(V, ISD::SMAX, RISCVISD::SMAX_VL, LoC))
20761 if (SDValue SMinOp =
20762 MatchMinMax(SMaxOp, ISD::SMIN, RISCVISD::SMIN_VL, HiC))
20763 if (HiC == SignedMax && LoC == SignedMin)
20764 return SMinOp;
20765
20766 return SDValue();
20767 };
20768
20769 SDValue Src = N->getOperand(Num: 0);
20770
20771 // Look through multiple layers of truncates.
20772 while (Src.getOpcode() == RISCVISD::TRUNCATE_VECTOR_VL &&
20773 Src.getOperand(i: 1) == Mask && Src.getOperand(i: 2) == VL &&
20774 Src.hasOneUse())
20775 Src = Src.getOperand(i: 0);
20776
20777 SDValue Val;
20778 unsigned ClipOpc;
20779 if ((Val = DetectUSatPattern(Src)))
20780 ClipOpc = RISCVISD::TRUNCATE_VECTOR_VL_USAT;
20781 else if ((Val = DetectSSatPattern(Src)))
20782 ClipOpc = RISCVISD::TRUNCATE_VECTOR_VL_SSAT;
20783 else
20784 return SDValue();
20785
20786 MVT ValVT = Val.getSimpleValueType();
20787
20788 do {
20789 MVT ValEltVT = MVT::getIntegerVT(BitWidth: ValVT.getScalarSizeInBits() / 2);
20790 ValVT = ValVT.changeVectorElementType(EltVT: ValEltVT);
20791 Val = DAG.getNode(Opcode: ClipOpc, DL, VT: ValVT, N1: Val, N2: Mask, N3: VL);
20792 } while (ValVT != VT);
20793
20794 return Val;
20795}
20796
20797// Convert
20798// (iX ctpop (bitcast (vXi1 A)))
20799// ->
20800// (zext (vcpop.m (nxvYi1 (insert_subvec (vXi1 A)))))
20801// and
20802 // (iN reduce.add (zext (vXi1 A) to vXiN))
20803// ->
20804// (zext (vcpop.m (nxvYi1 (insert_subvec (vXi1 A)))))
20805// FIXME: It's complicated to match all the variations of this after type
20806// legalization so we only handle the pre-type legalization pattern, but that
20807// requires the fixed vector type to be legal.
20808static SDValue combineToVCPOP(SDNode *N, SelectionDAG &DAG,
20809 const RISCVSubtarget &Subtarget) {
20810 unsigned Opc = N->getOpcode();
20811 assert((Opc == ISD::CTPOP || Opc == ISD::VECREDUCE_ADD) &&
20812 "Unexpected opcode");
20813 EVT VT = N->getValueType(ResNo: 0);
20814 if (!VT.isScalarInteger())
20815 return SDValue();
20816
20817 SDValue Src = N->getOperand(Num: 0);
20818
20819 if (Opc == ISD::CTPOP) {
20820 // Peek through zero_extend. It doesn't change the count.
20821 if (Src.getOpcode() == ISD::ZERO_EXTEND)
20822 Src = Src.getOperand(i: 0);
20823
20824 if (Src.getOpcode() != ISD::BITCAST)
20825 return SDValue();
20826 Src = Src.getOperand(i: 0);
20827 } else if (Opc == ISD::VECREDUCE_ADD) {
20828 if (Src.getOpcode() != ISD::ZERO_EXTEND)
20829 return SDValue();
20830 Src = Src.getOperand(i: 0);
20831 }
20832
20833 EVT SrcEVT = Src.getValueType();
20834 if (!SrcEVT.isSimple())
20835 return SDValue();
20836
20837 MVT SrcMVT = SrcEVT.getSimpleVT();
20838 // Make sure the input is an i1 vector.
20839 if (!SrcMVT.isVector() || SrcMVT.getVectorElementType() != MVT::i1)
20840 return SDValue();
20841
20842 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20843 if (!TLI.isTypeLegal(VT: SrcMVT))
20844 return SDValue();
20845
20846 // Check that destination type is large enough to hold result without
20847 // overflow.
20848 if (Opc == ISD::VECREDUCE_ADD) {
20849 unsigned EltSize = SrcMVT.getScalarSizeInBits();
20850 unsigned MinSize = SrcMVT.getSizeInBits().getKnownMinValue();
20851 unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
20852 unsigned MaxVLMAX = SrcMVT.isFixedLengthVector()
20853 ? SrcMVT.getVectorNumElements()
20854 : RISCVTargetLowering::computeVLMAX(
20855 VectorBits: VectorBitsMax, EltSize, MinSize);
20856 if (VT.getFixedSizeInBits() < Log2_32(Value: MaxVLMAX) + 1)
20857 return SDValue();
20858 }
20859
20860 MVT ContainerVT = SrcMVT;
20861 if (SrcMVT.isFixedLengthVector()) {
20862 ContainerVT = getContainerForFixedLengthVector(DAG, VT: SrcMVT, Subtarget);
20863 Src = convertToScalableVector(VT: ContainerVT, V: Src, DAG, Subtarget);
20864 }
20865
20866 SDLoc DL(N);
20867 auto [Mask, VL] = getDefaultVLOps(VecVT: SrcMVT, ContainerVT, DL, DAG, Subtarget);
20868
20869 MVT XLenVT = Subtarget.getXLenVT();
20870 SDValue Pop = DAG.getNode(Opcode: RISCVISD::VCPOP_VL, DL, VT: XLenVT, N1: Src, N2: Mask, N3: VL);
20871 return DAG.getZExtOrTrunc(Op: Pop, DL, VT);
20872}
20873
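// Combine vector SHL/SHL_VL of extended operands into widening operations:
// a shift of a zero-extend can become vwsll, and a shift of a sign/zero
// extend by a constant >= 2 can become a widening multiply by 1 << C.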
20874static SDValue performSHLCombine(SDNode *N,
20875 TargetLowering::DAGCombinerInfo &DCI,
20876 const RISCVSubtarget &Subtarget) {
20877 // (shl (zext x), y) -> (vwsll x, y)
20878 if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
20879 return V;
20880
20881 // (shl (sext x), C) -> (vwmulsu x, 1u << C)
20882 // (shl (zext x), C) -> (vwmulu x, 1u << C)
20883
20884 if (!DCI.isAfterLegalizeDAG())
20885 return SDValue();
20886
20887 SDValue LHS = N->getOperand(Num: 0);
20888 if (!LHS.hasOneUse())
20889 return SDValue();
20890 unsigned Opcode;
20891 switch (LHS.getOpcode()) {
20892 case ISD::SIGN_EXTEND:
20893 case RISCVISD::VSEXT_VL:
20894 Opcode = RISCVISD::VWMULSU_VL;
20895 break;
20896 case ISD::ZERO_EXTEND:
20897 case RISCVISD::VZEXT_VL:
20898 Opcode = RISCVISD::VWMULU_VL;
20899 break;
20900 default:
20901 return SDValue();
20902 }
20903
20904 SDValue RHS = N->getOperand(Num: 1);
20905 APInt ShAmt;
20906 uint64_t ShAmtInt;
20907 if (ISD::isConstantSplatVector(N: RHS.getNode(), SplatValue&: ShAmt))
20908 ShAmtInt = ShAmt.getZExtValue();
20909 else if (RHS.getOpcode() == RISCVISD::VMV_V_X_VL &&
20910 RHS.getOperand(i: 1).getOpcode() == ISD::Constant)
20911 ShAmtInt = RHS.getConstantOperandVal(i: 1);
20912 else
20913 return SDValue();
20914
20915 // Better foldings:
20916 // (shl (sext x), 1) -> (vwadd x, x)
20917 // (shl (zext x), 1) -> (vwaddu x, x)
20918 if (ShAmtInt <= 1)
20919 return SDValue();
20920
20921 SDValue NarrowOp = LHS.getOperand(i: 0);
20922 MVT NarrowVT = NarrowOp.getSimpleValueType();
20923 uint64_t NarrowBits = NarrowVT.getScalarSizeInBits();
20924 if (ShAmtInt >= NarrowBits)
20925 return SDValue();
20926 MVT VT = N->getSimpleValueType(ResNo: 0);
20927 if (NarrowBits * 2 != VT.getScalarSizeInBits())
20928 return SDValue();
20929
20930 SelectionDAG &DAG = DCI.DAG;
20931 SDLoc DL(N);
20932 SDValue Passthru, Mask, VL;
20933 switch (N->getOpcode()) {
20934 case ISD::SHL:
20935 Passthru = DAG.getUNDEF(VT);
20936 std::tie(args&: Mask, args&: VL) = getDefaultScalableVLOps(VecVT: VT, DL, DAG, Subtarget);
20937 break;
20938 case RISCVISD::SHL_VL:
20939 Passthru = N->getOperand(Num: 2);
20940 Mask = N->getOperand(Num: 3);
20941 VL = N->getOperand(Num: 4);
20942 break;
20943 default:
20944 llvm_unreachable("Expected SHL");
20945 }
20946 return DAG.getNode(Opcode, DL, VT, N1: NarrowOp,
20947 N2: DAG.getConstant(Val: 1ULL << ShAmtInt, DL: SDLoc(RHS), VT: NarrowVT),
20948 N3: Passthru, N4: Mask, N5: VL);
20949}
20950
20951SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
20952 DAGCombinerInfo &DCI) const {
20953 SelectionDAG &DAG = DCI.DAG;
20954 const MVT XLenVT = Subtarget.getXLenVT();
20955 SDLoc DL(N);
20956
20957 // Helper to call SimplifyDemandedBits on an operand of N where only some low
20958 // bits are demanded. N will be added to the Worklist if it was not deleted.
20959 // Caller should return SDValue(N, 0) if this returns true.
20960 auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
20961 SDValue Op = N->getOperand(Num: OpNo);
20962 APInt Mask = APInt::getLowBitsSet(numBits: Op.getValueSizeInBits(), loBitsSet: LowBits);
20963 if (!SimplifyDemandedBits(Op, DemandedBits: Mask, DCI))
20964 return false;
20965
20966 if (N->getOpcode() != ISD::DELETED_NODE)
20967 DCI.AddToWorklist(N);
20968 return true;
20969 };
20970
20971 switch (N->getOpcode()) {
20972 default:
20973 break;
20974 case RISCVISD::SplitF64: {
20975 SDValue Op0 = N->getOperand(Num: 0);
20976 // If the input to SplitF64 is just BuildPairF64 then the operation is
20977 // redundant. Instead, use BuildPairF64's operands directly.
20978 if (Op0->getOpcode() == RISCVISD::BuildPairF64)
20979 return DCI.CombineTo(N, Res0: Op0.getOperand(i: 0), Res1: Op0.getOperand(i: 1));
20980
20981 if (Op0->isUndef()) {
20982 SDValue Lo = DAG.getUNDEF(VT: MVT::i32);
20983 SDValue Hi = DAG.getUNDEF(VT: MVT::i32);
20984 return DCI.CombineTo(N, Res0: Lo, Res1: Hi);
20985 }
20986
20987 // It's cheaper to materialise two 32-bit integers than to load a double
20988 // from the constant pool and transfer it to integer registers through the
20989 // stack.
20990 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val&: Op0)) {
20991 APInt V = C->getValueAPF().bitcastToAPInt();
20992 SDValue Lo = DAG.getConstant(Val: V.trunc(width: 32), DL, VT: MVT::i32);
20993 SDValue Hi = DAG.getConstant(Val: V.lshr(shiftAmt: 32).trunc(width: 32), DL, VT: MVT::i32);
20994 return DCI.CombineTo(N, Res0: Lo, Res1: Hi);
20995 }
20996
20997 // This is a target-specific version of a DAGCombine performed in
20998 // DAGCombiner::visitBITCAST. It performs the equivalent of:
20999 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
21000 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
21001 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
21002 !Op0.getNode()->hasOneUse() || Subtarget.hasStdExtZdinx())
21003 break;
21004 SDValue NewSplitF64 =
21005 DAG.getNode(Opcode: RISCVISD::SplitF64, DL, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32),
21006 N: Op0.getOperand(i: 0));
21007 SDValue Lo = NewSplitF64.getValue(R: 0);
21008 SDValue Hi = NewSplitF64.getValue(R: 1);
21009 APInt SignBit = APInt::getSignMask(BitWidth: 32);
21010 if (Op0.getOpcode() == ISD::FNEG) {
21011 SDValue NewHi = DAG.getNode(Opcode: ISD::XOR, DL, VT: MVT::i32, N1: Hi,
21012 N2: DAG.getConstant(Val: SignBit, DL, VT: MVT::i32));
21013 return DCI.CombineTo(N, Res0: Lo, Res1: NewHi);
21014 }
21015 assert(Op0.getOpcode() == ISD::FABS);
21016 SDValue NewHi = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: Hi,
21017 N2: DAG.getConstant(Val: ~SignBit, DL, VT: MVT::i32));
21018 return DCI.CombineTo(N, Res0: Lo, Res1: NewHi);
21019 }
21020 case RISCVISD::SLLW:
21021 case RISCVISD::SRAW:
21022 case RISCVISD::SRLW:
21023 case RISCVISD::RORW:
21024 case RISCVISD::ROLW: {
21025 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
21026 if (SimplifyDemandedLowBitsHelper(0, 32) ||
21027 SimplifyDemandedLowBitsHelper(1, 5))
21028 return SDValue(N, 0);
21029
21030 break;
21031 }
21032 case RISCVISD::ABSW:
21033 case RISCVISD::CLSW:
21034 case RISCVISD::CLZW:
21035 case RISCVISD::CTZW: {
21036 // Only the lower 32 bits of the first operand are read
21037 if (SimplifyDemandedLowBitsHelper(0, 32))
21038 return SDValue(N, 0);
21039 break;
21040 }
21041 case RISCVISD::FMV_W_X_RV64: {
21042 // If the input to FMV_W_X_RV64 is just FMV_X_ANYEXTW_RV64 then the
21043 // conversion is unnecessary and can be replaced with the
21044 // FMV_X_ANYEXTW_RV64 operand.
21045 SDValue Op0 = N->getOperand(Num: 0);
21046 if (Op0.getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64)
21047 return Op0.getOperand(i: 0);
21048 break;
21049 }
21050 case RISCVISD::FMV_X_ANYEXTH:
21051 case RISCVISD::FMV_X_ANYEXTW_RV64: {
21052 SDLoc DL(N);
21053 SDValue Op0 = N->getOperand(Num: 0);
21054 MVT VT = N->getSimpleValueType(ResNo: 0);
21055
21056 // Constant fold.
21057 if (auto *CFP = dyn_cast<ConstantFPSDNode>(Val&: Op0)) {
21058 APInt Val = CFP->getValueAPF().bitcastToAPInt().sext(width: VT.getSizeInBits());
21059 return DAG.getConstant(Val, DL, VT);
21060 }
21061
21062 // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
21063 // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
21064 // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
21065 if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
21066 Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
21067 (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
21068 Op0->getOpcode() == RISCVISD::FMV_H_X)) {
21069 assert(Op0.getOperand(0).getValueType() == VT &&
21070 "Unexpected value type!");
21071 return Op0.getOperand(i: 0);
21072 }
21073
21074 if (ISD::isNormalLoad(N: Op0.getNode()) && Op0.hasOneUse() &&
21075 cast<LoadSDNode>(Val&: Op0)->isSimple()) {
21076 MVT IVT = MVT::getIntegerVT(BitWidth: Op0.getValueSizeInBits());
21077 auto *LN0 = cast<LoadSDNode>(Val&: Op0);
21078 SDValue Load =
21079 DAG.getExtLoad(ExtType: ISD::EXTLOAD, dl: SDLoc(N), VT, Chain: LN0->getChain(),
21080 Ptr: LN0->getBasePtr(), MemVT: IVT, MMO: LN0->getMemOperand());
21081 DAG.ReplaceAllUsesOfValueWith(From: Op0.getValue(R: 1), To: Load.getValue(R: 1));
21082 return Load;
21083 }
21084
21085 // This is a target-specific version of a DAGCombine performed in
21086 // DAGCombiner::visitBITCAST. It performs the equivalent of:
21087 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
21088 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
21089 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
21090 !Op0.getNode()->hasOneUse())
21091 break;
21092 SDValue NewFMV = DAG.getNode(Opcode: N->getOpcode(), DL, VT, Operand: Op0.getOperand(i: 0));
21093 unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
21094 APInt SignBit = APInt::getSignMask(BitWidth: FPBits).sext(width: VT.getSizeInBits());
21095 if (Op0.getOpcode() == ISD::FNEG)
21096 return DAG.getNode(Opcode: ISD::XOR, DL, VT, N1: NewFMV,
21097 N2: DAG.getConstant(Val: SignBit, DL, VT));
21098
21099 assert(Op0.getOpcode() == ISD::FABS);
21100 return DAG.getNode(Opcode: ISD::AND, DL, VT, N1: NewFMV,
21101 N2: DAG.getConstant(Val: ~SignBit, DL, VT));
21102 }
21103 case ISD::ABS: {
21104 EVT VT = N->getValueType(ResNo: 0);
21105 SDValue N0 = N->getOperand(Num: 0);
21106 // abs (sext) -> zext (abs)
21107 // abs (zext) -> zext (handled elsewhere)
21108 if (VT.isVector() && N0.hasOneUse() && N0.getOpcode() == ISD::SIGN_EXTEND) {
21109 SDValue Src = N0.getOperand(i: 0);
21110 SDLoc DL(N);
21111 return DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL, VT,
21112 Operand: DAG.getNode(Opcode: ISD::ABS, DL, VT: Src.getValueType(), Operand: Src));
21113 }
21114 break;
21115 }
21116 case ISD::ADD: {
21117 if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
21118 return V;
21119 if (SDValue V = combineToVWMACC(N, DAG, Subtarget))
21120 return V;
21121 if (SDValue V = combineVqdotAccum(N, DAG, Subtarget))
21122 return V;
21123 return performADDCombine(N, DCI, Subtarget);
21124 }
21125 case ISD::SUB: {
21126 if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
21127 return V;
21128 return performSUBCombine(N, DAG, Subtarget);
21129 }
21130 case ISD::AND:
21131 return performANDCombine(N, DCI, Subtarget);
21132 case ISD::OR: {
21133 if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
21134 return V;
21135 return performORCombine(N, DCI, Subtarget);
21136 }
21137 case ISD::XOR:
21138 return performXORCombine(N, DAG, Subtarget);
21139 case ISD::MUL:
21140 if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
21141 return V;
21142 return performMULCombine(N, DAG, DCI, Subtarget);
21143 case ISD::SDIV:
21144 case ISD::UDIV:
21145 case ISD::SREM:
21146 case ISD::UREM:
21147 if (SDValue V = combineBinOpOfZExt(N, DAG))
21148 return V;
21149 break;
21150 case ISD::FMUL: {
21151 using namespace SDPatternMatch;
21152 SDLoc DL(N);
21153 EVT VT = N->getValueType(ResNo: 0);
21154 SDValue X, Y;
21155 // InstCombine canonicalizes fneg (fmul x, y) -> fmul x, (fneg y), see
21156 // hoistFNegAboveFMulFDiv.
21157 // Undo this and sink the fneg so we match more fmsub/fnmadd patterns.
21158 if (sd_match(N, P: m_FMul(L: m_Value(N&: X), R: m_OneUse(P: m_FNeg(Op: m_Value(N&: Y))))))
21159 return DAG.getNode(Opcode: ISD::FNEG, DL, VT,
21160 Operand: DAG.getNode(Opcode: ISD::FMUL, DL, VT, N1: X, N2: Y, Flags: N->getFlags()),
21161 Flags: N->getFlags());
21162
21163 // fmul X, (copysign 1.0, Y) -> fsgnjx X, Y
21164 SDValue N0 = N->getOperand(Num: 0);
21165 SDValue N1 = N->getOperand(Num: 1);
21166 if (N0->getOpcode() != ISD::FCOPYSIGN)
21167 std::swap(a&: N0, b&: N1);
21168 if (N0->getOpcode() != ISD::FCOPYSIGN)
21169 return SDValue();
21170 ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val: N0->getOperand(Num: 0));
21171 if (!C || !C->getValueAPF().isExactlyValue(V: +1.0))
21172 return SDValue();
21173 if (VT.isVector() || !isOperationLegal(Op: ISD::FCOPYSIGN, VT))
21174 return SDValue();
21175 SDValue Sign = N0->getOperand(Num: 1);
21176 if (Sign.getValueType() != VT)
21177 return SDValue();
21178 return DAG.getNode(Opcode: RISCVISD::FSGNJX, DL, VT, N1, N2: N0->getOperand(Num: 1));
21179 }
21180 case ISD::FADD:
21181 case ISD::UMAX:
21182 case ISD::UMIN:
21183 case ISD::SMAX:
21184 case ISD::SMIN:
21185 case ISD::FMAXNUM:
21186 case ISD::FMINNUM: {
21187 if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
21188 return V;
21189 if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
21190 return V;
21191 return SDValue();
21192 }
21193 case ISD::FMA: {
21194 SDValue N0 = N->getOperand(Num: 0);
21195 SDValue N1 = N->getOperand(Num: 1);
21196 if (N0.getOpcode() != ISD::SPLAT_VECTOR)
21197 std::swap(a&: N0, b&: N1);
21198 if (N0.getOpcode() != ISD::SPLAT_VECTOR)
21199 return SDValue();
21200 SDValue SplatN0 = N0.getOperand(i: 0);
21201 if (SplatN0.getOpcode() != ISD::FNEG || !SplatN0.hasOneUse())
21202 return SDValue();
21203 EVT VT = N->getValueType(ResNo: 0);
21204 SDValue Splat =
21205 DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL, VT, Operand: SplatN0.getOperand(i: 0));
21206 SDValue Fneg = DAG.getNode(Opcode: ISD::FNEG, DL, VT, Operand: Splat);
21207 return DAG.getNode(Opcode: ISD::FMA, DL, VT, N1: Fneg, N2: N1, N3: N->getOperand(Num: 2));
21208 }
21209 case ISD::SETCC:
21210 return performSETCCCombine(N, DCI, Subtarget);
21211 case ISD::SIGN_EXTEND_INREG:
21212 return performSIGN_EXTEND_INREGCombine(N, DCI, Subtarget);
21213 case ISD::ZERO_EXTEND:
21214 // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
21215 // type legalization. This is safe because fp_to_uint produces poison if
21216 // it overflows.
21217 if (N->getValueType(ResNo: 0) == MVT::i64 && Subtarget.is64Bit()) {
21218 SDValue Src = N->getOperand(Num: 0);
21219 if (Src.getOpcode() == ISD::FP_TO_UINT &&
21220 isTypeLegal(VT: Src.getOperand(i: 0).getValueType()))
21221 return DAG.getNode(Opcode: ISD::FP_TO_UINT, DL: SDLoc(N), VT: MVT::i64,
21222 Operand: Src.getOperand(i: 0));
21223 if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
21224 isTypeLegal(VT: Src.getOperand(i: 1).getValueType())) {
21225 SDVTList VTs = DAG.getVTList(VT1: MVT::i64, VT2: MVT::Other);
21226 SDValue Res = DAG.getNode(Opcode: ISD::STRICT_FP_TO_UINT, DL: SDLoc(N), VTList: VTs,
21227 N1: Src.getOperand(i: 0), N2: Src.getOperand(i: 1));
21228 DCI.CombineTo(N, Res);
21229 DAG.ReplaceAllUsesOfValueWith(From: Src.getValue(R: 1), To: Res.getValue(R: 1));
21230 DCI.recursivelyDeleteUnusedNodes(N: Src.getNode());
21231 return SDValue(N, 0); // Return N so it doesn't get rechecked.
21232 }
21233 }
21234 return SDValue();
21235 case RISCVISD::TRUNCATE_VECTOR_VL:
21236 if (SDValue V = combineTruncOfSraSext(N, DAG))
21237 return V;
21238 return combineTruncToVnclip(N, DAG, Subtarget);
21239 case ISD::VP_TRUNCATE:
21240 return performVP_TRUNCATECombine(N, DAG, Subtarget);
21241 case ISD::TRUNCATE:
21242 return performTRUNCATECombine(N, DAG, Subtarget);
21243 case ISD::SELECT:
21244 return performSELECTCombine(N, DAG, Subtarget);
21245 case ISD::VSELECT:
21246 return performVSELECTCombine(N, DAG);
21247 case RISCVISD::CZERO_EQZ:
21248 case RISCVISD::CZERO_NEZ: {
21249 SDValue Val = N->getOperand(Num: 0);
21250 SDValue Cond = N->getOperand(Num: 1);
21251
21252 unsigned Opc = N->getOpcode();
21253
21254 // czero_eqz x, x -> x
21255 if (Opc == RISCVISD::CZERO_EQZ && Val == Cond)
21256 return Val;
21257
21258 unsigned InvOpc =
21259 Opc == RISCVISD::CZERO_EQZ ? RISCVISD::CZERO_NEZ : RISCVISD::CZERO_EQZ;
21260
21261 // czero_eqz X, (xor Y, 1) -> czero_nez X, Y if Y is 0 or 1.
21262 // czero_nez X, (xor Y, 1) -> czero_eqz X, Y if Y is 0 or 1.
21263 if (Cond.getOpcode() == ISD::XOR && isOneConstant(V: Cond.getOperand(i: 1))) {
21264 SDValue NewCond = Cond.getOperand(i: 0);
21265 APInt Mask = APInt::getBitsSetFrom(numBits: NewCond.getValueSizeInBits(), loBit: 1);
21266 if (DAG.MaskedValueIsZero(Op: NewCond, Mask))
21267 return DAG.getNode(Opcode: InvOpc, DL: SDLoc(N), VT: N->getValueType(ResNo: 0), N1: Val, N2: NewCond);
21268 }
21269 // czero_eqz x, (setcc y, 0, ne) -> czero_eqz x, y
21270 // czero_nez x, (setcc y, 0, ne) -> czero_nez x, y
21271 // czero_eqz x, (setcc y, 0, eq) -> czero_nez x, y
21272 // czero_nez x, (setcc y, 0, eq) -> czero_eqz x, y
21273 if (Cond.getOpcode() == ISD::SETCC && isNullConstant(V: Cond.getOperand(i: 1))) {
21274 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val: Cond.getOperand(i: 2))->get();
21275 if (ISD::isIntEqualitySetCC(Code: CCVal))
21276 return DAG.getNode(Opcode: CCVal == ISD::SETNE ? Opc : InvOpc, DL: SDLoc(N),
21277 VT: N->getValueType(ResNo: 0), N1: Val, N2: Cond.getOperand(i: 0));
21278 }
21279 return SDValue();
21280 }
21281 case RISCVISD::SELECT_CC: {
21282    // Transform select_cc into simpler or more canonical forms where possible.
21283 SDValue LHS = N->getOperand(Num: 0);
21284 SDValue RHS = N->getOperand(Num: 1);
21285 SDValue CC = N->getOperand(Num: 2);
21286 ISD::CondCode CCVal = cast<CondCodeSDNode>(Val&: CC)->get();
21287 SDValue TrueV = N->getOperand(Num: 3);
21288 SDValue FalseV = N->getOperand(Num: 4);
21289 SDLoc DL(N);
21290 EVT VT = N->getValueType(ResNo: 0);
21291
21292 // If the True and False values are the same, we don't need a select_cc.
21293 if (TrueV == FalseV)
21294 return TrueV;
21295
21296 // (select (x < 0), y, z) -> x >> (XLEN - 1) & (y - z) + z
21297 // (select (x >= 0), y, z) -> x >> (XLEN - 1) & (z - y) + y
21298 if (!Subtarget.hasShortForwardBranchIALU() && isa<ConstantSDNode>(Val: TrueV) &&
21299 isa<ConstantSDNode>(Val: FalseV) && isNullConstant(V: RHS) &&
21300 (CCVal == ISD::CondCode::SETLT || CCVal == ISD::CondCode::SETGE)) {
21301 if (CCVal == ISD::CondCode::SETGE)
21302 std::swap(a&: TrueV, b&: FalseV);
21303
21304 int64_t TrueSImm = cast<ConstantSDNode>(Val&: TrueV)->getSExtValue();
21305 int64_t FalseSImm = cast<ConstantSDNode>(Val&: FalseV)->getSExtValue();
21306      // Only handle simm12; constants outside this range would have to be
21307      // materialized into a register anyway.
21308 if (isInt<12>(x: TrueSImm) && isInt<12>(x: FalseSImm) &&
21309 isInt<12>(x: TrueSImm - FalseSImm)) {
21310 SDValue SRA =
21311 DAG.getNode(Opcode: ISD::SRA, DL, VT, N1: LHS,
21312 N2: DAG.getConstant(Val: Subtarget.getXLen() - 1, DL, VT));
21313 SDValue AND =
21314 DAG.getNode(Opcode: ISD::AND, DL, VT, N1: SRA,
21315 N2: DAG.getSignedConstant(Val: TrueSImm - FalseSImm, DL, VT));
21316 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: AND, N2: FalseV);
21317 }
21318
21319 if (CCVal == ISD::CondCode::SETGE)
21320 std::swap(a&: TrueV, b&: FalseV);
21321 }
21322
21323 if (combine_CC(LHS, RHS, CC, DL, DAG, Subtarget))
21324 return DAG.getNode(Opcode: RISCVISD::SELECT_CC, DL, VT: N->getValueType(ResNo: 0),
21325 Ops: {LHS, RHS, CC, TrueV, FalseV});
21326
21327 if (!Subtarget.hasConditionalMoveFusion()) {
21328 // (select c, -1, y) -> -c | y
21329 if (isAllOnesConstant(V: TrueV)) {
21330 SDValue C = DAG.getSetCC(DL, VT, LHS, RHS, Cond: CCVal);
21331 SDValue Neg = DAG.getNegative(Val: C, DL, VT);
21332 return DAG.getNode(Opcode: ISD::OR, DL, VT, N1: Neg, N2: FalseV);
21333 }
21334 // (select c, y, -1) -> -!c | y
21335 if (isAllOnesConstant(V: FalseV)) {
21336 SDValue C =
21337 DAG.getSetCC(DL, VT, LHS, RHS, Cond: ISD::getSetCCInverse(Operation: CCVal, Type: VT));
21338 SDValue Neg = DAG.getNegative(Val: C, DL, VT);
21339 return DAG.getNode(Opcode: ISD::OR, DL, VT, N1: Neg, N2: TrueV);
21340 }
21341
21342 // (select c, 0, y) -> -!c & y
21343 if (isNullConstant(V: TrueV)) {
21344 SDValue C =
21345 DAG.getSetCC(DL, VT, LHS, RHS, Cond: ISD::getSetCCInverse(Operation: CCVal, Type: VT));
21346 SDValue Neg = DAG.getNegative(Val: C, DL, VT);
21347 return DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Neg, N2: FalseV);
21348 }
21349 // (select c, y, 0) -> -c & y
21350 if (isNullConstant(V: FalseV)) {
21351 SDValue C = DAG.getSetCC(DL, VT, LHS, RHS, Cond: CCVal);
21352 SDValue Neg = DAG.getNegative(Val: C, DL, VT);
21353 return DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Neg, N2: TrueV);
21354 }
21355 // (riscvisd::select_cc x, 0, ne, x, 1) -> (add x, (setcc x, 0, eq))
21356 // (riscvisd::select_cc x, 0, eq, 1, x) -> (add x, (setcc x, 0, eq))
21357 if (((isOneConstant(V: FalseV) && LHS == TrueV &&
21358 CCVal == ISD::CondCode::SETNE) ||
21359 (isOneConstant(V: TrueV) && LHS == FalseV &&
21360 CCVal == ISD::CondCode::SETEQ)) &&
21361 isNullConstant(V: RHS)) {
21362        // Freeze LHS since it is used by both the setcc and the add below.
21363 LHS = DAG.getFreeze(V: LHS);
21364 SDValue C = DAG.getSetCC(DL, VT, LHS, RHS, Cond: ISD::CondCode::SETEQ);
21365 return DAG.getNode(Opcode: ISD::ADD, DL, VT, N1: LHS, N2: C);
21366 }
21367 }
21368
21369 // If both true/false are an xor with 1, pull through the select.
21370 // This can occur after op legalization if both operands are setccs that
21371 // require an xor to invert.
21372 // FIXME: Generalize to other binary ops with identical operand?
21373 if (TrueV.getOpcode() == ISD::XOR && FalseV.getOpcode() == ISD::XOR &&
21374 TrueV.getOperand(i: 1) == FalseV.getOperand(i: 1) &&
21375 isOneConstant(V: TrueV.getOperand(i: 1)) &&
21376 TrueV.hasOneUse() && FalseV.hasOneUse()) {
21377 SDValue NewSel = DAG.getNode(Opcode: RISCVISD::SELECT_CC, DL, VT, N1: LHS, N2: RHS, N3: CC,
21378 N4: TrueV.getOperand(i: 0), N5: FalseV.getOperand(i: 0));
21379 return DAG.getNode(Opcode: ISD::XOR, DL, VT, N1: NewSel, N2: TrueV.getOperand(i: 1));
21380 }
21381
21382 return SDValue();
21383 }
21384 case RISCVISD::BR_CC: {
21385 SDValue LHS = N->getOperand(Num: 1);
21386 SDValue RHS = N->getOperand(Num: 2);
21387 SDValue CC = N->getOperand(Num: 3);
21388 SDLoc DL(N);
21389
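    // If combine_CC managed to canonicalize or simplify the comparison, rebuild
    // the BR_CC with the updated operands and condition code.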
21390 if (combine_CC(LHS, RHS, CC, DL, DAG, Subtarget))
21391 return DAG.getNode(Opcode: RISCVISD::BR_CC, DL, VT: N->getValueType(ResNo: 0),
21392 N1: N->getOperand(Num: 0), N2: LHS, N3: RHS, N4: CC, N5: N->getOperand(Num: 4));
21393
21394 return SDValue();
21395 }
21396 case ISD::BITREVERSE:
21397 return performBITREVERSECombine(N, DAG, Subtarget);
21398 case ISD::FP_TO_SINT:
21399 case ISD::FP_TO_UINT:
21400 return performFP_TO_INTCombine(N, DCI, Subtarget);
21401 case ISD::FP_TO_SINT_SAT:
21402 case ISD::FP_TO_UINT_SAT:
21403 return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
21404 case ISD::FCOPYSIGN: {
21405 EVT VT = N->getValueType(ResNo: 0);
21406 if (!VT.isVector())
21407 break;
21408    // There is a form of VFSGNJ which injects the negated sign of its second
21409    // operand. Try to bubble any FNEG up so that it applies after the
21410    // extend/round, producing this optimized pattern. Avoid modifying cases
21411    // where the FP_ROUND is a truncating round (TRUNC=1).
21412 SDValue In2 = N->getOperand(Num: 1);
21413 // Avoid cases where the extend/round has multiple uses, as duplicating
21414 // those is typically more expensive than removing a fneg.
21415 if (!In2.hasOneUse())
21416 break;
21417 if (In2.getOpcode() != ISD::FP_EXTEND &&
21418 (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(i: 1) != 0))
21419 break;
21420 In2 = In2.getOperand(i: 0);
21421 if (In2.getOpcode() != ISD::FNEG)
21422 break;
21423 SDLoc DL(N);
21424 SDValue NewFPExtRound = DAG.getFPExtendOrRound(Op: In2.getOperand(i: 0), DL, VT);
21425 return DAG.getNode(Opcode: ISD::FCOPYSIGN, DL, VT, N1: N->getOperand(Num: 0),
21426 N2: DAG.getNode(Opcode: ISD::FNEG, DL, VT, Operand: NewFPExtRound));
21427 }
21428 case ISD::MGATHER: {
21429 const auto *MGN = cast<MaskedGatherSDNode>(Val: N);
21430 const EVT VT = N->getValueType(ResNo: 0);
21431 SDValue Index = MGN->getIndex();
21432 SDValue ScaleOp = MGN->getScale();
21433 ISD::MemIndexType IndexType = MGN->getIndexType();
21434 assert(!MGN->isIndexScaled() &&
21435 "Scaled gather/scatter should not be formed");
21436
21437 SDLoc DL(N);
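    // First try to legalize the index type or narrow the index; either rewrite
    // just rebuilds the gather with the updated index and index type.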
21438 if (legalizeScatterGatherIndexType(DL, Index, IndexType, DCI))
21439 return DAG.getMaskedGather(
21440 VTs: N->getVTList(), MemVT: MGN->getMemoryVT(), dl: DL,
21441 Ops: {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
21442 MGN->getBasePtr(), Index, ScaleOp},
21443 MMO: MGN->getMemOperand(), IndexType, ExtTy: MGN->getExtensionType());
21444
21445 if (narrowIndex(N&: Index, IndexType, DAG))
21446 return DAG.getMaskedGather(
21447 VTs: N->getVTList(), MemVT: MGN->getMemoryVT(), dl: DL,
21448 Ops: {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
21449 MGN->getBasePtr(), Index, ScaleOp},
21450 MMO: MGN->getMemOperand(), IndexType, ExtTy: MGN->getExtensionType());
21451
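    // A non-extending gather whose index is a simple VID sequence (constant
    // step with no fractional part) is really a strided load. Lower it to a VP
    // strided load and select against the passthru for the masked-off lanes.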
21452 if (Index.getOpcode() == ISD::BUILD_VECTOR &&
21453 MGN->getExtensionType() == ISD::NON_EXTLOAD && isTypeLegal(VT)) {
21454 // The sequence will be XLenVT, not the type of Index. Tell
21455 // isSimpleVIDSequence this so we avoid overflow.
21456 if (std::optional<VIDSequence> SimpleVID =
21457 isSimpleVIDSequence(Op: Index, EltSizeInBits: Subtarget.getXLen());
21458 SimpleVID && SimpleVID->StepDenominator == 1) {
21459 const int64_t StepNumerator = SimpleVID->StepNumerator;
21460 const int64_t Addend = SimpleVID->Addend;
21461
21462 // Note: We don't need to check alignment here since (by assumption
21463 // from the existence of the gather), our offsets must be sufficiently
21464 // aligned.
21465
21466 const EVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
21467 assert(MGN->getBasePtr()->getValueType(0) == PtrVT);
21468 assert(IndexType == ISD::UNSIGNED_SCALED);
21469 SDValue BasePtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: MGN->getBasePtr(),
21470 N2: DAG.getSignedConstant(Val: Addend, DL, VT: PtrVT));
21471
21472 SDValue EVL = DAG.getElementCount(DL, VT: Subtarget.getXLenVT(),
21473 EC: VT.getVectorElementCount());
21474 SDValue StridedLoad = DAG.getStridedLoadVP(
21475 VT, DL, Chain: MGN->getChain(), Ptr: BasePtr,
21476 Stride: DAG.getSignedConstant(Val: StepNumerator, DL, VT: XLenVT), Mask: MGN->getMask(),
21477 EVL, MMO: MGN->getMemOperand());
21478 SDValue Select = DAG.getSelect(DL, VT, Cond: MGN->getMask(), LHS: StridedLoad,
21479 RHS: MGN->getPassThru());
21480 return DAG.getMergeValues(Ops: {Select, SDValue(StridedLoad.getNode(), 1)},
21481 dl: DL);
21482 }
21483 }
21484
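    // If the index vector only reorders elements of the contiguous VT-sized
    // region at the base pointer, the gather is a masked load of that region
    // followed by a shuffle.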
21485 SmallVector<int> ShuffleMask;
21486 if (MGN->getExtensionType() == ISD::NON_EXTLOAD &&
21487 matchIndexAsShuffle(VT, Index, Mask: MGN->getMask(), ShuffleMask)) {
21488 SDValue Load = DAG.getMaskedLoad(VT, dl: DL, Chain: MGN->getChain(),
21489 Base: MGN->getBasePtr(), Offset: DAG.getUNDEF(VT: XLenVT),
21490 Mask: MGN->getMask(), Src0: DAG.getUNDEF(VT),
21491 MemVT: MGN->getMemoryVT(), MMO: MGN->getMemOperand(),
21492 AM: ISD::UNINDEXED, ISD::NON_EXTLOAD);
21493 SDValue Shuffle =
21494 DAG.getVectorShuffle(VT, dl: DL, N1: Load, N2: DAG.getUNDEF(VT), Mask: ShuffleMask);
21495 return DAG.getMergeValues(Ops: {Shuffle, Load.getValue(R: 1)}, dl: DL);
21496 }
21497
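    // If consecutive index pairs address adjacent memory, pairs of elements can
    // be gathered as single elements of twice the width: keep every other
    // index, gather with an all-ones mask at the wider element type, and
    // bitcast the result back to VT.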
21498 if (MGN->getExtensionType() == ISD::NON_EXTLOAD &&
21499 matchIndexAsWiderOp(VT, Index, Mask: MGN->getMask(),
21500 BaseAlign: MGN->getMemOperand()->getBaseAlign(), ST: Subtarget)) {
21501 SmallVector<SDValue> NewIndices;
21502 for (unsigned i = 0; i < Index->getNumOperands(); i += 2)
21503 NewIndices.push_back(Elt: Index.getOperand(i));
21504 EVT IndexVT = Index.getValueType()
21505 .getHalfNumVectorElementsVT(Context&: *DAG.getContext());
21506 Index = DAG.getBuildVector(VT: IndexVT, DL, Ops: NewIndices);
21507
21508 unsigned ElementSize = VT.getScalarStoreSize();
21509 EVT WideScalarVT = MVT::getIntegerVT(BitWidth: ElementSize * 8 * 2);
21510 auto EltCnt = VT.getVectorElementCount();
21511 assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
21512 EVT WideVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: WideScalarVT,
21513 EC: EltCnt.divideCoefficientBy(RHS: 2));
21514 SDValue Passthru = DAG.getBitcast(VT: WideVT, V: MGN->getPassThru());
21515 EVT MaskVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i1,
21516 EC: EltCnt.divideCoefficientBy(RHS: 2));
21517 SDValue Mask = DAG.getSplat(VT: MaskVT, DL, Op: DAG.getConstant(Val: 1, DL, VT: MVT::i1));
21518
21519 SDValue Gather =
21520 DAG.getMaskedGather(VTs: DAG.getVTList(VT1: WideVT, VT2: MVT::Other), MemVT: WideVT, dl: DL,
21521 Ops: {MGN->getChain(), Passthru, Mask, MGN->getBasePtr(),
21522 Index, ScaleOp},
21523 MMO: MGN->getMemOperand(), IndexType, ExtTy: ISD::NON_EXTLOAD);
21524 SDValue Result = DAG.getBitcast(VT, V: Gather.getValue(R: 0));
21525 return DAG.getMergeValues(Ops: {Result, Gather.getValue(R: 1)}, dl: DL);
21526 }
21527 break;
21528 }
21529  case ISD::MSCATTER: {
21530 const auto *MSN = cast<MaskedScatterSDNode>(Val: N);
21531 SDValue Index = MSN->getIndex();
21532 SDValue ScaleOp = MSN->getScale();
21533 ISD::MemIndexType IndexType = MSN->getIndexType();
21534 assert(!MSN->isIndexScaled() &&
21535 "Scaled gather/scatter should not be formed");
21536
21537 SDLoc DL(N);
21538 if (legalizeScatterGatherIndexType(DL, Index, IndexType, DCI))
21539 return DAG.getMaskedScatter(
21540 VTs: N->getVTList(), MemVT: MSN->getMemoryVT(), dl: DL,
21541 Ops: {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
21542 Index, ScaleOp},
21543 MMO: MSN->getMemOperand(), IndexType, IsTruncating: MSN->isTruncatingStore());
21544
21545 if (narrowIndex(N&: Index, IndexType, DAG))
21546 return DAG.getMaskedScatter(
21547 VTs: N->getVTList(), MemVT: MSN->getMemoryVT(), dl: DL,
21548 Ops: {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
21549 Index, ScaleOp},
21550 MMO: MSN->getMemOperand(), IndexType, IsTruncating: MSN->isTruncatingStore());
21551
21552 EVT VT = MSN->getValue()->getValueType(ResNo: 0);
21553 SmallVector<int> ShuffleMask;
21554 if (!MSN->isTruncatingStore() &&
21555 matchIndexAsShuffle(VT, Index, Mask: MSN->getMask(), ShuffleMask)) {
21556 SDValue Shuffle = DAG.getVectorShuffle(VT, dl: DL, N1: MSN->getValue(),
21557 N2: DAG.getUNDEF(VT), Mask: ShuffleMask);
21558 return DAG.getMaskedStore(Chain: MSN->getChain(), dl: DL, Val: Shuffle, Base: MSN->getBasePtr(),
21559 Offset: DAG.getUNDEF(VT: XLenVT), Mask: MSN->getMask(),
21560 MemVT: MSN->getMemoryVT(), MMO: MSN->getMemOperand(),
21561 AM: ISD::UNINDEXED, IsTruncating: false);
21562 }
21563 break;
21564 }
21565 case ISD::VP_GATHER: {
21566 const auto *VPGN = cast<VPGatherSDNode>(Val: N);
21567 SDValue Index = VPGN->getIndex();
21568 SDValue ScaleOp = VPGN->getScale();
21569 ISD::MemIndexType IndexType = VPGN->getIndexType();
21570 assert(!VPGN->isIndexScaled() &&
21571 "Scaled gather/scatter should not be formed");
21572
21573 SDLoc DL(N);
21574 if (legalizeScatterGatherIndexType(DL, Index, IndexType, DCI))
21575 return DAG.getGatherVP(VTs: N->getVTList(), VT: VPGN->getMemoryVT(), dl: DL,
21576 Ops: {VPGN->getChain(), VPGN->getBasePtr(), Index,
21577 ScaleOp, VPGN->getMask(),
21578 VPGN->getVectorLength()},
21579 MMO: VPGN->getMemOperand(), IndexType);
21580
21581 if (narrowIndex(N&: Index, IndexType, DAG))
21582 return DAG.getGatherVP(VTs: N->getVTList(), VT: VPGN->getMemoryVT(), dl: DL,
21583 Ops: {VPGN->getChain(), VPGN->getBasePtr(), Index,
21584 ScaleOp, VPGN->getMask(),
21585 VPGN->getVectorLength()},
21586 MMO: VPGN->getMemOperand(), IndexType);
21587
21588 break;
21589 }
21590 case ISD::VP_SCATTER: {
21591 const auto *VPSN = cast<VPScatterSDNode>(Val: N);
21592 SDValue Index = VPSN->getIndex();
21593 SDValue ScaleOp = VPSN->getScale();
21594 ISD::MemIndexType IndexType = VPSN->getIndexType();
21595 assert(!VPSN->isIndexScaled() &&
21596 "Scaled gather/scatter should not be formed");
21597
21598 SDLoc DL(N);
21599 if (legalizeScatterGatherIndexType(DL, Index, IndexType, DCI))
21600 return DAG.getScatterVP(VTs: N->getVTList(), VT: VPSN->getMemoryVT(), dl: DL,
21601 Ops: {VPSN->getChain(), VPSN->getValue(),
21602 VPSN->getBasePtr(), Index, ScaleOp,
21603 VPSN->getMask(), VPSN->getVectorLength()},
21604 MMO: VPSN->getMemOperand(), IndexType);
21605
21606 if (narrowIndex(N&: Index, IndexType, DAG))
21607 return DAG.getScatterVP(VTs: N->getVTList(), VT: VPSN->getMemoryVT(), dl: DL,
21608 Ops: {VPSN->getChain(), VPSN->getValue(),
21609 VPSN->getBasePtr(), Index, ScaleOp,
21610 VPSN->getMask(), VPSN->getVectorLength()},
21611 MMO: VPSN->getMemOperand(), IndexType);
21612 break;
21613 }
21614 case RISCVISD::SHL_VL:
21615 if (SDValue V = performSHLCombine(N, DCI, Subtarget))
21616 return V;
21617 [[fallthrough]];
21618 case RISCVISD::SRA_VL:
21619 case RISCVISD::SRL_VL: {
21620 SDValue ShAmt = N->getOperand(Num: 1);
21621 if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
21622 // We don't need the upper 32 bits of a 64-bit element for a shift amount.
21623 SDLoc DL(N);
21624 SDValue VL = N->getOperand(Num: 4);
21625 EVT VT = N->getValueType(ResNo: 0);
21626 ShAmt = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT, N1: DAG.getUNDEF(VT),
21627 N2: ShAmt.getOperand(i: 1), N3: VL);
21628 return DAG.getNode(Opcode: N->getOpcode(), DL, VT, N1: N->getOperand(Num: 0), N2: ShAmt,
21629 N3: N->getOperand(Num: 2), N4: N->getOperand(Num: 3), N5: N->getOperand(Num: 4));
21630 }
21631 break;
21632 }
21633 case ISD::SRA:
21634 if (SDValue V = performSRACombine(N, DAG, Subtarget))
21635 return V;
21636 [[fallthrough]];
21637 case ISD::SRL:
21638 case ISD::SHL: {
21639 if (N->getOpcode() == ISD::SHL) {
21640 if (SDValue V = performSHLCombine(N, DCI, Subtarget))
21641 return V;
21642 }
21643 SDValue ShAmt = N->getOperand(Num: 1);
21644 if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
21645 // We don't need the upper 32 bits of a 64-bit element for a shift amount.
21646 SDLoc DL(N);
21647 EVT VT = N->getValueType(ResNo: 0);
21648 ShAmt = DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT, N1: DAG.getUNDEF(VT),
21649 N2: ShAmt.getOperand(i: 1),
21650 N3: DAG.getRegister(Reg: RISCV::X0, VT: Subtarget.getXLenVT()));
21651 return DAG.getNode(Opcode: N->getOpcode(), DL, VT, N1: N->getOperand(Num: 0), N2: ShAmt);
21652 }
21653 break;
21654 }
21655 case RISCVISD::ADD_VL:
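    // For ADD_VL, try generic simplification, widening-op formation, and the
    // vqdot / vwmacc accumulation combines in turn.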
21656 if (SDValue V = simplifyOp_VL(N))
21657 return V;
21658 if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
21659 return V;
21660 if (SDValue V = combineVqdotAccum(N, DAG, Subtarget))
21661 return V;
21662 return combineToVWMACC(N, DAG, Subtarget);
21663 case RISCVISD::VWADD_W_VL:
21664 case RISCVISD::VWADDU_W_VL:
21665 case RISCVISD::VWSUB_W_VL:
21666 case RISCVISD::VWSUBU_W_VL:
21667 return performVWADDSUBW_VLCombine(N, DCI, Subtarget);
21668 case RISCVISD::OR_VL:
21669 case RISCVISD::SUB_VL:
21670 case RISCVISD::MUL_VL:
21671 return combineOp_VLToVWOp_VL(N, DCI, Subtarget);
21672 case RISCVISD::VFMADD_VL:
21673 case RISCVISD::VFNMADD_VL:
21674 case RISCVISD::VFMSUB_VL:
21675 case RISCVISD::VFNMSUB_VL:
21676 case RISCVISD::STRICT_VFMADD_VL:
21677 case RISCVISD::STRICT_VFNMADD_VL:
21678 case RISCVISD::STRICT_VFMSUB_VL:
21679 case RISCVISD::STRICT_VFNMSUB_VL:
21680 return performVFMADD_VLCombine(N, DCI, Subtarget);
21681 case RISCVISD::FADD_VL:
21682 case RISCVISD::FSUB_VL:
21683 case RISCVISD::FMUL_VL:
21684 case RISCVISD::VFWADD_W_VL:
21685 case RISCVISD::VFWSUB_W_VL:
21686 return combineOp_VLToVWOp_VL(N, DCI, Subtarget);
21687 case ISD::LOAD:
21688 case ISD::STORE: {
21689 if (DCI.isAfterLegalizeDAG())
21690 if (SDValue V = performMemPairCombine(N, DCI))
21691 return V;
21692
21693 if (N->getOpcode() != ISD::STORE)
21694 break;
21695
21696 auto *Store = cast<StoreSDNode>(Val: N);
21697 SDValue Chain = Store->getChain();
21698 EVT MemVT = Store->getMemoryVT();
21699 SDValue Val = Store->getValue();
21700 SDLoc DL(N);
21701
21702 bool IsScalarizable =
21703 MemVT.isFixedLengthVector() && ISD::isNormalStore(N: Store) &&
21704 Store->isSimple() &&
21705 MemVT.getVectorElementType().bitsLE(VT: Subtarget.getXLenVT()) &&
21706 isPowerOf2_64(Value: MemVT.getSizeInBits()) &&
21707 MemVT.getSizeInBits() <= Subtarget.getXLen();
21708
21709 // If sufficiently aligned we can scalarize stores of constant vectors of
21710 // any power-of-two size up to XLen bits, provided that they aren't too
21711 // expensive to materialize.
21712 // vsetivli zero, 2, e8, m1, ta, ma
21713 // vmv.v.i v8, 4
21714 // vse64.v v8, (a0)
21715 // ->
21716 // li a1, 1028
21717 // sh a1, 0(a0)
21718 if (DCI.isBeforeLegalize() && IsScalarizable &&
21719 ISD::isBuildVectorOfConstantSDNodes(N: Val.getNode())) {
21720 // Get the constant vector bits
21721 APInt NewC(Val.getValueSizeInBits(), 0);
21722 uint64_t EltSize = Val.getScalarValueSizeInBits();
21723 for (unsigned i = 0; i < Val.getNumOperands(); i++) {
21724 if (Val.getOperand(i).isUndef())
21725 continue;
21726 NewC.insertBits(SubBits: Val.getConstantOperandAPInt(i).trunc(width: EltSize),
21727 bitPosition: i * EltSize);
21728 }
21729 MVT NewVT = MVT::getIntegerVT(BitWidth: MemVT.getSizeInBits());
21730
21731 if (RISCVMatInt::getIntMatCost(Val: NewC, Size: Subtarget.getXLen(), STI: Subtarget,
21732 CompressionCost: true) <= 2 &&
21733 allowsMemoryAccessForAlignment(Context&: *DAG.getContext(), DL: DAG.getDataLayout(),
21734 VT: NewVT, MMO: *Store->getMemOperand())) {
21735 SDValue NewV = DAG.getConstant(Val: NewC, DL, VT: NewVT);
21736 return DAG.getStore(Chain, dl: DL, Val: NewV, Ptr: Store->getBasePtr(),
21737 PtrInfo: Store->getPointerInfo(), Alignment: Store->getBaseAlign(),
21738 MMOFlags: Store->getMemOperand()->getFlags());
21739 }
21740 }
21741
21742 // Similarly, if sufficiently aligned we can scalarize vector copies, e.g.
21743 // vsetivli zero, 2, e16, m1, ta, ma
21744 // vle16.v v8, (a0)
21745 // vse16.v v8, (a1)
21746 if (auto *L = dyn_cast<LoadSDNode>(Val);
21747 L && DCI.isBeforeLegalize() && IsScalarizable && L->isSimple() &&
21748 L->hasNUsesOfValue(NUses: 1, Value: 0) && L->hasNUsesOfValue(NUses: 1, Value: 1) &&
21749 Store->getChain() == SDValue(L, 1) && ISD::isNormalLoad(N: L) &&
21750 L->getMemoryVT() == MemVT) {
21751 MVT NewVT = MVT::getIntegerVT(BitWidth: MemVT.getSizeInBits());
21752 if (allowsMemoryAccessForAlignment(Context&: *DAG.getContext(), DL: DAG.getDataLayout(),
21753 VT: NewVT, MMO: *Store->getMemOperand()) &&
21754 allowsMemoryAccessForAlignment(Context&: *DAG.getContext(), DL: DAG.getDataLayout(),
21755 VT: NewVT, MMO: *L->getMemOperand())) {
21756 SDValue NewL = DAG.getLoad(VT: NewVT, dl: DL, Chain: L->getChain(), Ptr: L->getBasePtr(),
21757 PtrInfo: L->getPointerInfo(), Alignment: L->getBaseAlign(),
21758 MMOFlags: L->getMemOperand()->getFlags());
21759 return DAG.getStore(Chain, dl: DL, Val: NewL, Ptr: Store->getBasePtr(),
21760 PtrInfo: Store->getPointerInfo(), Alignment: Store->getBaseAlign(),
21761 MMOFlags: Store->getMemOperand()->getFlags());
21762 }
21763 }
21764
21765 // Combine store of vmv.x.s/vfmv.f.s to vse with VL of 1.
21766 // vfmv.f.s is represented as extract element from 0. Match it late to avoid
21767 // any illegal types.
21768 if ((Val.getOpcode() == RISCVISD::VMV_X_S ||
21769 (DCI.isAfterLegalizeDAG() &&
21770 Val.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
21771 isNullConstant(V: Val.getOperand(i: 1)))) &&
21772 Val.hasOneUse()) {
21773 SDValue Src = Val.getOperand(i: 0);
21774 MVT VecVT = Src.getSimpleValueType();
21775 // VecVT should be scalable and memory VT should match the element type.
21776 if (!Store->isIndexed() && VecVT.isScalableVector() &&
21777 MemVT == VecVT.getVectorElementType()) {
21778 SDLoc DL(N);
21779 MVT MaskVT = getMaskTypeFor(VecVT);
21780 return DAG.getStoreVP(
21781 Chain: Store->getChain(), dl: DL, Val: Src, Ptr: Store->getBasePtr(), Offset: Store->getOffset(),
21782 Mask: DAG.getConstant(Val: 1, DL, VT: MaskVT),
21783 EVL: DAG.getConstant(Val: 1, DL, VT: Subtarget.getXLenVT()), MemVT,
21784 MMO: Store->getMemOperand(), AM: Store->getAddressingMode(),
21785 IsTruncating: Store->isTruncatingStore(), /*IsCompress*/ IsCompressing: false);
21786 }
21787 }
21788
21789 break;
21790 }
21791 case ISD::SPLAT_VECTOR: {
21792 EVT VT = N->getValueType(ResNo: 0);
21793 // Only perform this combine on legal MVT types.
21794 if (!isTypeLegal(VT))
21795 break;
21796 if (auto Gather = matchSplatAsGather(SplatVal: N->getOperand(Num: 0), VT: VT.getSimpleVT(), DL: N,
21797 DAG, Subtarget))
21798 return Gather;
21799 break;
21800 }
21801 case ISD::BUILD_VECTOR:
21802 if (SDValue V = performBUILD_VECTORCombine(N, DAG, Subtarget, TLI: *this))
21803 return V;
21804 break;
21805 case ISD::CONCAT_VECTORS:
21806 if (SDValue V = performCONCAT_VECTORSCombine(N, DAG, Subtarget, TLI: *this))
21807 return V;
21808 break;
21809 case ISD::VECTOR_SHUFFLE:
21810 if (SDValue V = performVECTOR_SHUFFLECombine(N, DAG, Subtarget, TLI: *this))
21811 return V;
21812 break;
21813 case ISD::INSERT_VECTOR_ELT:
21814 if (SDValue V = performINSERT_VECTOR_ELTCombine(N, DAG, Subtarget, TLI: *this))
21815 return V;
21816 break;
21817 case RISCVISD::VFMV_V_F_VL: {
21818 const MVT VT = N->getSimpleValueType(ResNo: 0);
21819 SDValue Passthru = N->getOperand(Num: 0);
21820 SDValue Scalar = N->getOperand(Num: 1);
21821 SDValue VL = N->getOperand(Num: 2);
21822
21823 // If VL is 1, we can use vfmv.s.f.
21824 if (isOneConstant(V: VL))
21825 return DAG.getNode(Opcode: RISCVISD::VFMV_S_F_VL, DL, VT, N1: Passthru, N2: Scalar, N3: VL);
21826 break;
21827 }
21828 case RISCVISD::VMV_V_X_VL: {
21829 const MVT VT = N->getSimpleValueType(ResNo: 0);
21830 SDValue Passthru = N->getOperand(Num: 0);
21831 SDValue Scalar = N->getOperand(Num: 1);
21832 SDValue VL = N->getOperand(Num: 2);
21833
21834 // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
21835 // scalar input.
21836 unsigned ScalarSize = Scalar.getValueSizeInBits();
21837 unsigned EltWidth = VT.getScalarSizeInBits();
21838 if (ScalarSize > EltWidth && Passthru.isUndef())
21839 if (SimplifyDemandedLowBitsHelper(1, EltWidth))
21840 return SDValue(N, 0);
21841
21842    // If VL is 1 and the scalar value wouldn't benefit from the vmv.v.i
21843    // immediate form, we can use vmv.s.x instead.
21844 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val&: Scalar);
21845 if (isOneConstant(V: VL) &&
21846 (!Const || Const->isZero() ||
21847 !Const->getAPIntValue().sextOrTrunc(width: EltWidth).isSignedIntN(N: 5)))
21848 return DAG.getNode(Opcode: RISCVISD::VMV_S_X_VL, DL, VT, N1: Passthru, N2: Scalar, N3: VL);
21849
21850 break;
21851 }
21852 case RISCVISD::VFMV_S_F_VL: {
21853 SDValue Src = N->getOperand(Num: 1);
21854 // Try to remove vector->scalar->vector if the scalar->vector is inserting
21855 // into an undef vector.
21856 // TODO: Could use a vslide or vmv.v.v for non-undef.
21857 if (N->getOperand(Num: 0).isUndef() &&
21858 Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
21859 isNullConstant(V: Src.getOperand(i: 1)) &&
21860 Src.getOperand(i: 0).getValueType().isScalableVector()) {
21861 EVT VT = N->getValueType(ResNo: 0);
21862 SDValue EVSrc = Src.getOperand(i: 0);
21863 EVT EVSrcVT = EVSrc.getValueType();
21864 assert(EVSrcVT.getVectorElementType() == VT.getVectorElementType());
21865 // Widths match, just return the original vector.
21866 if (EVSrcVT == VT)
21867 return EVSrc;
21868 SDLoc DL(N);
21869      // Width is narrower, so use insert_subvector.
21870 if (EVSrcVT.getVectorMinNumElements() < VT.getVectorMinNumElements()) {
21871 return DAG.getNode(Opcode: ISD::INSERT_SUBVECTOR, DL, VT, N1: DAG.getUNDEF(VT),
21872 N2: EVSrc,
21873 N3: DAG.getConstant(Val: 0, DL, VT: Subtarget.getXLenVT()));
21874 }
21875      // Width is wider, so use extract_subvector.
21876 return DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT, N1: EVSrc,
21877 N2: DAG.getConstant(Val: 0, DL, VT: Subtarget.getXLenVT()));
21878 }
21879 [[fallthrough]];
21880 }
21881 case RISCVISD::VMV_S_X_VL: {
21882 const MVT VT = N->getSimpleValueType(ResNo: 0);
21883 SDValue Passthru = N->getOperand(Num: 0);
21884 SDValue Scalar = N->getOperand(Num: 1);
21885 SDValue VL = N->getOperand(Num: 2);
21886
21887 // The vmv.s.x instruction copies the scalar integer register to element 0
21888 // of the destination vector register. If SEW < XLEN, the least-significant
21889 // bits are copied and the upper XLEN-SEW bits are ignored.
21890 unsigned ScalarSize = Scalar.getValueSizeInBits();
21891 unsigned EltWidth = VT.getScalarSizeInBits();
21892 if (ScalarSize > EltWidth && SimplifyDemandedLowBitsHelper(1, EltWidth))
21893 return SDValue(N, 0);
21894
21895 if (Scalar.getOpcode() == RISCVISD::VMV_X_S && Passthru.isUndef() &&
21896 Scalar.getOperand(i: 0).getValueType() == N->getValueType(ResNo: 0))
21897 return Scalar.getOperand(i: 0);
21898
21899    // Use M1 or smaller to avoid over-constraining register allocation.
21900 const MVT M1VT = RISCVTargetLowering::getM1VT(VT);
21901 if (M1VT.bitsLT(VT)) {
21902 SDValue M1Passthru = DAG.getExtractSubvector(DL, VT: M1VT, Vec: Passthru, Idx: 0);
21903 SDValue Result =
21904 DAG.getNode(Opcode: N->getOpcode(), DL, VT: M1VT, N1: M1Passthru, N2: Scalar, N3: VL);
21905 Result = DAG.getInsertSubvector(DL, Vec: Passthru, SubVec: Result, Idx: 0);
21906 return Result;
21907 }
21908
21909 // We use a vmv.v.i if possible. We limit this to LMUL1. LMUL2 or
21910 // higher would involve overly constraining the register allocator for
21911 // no purpose.
21912 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val&: Scalar);
21913 Const && !Const->isZero() && isInt<5>(x: Const->getSExtValue()) &&
21914 VT.bitsLE(VT: RISCVTargetLowering::getM1VT(VT)) && Passthru.isUndef())
21915 return DAG.getNode(Opcode: RISCVISD::VMV_V_X_VL, DL, VT, N1: Passthru, N2: Scalar, N3: VL);
21916
21917 break;
21918 }
21919 case RISCVISD::VMV_X_S: {
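    // Reading element 0 only requires an M1 (or smaller) source register, so
    // extract the low M1 subvector first to avoid over-constraining register
    // allocation.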
21920 SDValue Vec = N->getOperand(Num: 0);
21921 MVT VecVT = N->getOperand(Num: 0).getSimpleValueType();
21922 const MVT M1VT = RISCVTargetLowering::getM1VT(VT: VecVT);
21923 if (M1VT.bitsLT(VT: VecVT)) {
21924 Vec = DAG.getExtractSubvector(DL, VT: M1VT, Vec, Idx: 0);
21925 return DAG.getNode(Opcode: RISCVISD::VMV_X_S, DL, VT: N->getValueType(ResNo: 0), Operand: Vec);
21926 }
21927 break;
21928 }
21929 case ISD::INTRINSIC_VOID:
21930 case ISD::INTRINSIC_W_CHAIN:
21931 case ISD::INTRINSIC_WO_CHAIN: {
21932 unsigned IntOpNo = N->getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
21933 unsigned IntNo = N->getConstantOperandVal(Num: IntOpNo);
21934 switch (IntNo) {
21935 // By default we do not combine any intrinsic.
21936 default:
21937 return SDValue();
21938 case Intrinsic::riscv_vcpop:
21939 case Intrinsic::riscv_vcpop_mask:
21940 case Intrinsic::riscv_vfirst:
21941 case Intrinsic::riscv_vfirst_mask: {
21942 SDValue VL = N->getOperand(Num: 2);
21943 if (IntNo == Intrinsic::riscv_vcpop_mask ||
21944 IntNo == Intrinsic::riscv_vfirst_mask)
21945 VL = N->getOperand(Num: 3);
21946 if (!isNullConstant(V: VL))
21947 return SDValue();
21948 // If VL is 0, vcpop -> li 0, vfirst -> li -1.
21949 SDLoc DL(N);
21950 EVT VT = N->getValueType(ResNo: 0);
21951 if (IntNo == Intrinsic::riscv_vfirst ||
21952 IntNo == Intrinsic::riscv_vfirst_mask)
21953 return DAG.getAllOnesConstant(DL, VT);
21954 return DAG.getConstant(Val: 0, DL, VT);
21955 }
21956 case Intrinsic::riscv_vsseg2_mask:
21957 case Intrinsic::riscv_vsseg3_mask:
21958 case Intrinsic::riscv_vsseg4_mask:
21959 case Intrinsic::riscv_vsseg5_mask:
21960 case Intrinsic::riscv_vsseg6_mask:
21961 case Intrinsic::riscv_vsseg7_mask:
21962 case Intrinsic::riscv_vsseg8_mask: {
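      // A segment store whose tuple has a single live field (a TUPLE_INSERT
      // into undef) writes that field at a constant byte stride of SEW/8*NF.
      // Rewrite it as one masked strided store (vsse) at the field's offset.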
21963 SDValue Tuple = N->getOperand(Num: 2);
21964 unsigned NF = Tuple.getValueType().getRISCVVectorTupleNumFields();
21965
21966 if (Subtarget.hasOptimizedSegmentLoadStore(NF) || !Tuple.hasOneUse() ||
21967 Tuple.getOpcode() != RISCVISD::TUPLE_INSERT ||
21968 !Tuple.getOperand(i: 0).isUndef())
21969 return SDValue();
21970
21971 SDValue Val = Tuple.getOperand(i: 1);
21972 unsigned Idx = Tuple.getConstantOperandVal(i: 2);
21973
21974 unsigned SEW = Val.getValueType().getScalarSizeInBits();
21975 assert(Log2_64(SEW) == N->getConstantOperandVal(6) &&
21976 "Type mismatch without bitcast?");
21977 unsigned Stride = SEW / 8 * NF;
21978 unsigned Offset = SEW / 8 * Idx;
21979
21980 SDValue Ops[] = {
21981 /*Chain=*/N->getOperand(Num: 0),
21982 /*IntID=*/
21983 DAG.getTargetConstant(Val: Intrinsic::riscv_vsse_mask, DL, VT: XLenVT),
21984 /*StoredVal=*/Val,
21985 /*Ptr=*/
21986 DAG.getNode(Opcode: ISD::ADD, DL, VT: XLenVT, N1: N->getOperand(Num: 3),
21987 N2: DAG.getConstant(Val: Offset, DL, VT: XLenVT)),
21988 /*Stride=*/DAG.getConstant(Val: Stride, DL, VT: XLenVT),
21989 /*Mask=*/N->getOperand(Num: 4),
21990 /*VL=*/N->getOperand(Num: 5)};
21991
21992 auto *OldMemSD = cast<MemIntrinsicSDNode>(Val: N);
21993      // Match getTgtMemIntrinsic for the non-unit stride case.
21994 EVT MemVT = OldMemSD->getMemoryVT().getScalarType();
21995 MachineFunction &MF = DAG.getMachineFunction();
21996 MachineMemOperand *MMO = MF.getMachineMemOperand(
21997 MMO: OldMemSD->getMemOperand(), Offset, Size: MemoryLocation::UnknownSize);
21998
21999 SDVTList VTs = DAG.getVTList(VT: MVT::Other);
22000 return DAG.getMemIntrinsicNode(Opcode: ISD::INTRINSIC_VOID, dl: DL, VTList: VTs, Ops, MemVT,
22001 MMO);
22002 }
22003 }
22004 }
22005 case ISD::EXPERIMENTAL_VP_REVERSE:
22006 return performVP_REVERSECombine(N, DAG, Subtarget);
22007 case ISD::VP_STORE:
22008 return performVP_STORECombine(N, DAG, Subtarget);
22009 case ISD::BITCAST: {
22010 assert(Subtarget.useRVVForFixedLengthVectors());
22011 SDValue N0 = N->getOperand(Num: 0);
22012 EVT VT = N->getValueType(ResNo: 0);
22013 EVT SrcVT = N0.getValueType();
22014 if (VT.isRISCVVectorTuple() && N0->getOpcode() == ISD::SPLAT_VECTOR) {
22015 unsigned NF = VT.getRISCVVectorTupleNumFields();
22016 unsigned NumScalElts = VT.getSizeInBits().getKnownMinValue() / (NF * 8);
22017 SDValue EltVal = DAG.getConstant(Val: 0, DL, VT: Subtarget.getXLenVT());
22018 MVT ScalTy = MVT::getScalableVectorVT(VT: MVT::getIntegerVT(BitWidth: 8), NumElements: NumScalElts);
22019
22020 SDValue Splat = DAG.getNode(Opcode: ISD::SPLAT_VECTOR, DL, VT: ScalTy, Operand: EltVal);
22021
22022 SDValue Result = DAG.getUNDEF(VT);
22023 for (unsigned i = 0; i < NF; ++i)
22024 Result = DAG.getNode(Opcode: RISCVISD::TUPLE_INSERT, DL, VT, N1: Result, N2: Splat,
22025 N3: DAG.getTargetConstant(Val: i, DL, VT: MVT::i32));
22026 return Result;
22027 }
22028    // If this is a bitcast between an MVT::v4i1/v2i1/v1i1 and an illegal
22029    // integer type, widen both sides to avoid a trip through memory.
22030 if ((SrcVT == MVT::v1i1 || SrcVT == MVT::v2i1 || SrcVT == MVT::v4i1) &&
22031 VT.isScalarInteger()) {
22032 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
22033 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT: SrcVT));
22034 Ops[0] = N0;
22035 SDLoc DL(N);
22036 N0 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: MVT::v8i1, Ops);
22037 N0 = DAG.getBitcast(VT: MVT::i8, V: N0);
22038 return DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT, Operand: N0);
22039 }
22040
22041 return SDValue();
22042 }
22043 case ISD::VECREDUCE_ADD:
22044 if (SDValue V = performVECREDUCECombine(N, DAG, Subtarget, TLI: *this))
22045 return V;
22046 [[fallthrough]];
22047 case ISD::CTPOP:
22048 if (SDValue V = combineToVCPOP(N, DAG, Subtarget))
22049 return V;
22050 break;
22051 case RISCVISD::VRGATHER_VX_VL: {
22052    // Note this assumes that out-of-bounds indices produce poison and can
22053    // thus be replaced without having to prove them in bounds.
22054 EVT VT = N->getValueType(ResNo: 0);
22055 SDValue Src = N->getOperand(Num: 0);
22056 SDValue Idx = N->getOperand(Num: 1);
22057 SDValue Passthru = N->getOperand(Num: 2);
22058 SDValue VL = N->getOperand(Num: 4);
22059
22060    // Warning: Unlike most cases where we strip an insert_subvector, this
22061    // one does not require the first operand to be undef.
22062 if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
22063 isNullConstant(V: Src.getOperand(i: 2)))
22064 Src = Src.getOperand(i: 1);
22065
22066 switch (Src.getOpcode()) {
22067 default:
22068 break;
22069 case RISCVISD::VMV_V_X_VL:
22070 case RISCVISD::VFMV_V_F_VL:
22071 // Drop a redundant vrgather_vx.
22072 // TODO: Remove the type restriction if we find a motivating
22073 // test case?
22074 if (Passthru.isUndef() && VL == Src.getOperand(i: 2) &&
22075 Src.getValueType() == VT)
22076 return Src;
22077 break;
22078 case RISCVISD::VMV_S_X_VL:
22079 case RISCVISD::VFMV_S_F_VL:
22080 // If this use only demands lane zero from the source vmv.s.x, and
22081 // doesn't have a passthru, then this vrgather.vi/vx is equivalent to
22082 // a vmv.v.x. Note that there can be other uses of the original
22083 // vmv.s.x and thus we can't eliminate it. (vfmv.s.f is analogous)
22084 if (isNullConstant(V: Idx) && Passthru.isUndef() &&
22085 VL == Src.getOperand(i: 2)) {
22086 unsigned Opc =
22087 VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
22088 return DAG.getNode(Opcode: Opc, DL, VT, N1: DAG.getUNDEF(VT), N2: Src.getOperand(i: 1),
22089 N3: VL);
22090 }
22091 break;
22092 }
22093 break;
22094 }
22095 case RISCVISD::TUPLE_EXTRACT: {
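    // If the only use of a masked segment load (vlseg*) is extracting a single
    // field, load just that field with a masked strided load (vlse) instead and
    // drop the rest of the segment load.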
22096 EVT VT = N->getValueType(ResNo: 0);
22097 SDValue Tuple = N->getOperand(Num: 0);
22098 unsigned Idx = N->getConstantOperandVal(Num: 1);
22099 if (!Tuple.hasOneUse() || Tuple.getOpcode() != ISD::INTRINSIC_W_CHAIN)
22100 break;
22101
22102 unsigned NF = 0;
22103 switch (Tuple.getConstantOperandVal(i: 1)) {
22104 default:
22105 break;
22106 case Intrinsic::riscv_vlseg2_mask:
22107 case Intrinsic::riscv_vlseg3_mask:
22108 case Intrinsic::riscv_vlseg4_mask:
22109 case Intrinsic::riscv_vlseg5_mask:
22110 case Intrinsic::riscv_vlseg6_mask:
22111 case Intrinsic::riscv_vlseg7_mask:
22112 case Intrinsic::riscv_vlseg8_mask:
22113 NF = Tuple.getValueType().getRISCVVectorTupleNumFields();
22114 break;
22115 }
22116
22117 if (!NF || Subtarget.hasOptimizedSegmentLoadStore(NF))
22118 break;
22119
22120 unsigned SEW = VT.getScalarSizeInBits();
22121 assert(Log2_64(SEW) == Tuple.getConstantOperandVal(7) &&
22122 "Type mismatch without bitcast?");
22123 unsigned Stride = SEW / 8 * NF;
22124 unsigned Offset = SEW / 8 * Idx;
22125
22126 SDValue Passthru = Tuple.getOperand(i: 2);
22127 if (Passthru.isUndef())
22128 Passthru = DAG.getUNDEF(VT);
22129 else
22130 Passthru = DAG.getNode(Opcode: RISCVISD::TUPLE_EXTRACT, DL, VT, N1: Passthru,
22131 N2: N->getOperand(Num: 1));
22132
22133 SDValue Ops[] = {
22134 /*Chain=*/Tuple.getOperand(i: 0),
22135 /*IntID=*/DAG.getTargetConstant(Val: Intrinsic::riscv_vlse_mask, DL, VT: XLenVT),
22136 /*Passthru=*/Passthru,
22137 /*Ptr=*/
22138 DAG.getNode(Opcode: ISD::ADD, DL, VT: XLenVT, N1: Tuple.getOperand(i: 3),
22139 N2: DAG.getConstant(Val: Offset, DL, VT: XLenVT)),
22140 /*Stride=*/DAG.getConstant(Val: Stride, DL, VT: XLenVT),
22141 /*Mask=*/Tuple.getOperand(i: 4),
22142 /*VL=*/Tuple.getOperand(i: 5),
22143 /*Policy=*/Tuple.getOperand(i: 6)};
22144
22145 auto *TupleMemSD = cast<MemIntrinsicSDNode>(Val&: Tuple);
22146    // Match getTgtMemIntrinsic for the non-unit stride case.
22147 EVT MemVT = TupleMemSD->getMemoryVT().getScalarType();
22148 MachineFunction &MF = DAG.getMachineFunction();
22149 MachineMemOperand *MMO = MF.getMachineMemOperand(
22150 MMO: TupleMemSD->getMemOperand(), Offset, Size: MemoryLocation::UnknownSize);
22151
22152 SDVTList VTs = DAG.getVTList(VTs: {VT, MVT::Other});
22153 SDValue Result = DAG.getMemIntrinsicNode(Opcode: ISD::INTRINSIC_W_CHAIN, dl: DL, VTList: VTs,
22154 Ops, MemVT, MMO);
22155 DAG.ReplaceAllUsesOfValueWith(From: Tuple.getValue(R: 1), To: Result.getValue(R: 1));
22156 return Result.getValue(R: 0);
22157 }
22158 case RISCVISD::TUPLE_INSERT: {
22159 // tuple_insert tuple, undef, idx -> tuple
22160 if (N->getOperand(Num: 1).isUndef())
22161 return N->getOperand(Num: 0);
22162 break;
22163 }
22164 case RISCVISD::VMERGE_VL: {
22165 // vmerge_vl allones, x, y, passthru, vl -> vmv_v_v passthru, x, vl
22166 SDValue Mask = N->getOperand(Num: 0);
22167 SDValue True = N->getOperand(Num: 1);
22168 SDValue Passthru = N->getOperand(Num: 3);
22169 SDValue VL = N->getOperand(Num: 4);
22170
22171 // Fixed vectors are wrapped in scalable containers, unwrap them.
22172 using namespace SDPatternMatch;
22173 SDValue SubVec;
22174 if (sd_match(N: Mask, P: m_InsertSubvector(Base: m_Undef(), Sub: m_Value(N&: SubVec), Idx: m_Zero())))
22175 Mask = SubVec;
22176
22177 if (!isOneOrOneSplat(V: Mask))
22178 break;
22179
22180 return DAG.getNode(Opcode: RISCVISD::VMV_V_V_VL, DL: SDLoc(N), VT: N->getValueType(ResNo: 0),
22181 N1: Passthru, N2: True, N3: VL);
22182 }
22183 case RISCVISD::VMV_V_V_VL: {
22184 // vmv_v_v passthru, splat(x), vl -> vmv_v_x passthru, x, vl
22185 SDValue Passthru = N->getOperand(Num: 0);
22186 SDValue Src = N->getOperand(Num: 1);
22187 SDValue VL = N->getOperand(Num: 2);
22188
22189 // Fixed vectors are wrapped in scalable containers, unwrap them.
22190 using namespace SDPatternMatch;
22191 SDValue SubVec;
22192 if (sd_match(N: Src, P: m_InsertSubvector(Base: m_Undef(), Sub: m_Value(N&: SubVec), Idx: m_Zero())))
22193 Src = SubVec;
22194
22195 SDValue SplatVal = DAG.getSplatValue(V: Src, /*LegalTypes=*/true);
22196 if (!SplatVal)
22197 break;
22198 MVT VT = N->getSimpleValueType(ResNo: 0);
22199 return lowerScalarSplat(Passthru, Scalar: SplatVal, VL, VT, DL: SDLoc(N), DAG,
22200 Subtarget);
22201 }
22202 case RISCVISD::VSLIDEDOWN_VL:
22203 case RISCVISD::VSLIDEUP_VL:
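    // Sliding in an undef source vector only writes undef lanes, so the whole
    // slide can be replaced with its merge operand.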
22204 if (N->getOperand(Num: 1)->isUndef())
22205 return N->getOperand(Num: 0);
22206 break;
22207 case RISCVISD::VSLIDE1UP_VL:
22208 case RISCVISD::VFSLIDE1UP_VL: {
22209 using namespace SDPatternMatch;
22210 SDValue SrcVec;
22211 SDLoc DL(N);
22212 MVT VT = N->getSimpleValueType(ResNo: 0);
22213 // If the scalar we're sliding in was extracted from the first element of a
22214 // vector, we can use that vector as the passthru in a normal slideup of 1.
22215 // This saves us an extract_element instruction (i.e. vfmv.f.s, vmv.x.s).
22216 if (!N->getOperand(Num: 0).isUndef() ||
22217 !sd_match(N: N->getOperand(Num: 2),
22218 P: m_AnyOf(preds: m_ExtractElt(Vec: m_Value(N&: SrcVec), Idx: m_Zero()),
22219 preds: m_Node(Opcode: RISCVISD::VMV_X_S, preds: m_Value(N&: SrcVec)))))
22220 break;
22221
22222 MVT SrcVecVT = SrcVec.getSimpleValueType();
22223 if (SrcVecVT.getVectorElementType() != VT.getVectorElementType())
22224 break;
22225    // Adapt the value type of the source vector.
22226 if (SrcVecVT.isFixedLengthVector()) {
22227 SrcVecVT = getContainerForFixedLengthVector(VT: SrcVecVT);
22228 SrcVec = convertToScalableVector(VT: SrcVecVT, V: SrcVec, DAG, Subtarget);
22229 }
22230 if (SrcVecVT.getVectorMinNumElements() < VT.getVectorMinNumElements())
22231 SrcVec = DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT), SubVec: SrcVec, Idx: 0);
22232 else
22233 SrcVec = DAG.getExtractSubvector(DL, VT, Vec: SrcVec, Idx: 0);
22234
22235 return getVSlideup(DAG, Subtarget, DL, VT, Passthru: SrcVec, Op: N->getOperand(Num: 1),
22236 Offset: DAG.getConstant(Val: 1, DL, VT: XLenVT), Mask: N->getOperand(Num: 3),
22237 VL: N->getOperand(Num: 4));
22238 }
22239 }
22240
22241 return SDValue();
22242}
22243
22244bool RISCVTargetLowering::shouldTransformSignedTruncationCheck(
22245 EVT XVT, unsigned KeptBits) const {
22246  // For vectors, we don't have a preference.
22247 if (XVT.isVector())
22248 return false;
22249
22250 if (XVT != MVT::i32 && XVT != MVT::i64)
22251 return false;
22252
22253 // We can use sext.w for RV64 or an srai 31 on RV32.
22254 if (KeptBits == 32 || KeptBits == 64)
22255 return true;
22256
22257 // With Zbb we can use sext.h/sext.b.
22258 return Subtarget.hasStdExtZbb() &&
22259 ((KeptBits == 8 && XVT == MVT::i64 && !Subtarget.is64Bit()) ||
22260 KeptBits == 16);
22261}
22262
22263bool RISCVTargetLowering::isDesirableToCommuteWithShift(
22264 const SDNode *N, CombineLevel Level) const {
22265 assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
22266 N->getOpcode() == ISD::SRL) &&
22267 "Expected shift op");
22268
22269 // The following folds are only desirable if `(OP _, c1 << c2)` can be
22270 // materialised in fewer instructions than `(OP _, c1)`:
22271 //
22272 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
22273 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
22274 SDValue N0 = N->getOperand(Num: 0);
22275 EVT Ty = N0.getValueType();
22276
22277  // Loads and stores can fold a constant offset into their addressing, so if
22278  // the ADD node is also used by a load or store, the folding above can still
22279  // happen at that use.
22280 auto isUsedByLdSt = [](const SDNode *X, const SDNode *User) {
22281 for (SDNode *Use : X->users()) {
22282      // This use is the one we're on right now. Skip it.
22283 if (Use == User || Use->getOpcode() == ISD::SELECT)
22284 continue;
22285 if (!isa<StoreSDNode>(Val: Use) && !isa<LoadSDNode>(Val: Use))
22286 return false;
22287 }
22288 return true;
22289 };
22290
22291 if (Ty.isScalarInteger() &&
22292 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
22293 if (N0.getOpcode() == ISD::ADD && !N0->hasOneUse())
22294 return isUsedByLdSt(N0.getNode(), N);
22295
22296 auto *C1 = dyn_cast<ConstantSDNode>(Val: N0->getOperand(Num: 1));
22297 auto *C2 = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1));
22298
22299 // Bail if we might break a sh{1,2,3}add/qc.shladd pattern.
22300 if (C2 && Subtarget.hasShlAdd(ShAmt: C2->getZExtValue()) && N->hasOneUse() &&
22301 N->user_begin()->getOpcode() == ISD::ADD &&
22302 !isUsedByLdSt(*N->user_begin(), nullptr) &&
22303 !isa<ConstantSDNode>(Val: N->user_begin()->getOperand(Num: 1)))
22304 return false;
22305
22306 if (C1 && C2) {
22307 const APInt &C1Int = C1->getAPIntValue();
22308 APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
22309
22310 // We can materialise `c1 << c2` into an add immediate, so it's "free",
22311 // and the combine should happen, to potentially allow further combines
22312 // later.
22313 if (ShiftedC1Int.getSignificantBits() <= 64 &&
22314 isLegalAddImmediate(Imm: ShiftedC1Int.getSExtValue()))
22315 return true;
22316
22317 // We can materialise `c1` in an add immediate, so it's "free", and the
22318 // combine should be prevented.
22319 if (C1Int.getSignificantBits() <= 64 &&
22320 isLegalAddImmediate(Imm: C1Int.getSExtValue()))
22321 return false;
22322
22323 // Neither constant will fit into an immediate, so find materialisation
22324 // costs.
22325 int C1Cost =
22326 RISCVMatInt::getIntMatCost(Val: C1Int, Size: Ty.getSizeInBits(), STI: Subtarget,
22327 /*CompressionCost*/ true);
22328 int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
22329 Val: ShiftedC1Int, Size: Ty.getSizeInBits(), STI: Subtarget,
22330 /*CompressionCost*/ true);
22331
22332 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
22333 // combine should be prevented.
22334 if (C1Cost < ShiftedC1Cost)
22335 return false;
22336 }
22337 }
22338
22339 if (!N0->hasOneUse())
22340 return false;
22341
22342 if (N0->getOpcode() == ISD::SIGN_EXTEND &&
22343 N0->getOperand(Num: 0)->getOpcode() == ISD::ADD &&
22344 !N0->getOperand(Num: 0)->hasOneUse())
22345 return isUsedByLdSt(N0->getOperand(Num: 0).getNode(), N0.getNode());
22346
22347 return true;
22348}
22349
22350bool RISCVTargetLowering::targetShrinkDemandedConstant(
22351 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
22352 TargetLoweringOpt &TLO) const {
22353 // Delay this optimization as late as possible.
22354 if (!TLO.LegalOps)
22355 return false;
22356
22357 EVT VT = Op.getValueType();
22358 if (VT.isVector())
22359 return false;
22360
22361 unsigned Opcode = Op.getOpcode();
22362 if (Opcode != ISD::AND && Opcode != ISD::OR && Opcode != ISD::XOR)
22363 return false;
22364
22365 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1));
22366 if (!C)
22367 return false;
22368
22369 const APInt &Mask = C->getAPIntValue();
22370
22371 // Clear all non-demanded bits initially.
22372 APInt ShrunkMask = Mask & DemandedBits;
22373
22374 // Try to make a smaller immediate by setting undemanded bits.
22375
22376 APInt ExpandedMask = Mask | ~DemandedBits;
22377
22378 auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
22379 return ShrunkMask.isSubsetOf(RHS: Mask) && Mask.isSubsetOf(RHS: ExpandedMask);
22380 };
22381 auto UseMask = [Mask, Op, &TLO](const APInt &NewMask) -> bool {
22382 if (NewMask == Mask)
22383 return true;
22384 SDLoc DL(Op);
22385 SDValue NewC = TLO.DAG.getConstant(Val: NewMask, DL, VT: Op.getValueType());
22386 SDValue NewOp = TLO.DAG.getNode(Opcode: Op.getOpcode(), DL, VT: Op.getValueType(),
22387 N1: Op.getOperand(i: 0), N2: NewC);
22388 return TLO.CombineTo(O: Op, N: NewOp);
22389 };
22390
22391 // If the shrunk mask fits in sign extended 12 bits, let the target
22392 // independent code apply it.
22393 if (ShrunkMask.isSignedIntN(N: 12))
22394 return false;
22395
22396  // AND has a few special cases for zext patterns.
22397 if (Opcode == ISD::AND) {
22398    // Preserve (and X, 0xffff); if zext.h exists we use it, otherwise
22399    // SLLI + SRLI.
22400 APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
22401 if (IsLegalMask(NewMask))
22402 return UseMask(NewMask);
22403
22404 // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
22405 if (VT == MVT::i64) {
22406 APInt NewMask = APInt(64, 0xffffffff);
22407 if (IsLegalMask(NewMask))
22408 return UseMask(NewMask);
22409 }
22410 }
22411
22412 // For the remaining optimizations, we need to be able to make a negative
22413 // number through a combination of mask and undemanded bits.
22414 if (!ExpandedMask.isNegative())
22415 return false;
22416
22417  // Find the fewest number of bits needed to represent the negative number.
22418 unsigned MinSignedBits = ExpandedMask.getSignificantBits();
22419
22420 // Try to make a 12 bit negative immediate. If that fails try to make a 32
22421 // bit negative immediate unless the shrunk immediate already fits in 32 bits.
22422 // If we can't create a simm12, we shouldn't change opaque constants.
22423 APInt NewMask = ShrunkMask;
22424 if (MinSignedBits <= 12)
22425 NewMask.setBitsFrom(11);
22426 else if (!C->isOpaque() && MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(N: 32))
22427 NewMask.setBitsFrom(31);
22428 else
22429 return false;
22430
22431 // Check that our new mask is a subset of the demanded mask.
22432 assert(IsLegalMask(NewMask));
22433 return UseMask(NewMask);
22434}
22435
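// Emulate the generalized reverse (GREV) or generalized OR-combine (GORC) bit
// permutation on a 64-bit value for the given control ShAmt. Used below to
// model the known bits of BREV8 and ORC_B (control value 7).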
22436static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {
22437 static const uint64_t GREVMasks[] = {
22438 0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
22439 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
22440
22441 for (unsigned Stage = 0; Stage != 6; ++Stage) {
22442 unsigned Shift = 1 << Stage;
22443 if (ShAmt & Shift) {
22444 uint64_t Mask = GREVMasks[Stage];
22445 uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
22446 if (IsGORC)
22447 Res |= x;
22448 x = Res;
22449 }
22450 }
22451
22452 return x;
22453}
22454
22455void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
22456 KnownBits &Known,
22457 const APInt &DemandedElts,
22458 const SelectionDAG &DAG,
22459 unsigned Depth) const {
22460 unsigned BitWidth = Known.getBitWidth();
22461 unsigned Opc = Op.getOpcode();
22462 assert((Opc >= ISD::BUILTIN_OP_END ||
22463 Opc == ISD::INTRINSIC_WO_CHAIN ||
22464 Opc == ISD::INTRINSIC_W_CHAIN ||
22465 Opc == ISD::INTRINSIC_VOID) &&
22466 "Should use MaskedValueIsZero if you don't know whether Op"
22467 " is a target node!");
22468
22469 Known.resetAll();
22470 switch (Opc) {
22471 default: break;
22472 case RISCVISD::SELECT_CC: {
22473 Known = DAG.computeKnownBits(Op: Op.getOperand(i: 4), Depth: Depth + 1);
22474 // If we don't know any bits, early out.
22475 if (Known.isUnknown())
22476 break;
22477 KnownBits Known2 = DAG.computeKnownBits(Op: Op.getOperand(i: 3), Depth: Depth + 1);
22478
22479 // Only known if known in both the LHS and RHS.
22480 Known = Known.intersectWith(RHS: Known2);
22481 break;
22482 }
22483 case RISCVISD::VCPOP_VL: {
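    // The result counts at most VL mask bits, so it cannot have more active
    // bits than the maximum value of the VL operand (operand 2).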
22484 KnownBits Known2 = DAG.computeKnownBits(Op: Op.getOperand(i: 2), Depth: Depth + 1);
22485 Known.Zero.setBitsFrom(Known2.countMaxActiveBits());
22486 break;
22487 }
22488 case RISCVISD::CZERO_EQZ:
22489 case RISCVISD::CZERO_NEZ:
22490 Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth + 1);
22491 // Result is either all zero or operand 0. We can propagate zeros, but not
22492 // ones.
22493 Known.One.clearAllBits();
22494 break;
22495 case RISCVISD::REMUW: {
22496 KnownBits Known2;
22497 Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), DemandedElts, Depth: Depth + 1);
22498 Known2 = DAG.computeKnownBits(Op: Op.getOperand(i: 1), DemandedElts, Depth: Depth + 1);
22499 // We only care about the lower 32 bits.
22500 Known = KnownBits::urem(LHS: Known.trunc(BitWidth: 32), RHS: Known2.trunc(BitWidth: 32));
22501 // Restore the original width by sign extending.
22502 Known = Known.sext(BitWidth);
22503 break;
22504 }
22505 case RISCVISD::DIVUW: {
22506 KnownBits Known2;
22507 Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), DemandedElts, Depth: Depth + 1);
22508 Known2 = DAG.computeKnownBits(Op: Op.getOperand(i: 1), DemandedElts, Depth: Depth + 1);
22509 // We only care about the lower 32 bits.
22510 Known = KnownBits::udiv(LHS: Known.trunc(BitWidth: 32), RHS: Known2.trunc(BitWidth: 32));
22511 // Restore the original width by sign extending.
22512 Known = Known.sext(BitWidth);
22513 break;
22514 }
22515 case RISCVISD::SLLW: {
22516 KnownBits Known2;
22517 Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), DemandedElts, Depth: Depth + 1);
22518 Known2 = DAG.computeKnownBits(Op: Op.getOperand(i: 1), DemandedElts, Depth: Depth + 1);
22519 Known = KnownBits::shl(LHS: Known.trunc(BitWidth: 32), RHS: Known2.trunc(BitWidth: 5).zext(BitWidth: 32));
22520 // Restore the original width by sign extending.
22521 Known = Known.sext(BitWidth);
22522 break;
22523 }
22524 case RISCVISD::SRLW: {
22525 KnownBits Known2;
22526 Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), DemandedElts, Depth: Depth + 1);
22527 Known2 = DAG.computeKnownBits(Op: Op.getOperand(i: 1), DemandedElts, Depth: Depth + 1);
22528 Known = KnownBits::lshr(LHS: Known.trunc(BitWidth: 32), RHS: Known2.trunc(BitWidth: 5).zext(BitWidth: 32));
22529 // Restore the original width by sign extending.
22530 Known = Known.sext(BitWidth);
22531 break;
22532 }
22533 case RISCVISD::SRAW: {
22534 KnownBits Known2;
22535 Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), DemandedElts, Depth: Depth + 1);
22536 Known2 = DAG.computeKnownBits(Op: Op.getOperand(i: 1), DemandedElts, Depth: Depth + 1);
22537 Known = KnownBits::ashr(LHS: Known.trunc(BitWidth: 32), RHS: Known2.trunc(BitWidth: 5).zext(BitWidth: 32));
22538 // Restore the original width by sign extending.
22539 Known = Known.sext(BitWidth);
22540 break;
22541 }
22542 case RISCVISD::SHL_ADD: {
22543 KnownBits Known2;
22544 Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), DemandedElts, Depth: Depth + 1);
22545 unsigned ShAmt = Op.getConstantOperandVal(i: 1);
22546 Known <<= ShAmt;
22547 Known.Zero.setLowBits(ShAmt); // the <<= operator left these bits unknown
22548 Known2 = DAG.computeKnownBits(Op: Op.getOperand(i: 2), DemandedElts, Depth: Depth + 1);
22549 Known = KnownBits::add(LHS: Known, RHS: Known2);
22550 break;
22551 }
22552 case RISCVISD::CTZW: {
22553 KnownBits Known2 = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth + 1);
22554 unsigned PossibleTZ = Known2.trunc(BitWidth: 32).countMaxTrailingZeros();
22555 unsigned LowBits = llvm::bit_width(Value: PossibleTZ);
22556 Known.Zero.setBitsFrom(LowBits);
22557 break;
22558 }
22559 case RISCVISD::CLZW: {
22560 KnownBits Known2 = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth + 1);
22561 unsigned PossibleLZ = Known2.trunc(BitWidth: 32).countMaxLeadingZeros();
22562 unsigned LowBits = llvm::bit_width(Value: PossibleLZ);
22563 Known.Zero.setBitsFrom(LowBits);
22564 break;
22565 }
22566 case RISCVISD::CLSW: {
22567 // The upper 32 bits are ignored by the instruction, but ComputeNumSignBits
22568 // doesn't give us a way to ignore them. If there are fewer than 33 sign
22569    // bits in the input, consider it as having no redundant sign bits.
22570    // Otherwise the lower bound of the result is NumSignBits-33. The maximum
22571    // value of the result is 31.
22572 unsigned NumSignBits = DAG.ComputeNumSignBits(Op: Op.getOperand(i: 0), Depth: Depth + 1);
22573 unsigned MinRedundantSignBits = NumSignBits < 33 ? 0 : NumSignBits - 33;
22574 // Create a ConstantRange [MinRedundantSignBits, 32) and convert it to
22575 // KnownBits.
22576 ConstantRange Range(APInt(BitWidth, MinRedundantSignBits),
22577 APInt(BitWidth, 32));
22578 Known = Range.toKnownBits();
22579 break;
22580 }
22581 case RISCVISD::BREV8:
22582 case RISCVISD::ORC_B: {
22583 // FIXME: This is based on the non-ratified Zbp GREV and GORC where a
22584 // control value of 7 is equivalent to brev8 and orc.b.
22585 Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth + 1);
22586 bool IsGORC = Op.getOpcode() == RISCVISD::ORC_B;
22587 // To compute zeros for ORC_B, we need to invert the value and invert it
22588 // back after. This inverting is harmless for BREV8.
22589 Known.Zero =
22590 ~computeGREVOrGORC(x: ~Known.Zero.getZExtValue(), ShAmt: 7, IsGORC);
22591 Known.One = computeGREVOrGORC(x: Known.One.getZExtValue(), ShAmt: 7, IsGORC);
22592 break;
22593 }
22594 case RISCVISD::READ_VLENB: {
22595 // We can use the minimum and maximum VLEN values to bound VLENB. We
22596 // know VLEN must be a power of two.
22597 const unsigned MinVLenB = Subtarget.getRealMinVLen() / 8;
22598 const unsigned MaxVLenB = Subtarget.getRealMaxVLen() / 8;
22599 assert(MinVLenB > 0 && "READ_VLENB without vector extension enabled?");
22600 Known.Zero.setLowBits(Log2_32(Value: MinVLenB));
22601 Known.Zero.setBitsFrom(Log2_32(Value: MaxVLenB)+1);
22602 if (MaxVLenB == MinVLenB)
22603 Known.One.setBit(Log2_32(Value: MinVLenB));
22604 break;
22605 }
22606 case RISCVISD::FCLASS: {
22607 // fclass will only set one of the low 10 bits.
22608 Known.Zero.setBitsFrom(10);
22609 break;
22610 }
22611 case ISD::INTRINSIC_W_CHAIN:
22612 case ISD::INTRINSIC_WO_CHAIN: {
22613 unsigned IntNo =
22614 Op.getConstantOperandVal(i: Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
22615 switch (IntNo) {
22616 default:
22617 // We can't do anything for most intrinsics.
22618 break;
22619 case Intrinsic::riscv_vsetvli:
22620 case Intrinsic::riscv_vsetvlimax: {
22621 bool HasAVL = IntNo == Intrinsic::riscv_vsetvli;
22622 unsigned VSEW = Op.getConstantOperandVal(i: HasAVL + 1);
22623 RISCVVType::VLMUL VLMUL =
22624 static_cast<RISCVVType::VLMUL>(Op.getConstantOperandVal(i: HasAVL + 2));
22625 unsigned SEW = RISCVVType::decodeVSEW(VSEW);
22626 auto [LMul, Fractional] = RISCVVType::decodeVLMUL(VLMul: VLMUL);
22627 uint64_t MaxVL = Subtarget.getRealMaxVLen() / SEW;
22628 MaxVL = (Fractional) ? MaxVL / LMul : MaxVL * LMul;
22629
22630 // The result of vsetvli must not be larger than AVL.
22631 if (HasAVL && isa<ConstantSDNode>(Val: Op.getOperand(i: 1)))
22632 MaxVL = std::min(a: MaxVL, b: Op.getConstantOperandVal(i: 1));
22633
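// For example (illustrative): with a maximum VLEN of 512, SEW=32 and LMUL=2,
// MaxVL = (512 / 32) * 2 = 32, so bits 6 and above of the result are known
// zero.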
22634 unsigned KnownZeroFirstBit = Log2_32(Value: MaxVL) + 1;
22635 if (BitWidth > KnownZeroFirstBit)
22636 Known.Zero.setBitsFrom(KnownZeroFirstBit);
22637 break;
22638 }
22639 }
22640 break;
22641 }
22642 }
22643}
22644
22645unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
22646 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
22647 unsigned Depth) const {
22648 switch (Op.getOpcode()) {
22649 default:
22650 break;
22651 case RISCVISD::SELECT_CC: {
22652 unsigned Tmp =
22653 DAG.ComputeNumSignBits(Op: Op.getOperand(i: 3), DemandedElts, Depth: Depth + 1);
22654 if (Tmp == 1) return 1; // Early out.
22655 unsigned Tmp2 =
22656 DAG.ComputeNumSignBits(Op: Op.getOperand(i: 4), DemandedElts, Depth: Depth + 1);
22657 return std::min(a: Tmp, b: Tmp2);
22658 }
22659 case RISCVISD::CZERO_EQZ:
22660 case RISCVISD::CZERO_NEZ:
22661 // Output is either all zero or operand 0. We can propagate sign bit count
22662 // from operand 0.
22663 return DAG.ComputeNumSignBits(Op: Op.getOperand(i: 0), DemandedElts, Depth: Depth + 1);
22664 case RISCVISD::NEGW_MAX: {
22665 // We expand this at isel to negw+max. The result will have 33 sign bits
22666 // if the input has at least 33 sign bits.
22667 unsigned Tmp =
22668 DAG.ComputeNumSignBits(Op: Op.getOperand(i: 0), DemandedElts, Depth: Depth + 1);
22669 if (Tmp < 33) return 1;
22670 return 33;
22671 }
22672 case RISCVISD::SRAW: {
22673 unsigned Tmp =
22674 DAG.ComputeNumSignBits(Op: Op.getOperand(i: 0), DemandedElts, Depth: Depth + 1);
22675 // sraw produces at least 33 sign bits. If the input already has more than
22676 // 33 sign bits, sraw will preserve them.
22677 // TODO: A more precise answer could be calculated depending on known bits
22678 // in the shift amount.
22679 return std::max(a: Tmp, b: 33U);
22680 }
22681 case RISCVISD::SLLW:
22682 case RISCVISD::SRLW:
22683 case RISCVISD::DIVW:
22684 case RISCVISD::DIVUW:
22685 case RISCVISD::REMUW:
22686 case RISCVISD::ROLW:
22687 case RISCVISD::RORW:
22688 case RISCVISD::ABSW:
22689 case RISCVISD::FCVT_W_RV64:
22690 case RISCVISD::FCVT_WU_RV64:
22691 case RISCVISD::STRICT_FCVT_W_RV64:
22692 case RISCVISD::STRICT_FCVT_WU_RV64:
22693 // TODO: As the result is sign-extended, this is conservatively correct.
22694 return 33;
22695 case RISCVISD::VMV_X_S: {
22696 // The number of sign bits of the scalar result is computed by obtaining the
22697 // element type of the input vector operand, subtracting its width from the
22698 // XLEN, and then adding one (sign bit within the element type). If the
22699 // element type is wider than XLen, the least-significant XLEN bits are
22700 // taken.
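// For example (illustrative): on RV64, extracting from a vector of i16
// elements gives 64 - 16 + 1 = 49 sign bits.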
22701 unsigned XLen = Subtarget.getXLen();
22702 unsigned EltBits = Op.getOperand(i: 0).getScalarValueSizeInBits();
22703 if (EltBits <= XLen)
22704 return XLen - EltBits + 1;
22705 break;
22706 }
22707 case ISD::INTRINSIC_W_CHAIN: {
22708 unsigned IntNo = Op.getConstantOperandVal(i: 1);
22709 switch (IntNo) {
22710 default:
22711 break;
22712 case Intrinsic::riscv_masked_atomicrmw_xchg:
22713 case Intrinsic::riscv_masked_atomicrmw_add:
22714 case Intrinsic::riscv_masked_atomicrmw_sub:
22715 case Intrinsic::riscv_masked_atomicrmw_nand:
22716 case Intrinsic::riscv_masked_atomicrmw_max:
22717 case Intrinsic::riscv_masked_atomicrmw_min:
22718 case Intrinsic::riscv_masked_atomicrmw_umax:
22719 case Intrinsic::riscv_masked_atomicrmw_umin:
22720 case Intrinsic::riscv_masked_cmpxchg:
22721 // riscv_masked_{atomicrmw_*,cmpxchg} intrinsics represent an emulated
22722 // narrow atomic operation. These are implemented using atomic
22723 // operations at the minimum supported atomicrmw/cmpxchg width whose
22724 // result is then sign extended to XLEN. With +A, the minimum width is
22725 // 32 on both RV64 and RV32.
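// For example (illustrative): on RV64 the emulated operation's result is sign
// extended from bit 31, so it has at least 64 - 31 = 33 sign bits.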
22726 assert(getMinCmpXchgSizeInBits() == 32);
22727 assert(Subtarget.hasStdExtZalrsc());
22728 return Op.getValueSizeInBits() - 31;
22729 }
22730 break;
22731 }
22732 }
22733
22734 return 1;
22735}
22736
22737bool RISCVTargetLowering::SimplifyDemandedBitsForTargetNode(
22738 SDValue Op, const APInt &OriginalDemandedBits,
22739 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
22740 unsigned Depth) const {
22741 unsigned BitWidth = OriginalDemandedBits.getBitWidth();
22742
22743 switch (Op.getOpcode()) {
22744 case RISCVISD::BREV8:
22745 case RISCVISD::ORC_B: {
22746 KnownBits Known2;
22747 bool IsGORC = Op.getOpcode() == RISCVISD::ORC_B;
22748 // For BREV8, we need to do BREV8 on the demanded bits.
22749 // For ORC_B, any bit in the output demands all bits from the same byte.
22750 // So we need to do ORC_B on the demanded bits.
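// For example (illustrative): if only bit 0 of an ORC_B result is demanded,
// then all of bits [7:0] of the input are demanded, since orc.b ORs together
// every bit within each byte.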
22751 APInt DemandedBits =
22752 APInt(BitWidth, computeGREVOrGORC(x: OriginalDemandedBits.getZExtValue(),
22753 ShAmt: 7, IsGORC));
22754 if (SimplifyDemandedBits(Op: Op.getOperand(i: 0), DemandedBits,
22755 DemandedElts: OriginalDemandedElts, Known&: Known2, TLO, Depth: Depth + 1))
22756 return true;
22757
22758 // To compute zeros for ORC_B, we need to invert the value and invert it
22759 // back after. This inverting is harmless for BREV8.
22760 Known.Zero = ~computeGREVOrGORC(x: ~Known2.Zero.getZExtValue(), ShAmt: 7, IsGORC);
22761 Known.One = computeGREVOrGORC(x: Known2.One.getZExtValue(), ShAmt: 7, IsGORC);
22762 return false;
22763 }
22764 }
22765
22766 return TargetLowering::SimplifyDemandedBitsForTargetNode(
22767 Op, DemandedBits: OriginalDemandedBits, DemandedElts: OriginalDemandedElts, Known, TLO, Depth);
22768}
22769
22770bool RISCVTargetLowering::canCreateUndefOrPoisonForTargetNode(
22771 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
22772 bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const {
22773
22774 // TODO: Add more target nodes.
22775 switch (Op.getOpcode()) {
22776 case RISCVISD::SLLW:
22777 case RISCVISD::SRAW:
22778 case RISCVISD::SRLW:
22779 case RISCVISD::RORW:
22780 case RISCVISD::ROLW:
22781 // Only the lower 5 bits of RHS are read, guaranteeing the rotate/shift
22782 // amount is in bounds.
22783 return false;
22784 case RISCVISD::SELECT_CC:
22785 // Integer comparisons cannot create poison.
22786 assert(Op.getOperand(0).getValueType().isInteger() &&
22787 "RISCVISD::SELECT_CC only compares integers");
22788 return false;
22789 }
22790 return TargetLowering::canCreateUndefOrPoisonForTargetNode(
22791 Op, DemandedElts, DAG, PoisonOnly, ConsiderFlags, Depth);
22792}
22793
22794const Constant *
22795RISCVTargetLowering::getTargetConstantFromLoad(LoadSDNode *Ld) const {
22796 assert(Ld && "Unexpected null LoadSDNode");
22797 if (!ISD::isNormalLoad(N: Ld))
22798 return nullptr;
22799
22800 SDValue Ptr = Ld->getBasePtr();
22801
22802 // Only constant pools with no offset are supported.
22803 auto GetSupportedConstantPool = [](SDValue Ptr) -> ConstantPoolSDNode * {
22804 auto *CNode = dyn_cast<ConstantPoolSDNode>(Val&: Ptr);
22805 if (!CNode || CNode->isMachineConstantPoolEntry() ||
22806 CNode->getOffset() != 0)
22807 return nullptr;
22808
22809 return CNode;
22810 };
22811
22812 // Simple case, LLA.
22813 if (Ptr.getOpcode() == RISCVISD::LLA) {
22814 auto *CNode = GetSupportedConstantPool(Ptr.getOperand(i: 0));
22815 if (!CNode || CNode->getTargetFlags() != 0)
22816 return nullptr;
22817
22818 return CNode->getConstVal();
22819 }
22820
22821 // Look for a HI and ADD_LO pair.
22822 if (Ptr.getOpcode() != RISCVISD::ADD_LO ||
22823 Ptr.getOperand(i: 0).getOpcode() != RISCVISD::HI)
22824 return nullptr;
22825
22826 auto *CNodeLo = GetSupportedConstantPool(Ptr.getOperand(i: 1));
22827 auto *CNodeHi = GetSupportedConstantPool(Ptr.getOperand(i: 0).getOperand(i: 0));
22828
22829 if (!CNodeLo || CNodeLo->getTargetFlags() != RISCVII::MO_LO ||
22830 !CNodeHi || CNodeHi->getTargetFlags() != RISCVII::MO_HI)
22831 return nullptr;
22832
22833 if (CNodeLo->getConstVal() != CNodeHi->getConstVal())
22834 return nullptr;
22835
22836 return CNodeLo->getConstVal();
22837}
22838
22839static MachineBasicBlock *emitReadCounterWidePseudo(MachineInstr &MI,
22840 MachineBasicBlock *BB) {
22841 assert(MI.getOpcode() == RISCV::ReadCounterWide && "Unexpected instruction");
22842
22843 // To read a 64-bit counter CSR on a 32-bit target, we read the two halves.
22844 // Should the count have wrapped while it was being read, we need to try
22845 // again.
22846 // For example:
22847 // ```
22848 // read:
22849 // csrrs x3, counterh # load high word of counter
22850 // csrrs x2, counter # load low word of counter
22851 // csrrs x4, counterh # load high word of counter
22852 // bne x3, x4, read # check if high word reads match, otherwise try again
22853 // ```
22854
22855 MachineFunction &MF = *BB->getParent();
22856 const BasicBlock *LLVMBB = BB->getBasicBlock();
22857 MachineFunction::iterator It = ++BB->getIterator();
22858
22859 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(BB: LLVMBB);
22860 MF.insert(MBBI: It, MBB: LoopMBB);
22861
22862 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(BB: LLVMBB);
22863 MF.insert(MBBI: It, MBB: DoneMBB);
22864
22865 // Transfer the remainder of BB and its successor edges to DoneMBB.
22866 DoneMBB->splice(Where: DoneMBB->begin(), Other: BB,
22867 From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end());
22868 DoneMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB);
22869
22870 BB->addSuccessor(Succ: LoopMBB);
22871
22872 MachineRegisterInfo &RegInfo = MF.getRegInfo();
22873 Register ReadAgainReg = RegInfo.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
22874 Register LoReg = MI.getOperand(i: 0).getReg();
22875 Register HiReg = MI.getOperand(i: 1).getReg();
22876 int64_t LoCounter = MI.getOperand(i: 2).getImm();
22877 int64_t HiCounter = MI.getOperand(i: 3).getImm();
22878 DebugLoc DL = MI.getDebugLoc();
22879
22880 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
22881 BuildMI(BB: LoopMBB, MIMD: DL, MCID: TII->get(Opcode: RISCV::CSRRS), DestReg: HiReg)
22882 .addImm(Val: HiCounter)
22883 .addReg(RegNo: RISCV::X0);
22884 BuildMI(BB: LoopMBB, MIMD: DL, MCID: TII->get(Opcode: RISCV::CSRRS), DestReg: LoReg)
22885 .addImm(Val: LoCounter)
22886 .addReg(RegNo: RISCV::X0);
22887 BuildMI(BB: LoopMBB, MIMD: DL, MCID: TII->get(Opcode: RISCV::CSRRS), DestReg: ReadAgainReg)
22888 .addImm(Val: HiCounter)
22889 .addReg(RegNo: RISCV::X0);
22890
22891 BuildMI(BB: LoopMBB, MIMD: DL, MCID: TII->get(Opcode: RISCV::BNE))
22892 .addReg(RegNo: HiReg)
22893 .addReg(RegNo: ReadAgainReg)
22894 .addMBB(MBB: LoopMBB);
22895
22896 LoopMBB->addSuccessor(Succ: LoopMBB);
22897 LoopMBB->addSuccessor(Succ: DoneMBB);
22898
22899 MI.eraseFromParent();
22900
22901 return DoneMBB;
22902}
22903
22904static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
22905 MachineBasicBlock *BB,
22906 const RISCVSubtarget &Subtarget) {
22907 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
22908
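// Illustrative sketch of the expansion (assuming the D extension and a
// little-endian target; registers and the frame index are placeholders chosen
// later by register/frame allocation):
//   fsd  f<src>, 0(<slot>)   # spill the FPR64 source
//   lw   <lo>, 0(<slot>)     # reload the low 32 bits
//   lw   <hi>, 4(<slot>)     # reload the high 32 bits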
22909 MachineFunction &MF = *BB->getParent();
22910 DebugLoc DL = MI.getDebugLoc();
22911 const RISCVInstrInfo &TII = *MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
22912 Register LoReg = MI.getOperand(i: 0).getReg();
22913 Register HiReg = MI.getOperand(i: 1).getReg();
22914 Register SrcReg = MI.getOperand(i: 2).getReg();
22915
22916 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
22917 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
22918
22919 TII.storeRegToStackSlot(MBB&: *BB, MBBI: MI, SrcReg, IsKill: MI.getOperand(i: 2).isKill(), FrameIndex: FI, RC: SrcRC,
22920 VReg: Register());
22921 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
22922 MachineMemOperand *MMOLo =
22923 MF.getMachineMemOperand(PtrInfo: MPI, F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(8));
22924 MachineMemOperand *MMOHi = MF.getMachineMemOperand(
22925 PtrInfo: MPI.getWithOffset(O: 4), F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(8));
22926
22927 // For big-endian, the high part is at offset 0 and the low part at offset 4.
22928 if (!Subtarget.isLittleEndian())
22929 std::swap(a&: LoReg, b&: HiReg);
22930
22931 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::LW), DestReg: LoReg)
22932 .addFrameIndex(Idx: FI)
22933 .addImm(Val: 0)
22934 .addMemOperand(MMO: MMOLo);
22935 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::LW), DestReg: HiReg)
22936 .addFrameIndex(Idx: FI)
22937 .addImm(Val: 4)
22938 .addMemOperand(MMO: MMOHi);
22939 MI.eraseFromParent(); // The pseudo instruction is gone now.
22940 return BB;
22941}
22942
22943static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
22944 MachineBasicBlock *BB,
22945 const RISCVSubtarget &Subtarget) {
22946 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
22947 "Unexpected instruction");
22948
22949 MachineFunction &MF = *BB->getParent();
22950 DebugLoc DL = MI.getDebugLoc();
22951 const RISCVInstrInfo &TII = *MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
22952 Register DstReg = MI.getOperand(i: 0).getReg();
22953 Register LoReg = MI.getOperand(i: 1).getReg();
22954 Register HiReg = MI.getOperand(i: 2).getReg();
22955 bool KillLo = MI.getOperand(i: 1).isKill();
22956 bool KillHi = MI.getOperand(i: 2).isKill();
22957
22958 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
22959 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
22960
22961 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
22962 MachineMemOperand *MMOLo =
22963 MF.getMachineMemOperand(PtrInfo: MPI, F: MachineMemOperand::MOStore, Size: 4, BaseAlignment: Align(8));
22964 MachineMemOperand *MMOHi = MF.getMachineMemOperand(
22965 PtrInfo: MPI.getWithOffset(O: 4), F: MachineMemOperand::MOStore, Size: 4, BaseAlignment: Align(8));
22966
22967 // For big-endian, store the high part at offset 0 and the low part at
22968 // offset 4.
22969 if (!Subtarget.isLittleEndian()) {
22970 std::swap(a&: LoReg, b&: HiReg);
22971 std::swap(a&: KillLo, b&: KillHi);
22972 }
22973
22974 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::SW))
22975 .addReg(RegNo: LoReg, Flags: getKillRegState(B: KillLo))
22976 .addFrameIndex(Idx: FI)
22977 .addImm(Val: 0)
22978 .addMemOperand(MMO: MMOLo);
22979 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::SW))
22980 .addReg(RegNo: HiReg, Flags: getKillRegState(B: KillHi))
22981 .addFrameIndex(Idx: FI)
22982 .addImm(Val: 4)
22983 .addMemOperand(MMO: MMOHi);
22984 TII.loadRegFromStackSlot(MBB&: *BB, MBBI: MI, DstReg, FrameIndex: FI, RC: DstRC, VReg: Register());
22985 MI.eraseFromParent(); // The pseudo instruction is gone now.
22986 return BB;
22987}
22988
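// Expand a PseudoQuietF{LT,LE}_* into a "quiet" comparison: save FFLAGS, issue
// the (signaling) relational compare, restore FFLAGS so that an INVALID flag
// raised for quiet NaNs is discarded, then issue FEQ to x0, which raises
// INVALID only for signaling NaNs. Illustrative expansion for PseudoQuietFLT_S
// (register names are placeholders):
//   frflags t0
//   flt.s   a0, fa0, fa1
//   fsflags t0
//   feq.s   zero, fa0, fa1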
22989static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
22990 unsigned RelOpcode, unsigned EqOpcode,
22991 const RISCVSubtarget &Subtarget) {
22992 DebugLoc DL = MI.getDebugLoc();
22993 Register DstReg = MI.getOperand(i: 0).getReg();
22994 Register Src1Reg = MI.getOperand(i: 1).getReg();
22995 Register Src2Reg = MI.getOperand(i: 2).getReg();
22996 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
22997 Register SavedFFlags = MRI.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
22998 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
22999
23000 // Save the current FFLAGS.
23001 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::ReadFFLAGS), DestReg: SavedFFlags);
23002
23003 auto MIB = BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: RelOpcode), DestReg: DstReg)
23004 .addReg(RegNo: Src1Reg)
23005 .addReg(RegNo: Src2Reg);
23006 if (MI.getFlag(Flag: MachineInstr::MIFlag::NoFPExcept))
23007 MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
23008
23009 // Restore the FFLAGS.
23010 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::WriteFFLAGS))
23011 .addReg(RegNo: SavedFFlags, Flags: RegState::Kill);
23012
23013 // Issue a dummy FEQ opcode to raise an exception for signaling NaNs.
23014 auto MIB2 = BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: EqOpcode), DestReg: RISCV::X0)
23015 .addReg(RegNo: Src1Reg, Flags: getKillRegState(B: MI.getOperand(i: 1).isKill()))
23016 .addReg(RegNo: Src2Reg, Flags: getKillRegState(B: MI.getOperand(i: 2).isKill()));
23017 if (MI.getFlag(Flag: MachineInstr::MIFlag::NoFPExcept))
23018 MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
23019
23020 // Erase the pseudoinstruction.
23021 MI.eraseFromParent();
23022 return BB;
23023}
23024
23025static MachineBasicBlock *
23026EmitLoweredCascadedSelect(MachineInstr &First, MachineInstr &Second,
23027 MachineBasicBlock *ThisMBB,
23028 const RISCVSubtarget &Subtarget) {
23029 // Select_FPRX_ (rs1, rs2, imm, rs4, (Select_FPRX_ rs1, rs2, imm, rs4, rs5))
23030 // Without this, custom-inserter would have generated:
23031 //
23032 // A
23033 // | \
23034 // | B
23035 // | /
23036 // C
23037 // | \
23038 // | D
23039 // | /
23040 // E
23041 //
23042 // A: X = ...; Y = ...
23043 // B: empty
23044 // C: Z = PHI [X, A], [Y, B]
23045 // D: empty
23046 // E: PHI [X, C], [Z, D]
23047 //
23048 // If we lower both Select_FPRX_ in a single step, we can instead generate:
23049 //
23050 // A
23051 // | \
23052 // | C
23053 // | /|
23054 // |/ |
23055 // | |
23056 // | D
23057 // | /
23058 // E
23059 //
23060 // A: X = ...; Y = ...
23061 // D: empty
23062 // E: PHI [X, A], [X, C], [Y, D]
23063
23064 const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
23065 const DebugLoc &DL = First.getDebugLoc();
23066 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
23067 MachineFunction *F = ThisMBB->getParent();
23068 MachineBasicBlock *FirstMBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
23069 MachineBasicBlock *SecondMBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
23070 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
23071 MachineFunction::iterator It = ++ThisMBB->getIterator();
23072 F->insert(MBBI: It, MBB: FirstMBB);
23073 F->insert(MBBI: It, MBB: SecondMBB);
23074 F->insert(MBBI: It, MBB: SinkMBB);
23075
23076 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
23077 SinkMBB->splice(Where: SinkMBB->begin(), Other: ThisMBB,
23078 From: std::next(x: MachineBasicBlock::iterator(First)),
23079 To: ThisMBB->end());
23080 SinkMBB->transferSuccessorsAndUpdatePHIs(FromMBB: ThisMBB);
23081
23082 // Fallthrough block for ThisMBB.
23083 ThisMBB->addSuccessor(Succ: FirstMBB);
23084 // Fallthrough block for FirstMBB.
23085 FirstMBB->addSuccessor(Succ: SecondMBB);
23086 ThisMBB->addSuccessor(Succ: SinkMBB);
23087 FirstMBB->addSuccessor(Succ: SinkMBB);
23088 // SecondMBB falls through to SinkMBB.
23089 SecondMBB->addSuccessor(Succ: SinkMBB);
23090
23091 auto FirstCC = static_cast<RISCVCC::CondCode>(First.getOperand(i: 3).getImm());
23092 Register FLHS = First.getOperand(i: 1).getReg();
23093 Register FRHS = First.getOperand(i: 2).getReg();
23094 // Insert appropriate branch.
23095 BuildMI(BB: FirstMBB, MIMD: DL, MCID: TII.get(Opcode: RISCVCC::getBrCond(CC: FirstCC, SelectOpc: First.getOpcode())))
23096 .addReg(RegNo: FLHS)
23097 .addReg(RegNo: FRHS)
23098 .addMBB(MBB: SinkMBB);
23099
23100 Register SLHS = Second.getOperand(i: 1).getReg();
23101 Register SRHS = Second.getOperand(i: 2).getReg();
23102 Register Op1Reg4 = First.getOperand(i: 4).getReg();
23103 Register Op1Reg5 = First.getOperand(i: 5).getReg();
23104
23105 auto SecondCC = static_cast<RISCVCC::CondCode>(Second.getOperand(i: 3).getImm());
23106 // Insert appropriate branch.
23107 BuildMI(BB: ThisMBB, MIMD: DL,
23108 MCID: TII.get(Opcode: RISCVCC::getBrCond(CC: SecondCC, SelectOpc: Second.getOpcode())))
23109 .addReg(RegNo: SLHS)
23110 .addReg(RegNo: SRHS)
23111 .addMBB(MBB: SinkMBB);
23112
23113 Register DestReg = Second.getOperand(i: 0).getReg();
23114 Register Op2Reg4 = Second.getOperand(i: 4).getReg();
23115 BuildMI(BB&: *SinkMBB, I: SinkMBB->begin(), MIMD: DL, MCID: TII.get(Opcode: RISCV::PHI), DestReg)
23116 .addReg(RegNo: Op2Reg4)
23117 .addMBB(MBB: ThisMBB)
23118 .addReg(RegNo: Op1Reg4)
23119 .addMBB(MBB: FirstMBB)
23120 .addReg(RegNo: Op1Reg5)
23121 .addMBB(MBB: SecondMBB);
23122
23123 // Now remove the Select_FPRX_s.
23124 First.eraseFromParent();
23125 Second.eraseFromParent();
23126 return SinkMBB;
23127}
23128
23129static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
23130 MachineBasicBlock *BB,
23131 const RISCVSubtarget &Subtarget) {
23132 // To "insert" Select_* instructions, we actually have to insert the triangle
23133 // control-flow pattern. The incoming instructions know the destination vreg
23134 // to set, the condition code register to branch on, the true/false values to
23135 // select between, and the condcode to use to select the appropriate branch.
23136 //
23137 // We produce the following control flow:
23138 // HeadMBB
23139 // | \
23140 // | IfFalseMBB
23141 // | /
23142 // TailMBB
23143 //
23144 // When we find a sequence of selects we attempt to optimize their emission
23145 // by sharing the control flow. Currently we only handle cases where we have
23146 // multiple selects with the exact same condition (same LHS, RHS and CC).
23147 // The selects may be interleaved with other instructions if the other
23148 // instructions meet some requirements we deem safe:
23149 // - They are debug instructions; otherwise,
23150 // - They are not pseudo instructions that need custom insertion,
23151 // - They do not have side-effects, do not access memory and their inputs do
23152 // not depend on the results of the select pseudo-instructions, and
23153 // - They don't adjust the stack.
23154 // The TrueV/FalseV operands of the selects cannot depend on the result of
23155 // previous selects in the sequence.
23156 // These conditions could be further relaxed. See the X86 target for a
23157 // related approach and more information.
23158 //
23159 // Select_FPRX_ (rs1, rs2, imm, rs4, (Select_FPRX_ rs1, rs2, imm, rs4, rs5))
23160 // is checked here and handled by a separate function -
23161 // EmitLoweredCascadedSelect.
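// Illustrative result for a single (non-cascaded) Select_GPR_Using_CC_GPR with
// CC == EQ (virtual register names are placeholders):
//
// HeadMBB:
//   beq %lhs, %rhs, TailMBB
// IfFalseMBB:   ; fall through
// TailMBB:
//   %dst = PHI [ %trueval, HeadMBB ], [ %falseval, IfFalseMBB ]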
23162
23163 auto Next = next_nodbg(It: MI.getIterator(), End: BB->instr_end());
23164 if (MI.getOpcode() != RISCV::Select_GPR_Using_CC_GPR &&
23165 MI.getOperand(i: 1).isReg() && MI.getOperand(i: 2).isReg() &&
23166 Next != BB->end() && Next->getOpcode() == MI.getOpcode() &&
23167 Next->getOperand(i: 5).getReg() == MI.getOperand(i: 0).getReg() &&
23168 Next->getOperand(i: 5).isKill())
23169 return EmitLoweredCascadedSelect(First&: MI, Second&: *Next, ThisMBB: BB, Subtarget);
23170
23171 Register LHS = MI.getOperand(i: 1).getReg();
23172 Register RHS;
23173 if (MI.getOperand(i: 2).isReg())
23174 RHS = MI.getOperand(i: 2).getReg();
23175 auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(i: 3).getImm());
23176
23177 SmallVector<MachineInstr *, 4> SelectDebugValues;
23178 SmallSet<Register, 4> SelectDests;
23179 SelectDests.insert(V: MI.getOperand(i: 0).getReg());
23180
23181 MachineInstr *LastSelectPseudo = &MI;
23182 const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
23183
23184 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
23185 SequenceMBBI != E; ++SequenceMBBI) {
23186 if (SequenceMBBI->isDebugInstr())
23187 continue;
23188 if (RISCVInstrInfo::isSelectPseudo(MI: *SequenceMBBI)) {
23189 if (SequenceMBBI->getOperand(i: 1).getReg() != LHS ||
23190 !SequenceMBBI->getOperand(i: 2).isReg() ||
23191 SequenceMBBI->getOperand(i: 2).getReg() != RHS ||
23192 SequenceMBBI->getOperand(i: 3).getImm() != CC ||
23193 SelectDests.count(V: SequenceMBBI->getOperand(i: 4).getReg()) ||
23194 SelectDests.count(V: SequenceMBBI->getOperand(i: 5).getReg()))
23195 break;
23196 LastSelectPseudo = &*SequenceMBBI;
23197 SequenceMBBI->collectDebugValues(DbgValues&: SelectDebugValues);
23198 SelectDests.insert(V: SequenceMBBI->getOperand(i: 0).getReg());
23199 continue;
23200 }
23201 if (SequenceMBBI->hasUnmodeledSideEffects() ||
23202 SequenceMBBI->mayLoadOrStore() ||
23203 SequenceMBBI->usesCustomInsertionHook() ||
23204 TII.isFrameInstr(I: *SequenceMBBI) ||
23205 SequenceMBBI->isStackAligningInlineAsm())
23206 break;
23207 if (llvm::any_of(Range: SequenceMBBI->operands(), P: [&](MachineOperand &MO) {
23208 return MO.isReg() && MO.isUse() && SelectDests.count(V: MO.getReg());
23209 }))
23210 break;
23211 }
23212
23213 const BasicBlock *LLVM_BB = BB->getBasicBlock();
23214 DebugLoc DL = MI.getDebugLoc();
23215 MachineFunction::iterator I = ++BB->getIterator();
23216
23217 MachineBasicBlock *HeadMBB = BB;
23218 MachineFunction *F = BB->getParent();
23219 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
23220 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(BB: LLVM_BB);
23221
23222 F->insert(MBBI: I, MBB: IfFalseMBB);
23223 F->insert(MBBI: I, MBB: TailMBB);
23224
23225 // Set the call frame size on entry to the new basic blocks.
23226 unsigned CallFrameSize = TII.getCallFrameSizeAt(MI&: *LastSelectPseudo);
23227 IfFalseMBB->setCallFrameSize(CallFrameSize);
23228 TailMBB->setCallFrameSize(CallFrameSize);
23229
23230 // Transfer debug instructions associated with the selects to TailMBB.
23231 for (MachineInstr *DebugInstr : SelectDebugValues) {
23232 TailMBB->push_back(MI: DebugInstr->removeFromParent());
23233 }
23234
23235 // Move all instructions after the sequence to TailMBB.
23236 TailMBB->splice(Where: TailMBB->end(), Other: HeadMBB,
23237 From: std::next(x: LastSelectPseudo->getIterator()), To: HeadMBB->end());
23238 // Update machine-CFG edges by transferring all successors of the current
23239 // block to the new block which will contain the Phi nodes for the selects.
23240 TailMBB->transferSuccessorsAndUpdatePHIs(FromMBB: HeadMBB);
23241 // Set the successors for HeadMBB.
23242 HeadMBB->addSuccessor(Succ: IfFalseMBB);
23243 HeadMBB->addSuccessor(Succ: TailMBB);
23244
23245 // Insert appropriate branch.
23246 if (MI.getOperand(i: 2).isImm())
23247 BuildMI(BB: HeadMBB, MIMD: DL, MCID: TII.get(Opcode: RISCVCC::getBrCond(CC, SelectOpc: MI.getOpcode())))
23248 .addReg(RegNo: LHS)
23249 .addImm(Val: MI.getOperand(i: 2).getImm())
23250 .addMBB(MBB: TailMBB);
23251 else
23252 BuildMI(BB: HeadMBB, MIMD: DL, MCID: TII.get(Opcode: RISCVCC::getBrCond(CC, SelectOpc: MI.getOpcode())))
23253 .addReg(RegNo: LHS)
23254 .addReg(RegNo: RHS)
23255 .addMBB(MBB: TailMBB);
23256
23257 // IfFalseMBB just falls through to TailMBB.
23258 IfFalseMBB->addSuccessor(Succ: TailMBB);
23259
23260 // Create PHIs for all of the select pseudo-instructions.
23261 auto SelectMBBI = MI.getIterator();
23262 auto SelectEnd = std::next(x: LastSelectPseudo->getIterator());
23263 auto InsertionPoint = TailMBB->begin();
23264 while (SelectMBBI != SelectEnd) {
23265 auto Next = std::next(x: SelectMBBI);
23266 if (RISCVInstrInfo::isSelectPseudo(MI: *SelectMBBI)) {
23267 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
23268 BuildMI(BB&: *TailMBB, I: InsertionPoint, MIMD: SelectMBBI->getDebugLoc(),
23269 MCID: TII.get(Opcode: RISCV::PHI), DestReg: SelectMBBI->getOperand(i: 0).getReg())
23270 .addReg(RegNo: SelectMBBI->getOperand(i: 4).getReg())
23271 .addMBB(MBB: HeadMBB)
23272 .addReg(RegNo: SelectMBBI->getOperand(i: 5).getReg())
23273 .addMBB(MBB: IfFalseMBB);
23274 SelectMBBI->eraseFromParent();
23275 }
23276 SelectMBBI = Next;
23277 }
23278
23279 F->getProperties().resetNoPHIs();
23280 return TailMBB;
23281}
23282
23283// Helper to find Masked Pseudo instruction from MC instruction, LMUL and SEW.
23284static const RISCV::RISCVMaskedPseudoInfo *
23285lookupMaskedIntrinsic(uint16_t MCOpcode, RISCVVType::VLMUL LMul, unsigned SEW) {
23286 const RISCVVInversePseudosTable::PseudoInfo *Inverse =
23287 RISCVVInversePseudosTable::getBaseInfo(BaseInstr: MCOpcode, VLMul: LMul, SEW);
23288 assert(Inverse && "Unexpected LMUL and SEW pair for instruction");
23289 const RISCV::RISCVMaskedPseudoInfo *Masked =
23290 RISCV::lookupMaskedIntrinsicByUnmasked(UnmaskedPseudo: Inverse->Pseudo);
23291 assert(Masked && "Could not find masked instruction for LMUL and SEW pair");
23292 return Masked;
23293}
23294
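// Expand PseudoVFROUND_NOEXCEPT_*_MASK while leaving FFLAGS unchanged: save
// FFLAGS, convert to integer and back under the dynamic rounding mode, then
// restore FFLAGS. Illustrative sketch (masked pseudos; most operands elided):
//   frflags t0
//   vfcvt.x.f.v v<tmp>, v<src>, v0.t
//   vfcvt.f.x.v v<dst>, v<tmp>, v0.t
//   fsflags t0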
23295static MachineBasicBlock *emitVFROUND_NOEXCEPT_MASK(MachineInstr &MI,
23296 MachineBasicBlock *BB,
23297 unsigned CVTXOpc) {
23298 DebugLoc DL = MI.getDebugLoc();
23299
23300 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
23301
23302 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
23303 Register SavedFFLAGS = MRI.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
23304
23305 // Save the old value of FFLAGS.
23306 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::ReadFFLAGS), DestReg: SavedFFLAGS);
23307
23308 assert(MI.getNumOperands() == 7);
23309
23310 // Emit a VFCVT_X_F
23311 const TargetRegisterInfo *TRI =
23312 BB->getParent()->getSubtarget().getRegisterInfo();
23313 const TargetRegisterClass *RC = MI.getRegClassConstraint(OpIdx: 0, TII: &TII, TRI);
23314 Register Tmp = MRI.createVirtualRegister(RegClass: RC);
23315 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: CVTXOpc), DestReg: Tmp)
23316 .add(MO: MI.getOperand(i: 1))
23317 .add(MO: MI.getOperand(i: 2))
23318 .add(MO: MI.getOperand(i: 3))
23319 .add(MO: MachineOperand::CreateImm(Val: 7)) // frm = DYN
23320 .add(MO: MI.getOperand(i: 4))
23321 .add(MO: MI.getOperand(i: 5))
23322 .add(MO: MI.getOperand(i: 6))
23323 .add(MO: MachineOperand::CreateReg(Reg: RISCV::FRM,
23324 /*IsDef*/ isDef: false,
23325 /*IsImp*/ isImp: true));
23326
23327 // Emit a VFCVT_F_X
23328 RISCVVType::VLMUL LMul = RISCVII::getLMul(TSFlags: MI.getDesc().TSFlags);
23329 unsigned Log2SEW = MI.getOperand(i: RISCVII::getSEWOpNum(Desc: MI.getDesc())).getImm();
23330 // There is no E8 variant for VFCVT_F_X.
23331 assert(Log2SEW >= 4);
23332 unsigned CVTFOpc =
23333 lookupMaskedIntrinsic(MCOpcode: RISCV::VFCVT_F_X_V, LMul, SEW: 1 << Log2SEW)
23334 ->MaskedPseudo;
23335
23336 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: CVTFOpc))
23337 .add(MO: MI.getOperand(i: 0))
23338 .add(MO: MI.getOperand(i: 1))
23339 .addReg(RegNo: Tmp)
23340 .add(MO: MI.getOperand(i: 3))
23341 .add(MO: MachineOperand::CreateImm(Val: 7)) // frm = DYN
23342 .add(MO: MI.getOperand(i: 4))
23343 .add(MO: MI.getOperand(i: 5))
23344 .add(MO: MI.getOperand(i: 6))
23345 .add(MO: MachineOperand::CreateReg(Reg: RISCV::FRM,
23346 /*IsDef*/ isDef: false,
23347 /*IsImp*/ isImp: true));
23348
23349 // Restore FFLAGS.
23350 BuildMI(BB&: *BB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: RISCV::WriteFFLAGS))
23351 .addReg(RegNo: SavedFFLAGS, Flags: RegState::Kill);
23352
23353 // Erase the pseudoinstruction.
23354 MI.eraseFromParent();
23355 return BB;
23356}
23357
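// Expand PseudoFROUND_*: if |src| is not less than the "max" operand (which
// also covers NaN inputs), return src unchanged; otherwise round it with an
// FP->int->FP round trip using the requested rounding mode and restore the
// original sign. Illustrative sketch for PseudoFROUND_S (register names are
// placeholders):
//   fsgnjx.s f<abs>, f<src>, f<src>   # f<abs> = |f<src>|
//   flt.s    x<cmp>, f<abs>, f<max>
//   beq      x<cmp>, zero, DoneMBB
// CvtMBB:
//   fcvt.w.s x<int>, f<src>, <frm>
//   fcvt.s.w f<cvt>, x<int>, <frm>
//   fsgnj.s  f<res>, f<cvt>, f<src>   # restore the sign of f<src>
// DoneMBB:
//   <dst> = PHI [ f<src>, MBB ], [ f<res>, CvtMBB ]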
23358static MachineBasicBlock *emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB,
23359 const RISCVSubtarget &Subtarget) {
23360 unsigned CmpOpc, F2IOpc, I2FOpc, FSGNJOpc, FSGNJXOpc;
23361 const TargetRegisterClass *RC;
23362 switch (MI.getOpcode()) {
23363 default:
23364 llvm_unreachable("Unexpected opcode");
23365 case RISCV::PseudoFROUND_H:
23366 CmpOpc = RISCV::FLT_H;
23367 F2IOpc = RISCV::FCVT_W_H;
23368 I2FOpc = RISCV::FCVT_H_W;
23369 FSGNJOpc = RISCV::FSGNJ_H;
23370 FSGNJXOpc = RISCV::FSGNJX_H;
23371 RC = &RISCV::FPR16RegClass;
23372 break;
23373 case RISCV::PseudoFROUND_H_INX:
23374 CmpOpc = RISCV::FLT_H_INX;
23375 F2IOpc = RISCV::FCVT_W_H_INX;
23376 I2FOpc = RISCV::FCVT_H_W_INX;
23377 FSGNJOpc = RISCV::FSGNJ_H_INX;
23378 FSGNJXOpc = RISCV::FSGNJX_H_INX;
23379 RC = &RISCV::GPRF16RegClass;
23380 break;
23381 case RISCV::PseudoFROUND_S:
23382 CmpOpc = RISCV::FLT_S;
23383 F2IOpc = RISCV::FCVT_W_S;
23384 I2FOpc = RISCV::FCVT_S_W;
23385 FSGNJOpc = RISCV::FSGNJ_S;
23386 FSGNJXOpc = RISCV::FSGNJX_S;
23387 RC = &RISCV::FPR32RegClass;
23388 break;
23389 case RISCV::PseudoFROUND_S_INX:
23390 CmpOpc = RISCV::FLT_S_INX;
23391 F2IOpc = RISCV::FCVT_W_S_INX;
23392 I2FOpc = RISCV::FCVT_S_W_INX;
23393 FSGNJOpc = RISCV::FSGNJ_S_INX;
23394 FSGNJXOpc = RISCV::FSGNJX_S_INX;
23395 RC = &RISCV::GPRF32RegClass;
23396 break;
23397 case RISCV::PseudoFROUND_D:
23398 assert(Subtarget.is64Bit() && "Expected 64-bit GPR.");
23399 CmpOpc = RISCV::FLT_D;
23400 F2IOpc = RISCV::FCVT_L_D;
23401 I2FOpc = RISCV::FCVT_D_L;
23402 FSGNJOpc = RISCV::FSGNJ_D;
23403 FSGNJXOpc = RISCV::FSGNJX_D;
23404 RC = &RISCV::FPR64RegClass;
23405 break;
23406 case RISCV::PseudoFROUND_D_INX:
23407 assert(Subtarget.is64Bit() && "Expected 64-bit GPR.");
23408 CmpOpc = RISCV::FLT_D_INX;
23409 F2IOpc = RISCV::FCVT_L_D_INX;
23410 I2FOpc = RISCV::FCVT_D_L_INX;
23411 FSGNJOpc = RISCV::FSGNJ_D_INX;
23412 FSGNJXOpc = RISCV::FSGNJX_D_INX;
23413 RC = &RISCV::GPRRegClass;
23414 break;
23415 }
23416
23417 const BasicBlock *BB = MBB->getBasicBlock();
23418 DebugLoc DL = MI.getDebugLoc();
23419 MachineFunction::iterator I = ++MBB->getIterator();
23420
23421 MachineFunction *F = MBB->getParent();
23422 MachineBasicBlock *CvtMBB = F->CreateMachineBasicBlock(BB);
23423 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(BB);
23424
23425 F->insert(MBBI: I, MBB: CvtMBB);
23426 F->insert(MBBI: I, MBB: DoneMBB);
23427 // Move all instructions after the sequence to DoneMBB.
23428 DoneMBB->splice(Where: DoneMBB->end(), Other: MBB, From: MachineBasicBlock::iterator(MI),
23429 To: MBB->end());
23430 // Update machine-CFG edges by transferring all successors of the current
23431 // block to the new block which will contain the Phi nodes for the selects.
23432 DoneMBB->transferSuccessorsAndUpdatePHIs(FromMBB: MBB);
23433 // Set the successors for MBB.
23434 MBB->addSuccessor(Succ: CvtMBB);
23435 MBB->addSuccessor(Succ: DoneMBB);
23436
23437 Register DstReg = MI.getOperand(i: 0).getReg();
23438 Register SrcReg = MI.getOperand(i: 1).getReg();
23439 Register MaxReg = MI.getOperand(i: 2).getReg();
23440 int64_t FRM = MI.getOperand(i: 3).getImm();
23441
23442 const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
23443 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
23444
23445 Register FabsReg = MRI.createVirtualRegister(RegClass: RC);
23446 BuildMI(BB: MBB, MIMD: DL, MCID: TII.get(Opcode: FSGNJXOpc), DestReg: FabsReg).addReg(RegNo: SrcReg).addReg(RegNo: SrcReg);
23447
23448 // Compare the FP value to the max value.
23449 Register CmpReg = MRI.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
23450 auto MIB =
23451 BuildMI(BB: MBB, MIMD: DL, MCID: TII.get(Opcode: CmpOpc), DestReg: CmpReg).addReg(RegNo: FabsReg).addReg(RegNo: MaxReg);
23452 if (MI.getFlag(Flag: MachineInstr::MIFlag::NoFPExcept))
23453 MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
23454
23455 // Insert branch.
23456 BuildMI(BB: MBB, MIMD: DL, MCID: TII.get(Opcode: RISCV::BEQ))
23457 .addReg(RegNo: CmpReg)
23458 .addReg(RegNo: RISCV::X0)
23459 .addMBB(MBB: DoneMBB);
23460
23461 CvtMBB->addSuccessor(Succ: DoneMBB);
23462
23463 // Convert to integer.
23464 Register F2IReg = MRI.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
23465 MIB = BuildMI(BB: CvtMBB, MIMD: DL, MCID: TII.get(Opcode: F2IOpc), DestReg: F2IReg).addReg(RegNo: SrcReg).addImm(Val: FRM);
23466 if (MI.getFlag(Flag: MachineInstr::MIFlag::NoFPExcept))
23467 MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
23468
23469 // Convert back to FP.
23470 Register I2FReg = MRI.createVirtualRegister(RegClass: RC);
23471 MIB = BuildMI(BB: CvtMBB, MIMD: DL, MCID: TII.get(Opcode: I2FOpc), DestReg: I2FReg).addReg(RegNo: F2IReg).addImm(Val: FRM);
23472 if (MI.getFlag(Flag: MachineInstr::MIFlag::NoFPExcept))
23473 MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
23474
23475 // Restore the sign bit.
23476 Register CvtReg = MRI.createVirtualRegister(RegClass: RC);
23477 BuildMI(BB: CvtMBB, MIMD: DL, MCID: TII.get(Opcode: FSGNJOpc), DestReg: CvtReg).addReg(RegNo: I2FReg).addReg(RegNo: SrcReg);
23478
23479 // Merge the results.
23480 BuildMI(BB&: *DoneMBB, I: DoneMBB->begin(), MIMD: DL, MCID: TII.get(Opcode: RISCV::PHI), DestReg: DstReg)
23481 .addReg(RegNo: SrcReg)
23482 .addMBB(MBB)
23483 .addReg(RegNo: CvtReg)
23484 .addMBB(MBB: CvtMBB);
23485
23486 MI.eraseFromParent();
23487 return DoneMBB;
23488}
23489
23490MachineBasicBlock *
23491RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
23492 MachineBasicBlock *BB) const {
23493 switch (MI.getOpcode()) {
23494 default:
23495 llvm_unreachable("Unexpected instr type to insert");
23496 case RISCV::ReadCounterWide:
23497 assert(!Subtarget.is64Bit() &&
23498 "ReadCounterWide is only to be used on riscv32");
23499 return emitReadCounterWidePseudo(MI, BB);
23500 case RISCV::Select_GPR_Using_CC_GPR:
23501 case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
23502 case RISCV::Select_GPR_Using_CC_SImm5_CV:
23503 case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
23504 case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
23505 case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
23506 case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
23507 case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
23508 case RISCV::Select_GPR_Using_CC_UImm7_NDS:
23509 case RISCV::Select_FPR16_Using_CC_GPR:
23510 case RISCV::Select_FPR16INX_Using_CC_GPR:
23511 case RISCV::Select_FPR32_Using_CC_GPR:
23512 case RISCV::Select_FPR32INX_Using_CC_GPR:
23513 case RISCV::Select_FPR64_Using_CC_GPR:
23514 case RISCV::Select_FPR64INX_Using_CC_GPR:
23515 case RISCV::Select_FPR64IN32X_Using_CC_GPR:
23516 return emitSelectPseudo(MI, BB, Subtarget);
23517 case RISCV::BuildPairF64Pseudo:
23518 return emitBuildPairF64Pseudo(MI, BB, Subtarget);
23519 case RISCV::SplitF64Pseudo:
23520 return emitSplitF64Pseudo(MI, BB, Subtarget);
23521 case RISCV::PseudoQuietFLE_H:
23522 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLE_H, EqOpcode: RISCV::FEQ_H, Subtarget);
23523 case RISCV::PseudoQuietFLE_H_INX:
23524 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLE_H_INX, EqOpcode: RISCV::FEQ_H_INX, Subtarget);
23525 case RISCV::PseudoQuietFLT_H:
23526 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLT_H, EqOpcode: RISCV::FEQ_H, Subtarget);
23527 case RISCV::PseudoQuietFLT_H_INX:
23528 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLT_H_INX, EqOpcode: RISCV::FEQ_H_INX, Subtarget);
23529 case RISCV::PseudoQuietFLE_S:
23530 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLE_S, EqOpcode: RISCV::FEQ_S, Subtarget);
23531 case RISCV::PseudoQuietFLE_S_INX:
23532 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLE_S_INX, EqOpcode: RISCV::FEQ_S_INX, Subtarget);
23533 case RISCV::PseudoQuietFLT_S:
23534 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLT_S, EqOpcode: RISCV::FEQ_S, Subtarget);
23535 case RISCV::PseudoQuietFLT_S_INX:
23536 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLT_S_INX, EqOpcode: RISCV::FEQ_S_INX, Subtarget);
23537 case RISCV::PseudoQuietFLE_D:
23538 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLE_D, EqOpcode: RISCV::FEQ_D, Subtarget);
23539 case RISCV::PseudoQuietFLE_D_INX:
23540 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLE_D_INX, EqOpcode: RISCV::FEQ_D_INX, Subtarget);
23541 case RISCV::PseudoQuietFLE_D_IN32X:
23542 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLE_D_IN32X, EqOpcode: RISCV::FEQ_D_IN32X,
23543 Subtarget);
23544 case RISCV::PseudoQuietFLT_D:
23545 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLT_D, EqOpcode: RISCV::FEQ_D, Subtarget);
23546 case RISCV::PseudoQuietFLT_D_INX:
23547 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLT_D_INX, EqOpcode: RISCV::FEQ_D_INX, Subtarget);
23548 case RISCV::PseudoQuietFLT_D_IN32X:
23549 return emitQuietFCMP(MI, BB, RelOpcode: RISCV::FLT_D_IN32X, EqOpcode: RISCV::FEQ_D_IN32X,
23550 Subtarget);
23551
23552 case RISCV::PseudoVFROUND_NOEXCEPT_V_M1_MASK:
23553 return emitVFROUND_NOEXCEPT_MASK(MI, BB, CVTXOpc: RISCV::PseudoVFCVT_X_F_V_M1_MASK);
23554 case RISCV::PseudoVFROUND_NOEXCEPT_V_M2_MASK:
23555 return emitVFROUND_NOEXCEPT_MASK(MI, BB, CVTXOpc: RISCV::PseudoVFCVT_X_F_V_M2_MASK);
23556 case RISCV::PseudoVFROUND_NOEXCEPT_V_M4_MASK:
23557 return emitVFROUND_NOEXCEPT_MASK(MI, BB, CVTXOpc: RISCV::PseudoVFCVT_X_F_V_M4_MASK);
23558 case RISCV::PseudoVFROUND_NOEXCEPT_V_M8_MASK:
23559 return emitVFROUND_NOEXCEPT_MASK(MI, BB, CVTXOpc: RISCV::PseudoVFCVT_X_F_V_M8_MASK);
23560 case RISCV::PseudoVFROUND_NOEXCEPT_V_MF2_MASK:
23561 return emitVFROUND_NOEXCEPT_MASK(MI, BB, CVTXOpc: RISCV::PseudoVFCVT_X_F_V_MF2_MASK);
23562 case RISCV::PseudoVFROUND_NOEXCEPT_V_MF4_MASK:
23563 return emitVFROUND_NOEXCEPT_MASK(MI, BB, CVTXOpc: RISCV::PseudoVFCVT_X_F_V_MF4_MASK);
23564 case RISCV::PseudoFROUND_H:
23565 case RISCV::PseudoFROUND_H_INX:
23566 case RISCV::PseudoFROUND_S:
23567 case RISCV::PseudoFROUND_S_INX:
23568 case RISCV::PseudoFROUND_D:
23569 case RISCV::PseudoFROUND_D_INX:
23570 case RISCV::PseudoFROUND_D_IN32X:
23571 return emitFROUND(MI, MBB: BB, Subtarget);
23572 case RISCV::PROBED_STACKALLOC_DYN:
23573 return emitDynamicProbedAlloc(MI, MBB: BB);
23574 case TargetOpcode::STATEPOINT:
23575 // STATEPOINT is a pseudo instruction which has no implicit defs/uses,
23576 // while the jal call instruction (which the statepoint is eventually
23577 // lowered to) has an implicit def. This def is early-clobber as it is set
23578 // at the moment of the call, earlier than any use is read.
23579 // Add this implicit dead def here as a workaround.
23580 MI.addOperand(MF&: *MI.getMF(),
23581 Op: MachineOperand::CreateReg(
23582 Reg: RISCV::X1, /*isDef*/ true,
23583 /*isImp*/ true, /*isKill*/ false, /*isDead*/ true,
23584 /*isUndef*/ false, /*isEarlyClobber*/ true));
23585 [[fallthrough]];
23586 case TargetOpcode::STACKMAP:
23587 case TargetOpcode::PATCHPOINT:
23588 if (!Subtarget.is64Bit())
23589 reportFatalUsageError(reason: "STACKMAP, PATCHPOINT and STATEPOINT are only "
23590 "supported on 64-bit targets");
23591 return emitPatchPoint(MI, MBB: BB);
23592 }
23593}
23594
23595void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
23596 SDNode *Node) const {
23597 // If the instruction defines an FRM operand, conservatively mark it as
23598 // non-dead to express the data dependency with FRM users and prevent
23599 // incorrect instruction reordering.
23600 if (auto *FRMDef = MI.findRegisterDefOperand(Reg: RISCV::FRM, /*TRI=*/nullptr)) {
23601 FRMDef->setIsDead(false);
23602 return;
23603 }
23604 // Add FRM dependency to any instructions with dynamic rounding mode.
23605 int Idx = RISCV::getNamedOperandIdx(Opcode: MI.getOpcode(), Name: RISCV::OpName::frm);
23606 if (Idx < 0) {
23607 // Vector pseudos have FRM index indicated by TSFlags.
23608 Idx = RISCVII::getFRMOpNum(Desc: MI.getDesc());
23609 if (Idx < 0)
23610 return;
23611 }
23612 if (MI.getOperand(i: Idx).getImm() != RISCVFPRndMode::DYN)
23613 return;
23614 // If the instruction already reads FRM, don't add another read.
23615 if (MI.readsRegister(Reg: RISCV::FRM, /*TRI=*/nullptr))
23616 return;
23617 MI.addOperand(
23618 Op: MachineOperand::CreateReg(Reg: RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
23619}
23620
23621void RISCVTargetLowering::analyzeInputArgs(
23622 MachineFunction &MF, CCState &CCInfo,
23623 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
23624 RISCVCCAssignFn Fn) const {
23625 for (const auto &[Idx, In] : enumerate(First: Ins)) {
23626 MVT ArgVT = In.VT;
23627 ISD::ArgFlagsTy ArgFlags = In.Flags;
23628
23629 if (Fn(Idx, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo, IsRet,
23630 In.OrigTy)) {
23631 LLVM_DEBUG(dbgs() << "InputArg #" << Idx << " has unhandled type "
23632 << ArgVT << '\n');
23633 llvm_unreachable(nullptr);
23634 }
23635 }
23636}
23637
23638void RISCVTargetLowering::analyzeOutputArgs(
23639 MachineFunction &MF, CCState &CCInfo,
23640 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
23641 CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
23642 for (const auto &[Idx, Out] : enumerate(First: Outs)) {
23643 MVT ArgVT = Out.VT;
23644 ISD::ArgFlagsTy ArgFlags = Out.Flags;
23645
23646 if (Fn(Idx, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo, IsRet,
23647 Out.OrigTy)) {
23648 LLVM_DEBUG(dbgs() << "OutputArg #" << Idx << " has unhandled type "
23649 << ArgVT << "\n");
23650 llvm_unreachable(nullptr);
23651 }
23652 }
23653}
23654
23655 // Convert Val from the location VT to the value VT described by VA. Should
23656 // not be called for CCValAssign::Indirect values.
23657static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
23658 const CCValAssign &VA, const SDLoc &DL,
23659 const RISCVSubtarget &Subtarget) {
23660 if (VA.needsCustom()) {
23661 if (VA.getLocVT().isInteger() &&
23662 (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
23663 return DAG.getNode(Opcode: RISCVISD::FMV_H_X, DL, VT: VA.getValVT(), Operand: Val);
23664 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
23665 return DAG.getNode(Opcode: RISCVISD::FMV_W_X_RV64, DL, VT: MVT::f32, Operand: Val);
23666 if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
23667 return convertFromScalableVector(VT: VA.getValVT(), V: Val, DAG, Subtarget);
23668 llvm_unreachable("Unexpected Custom handling.");
23669 }
23670
23671 switch (VA.getLocInfo()) {
23672 default:
23673 llvm_unreachable("Unexpected CCValAssign::LocInfo");
23674 case CCValAssign::Full:
23675 break;
23676 case CCValAssign::BCvt:
23677 Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: VA.getValVT(), Operand: Val);
23678 break;
23679 }
23680 return Val;
23681}
23682
23683// The caller is responsible for loading the full value if the argument is
23684// passed with CCValAssign::Indirect.
23685static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
23686 const CCValAssign &VA, const SDLoc &DL,
23687 const ISD::InputArg &In,
23688 const RISCVTargetLowering &TLI) {
23689 MachineFunction &MF = DAG.getMachineFunction();
23690 MachineRegisterInfo &RegInfo = MF.getRegInfo();
23691 EVT LocVT = VA.getLocVT();
23692 SDValue Val;
23693 const TargetRegisterClass *RC = TLI.getRegClassFor(VT: LocVT.getSimpleVT());
23694 Register VReg = RegInfo.createVirtualRegister(RegClass: RC);
23695 RegInfo.addLiveIn(Reg: VA.getLocReg(), vreg: VReg);
23696 Val = DAG.getCopyFromReg(Chain, dl: DL, Reg: VReg, VT: LocVT);
23697
23698 // If input is sign extended from 32 bits, note it for the SExtWRemoval pass.
23699 if (In.isOrigArg()) {
23700 Argument *OrigArg = MF.getFunction().getArg(i: In.getOrigArgIndex());
23701 if (OrigArg->getType()->isIntegerTy()) {
23702 unsigned BitWidth = OrigArg->getType()->getIntegerBitWidth();
23703 // An input zero extended from i31 or narrower can also be considered sign extended.
23704 if ((BitWidth <= 32 && In.Flags.isSExt()) ||
23705 (BitWidth < 32 && In.Flags.isZExt())) {
23706 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
23707 RVFI->addSExt32Register(Reg: VReg);
23708 }
23709 }
23710 }
23711
23712 if (VA.getLocInfo() == CCValAssign::Indirect)
23713 return Val;
23714
23715 return convertLocVTToValVT(DAG, Val, VA, DL, Subtarget: TLI.getSubtarget());
23716}
23717
23718static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
23719 const CCValAssign &VA, const SDLoc &DL,
23720 const RISCVSubtarget &Subtarget) {
23721 EVT LocVT = VA.getLocVT();
23722
23723 if (VA.needsCustom()) {
23724 if (LocVT.isInteger() &&
23725 (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
23726 return DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTH, DL, VT: LocVT, Operand: Val);
23727 if (LocVT == MVT::i64 && VA.getValVT() == MVT::f32)
23728 return DAG.getNode(Opcode: RISCVISD::FMV_X_ANYEXTW_RV64, DL, VT: MVT::i64, Operand: Val);
23729 if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
23730 return convertToScalableVector(VT: LocVT, V: Val, DAG, Subtarget);
23731 llvm_unreachable("Unexpected Custom handling.");
23732 }
23733
23734 switch (VA.getLocInfo()) {
23735 default:
23736 llvm_unreachable("Unexpected CCValAssign::LocInfo");
23737 case CCValAssign::Full:
23738 break;
23739 case CCValAssign::BCvt:
23740 Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: LocVT, Operand: Val);
23741 break;
23742 }
23743 return Val;
23744}
23745
23746// The caller is responsible for loading the full value if the argument is
23747// passed with CCValAssign::Indirect.
23748static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
23749 const CCValAssign &VA, const SDLoc &DL,
23750 const RISCVTargetLowering &TLI) {
23751 MachineFunction &MF = DAG.getMachineFunction();
23752 MachineFrameInfo &MFI = MF.getFrameInfo();
23753 EVT LocVT = VA.getLocVT();
23754 EVT PtrVT = MVT::getIntegerVT(BitWidth: DAG.getDataLayout().getPointerSizeInBits(AS: 0));
23755 int FI = MFI.CreateFixedObject(Size: LocVT.getStoreSize(), SPOffset: VA.getLocMemOffset(),
23756 /*IsImmutable=*/true);
23757 SDValue FIN = DAG.getFrameIndex(FI, VT: PtrVT);
23758 SDValue Val = DAG.getLoad(
23759 VT: LocVT, dl: DL, Chain, Ptr: FIN,
23760 PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI));
23761
23762 if (VA.getLocInfo() == CCValAssign::Indirect)
23763 return Val;
23764
23765 return convertLocVTToValVT(DAG, Val, VA, DL, Subtarget: TLI.getSubtarget());
23766}
23767
23768static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
23769 const CCValAssign &VA,
23770 const CCValAssign &HiVA,
23771 const SDLoc &DL) {
23772 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
23773 "Unexpected VA");
23774 MachineFunction &MF = DAG.getMachineFunction();
23775 MachineFrameInfo &MFI = MF.getFrameInfo();
23776 MachineRegisterInfo &RegInfo = MF.getRegInfo();
23777
23778 assert(VA.isRegLoc() && "Expected register VA assignment");
23779
23780 Register LoVReg = RegInfo.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
23781 RegInfo.addLiveIn(Reg: VA.getLocReg(), vreg: LoVReg);
23782 SDValue Lo = DAG.getCopyFromReg(Chain, dl: DL, Reg: LoVReg, VT: MVT::i32);
23783 SDValue Hi;
23784 if (HiVA.isMemLoc()) {
23785 // Second half of f64 is passed on the stack.
23786 int FI = MFI.CreateFixedObject(Size: 4, SPOffset: HiVA.getLocMemOffset(),
23787 /*IsImmutable=*/true);
23788 SDValue FIN = DAG.getFrameIndex(FI, VT: MVT::i32);
23789 Hi = DAG.getLoad(VT: MVT::i32, dl: DL, Chain, Ptr: FIN,
23790 PtrInfo: MachinePointerInfo::getFixedStack(MF, FI));
23791 } else {
23792 // Second half of f64 is passed in another GPR.
23793 Register HiVReg = RegInfo.createVirtualRegister(RegClass: &RISCV::GPRRegClass);
23794 RegInfo.addLiveIn(Reg: HiVA.getLocReg(), vreg: HiVReg);
23795 Hi = DAG.getCopyFromReg(Chain, dl: DL, Reg: HiVReg, VT: MVT::i32);
23796 }
23797
23798 // For big-endian, swap the order of Lo and Hi when building the pair.
23799 const RISCVSubtarget &Subtarget = DAG.getSubtarget<RISCVSubtarget>();
23800 if (!Subtarget.isLittleEndian())
23801 std::swap(a&: Lo, b&: Hi);
23802
23803 return DAG.getNode(Opcode: RISCVISD::BuildPairF64, DL, VT: MVT::f64, N1: Lo, N2: Hi);
23804}
23805
23806// Transform physical registers into virtual registers.
23807SDValue RISCVTargetLowering::LowerFormalArguments(
23808 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
23809 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
23810 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
23811
23812 MachineFunction &MF = DAG.getMachineFunction();
23813
23814 switch (CallConv) {
23815 default:
23816 reportFatalUsageError(reason: "Unsupported calling convention");
23817 case CallingConv::C:
23818 case CallingConv::Fast:
23819 case CallingConv::SPIR_KERNEL:
23820 case CallingConv::PreserveMost:
23821 case CallingConv::GRAAL:
23822 case CallingConv::RISCV_VectorCall:
23823#define CC_VLS_CASE(ABI_VLEN) case CallingConv::RISCV_VLSCall_##ABI_VLEN:
23824 CC_VLS_CASE(32)
23825 CC_VLS_CASE(64)
23826 CC_VLS_CASE(128)
23827 CC_VLS_CASE(256)
23828 CC_VLS_CASE(512)
23829 CC_VLS_CASE(1024)
23830 CC_VLS_CASE(2048)
23831 CC_VLS_CASE(4096)
23832 CC_VLS_CASE(8192)
23833 CC_VLS_CASE(16384)
23834 CC_VLS_CASE(32768)
23835 CC_VLS_CASE(65536)
23836#undef CC_VLS_CASE
23837 break;
23838 case CallingConv::GHC:
23839 if (Subtarget.hasStdExtE())
23840 reportFatalUsageError(reason: "GHC calling convention is not supported on RVE!");
23841 if (!Subtarget.hasStdExtFOrZfinx() || !Subtarget.hasStdExtDOrZdinx())
23842 reportFatalUsageError(reason: "GHC calling convention requires the (Zfinx/F) and "
23843 "(Zdinx/D) instruction set extensions");
23844 }
23845
23846 const Function &Func = MF.getFunction();
23847 if (Func.hasFnAttribute(Kind: "interrupt")) {
23848 if (!Func.arg_empty())
23849 reportFatalUsageError(
23850 reason: "Functions with the interrupt attribute cannot have arguments!");
23851
23852 StringRef Kind =
23853 MF.getFunction().getFnAttribute(Kind: "interrupt").getValueAsString();
23854
23855 constexpr StringLiteral SupportedInterruptKinds[] = {
23856 "machine",
23857 "supervisor",
23858 "rnmi",
23859 "qci-nest",
23860 "qci-nonest",
23861 "SiFive-CLIC-preemptible",
23862 "SiFive-CLIC-stack-swap",
23863 "SiFive-CLIC-preemptible-stack-swap",
23864 };
23865 if (!llvm::is_contained(Range: SupportedInterruptKinds, Element: Kind))
23866 reportFatalUsageError(
23867 reason: "Function interrupt attribute argument not supported!");
23868
23869 if (Kind.starts_with(Prefix: "qci-") && !Subtarget.hasVendorXqciint())
23870 reportFatalUsageError(
23871 reason: "'qci-*' interrupt kinds require Xqciint extension");
23872
23873 if (Kind.starts_with(Prefix: "SiFive-CLIC-") && !Subtarget.hasVendorXSfmclic())
23874 reportFatalUsageError(
23875 reason: "'SiFive-CLIC-*' interrupt kinds require XSfmclic extension");
23876
23877 if (Kind == "rnmi" && !Subtarget.hasStdExtSmrnmi())
23878 reportFatalUsageError(reason: "'rnmi' interrupt kind requires Srnmi extension");
23879 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
23880 if (Kind.starts_with(Prefix: "SiFive-CLIC-preemptible") && TFI->hasFP(MF))
23881 reportFatalUsageError(reason: "'SiFive-CLIC-preemptible' interrupt kinds cannot "
23882 "have a frame pointer");
23883 }
23884
23885 EVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
23886 MVT XLenVT = Subtarget.getXLenVT();
23887 unsigned XLenInBytes = Subtarget.getXLen() / 8;
23888 // Used with varargs to accumulate store chains.
23889 std::vector<SDValue> OutChains;
23890
23891 // Assign locations to all of the incoming arguments.
23892 SmallVector<CCValAssign, 16> ArgLocs;
23893 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
23894
23895 if (CallConv == CallingConv::GHC)
23896 CCInfo.AnalyzeFormalArguments(Ins, Fn: CC_RISCV_GHC);
23897 else
23898 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
23899 Fn: CallConv == CallingConv::Fast ? CC_RISCV_FastCC
23900 : CC_RISCV);
23901
23902 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
23903 CCValAssign &VA = ArgLocs[i];
23904 SDValue ArgValue;
23905 // Passing f64 on RV32D with a soft float ABI must be handled as a special
23906 // case.
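// For example (illustrative): the low half of the f64 arrives in a GPR and the
// high half arrives either in the next GPR or on the stack;
// unpackF64OnRV32DSoftABI reassembles the two halves with BuildPairF64.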
23907 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
23908 assert(VA.needsCustom());
23909 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, HiVA: ArgLocs[++i], DL);
23910 } else if (VA.isRegLoc())
23911 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, In: Ins[InsIdx], TLI: *this);
23912 else
23913 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL, TLI: *this);
23914
23915 if (VA.getLocInfo() == CCValAssign::Indirect) {
23916 // If the original argument was split and passed by reference (e.g. i128
23917 // on RV32), we need to load all parts of it here (using the same
23918 // address). Vectors may be partly split to registers and partly to the
23919 // stack, in which case the base address is partly offset and subsequent
23920 // loads are relative to that.
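// For example (illustrative): an i128 argument on RV32 is passed indirectly;
// its four i32 parts are loaded from the incoming pointer at offsets 0, 4, 8
// and 12.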
23921 InVals.push_back(Elt: DAG.getLoad(VT: VA.getValVT(), dl: DL, Chain, Ptr: ArgValue,
23922 PtrInfo: MachinePointerInfo()));
23923 unsigned ArgIndex = Ins[InsIdx].OrigArgIndex;
23924 unsigned ArgPartOffset = Ins[InsIdx].PartOffset;
23925 assert(VA.getValVT().isVector() || ArgPartOffset == 0);
23926 while (i + 1 != e && Ins[InsIdx + 1].OrigArgIndex == ArgIndex) {
23927 CCValAssign &PartVA = ArgLocs[i + 1];
23928 unsigned PartOffset = Ins[InsIdx + 1].PartOffset - ArgPartOffset;
23929 SDValue Offset = DAG.getIntPtrConstant(Val: PartOffset, DL);
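        // For scalable vector parts, PartOffset is a minimum byte offset
        // (i.e. it assumes vscale == 1), so scale it by vscale to form the
        // actual byte offset from the base address.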
23930 if (PartVA.getValVT().isScalableVector())
23931 Offset = DAG.getNode(Opcode: ISD::VSCALE, DL, VT: XLenVT, Operand: Offset);
23932 SDValue Address = DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: ArgValue, N2: Offset);
23933 InVals.push_back(Elt: DAG.getLoad(VT: PartVA.getValVT(), dl: DL, Chain, Ptr: Address,
23934 PtrInfo: MachinePointerInfo()));
23935 ++i;
23936 ++InsIdx;
23937 }
23938 continue;
23939 }
23940 InVals.push_back(Elt: ArgValue);
23941 }
23942
23943 if (any_of(Range&: ArgLocs,
23944 P: [](CCValAssign &VA) { return VA.getLocVT().isScalableVector(); }))
23945 MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
23946
23947 if (IsVarArg) {
23948 ArrayRef<MCPhysReg> ArgRegs = RISCV::getArgGPRs(ABI: Subtarget.getTargetABI());
23949 unsigned Idx = CCInfo.getFirstUnallocated(Regs: ArgRegs);
23950 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
23951 MachineFrameInfo &MFI = MF.getFrameInfo();
23952 MachineRegisterInfo &RegInfo = MF.getRegInfo();
23953 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
23954
23955 // Size of the vararg save area. For now, the varargs save area is either
23956 // zero or large enough to hold a0-a7.
23957 int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
23958 int FI;
23959
23960 // If all registers are allocated, then all varargs must be passed on the
23961 // stack and we don't need to save any argregs.
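    // va_start should then point at the first vararg passed on the stack,
    // which lives at the current argument stack size offset.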
23962 if (VarArgsSaveSize == 0) {
23963 int VaArgOffset = CCInfo.getStackSize();
23964 FI = MFI.CreateFixedObject(Size: XLenInBytes, SPOffset: VaArgOffset, IsImmutable: true);
23965 } else {
23966 int VaArgOffset = -VarArgsSaveSize;
23967 FI = MFI.CreateFixedObject(Size: VarArgsSaveSize, SPOffset: VaArgOffset, IsImmutable: true);
23968
23969       // If saving an odd number of registers, then create an extra stack slot
23970       // to ensure that the frame pointer is 2*XLEN-aligned, which in turn
23971       // ensures offsets to even-numbered registers remain 2*XLEN-aligned.
23972 if (Idx % 2) {
23973 MFI.CreateFixedObject(
23974 Size: XLenInBytes, SPOffset: VaArgOffset - static_cast<int>(XLenInBytes), IsImmutable: true);
23975 VarArgsSaveSize += XLenInBytes;
23976 }
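      // For example, on RV32 with the standard (non-E) ABI and Idx == 3,
      // a3-a7 are saved (5 registers, 20 bytes); the extra 4-byte slot pads
      // the save area to 24 bytes so it stays 2*XLEN (8-byte) aligned.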
23977
23978 SDValue FIN = DAG.getFrameIndex(FI, VT: PtrVT);
23979
23980 // Copy the integer registers that may have been used for passing varargs
23981 // to the vararg save area.
23982 for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
23983 const Register Reg = RegInfo.createVirtualRegister(RegClass: RC);
23984 RegInfo.addLiveIn(Reg: ArgRegs[I], vreg: Reg);
23985 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl: DL, Reg, VT: XLenVT);
23986 SDValue Store = DAG.getStore(
23987 Chain, dl: DL, Val: ArgValue, Ptr: FIN,
23988 PtrInfo: MachinePointerInfo::getFixedStack(MF, FI, Offset: (I - Idx) * XLenInBytes));
23989 OutChains.push_back(x: Store);
23990 FIN =
23991 DAG.getMemBasePlusOffset(Base: FIN, Offset: TypeSize::getFixed(ExactSize: XLenInBytes), DL);
23992 }
23993 }
23994
23995     // Record the frame index of the first variable argument,
23996     // which is needed when lowering VASTART.
23997 RVFI->setVarArgsFrameIndex(FI);
23998 RVFI->setVarArgsSaveSize(VarArgsSaveSize);
23999 }
24000
24001 // All stores are grouped in one node to allow the matching between
24002 // the size of Ins and InVals. This only happens for vararg functions.
24003 if (!OutChains.empty()) {
24004 OutChains.push_back(x: Chain);
24005 Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: OutChains);
24006 }
24007
24008 return Chain;
24009}
24010
24011/// isEligibleForTailCallOptimization - Check whether the call is eligible
24012/// for tail call optimization.
24013/// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
24014bool RISCVTargetLowering::isEligibleForTailCallOptimization(
24015 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
24016 const SmallVector<CCValAssign, 16> &ArgLocs) const {
24017
24018 auto CalleeCC = CLI.CallConv;
24019 auto &Outs = CLI.Outs;
24020 auto &Caller = MF.getFunction();
24021 auto CallerCC = Caller.getCallingConv();
24022
24023 // Exception-handling functions need a special set of instructions to
24024 // indicate a return to the hardware. Tail-calling another function would
24025 // probably break this.
24026 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
24027 // should be expanded as new function attributes are introduced.
24028 if (Caller.hasFnAttribute(Kind: "interrupt"))
24029 return false;
24030
24031 // Do not tail call opt if the stack is used to pass parameters.
24032 if (CCInfo.getStackSize() != 0)
24033 return false;
24034
24035   // Do not tail call opt if any parameters need to be passed indirectly.
24036   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
24037   // passed indirectly. So the address of the value will be passed in a
24038   // register, or if not available, then the address is put on the stack. In
24039   // order to pass indirectly, space on the stack often needs to be allocated
24040   // in order to store the value. In this case the CCInfo.getStackSize() != 0
24041   // check is not enough and we need to check if any of the CCValAssigns in
24042   // ArgLocs are marked CCValAssign::Indirect.
24043 for (auto &VA : ArgLocs)
24044 if (VA.getLocInfo() == CCValAssign::Indirect)
24045 return false;
24046
24047 // Do not tail call opt if either caller or callee uses struct return
24048 // semantics.
24049 auto IsCallerStructRet = Caller.hasStructRetAttr();
24050 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
24051 if (IsCallerStructRet || IsCalleeStructRet)
24052 return false;
24053
24054 // The callee has to preserve all registers the caller needs to preserve.
24055 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
24056 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
24057 if (CalleeCC != CallerCC) {
24058 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
24059 if (!TRI->regmaskSubsetEqual(mask0: CallerPreserved, mask1: CalleePreserved))
24060 return false;
24061 }
24062
24063 // Byval parameters hand the function a pointer directly into the stack area
24064 // we want to reuse during a tail call. Working around this *is* possible
24065 // but less efficient and uglier in LowerCall.
24066 for (auto &Arg : Outs)
24067 if (Arg.Flags.isByVal())
24068 return false;
24069
24070 return true;
24071}
24072
24073static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
24074 return DAG.getDataLayout().getPrefTypeAlign(
24075 Ty: VT.getTypeForEVT(Context&: *DAG.getContext()));
24076}
24077
24078// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
24079// and output parameter nodes.
24080SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
24081 SmallVectorImpl<SDValue> &InVals) const {
24082 SelectionDAG &DAG = CLI.DAG;
24083 SDLoc &DL = CLI.DL;
24084 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
24085 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
24086 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
24087 SDValue Chain = CLI.Chain;
24088 SDValue Callee = CLI.Callee;
24089 bool &IsTailCall = CLI.IsTailCall;
24090 CallingConv::ID CallConv = CLI.CallConv;
24091 bool IsVarArg = CLI.IsVarArg;
24092 EVT PtrVT = getPointerTy(DL: DAG.getDataLayout());
24093 MVT XLenVT = Subtarget.getXLenVT();
24094 const CallBase *CB = CLI.CB;
24095
24096 MachineFunction &MF = DAG.getMachineFunction();
24097 MachineFunction::CallSiteInfo CSInfo;
24098
24099 // Set type id for call site info.
24100 if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
24101 CSInfo = MachineFunction::CallSiteInfo(*CB);
24102
24103 // Analyze the operands of the call, assigning locations to each operand.
24104 SmallVector<CCValAssign, 16> ArgLocs;
24105 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
24106
24107 if (CallConv == CallingConv::GHC) {
24108 if (Subtarget.hasStdExtE())
24109 reportFatalUsageError(reason: "GHC calling convention is not supported on RVE!");
24110 ArgCCInfo.AnalyzeCallOperands(Outs, Fn: CC_RISCV_GHC);
24111 } else
24112 analyzeOutputArgs(MF, CCInfo&: ArgCCInfo, Outs, /*IsRet=*/false, CLI: &CLI,
24113 Fn: CallConv == CallingConv::Fast ? CC_RISCV_FastCC
24114 : CC_RISCV);
24115
24116 // Check if it's really possible to do a tail call.
24117 if (IsTailCall)
24118 IsTailCall = isEligibleForTailCallOptimization(CCInfo&: ArgCCInfo, CLI, MF, ArgLocs);
24119
24120 if (IsTailCall)
24121 ++NumTailCalls;
24122 else if (CLI.CB && CLI.CB->isMustTailCall())
24123 reportFatalInternalError(reason: "failed to perform tail call elimination on a "
24124 "call site marked musttail");
24125
24126 // Get a count of how many bytes are to be pushed on the stack.
24127 unsigned NumBytes = ArgCCInfo.getStackSize();
24128
24129 // Create local copies for byval args
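  // Byval arguments are passed by pointer but have copy semantics: make a
  // local copy on the stack here and pass the address of the copy, so the
  // callee cannot modify the caller's original object.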
24130 SmallVector<SDValue, 8> ByValArgs;
24131 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
24132 ISD::ArgFlagsTy Flags = Outs[i].Flags;
24133 if (!Flags.isByVal())
24134 continue;
24135
24136 SDValue Arg = OutVals[i];
24137 unsigned Size = Flags.getByValSize();
24138 Align Alignment = Flags.getNonZeroByValAlign();
24139
24140 int FI =
24141 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/isSpillSlot: false);
24142 SDValue FIPtr = DAG.getFrameIndex(FI, VT: getPointerTy(DL: DAG.getDataLayout()));
24143 SDValue SizeNode = DAG.getConstant(Val: Size, DL, VT: XLenVT);
24144
24145 Chain = DAG.getMemcpy(Chain, dl: DL, Dst: FIPtr, Src: Arg, Size: SizeNode, Alignment,
24146 /*IsVolatile=*/isVol: false,
24147 /*AlwaysInline=*/false, /*CI*/ nullptr, OverrideTailCall: IsTailCall,
24148 DstPtrInfo: MachinePointerInfo(), SrcPtrInfo: MachinePointerInfo());
24149 ByValArgs.push_back(Elt: FIPtr);
24150 }
24151
24152 if (!IsTailCall)
24153 Chain = DAG.getCALLSEQ_START(Chain, InSize: NumBytes, OutSize: 0, DL: CLI.DL);
24154
24155 // Copy argument values to their designated locations.
24156 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
24157 SmallVector<SDValue, 8> MemOpChains;
24158 SDValue StackPtr;
24159 for (unsigned i = 0, j = 0, e = ArgLocs.size(), OutIdx = 0; i != e;
24160 ++i, ++OutIdx) {
24161 CCValAssign &VA = ArgLocs[i];
24162 SDValue ArgValue = OutVals[OutIdx];
24163 ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
24164
24165 // Handle passing f64 on RV32D with a soft float ABI as a special case.
24166 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
24167 assert(VA.isRegLoc() && "Expected register VA assignment");
24168 assert(VA.needsCustom());
24169 SDValue SplitF64 = DAG.getNode(
24170 Opcode: RISCVISD::SplitF64, DL, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: ArgValue);
24171 SDValue Lo = SplitF64.getValue(R: 0);
24172 SDValue Hi = SplitF64.getValue(R: 1);
24173
24174 // For big-endian, swap the order of Lo and Hi when passing.
24175 if (!Subtarget.isLittleEndian())
24176 std::swap(a&: Lo, b&: Hi);
24177
24178 Register RegLo = VA.getLocReg();
24179 RegsToPass.push_back(Elt: std::make_pair(x&: RegLo, y&: Lo));
24180
24181 // Get the CCValAssign for the Hi part.
24182 CCValAssign &HiVA = ArgLocs[++i];
24183
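      // The high half was assigned either to another GPR or, when only one
      // argument register was left for this f64, to a stack slot.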
24184 if (HiVA.isMemLoc()) {
24185 // Second half of f64 is passed on the stack.
24186 if (!StackPtr.getNode())
24187 StackPtr = DAG.getCopyFromReg(Chain, dl: DL, Reg: RISCV::X2, VT: PtrVT);
24188 SDValue Address =
24189 DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: StackPtr,
24190 N2: DAG.getIntPtrConstant(Val: HiVA.getLocMemOffset(), DL));
24191 // Emit the store.
24192 MemOpChains.push_back(Elt: DAG.getStore(
24193 Chain, dl: DL, Val: Hi, Ptr: Address,
24194 PtrInfo: MachinePointerInfo::getStack(MF, Offset: HiVA.getLocMemOffset())));
24195 } else {
24196 // Second half of f64 is passed in another GPR.
24197 Register RegHigh = HiVA.getLocReg();
24198 RegsToPass.push_back(Elt: std::make_pair(x&: RegHigh, y&: Hi));
24199 }
24200 continue;
24201 }
24202
24203 // Promote the value if needed.
24204 // For now, only handle fully promoted and indirect arguments.
24205 if (VA.getLocInfo() == CCValAssign::Indirect) {
24206 // Store the argument in a stack slot and pass its address.
24207 Align StackAlign =
24208 std::max(a: getPrefTypeAlign(VT: Outs[OutIdx].ArgVT, DAG),
24209 b: getPrefTypeAlign(VT: ArgValue.getValueType(), DAG));
24210 TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
24211 // If the original argument was split (e.g. i128), we need
24212 // to store the required parts of it here (and pass just one address).
24213 // Vectors may be partly split to registers and partly to the stack, in
24214 // which case the base address is partly offset and subsequent stores are
24215 // relative to that.
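      // For example, an i128 argument on RV32 is split into four i32 parts;
      // all four are stored back-to-back into one stack temporary and only
      // that temporary's address is passed to the callee.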
24216 unsigned ArgIndex = Outs[OutIdx].OrigArgIndex;
24217 unsigned ArgPartOffset = Outs[OutIdx].PartOffset;
24218 assert(VA.getValVT().isVector() || ArgPartOffset == 0);
24219       // Calculate the total size to store. The total isn't known up front, so
24220       // walk the remaining parts of this argument, collecting their values and
24221       // offsets while accumulating the size and alignment.
24222 SmallVector<std::pair<SDValue, SDValue>> Parts;
24223 while (i + 1 != e && Outs[OutIdx + 1].OrigArgIndex == ArgIndex) {
24224 SDValue PartValue = OutVals[OutIdx + 1];
24225 unsigned PartOffset = Outs[OutIdx + 1].PartOffset - ArgPartOffset;
24226 SDValue Offset = DAG.getIntPtrConstant(Val: PartOffset, DL);
24227 EVT PartVT = PartValue.getValueType();
24228 if (PartVT.isScalableVector())
24229 Offset = DAG.getNode(Opcode: ISD::VSCALE, DL, VT: XLenVT, Operand: Offset);
24230 StoredSize += PartVT.getStoreSize();
24231 StackAlign = std::max(a: StackAlign, b: getPrefTypeAlign(VT: PartVT, DAG));
24232 Parts.push_back(Elt: std::make_pair(x&: PartValue, y&: Offset));
24233 ++i;
24234 ++OutIdx;
24235 }
24236 SDValue SpillSlot = DAG.CreateStackTemporary(Bytes: StoredSize, Alignment: StackAlign);
24237 int FI = cast<FrameIndexSDNode>(Val&: SpillSlot)->getIndex();
24238 MemOpChains.push_back(
24239 Elt: DAG.getStore(Chain, dl: DL, Val: ArgValue, Ptr: SpillSlot,
24240 PtrInfo: MachinePointerInfo::getFixedStack(MF, FI)));
24241 for (const auto &Part : Parts) {
24242 SDValue PartValue = Part.first;
24243 SDValue PartOffset = Part.second;
24244 SDValue Address =
24245 DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: SpillSlot, N2: PartOffset);
24246 MemOpChains.push_back(
24247 Elt: DAG.getStore(Chain, dl: DL, Val: PartValue, Ptr: Address,
24248 PtrInfo: MachinePointerInfo::getFixedStack(MF, FI)));
24249 }
24250 ArgValue = SpillSlot;
24251 } else {
24252 ArgValue = convertValVTToLocVT(DAG, Val: ArgValue, VA, DL, Subtarget);
24253 }
24254
24255 // Use local copy if it is a byval arg.
24256 if (Flags.isByVal())
24257 ArgValue = ByValArgs[j++];
24258
24259 if (VA.isRegLoc()) {
24260 // Queue up the argument copies and emit them at the end.
24261 RegsToPass.push_back(Elt: std::make_pair(x: VA.getLocReg(), y&: ArgValue));
24262
24263 const TargetOptions &Options = DAG.getTarget().Options;
24264 if (Options.EmitCallSiteInfo)
24265 CSInfo.ArgRegPairs.emplace_back(Args: VA.getLocReg(), Args&: i);
24266 } else {
24267 assert(VA.isMemLoc() && "Argument not register or memory");
24268 assert(!IsTailCall && "Tail call not allowed if stack is used "
24269 "for passing parameters");
24270
24271 // Work out the address of the stack slot.
24272 if (!StackPtr.getNode())
24273 StackPtr = DAG.getCopyFromReg(Chain, dl: DL, Reg: RISCV::X2, VT: PtrVT);
24274 SDValue Address =
24275 DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: StackPtr,
24276 N2: DAG.getIntPtrConstant(Val: VA.getLocMemOffset(), DL));
24277
24278 // Emit the store.
24279 MemOpChains.push_back(
24280 Elt: DAG.getStore(Chain, dl: DL, Val: ArgValue, Ptr: Address,
24281 PtrInfo: MachinePointerInfo::getStack(MF, Offset: VA.getLocMemOffset())));
24282 }
24283 }
24284
24285 // Join the stores, which are independent of one another.
24286 if (!MemOpChains.empty())
24287 Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: MemOpChains);
24288
24289 SDValue Glue;
24290
24291 // Build a sequence of copy-to-reg nodes, chained and glued together.
24292 for (auto &Reg : RegsToPass) {
24293 Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: Reg.first, N: Reg.second, Glue);
24294 Glue = Chain.getValue(R: 1);
24295 }
24296
24297   // Validate that none of the argument registers have been marked as
24298   // reserved; if any have, report an error. Do the same for the return address
24299   // if this is not a tail call.
24300 validateCCReservedRegs(Regs: RegsToPass, MF);
24301 if (!IsTailCall && MF.getSubtarget().isRegisterReservedByUser(R: RISCV::X1))
24302 MF.getFunction().getContext().diagnose(DI: DiagnosticInfoUnsupported{
24303 MF.getFunction(),
24304 "Return address register required, but has been reserved."});
24305
24306   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
24307   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
24308   // split it, and so that the direct call can be matched by PseudoCALL.
24309 bool CalleeIsLargeExternalSymbol = false;
24310 if (getTargetMachine().getCodeModel() == CodeModel::Large) {
24311 if (auto *S = dyn_cast<GlobalAddressSDNode>(Val&: Callee))
24312 Callee = getLargeGlobalAddress(N: S, DL, Ty: PtrVT, DAG);
24313 else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) {
24314 Callee = getLargeExternalSymbol(N: S, DL, Ty: PtrVT, DAG);
24315 CalleeIsLargeExternalSymbol = true;
24316 }
24317 } else if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) {
24318 const GlobalValue *GV = S->getGlobal();
24319 Callee = DAG.getTargetGlobalAddress(GV, DL, VT: PtrVT, offset: 0, TargetFlags: RISCVII::MO_CALL);
24320 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) {
24321 Callee = DAG.getTargetExternalSymbol(Sym: S->getSymbol(), VT: PtrVT, TargetFlags: RISCVII::MO_CALL);
24322 }
24323
24324 // The first call operand is the chain and the second is the target address.
24325 SmallVector<SDValue, 8> Ops;
24326 Ops.push_back(Elt: Chain);
24327 Ops.push_back(Elt: Callee);
24328
24329 // Add argument registers to the end of the list so that they are
24330 // known live into the call.
24331 for (auto &Reg : RegsToPass)
24332 Ops.push_back(Elt: DAG.getRegister(Reg: Reg.first, VT: Reg.second.getValueType()));
24333
24334 // Add a register mask operand representing the call-preserved registers.
24335 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
24336 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
24337 assert(Mask && "Missing call preserved mask for calling convention");
24338 Ops.push_back(Elt: DAG.getRegisterMask(RegMask: Mask));
24339
24340 // Glue the call to the argument copies, if any.
24341 if (Glue.getNode())
24342 Ops.push_back(Elt: Glue);
24343
24344 assert((!CLI.CFIType || CLI.CB->isIndirectCall()) &&
24345 "Unexpected CFI type for a direct call");
24346
24347 // Emit the call.
24348 SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue);
24349
24350   // Use a software-guarded branch for non-indirect calls under the large code
24351   // model. A tail call to an external symbol will have a null CLI.CB, so we
24352   // need another way to determine the call site type.
24353 bool NeedSWGuarded = false;
24354 if (getTargetMachine().getCodeModel() == CodeModel::Large &&
24355 Subtarget.hasStdExtZicfilp() &&
24356 ((CLI.CB && !CLI.CB->isIndirectCall()) || CalleeIsLargeExternalSymbol))
24357 NeedSWGuarded = true;
24358
24359 if (IsTailCall) {
24360 MF.getFrameInfo().setHasTailCall();
24361 unsigned CallOpc =
24362 NeedSWGuarded ? RISCVISD::SW_GUARDED_TAIL : RISCVISD::TAIL;
24363 SDValue Ret = DAG.getNode(Opcode: CallOpc, DL, VTList: NodeTys, Ops);
24364 if (CLI.CFIType)
24365 Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());
24366 DAG.addNoMergeSiteInfo(Node: Ret.getNode(), NoMerge: CLI.NoMerge);
24367 DAG.addCallSiteInfo(Node: Ret.getNode(), CallInfo: std::move(CSInfo));
24368 return Ret;
24369 }
24370
24371 unsigned CallOpc = NeedSWGuarded ? RISCVISD::SW_GUARDED_CALL : RISCVISD::CALL;
24372 Chain = DAG.getNode(Opcode: CallOpc, DL, VTList: NodeTys, Ops);
24373 if (CLI.CFIType)
24374 Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());
24375
24376 DAG.addNoMergeSiteInfo(Node: Chain.getNode(), NoMerge: CLI.NoMerge);
24377 DAG.addCallSiteInfo(Node: Chain.getNode(), CallInfo: std::move(CSInfo));
24378 Glue = Chain.getValue(R: 1);
24379
24380 // Mark the end of the call, which is glued to the call itself.
24381 Chain = DAG.getCALLSEQ_END(Chain, Size1: NumBytes, Size2: 0, Glue, DL);
24382 Glue = Chain.getValue(R: 1);
24383
24384 // Assign locations to each value returned by this call.
24385 SmallVector<CCValAssign, 16> RVLocs;
24386 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
24387 analyzeInputArgs(MF, CCInfo&: RetCCInfo, Ins, /*IsRet=*/true, Fn: CC_RISCV);
24388
24389 // Copy all of the result registers out of their specified physreg.
24390 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
24391 auto &VA = RVLocs[i];
24392 // Copy the value out
24393 SDValue RetValue =
24394 DAG.getCopyFromReg(Chain, dl: DL, Reg: VA.getLocReg(), VT: VA.getLocVT(), Glue);
24395 // Glue the RetValue to the end of the call sequence
24396 Chain = RetValue.getValue(R: 1);
24397 Glue = RetValue.getValue(R: 2);
24398
24399 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
24400 assert(VA.needsCustom());
24401 SDValue RetValue2 = DAG.getCopyFromReg(Chain, dl: DL, Reg: RVLocs[++i].getLocReg(),
24402 VT: MVT::i32, Glue);
24403 Chain = RetValue2.getValue(R: 1);
24404 Glue = RetValue2.getValue(R: 2);
24405
24406 // For big-endian, swap the order when building the pair.
24407 SDValue Lo = RetValue;
24408 SDValue Hi = RetValue2;
24409 if (!Subtarget.isLittleEndian())
24410 std::swap(a&: Lo, b&: Hi);
24411
24412 RetValue = DAG.getNode(Opcode: RISCVISD::BuildPairF64, DL, VT: MVT::f64, N1: Lo, N2: Hi);
24413 } else
24414 RetValue = convertLocVTToValVT(DAG, Val: RetValue, VA, DL, Subtarget);
24415
24416 InVals.push_back(Elt: RetValue);
24417 }
24418
24419 return Chain;
24420}
24421
24422bool RISCVTargetLowering::CanLowerReturn(
24423 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
24424 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
24425 const Type *RetTy) const {
24426 SmallVector<CCValAssign, 16> RVLocs;
24427 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
24428
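  // CC_RISCV returns true when it cannot assign a location, which means the
  // return value does not fit in the available return registers and must
  // instead be demoted to an sret argument.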
24429 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
24430 MVT VT = Outs[i].VT;
24431 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
24432 if (CC_RISCV(ValNo: i, ValVT: VT, LocVT: VT, LocInfo: CCValAssign::Full, ArgFlags, State&: CCInfo,
24433 /*IsRet=*/true, OrigTy: Outs[i].OrigTy))
24434 return false;
24435 }
24436 return true;
24437}
24438
24439SDValue
24440RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
24441 bool IsVarArg,
24442 const SmallVectorImpl<ISD::OutputArg> &Outs,
24443 const SmallVectorImpl<SDValue> &OutVals,
24444 const SDLoc &DL, SelectionDAG &DAG) const {
24445 MachineFunction &MF = DAG.getMachineFunction();
24446
24447 // Stores the assignment of the return value to a location.
24448 SmallVector<CCValAssign, 16> RVLocs;
24449
24450 // Info about the registers and stack slot.
24451 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
24452 *DAG.getContext());
24453
24454 analyzeOutputArgs(MF&: DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
24455 CLI: nullptr, Fn: CC_RISCV);
24456
24457 if (CallConv == CallingConv::GHC && !RVLocs.empty())
24458 reportFatalUsageError(reason: "GHC functions return void only");
24459
24460 SDValue Glue;
24461 SmallVector<SDValue, 4> RetOps(1, Chain);
24462
24463 // Copy the result values into the output registers.
24464 for (unsigned i = 0, e = RVLocs.size(), OutIdx = 0; i < e; ++i, ++OutIdx) {
24465 SDValue Val = OutVals[OutIdx];
24466 CCValAssign &VA = RVLocs[i];
24467 assert(VA.isRegLoc() && "Can only return in registers!");
24468
24469 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
24470 // Handle returning f64 on RV32D with a soft float ABI.
24471 assert(VA.isRegLoc() && "Expected return via registers");
24472 assert(VA.needsCustom());
24473 SDValue SplitF64 = DAG.getNode(Opcode: RISCVISD::SplitF64, DL,
24474 VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Val);
24475 SDValue Lo = SplitF64.getValue(R: 0);
24476 SDValue Hi = SplitF64.getValue(R: 1);
24477
24478 // For big-endian, swap the order of Lo and Hi when returning.
24479 if (!Subtarget.isLittleEndian())
24480 std::swap(a&: Lo, b&: Hi);
24481
24482 Register RegLo = VA.getLocReg();
24483 Register RegHi = RVLocs[++i].getLocReg();
24484
24485 if (Subtarget.isRegisterReservedByUser(i: RegLo) ||
24486 Subtarget.isRegisterReservedByUser(i: RegHi))
24487 MF.getFunction().getContext().diagnose(DI: DiagnosticInfoUnsupported{
24488 MF.getFunction(),
24489 "Return value register required, but has been reserved."});
24490
24491 Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: RegLo, N: Lo, Glue);
24492 Glue = Chain.getValue(R: 1);
24493 RetOps.push_back(Elt: DAG.getRegister(Reg: RegLo, VT: MVT::i32));
24494 Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: RegHi, N: Hi, Glue);
24495 Glue = Chain.getValue(R: 1);
24496 RetOps.push_back(Elt: DAG.getRegister(Reg: RegHi, VT: MVT::i32));
24497 } else {
24498 // Handle a 'normal' return.
24499 Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
24500 Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: VA.getLocReg(), N: Val, Glue);
24501
24502 if (Subtarget.isRegisterReservedByUser(i: VA.getLocReg()))
24503 MF.getFunction().getContext().diagnose(DI: DiagnosticInfoUnsupported{
24504 MF.getFunction(),
24505 "Return value register required, but has been reserved."});
24506
24507 // Guarantee that all emitted copies are stuck together.
24508 Glue = Chain.getValue(R: 1);
24509 RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT()));
24510 }
24511 }
24512
24513 RetOps[0] = Chain; // Update chain.
24514
24515 // Add the glue node if we have it.
24516 if (Glue.getNode()) {
24517 RetOps.push_back(Elt: Glue);
24518 }
24519
24520 if (any_of(Range&: RVLocs,
24521 P: [](CCValAssign &VA) { return VA.getLocVT().isScalableVector(); }))
24522 MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
24523
24524 unsigned RetOpc = RISCVISD::RET_GLUE;
24525 // Interrupt service routines use different return instructions.
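  // supervisor -> sret, rnmi -> mnret, qci-(no)nest -> qc.c.mileaveret, and
  // everything else (machine and the SiFive CLIC kinds) -> mret.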
24526 const Function &Func = DAG.getMachineFunction().getFunction();
24527 if (Func.hasFnAttribute(Kind: "interrupt")) {
24528 if (!Func.getReturnType()->isVoidTy())
24529 reportFatalUsageError(
24530 reason: "Functions with the interrupt attribute must have void return type!");
24531
24532 MachineFunction &MF = DAG.getMachineFunction();
24533 StringRef Kind =
24534 MF.getFunction().getFnAttribute(Kind: "interrupt").getValueAsString();
24535
24536 if (Kind == "supervisor")
24537 RetOpc = RISCVISD::SRET_GLUE;
24538 else if (Kind == "rnmi") {
24539 assert(Subtarget.hasFeature(RISCV::FeatureStdExtSmrnmi) &&
24540 "Need Smrnmi extension for rnmi");
24541 RetOpc = RISCVISD::MNRET_GLUE;
24542 } else if (Kind == "qci-nest" || Kind == "qci-nonest") {
24543 assert(Subtarget.hasFeature(RISCV::FeatureVendorXqciint) &&
24544 "Need Xqciint for qci-(no)nest");
24545 RetOpc = RISCVISD::QC_C_MILEAVERET_GLUE;
24546 } else
24547 RetOpc = RISCVISD::MRET_GLUE;
24548 }
24549
24550 return DAG.getNode(Opcode: RetOpc, DL, VT: MVT::Other, Ops: RetOps);
24551}
24552
24553void RISCVTargetLowering::validateCCReservedRegs(
24554 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
24555 MachineFunction &MF) const {
24556 const Function &F = MF.getFunction();
24557
24558 if (llvm::any_of(Range: Regs, P: [this](auto Reg) {
24559 return Subtarget.isRegisterReservedByUser(i: Reg.first);
24560 }))
24561 F.getContext().diagnose(DI: DiagnosticInfoUnsupported{
24562 F, "Argument register required, but has been reserved."});
24563}
24564
24565// Check if the result of the node is only used as a return value, as
24566// otherwise we can't perform a tail-call.
24567bool RISCVTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
24568 if (N->getNumValues() != 1)
24569 return false;
24570 if (!N->hasNUsesOfValue(NUses: 1, Value: 0))
24571 return false;
24572
24573 SDNode *Copy = *N->user_begin();
24574
24575 if (Copy->getOpcode() == ISD::BITCAST) {
24576 return isUsedByReturnOnly(N: Copy, Chain);
24577 }
24578
24579 // TODO: Handle additional opcodes in order to support tail-calling libcalls
24580 // with soft float ABIs.
24581 if (Copy->getOpcode() != ISD::CopyToReg) {
24582 return false;
24583 }
24584
24585 // If the ISD::CopyToReg has a glue operand, we conservatively assume it
24586 // isn't safe to perform a tail call.
24587 if (Copy->getOperand(Num: Copy->getNumOperands() - 1).getValueType() == MVT::Glue)
24588 return false;
24589
24590 // The copy must be used by a RISCVISD::RET_GLUE, and nothing else.
24591 bool HasRet = false;
24592 for (SDNode *Node : Copy->users()) {
24593 if (Node->getOpcode() != RISCVISD::RET_GLUE)
24594 return false;
24595 HasRet = true;
24596 }
24597 if (!HasRet)
24598 return false;
24599
24600 Chain = Copy->getOperand(Num: 0);
24601 return true;
24602}
24603
24604bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
24605 return CI->isTailCall();
24606}
24607
24608/// getConstraintType - Given a constraint letter, return the type of
24609/// constraint it is for this target.
24610RISCVTargetLowering::ConstraintType
24611RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
24612 if (Constraint.size() == 1) {
24613 switch (Constraint[0]) {
24614 default:
24615 break;
24616 case 'f':
24617 case 'R':
24618 return C_RegisterClass;
24619 case 'I':
24620 case 'J':
24621 case 'K':
24622 return C_Immediate;
24623 case 'A':
24624 return C_Memory;
24625 case 's':
24626 case 'S': // A symbolic address
24627 return C_Other;
24628 }
24629 } else {
24630 if (Constraint == "vr" || Constraint == "vd" || Constraint == "vm")
24631 return C_RegisterClass;
24632 if (Constraint == "cr" || Constraint == "cR" || Constraint == "cf")
24633 return C_RegisterClass;
24634 }
24635 return TargetLowering::getConstraintType(Constraint);
24636}
24637
24638std::pair<unsigned, const TargetRegisterClass *>
24639RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
24640 StringRef Constraint,
24641 MVT VT) const {
24642 // First, see if this is a constraint that directly corresponds to a RISC-V
24643 // register class.
24644 if (Constraint.size() == 1) {
24645 switch (Constraint[0]) {
24646 case 'r':
24647 // TODO: Support fixed vectors up to XLen for P extension?
24648 if (VT.isVector())
24649 break;
24650 if (VT == MVT::f16 && Subtarget.hasStdExtZhinxmin())
24651 return std::make_pair(x: 0U, y: &RISCV::GPRF16NoX0RegClass);
24652 if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
24653 return std::make_pair(x: 0U, y: &RISCV::GPRF32NoX0RegClass);
24654 if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
24655 return std::make_pair(x: 0U, y: &RISCV::GPRPairNoX0RegClass);
24656 return std::make_pair(x: 0U, y: &RISCV::GPRNoX0RegClass);
24657 case 'f':
24658 if (VT == MVT::f16) {
24659 if (Subtarget.hasStdExtZfhmin())
24660 return std::make_pair(x: 0U, y: &RISCV::FPR16RegClass);
24661 if (Subtarget.hasStdExtZhinxmin())
24662 return std::make_pair(x: 0U, y: &RISCV::GPRF16NoX0RegClass);
24663 } else if (VT == MVT::f32) {
24664 if (Subtarget.hasStdExtF())
24665 return std::make_pair(x: 0U, y: &RISCV::FPR32RegClass);
24666 if (Subtarget.hasStdExtZfinx())
24667 return std::make_pair(x: 0U, y: &RISCV::GPRF32NoX0RegClass);
24668 } else if (VT == MVT::f64) {
24669 if (Subtarget.hasStdExtD())
24670 return std::make_pair(x: 0U, y: &RISCV::FPR64RegClass);
24671 if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
24672 return std::make_pair(x: 0U, y: &RISCV::GPRPairNoX0RegClass);
24673 if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit())
24674 return std::make_pair(x: 0U, y: &RISCV::GPRNoX0RegClass);
24675 }
24676 break;
24677 case 'R':
24678 if (((VT == MVT::i64 || VT == MVT::f64) && !Subtarget.is64Bit()) ||
24679 (VT == MVT::i128 && Subtarget.is64Bit()))
24680 return std::make_pair(x: 0U, y: &RISCV::GPRPairNoX0RegClass);
24681 break;
24682 default:
24683 break;
24684 }
24685 } else if (Constraint == "vr") {
24686 // Check VM and fractional LMUL first so that those types will use that
24687 // class instead of VR.
24688 for (const auto *RC :
24689 {&RISCV::ZZZ_VMRegClass, &RISCV::ZZZ_VRMF8RegClass,
24690 &RISCV::ZZZ_VRMF4RegClass, &RISCV::ZZZ_VRMF2RegClass,
24691 &RISCV::VRRegClass, &RISCV::VRM2RegClass, &RISCV::VRM4RegClass,
24692 &RISCV::VRM8RegClass, &RISCV::VRN2M1RegClass, &RISCV::VRN3M1RegClass,
24693 &RISCV::VRN4M1RegClass, &RISCV::VRN5M1RegClass,
24694 &RISCV::VRN6M1RegClass, &RISCV::VRN7M1RegClass,
24695 &RISCV::VRN8M1RegClass, &RISCV::VRN2M2RegClass,
24696 &RISCV::VRN3M2RegClass, &RISCV::VRN4M2RegClass,
24697 &RISCV::VRN2M4RegClass}) {
24698 if (TRI->isTypeLegalForClass(RC: *RC, T: VT.SimpleTy))
24699 return std::make_pair(x: 0U, y&: RC);
24700
24701 if (VT.isFixedLengthVector() && useRVVForFixedLengthVectorVT(VT)) {
24702 MVT ContainerVT = getContainerForFixedLengthVector(VT);
24703 if (TRI->isTypeLegalForClass(RC: *RC, T: ContainerVT))
24704 return std::make_pair(x: 0U, y&: RC);
24705 }
24706 }
24707 } else if (Constraint == "vd") {
24708 // Check VMNoV0 and fractional LMUL first so that those types will use that
24709 // class instead of VRNoV0.
24710 for (const auto *RC :
24711 {&RISCV::ZZZ_VMNoV0RegClass, &RISCV::ZZZ_VRMF8NoV0RegClass,
24712 &RISCV::ZZZ_VRMF4NoV0RegClass, &RISCV::ZZZ_VRMF2NoV0RegClass,
24713 &RISCV::VRNoV0RegClass, &RISCV::VRM2NoV0RegClass,
24714 &RISCV::VRM4NoV0RegClass, &RISCV::VRM8NoV0RegClass,
24715 &RISCV::VRN2M1NoV0RegClass, &RISCV::VRN3M1NoV0RegClass,
24716 &RISCV::VRN4M1NoV0RegClass, &RISCV::VRN5M1NoV0RegClass,
24717 &RISCV::VRN6M1NoV0RegClass, &RISCV::VRN7M1NoV0RegClass,
24718 &RISCV::VRN8M1NoV0RegClass, &RISCV::VRN2M2NoV0RegClass,
24719 &RISCV::VRN3M2NoV0RegClass, &RISCV::VRN4M2NoV0RegClass,
24720 &RISCV::VRN2M4NoV0RegClass}) {
24721 if (TRI->isTypeLegalForClass(RC: *RC, T: VT.SimpleTy))
24722 return std::make_pair(x: 0U, y&: RC);
24723
24724 if (VT.isFixedLengthVector() && useRVVForFixedLengthVectorVT(VT)) {
24725 MVT ContainerVT = getContainerForFixedLengthVector(VT);
24726 if (TRI->isTypeLegalForClass(RC: *RC, T: ContainerVT))
24727 return std::make_pair(x: 0U, y&: RC);
24728 }
24729 }
24730 } else if (Constraint == "vm") {
24731 if (TRI->isTypeLegalForClass(RC: RISCV::VMV0RegClass, T: VT.SimpleTy))
24732 return std::make_pair(x: 0U, y: &RISCV::VMV0RegClass);
24733
24734 if (VT.isFixedLengthVector() && useRVVForFixedLengthVectorVT(VT)) {
24735 MVT ContainerVT = getContainerForFixedLengthVector(VT);
24736       // VT here might be coerced to a vector with i8 elements, so we need to
24737       // check whether this is an M1 register instead of checking VMV0RegClass.
24738 if (TRI->isTypeLegalForClass(RC: RISCV::VRRegClass, T: ContainerVT))
24739 return std::make_pair(x: 0U, y: &RISCV::VMV0RegClass);
24740 }
24741 } else if (Constraint == "cr") {
24742 if (VT == MVT::f16 && Subtarget.hasStdExtZhinxmin())
24743 return std::make_pair(x: 0U, y: &RISCV::GPRF16CRegClass);
24744 if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
24745 return std::make_pair(x: 0U, y: &RISCV::GPRF32CRegClass);
24746 if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
24747 return std::make_pair(x: 0U, y: &RISCV::GPRPairCRegClass);
24748 if (!VT.isVector())
24749 return std::make_pair(x: 0U, y: &RISCV::GPRCRegClass);
24750 } else if (Constraint == "cR") {
24751 if (((VT == MVT::i64 || VT == MVT::f64) && !Subtarget.is64Bit()) ||
24752 (VT == MVT::i128 && Subtarget.is64Bit()))
24753 return std::make_pair(x: 0U, y: &RISCV::GPRPairCRegClass);
24754 } else if (Constraint == "cf") {
24755 if (VT == MVT::f16) {
24756 if (Subtarget.hasStdExtZfhmin())
24757 return std::make_pair(x: 0U, y: &RISCV::FPR16CRegClass);
24758 if (Subtarget.hasStdExtZhinxmin())
24759 return std::make_pair(x: 0U, y: &RISCV::GPRF16CRegClass);
24760 } else if (VT == MVT::f32) {
24761 if (Subtarget.hasStdExtF())
24762 return std::make_pair(x: 0U, y: &RISCV::FPR32CRegClass);
24763 if (Subtarget.hasStdExtZfinx())
24764 return std::make_pair(x: 0U, y: &RISCV::GPRF32CRegClass);
24765 } else if (VT == MVT::f64) {
24766 if (Subtarget.hasStdExtD())
24767 return std::make_pair(x: 0U, y: &RISCV::FPR64CRegClass);
24768 if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
24769 return std::make_pair(x: 0U, y: &RISCV::GPRPairCRegClass);
24770 if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit())
24771 return std::make_pair(x: 0U, y: &RISCV::GPRCRegClass);
24772 }
24773 }
24774
24775 // Clang will correctly decode the usage of register name aliases into their
24776 // official names. However, other frontends like `rustc` do not. This allows
24777 // users of these frontends to use the ABI names for registers in LLVM-style
24778 // register constraints.
24779 unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
24780 .Case(S: "{zero}", Value: RISCV::X0)
24781 .Case(S: "{ra}", Value: RISCV::X1)
24782 .Case(S: "{sp}", Value: RISCV::X2)
24783 .Case(S: "{gp}", Value: RISCV::X3)
24784 .Case(S: "{tp}", Value: RISCV::X4)
24785 .Case(S: "{t0}", Value: RISCV::X5)
24786 .Case(S: "{t1}", Value: RISCV::X6)
24787 .Case(S: "{t2}", Value: RISCV::X7)
24788 .Cases(CaseStrings: {"{s0}", "{fp}"}, Value: RISCV::X8)
24789 .Case(S: "{s1}", Value: RISCV::X9)
24790 .Case(S: "{a0}", Value: RISCV::X10)
24791 .Case(S: "{a1}", Value: RISCV::X11)
24792 .Case(S: "{a2}", Value: RISCV::X12)
24793 .Case(S: "{a3}", Value: RISCV::X13)
24794 .Case(S: "{a4}", Value: RISCV::X14)
24795 .Case(S: "{a5}", Value: RISCV::X15)
24796 .Case(S: "{a6}", Value: RISCV::X16)
24797 .Case(S: "{a7}", Value: RISCV::X17)
24798 .Case(S: "{s2}", Value: RISCV::X18)
24799 .Case(S: "{s3}", Value: RISCV::X19)
24800 .Case(S: "{s4}", Value: RISCV::X20)
24801 .Case(S: "{s5}", Value: RISCV::X21)
24802 .Case(S: "{s6}", Value: RISCV::X22)
24803 .Case(S: "{s7}", Value: RISCV::X23)
24804 .Case(S: "{s8}", Value: RISCV::X24)
24805 .Case(S: "{s9}", Value: RISCV::X25)
24806 .Case(S: "{s10}", Value: RISCV::X26)
24807 .Case(S: "{s11}", Value: RISCV::X27)
24808 .Case(S: "{t3}", Value: RISCV::X28)
24809 .Case(S: "{t4}", Value: RISCV::X29)
24810 .Case(S: "{t5}", Value: RISCV::X30)
24811 .Case(S: "{t6}", Value: RISCV::X31)
24812 .Default(Value: RISCV::NoRegister);
24813 if (XRegFromAlias != RISCV::NoRegister)
24814 return std::make_pair(x&: XRegFromAlias, y: &RISCV::GPRRegClass);
24815
24816   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
24817   // TableGen record rather than the AsmName to choose registers for InlineAsm
24818   // constraints, and since we want to match those names to the widest floating
24819   // point register type available, manually select floating point registers here.
24820 //
24821 // The second case is the ABI name of the register, so that frontends can also
24822 // use the ABI names in register constraint lists.
24823 if (Subtarget.hasStdExtF()) {
24824 unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
24825 .Cases(CaseStrings: {"{f0}", "{ft0}"}, Value: RISCV::F0_F)
24826 .Cases(CaseStrings: {"{f1}", "{ft1}"}, Value: RISCV::F1_F)
24827 .Cases(CaseStrings: {"{f2}", "{ft2}"}, Value: RISCV::F2_F)
24828 .Cases(CaseStrings: {"{f3}", "{ft3}"}, Value: RISCV::F3_F)
24829 .Cases(CaseStrings: {"{f4}", "{ft4}"}, Value: RISCV::F4_F)
24830 .Cases(CaseStrings: {"{f5}", "{ft5}"}, Value: RISCV::F5_F)
24831 .Cases(CaseStrings: {"{f6}", "{ft6}"}, Value: RISCV::F6_F)
24832 .Cases(CaseStrings: {"{f7}", "{ft7}"}, Value: RISCV::F7_F)
24833 .Cases(CaseStrings: {"{f8}", "{fs0}"}, Value: RISCV::F8_F)
24834 .Cases(CaseStrings: {"{f9}", "{fs1}"}, Value: RISCV::F9_F)
24835 .Cases(CaseStrings: {"{f10}", "{fa0}"}, Value: RISCV::F10_F)
24836 .Cases(CaseStrings: {"{f11}", "{fa1}"}, Value: RISCV::F11_F)
24837 .Cases(CaseStrings: {"{f12}", "{fa2}"}, Value: RISCV::F12_F)
24838 .Cases(CaseStrings: {"{f13}", "{fa3}"}, Value: RISCV::F13_F)
24839 .Cases(CaseStrings: {"{f14}", "{fa4}"}, Value: RISCV::F14_F)
24840 .Cases(CaseStrings: {"{f15}", "{fa5}"}, Value: RISCV::F15_F)
24841 .Cases(CaseStrings: {"{f16}", "{fa6}"}, Value: RISCV::F16_F)
24842 .Cases(CaseStrings: {"{f17}", "{fa7}"}, Value: RISCV::F17_F)
24843 .Cases(CaseStrings: {"{f18}", "{fs2}"}, Value: RISCV::F18_F)
24844 .Cases(CaseStrings: {"{f19}", "{fs3}"}, Value: RISCV::F19_F)
24845 .Cases(CaseStrings: {"{f20}", "{fs4}"}, Value: RISCV::F20_F)
24846 .Cases(CaseStrings: {"{f21}", "{fs5}"}, Value: RISCV::F21_F)
24847 .Cases(CaseStrings: {"{f22}", "{fs6}"}, Value: RISCV::F22_F)
24848 .Cases(CaseStrings: {"{f23}", "{fs7}"}, Value: RISCV::F23_F)
24849 .Cases(CaseStrings: {"{f24}", "{fs8}"}, Value: RISCV::F24_F)
24850 .Cases(CaseStrings: {"{f25}", "{fs9}"}, Value: RISCV::F25_F)
24851 .Cases(CaseStrings: {"{f26}", "{fs10}"}, Value: RISCV::F26_F)
24852 .Cases(CaseStrings: {"{f27}", "{fs11}"}, Value: RISCV::F27_F)
24853 .Cases(CaseStrings: {"{f28}", "{ft8}"}, Value: RISCV::F28_F)
24854 .Cases(CaseStrings: {"{f29}", "{ft9}"}, Value: RISCV::F29_F)
24855 .Cases(CaseStrings: {"{f30}", "{ft10}"}, Value: RISCV::F30_F)
24856 .Cases(CaseStrings: {"{f31}", "{ft11}"}, Value: RISCV::F31_F)
24857 .Default(Value: RISCV::NoRegister);
24858 if (FReg != RISCV::NoRegister) {
24859 assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
24860 if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
24861 unsigned RegNo = FReg - RISCV::F0_F;
24862 unsigned DReg = RISCV::F0_D + RegNo;
24863 return std::make_pair(x&: DReg, y: &RISCV::FPR64RegClass);
24864 }
24865 if (VT == MVT::f32 || VT == MVT::Other)
24866 return std::make_pair(x&: FReg, y: &RISCV::FPR32RegClass);
24867 if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16) {
24868 unsigned RegNo = FReg - RISCV::F0_F;
24869 unsigned HReg = RISCV::F0_H + RegNo;
24870 return std::make_pair(x&: HReg, y: &RISCV::FPR16RegClass);
24871 }
24872 }
24873 }
24874
24875 if (Subtarget.hasVInstructions()) {
24876 Register VReg = StringSwitch<Register>(Constraint.lower())
24877 .Case(S: "{v0}", Value: RISCV::V0)
24878 .Case(S: "{v1}", Value: RISCV::V1)
24879 .Case(S: "{v2}", Value: RISCV::V2)
24880 .Case(S: "{v3}", Value: RISCV::V3)
24881 .Case(S: "{v4}", Value: RISCV::V4)
24882 .Case(S: "{v5}", Value: RISCV::V5)
24883 .Case(S: "{v6}", Value: RISCV::V6)
24884 .Case(S: "{v7}", Value: RISCV::V7)
24885 .Case(S: "{v8}", Value: RISCV::V8)
24886 .Case(S: "{v9}", Value: RISCV::V9)
24887 .Case(S: "{v10}", Value: RISCV::V10)
24888 .Case(S: "{v11}", Value: RISCV::V11)
24889 .Case(S: "{v12}", Value: RISCV::V12)
24890 .Case(S: "{v13}", Value: RISCV::V13)
24891 .Case(S: "{v14}", Value: RISCV::V14)
24892 .Case(S: "{v15}", Value: RISCV::V15)
24893 .Case(S: "{v16}", Value: RISCV::V16)
24894 .Case(S: "{v17}", Value: RISCV::V17)
24895 .Case(S: "{v18}", Value: RISCV::V18)
24896 .Case(S: "{v19}", Value: RISCV::V19)
24897 .Case(S: "{v20}", Value: RISCV::V20)
24898 .Case(S: "{v21}", Value: RISCV::V21)
24899 .Case(S: "{v22}", Value: RISCV::V22)
24900 .Case(S: "{v23}", Value: RISCV::V23)
24901 .Case(S: "{v24}", Value: RISCV::V24)
24902 .Case(S: "{v25}", Value: RISCV::V25)
24903 .Case(S: "{v26}", Value: RISCV::V26)
24904 .Case(S: "{v27}", Value: RISCV::V27)
24905 .Case(S: "{v28}", Value: RISCV::V28)
24906 .Case(S: "{v29}", Value: RISCV::V29)
24907 .Case(S: "{v30}", Value: RISCV::V30)
24908 .Case(S: "{v31}", Value: RISCV::V31)
24909 .Default(Value: RISCV::NoRegister);
24910 if (VReg != RISCV::NoRegister) {
24911 if (TRI->isTypeLegalForClass(RC: RISCV::ZZZ_VMRegClass, T: VT.SimpleTy))
24912 return std::make_pair(x&: VReg, y: &RISCV::ZZZ_VMRegClass);
24913 if (TRI->isTypeLegalForClass(RC: RISCV::VRRegClass, T: VT.SimpleTy))
24914 return std::make_pair(x&: VReg, y: &RISCV::VRRegClass);
24915 for (const auto *RC :
24916 {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
24917 if (TRI->isTypeLegalForClass(RC: *RC, T: VT.SimpleTy)) {
24918 VReg = TRI->getMatchingSuperReg(Reg: VReg, SubIdx: RISCV::sub_vrm1_0, RC);
24919 return std::make_pair(x&: VReg, y&: RC);
24920 }
24921 }
24922 }
24923 }
24924
24925 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
24926}
24927
24928InlineAsm::ConstraintCode
24929RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
24930 // Currently only support length 1 constraints.
24931 if (ConstraintCode.size() == 1) {
24932 switch (ConstraintCode[0]) {
24933 case 'A':
24934 return InlineAsm::ConstraintCode::A;
24935 default:
24936 break;
24937 }
24938 }
24939
24940 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
24941}
24942
24943void RISCVTargetLowering::LowerAsmOperandForConstraint(
24944 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
24945 SelectionDAG &DAG) const {
24946 // Currently only support length 1 constraints.
24947 if (Constraint.size() == 1) {
24948 switch (Constraint[0]) {
24949 case 'I':
24950 // Validate & create a 12-bit signed immediate operand.
24951 if (auto *C = dyn_cast<ConstantSDNode>(Val&: Op)) {
24952 uint64_t CVal = C->getSExtValue();
24953 if (isInt<12>(x: CVal))
24954 Ops.push_back(x: DAG.getSignedTargetConstant(Val: CVal, DL: SDLoc(Op),
24955 VT: Subtarget.getXLenVT()));
24956 }
24957 return;
24958 case 'J':
24959 // Validate & create an integer zero operand.
24960 if (isNullConstant(V: Op))
24961 Ops.push_back(
24962 x: DAG.getTargetConstant(Val: 0, DL: SDLoc(Op), VT: Subtarget.getXLenVT()));
24963 return;
24964 case 'K':
24965 // Validate & create a 5-bit unsigned immediate operand.
24966 if (auto *C = dyn_cast<ConstantSDNode>(Val&: Op)) {
24967 uint64_t CVal = C->getZExtValue();
24968 if (isUInt<5>(x: CVal))
24969 Ops.push_back(
24970 x: DAG.getTargetConstant(Val: CVal, DL: SDLoc(Op), VT: Subtarget.getXLenVT()));
24971 }
24972 return;
24973 case 'S':
24974 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint: "s", Ops, DAG);
24975 return;
24976 default:
24977 break;
24978 }
24979 }
24980 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
24981}
24982
24983Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
24984 Instruction *Inst,
24985 AtomicOrdering Ord) const {
24986 if (Subtarget.hasStdExtZtso()) {
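    // Ztso already orders everything except older stores against younger
    // loads, so only a seq_cst load still needs a leading fence.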
24987 if (isa<LoadInst>(Val: Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
24988 return Builder.CreateFence(Ordering: Ord);
24989 return nullptr;
24990 }
24991
24992 if (isa<LoadInst>(Val: Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
24993 return Builder.CreateFence(Ordering: Ord);
24994 if (isa<StoreInst>(Val: Inst) && isReleaseOrStronger(AO: Ord))
24995 return Builder.CreateFence(Ordering: AtomicOrdering::Release);
24996 return nullptr;
24997}
24998
24999Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
25000 Instruction *Inst,
25001 AtomicOrdering Ord) const {
25002 if (Subtarget.hasStdExtZtso()) {
25003 if (isa<StoreInst>(Val: Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
25004 return Builder.CreateFence(Ordering: Ord);
25005 return nullptr;
25006 }
25007
25008 if (isa<LoadInst>(Val: Inst) && isAcquireOrStronger(AO: Ord))
25009 return Builder.CreateFence(Ordering: AtomicOrdering::Acquire);
25010 if (Subtarget.enableTrailingSeqCstFence() && isa<StoreInst>(Val: Inst) &&
25011 Ord == AtomicOrdering::SequentiallyConsistent)
25012 return Builder.CreateFence(Ordering: AtomicOrdering::SequentiallyConsistent);
25013 return nullptr;
25014}
25015
25016TargetLowering::AtomicExpansionKind
25017RISCVTargetLowering::shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const {
25018 // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
25019 // point operations can't be used in an lr/sc sequence without breaking the
25020 // forward-progress guarantee.
25021 if (AI->isFloatingPointOperation() ||
25022 AI->getOperation() == AtomicRMWInst::UIncWrap ||
25023 AI->getOperation() == AtomicRMWInst::UDecWrap ||
25024 AI->getOperation() == AtomicRMWInst::USubCond ||
25025 AI->getOperation() == AtomicRMWInst::USubSat)
25026 return AtomicExpansionKind::CmpXChg;
25027
25028   // Don't expand forced atomics; we want to have __sync libcalls instead.
25029 if (Subtarget.hasForcedAtomics())
25030 return AtomicExpansionKind::None;
25031
25032 unsigned Size = AI->getType()->getPrimitiveSizeInBits();
25033 if (AI->getOperation() == AtomicRMWInst::Nand) {
25034 if (Subtarget.hasStdExtZacas() &&
25035 (Size >= 32 || Subtarget.hasStdExtZabha()))
25036 return AtomicExpansionKind::CmpXChg;
25037 if (Size < 32)
25038 return AtomicExpansionKind::MaskedIntrinsic;
25039 }
25040
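  // Without Zabha there are no byte/halfword AMOs, so sub-word atomicrmw is
  // expanded to a masked LR/SC loop on the containing aligned 32-bit word.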
25041 if (Size < 32 && !Subtarget.hasStdExtZabha())
25042 return AtomicExpansionKind::MaskedIntrinsic;
25043
25044 return AtomicExpansionKind::None;
25045}
25046
25047static Intrinsic::ID
25048getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
25049 switch (BinOp) {
25050 default:
25051 llvm_unreachable("Unexpected AtomicRMW BinOp");
25052 case AtomicRMWInst::Xchg:
25053 return Intrinsic::riscv_masked_atomicrmw_xchg;
25054 case AtomicRMWInst::Add:
25055 return Intrinsic::riscv_masked_atomicrmw_add;
25056 case AtomicRMWInst::Sub:
25057 return Intrinsic::riscv_masked_atomicrmw_sub;
25058 case AtomicRMWInst::Nand:
25059 return Intrinsic::riscv_masked_atomicrmw_nand;
25060 case AtomicRMWInst::Max:
25061 return Intrinsic::riscv_masked_atomicrmw_max;
25062 case AtomicRMWInst::Min:
25063 return Intrinsic::riscv_masked_atomicrmw_min;
25064 case AtomicRMWInst::UMax:
25065 return Intrinsic::riscv_masked_atomicrmw_umax;
25066 case AtomicRMWInst::UMin:
25067 return Intrinsic::riscv_masked_atomicrmw_umin;
25068 }
25069}
25070
25071Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
25072 IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
25073 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
25074 // In the case of an atomicrmw xchg with a constant 0/-1 operand, replace
25075 // the atomic instruction with an AtomicRMWInst::And/Or with appropriate
25076 // mask, as this produces better code than the LR/SC loop emitted by
25077 // int_riscv_masked_atomicrmw_xchg.
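  // xchg with 0 clears the masked field, which an AND with ~Mask also does;
  // xchg with -1 sets the field, which an OR with Mask also does.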
25078 if (AI->getOperation() == AtomicRMWInst::Xchg &&
25079 isa<ConstantInt>(Val: AI->getValOperand())) {
25080 ConstantInt *CVal = cast<ConstantInt>(Val: AI->getValOperand());
25081 if (CVal->isZero())
25082 return Builder.CreateAtomicRMW(Op: AtomicRMWInst::And, Ptr: AlignedAddr,
25083 Val: Builder.CreateNot(V: Mask, Name: "Inv_Mask"),
25084 Align: AI->getAlign(), Ordering: Ord);
25085 if (CVal->isMinusOne())
25086 return Builder.CreateAtomicRMW(Op: AtomicRMWInst::Or, Ptr: AlignedAddr, Val: Mask,
25087 Align: AI->getAlign(), Ordering: Ord);
25088 }
25089
25090 unsigned XLen = Subtarget.getXLen();
25091 Value *Ordering =
25092 Builder.getIntN(N: XLen, C: static_cast<uint64_t>(AI->getOrdering()));
25093 Type *Tys[] = {Builder.getIntNTy(N: XLen), AlignedAddr->getType()};
25094 Function *LrwOpScwLoop = Intrinsic::getOrInsertDeclaration(
25095 M: AI->getModule(),
25096 id: getIntrinsicForMaskedAtomicRMWBinOp(XLen, BinOp: AI->getOperation()), Tys);
25097
25098 if (XLen == 64) {
25099 Incr = Builder.CreateSExt(V: Incr, DestTy: Builder.getInt64Ty());
25100 Mask = Builder.CreateSExt(V: Mask, DestTy: Builder.getInt64Ty());
25101 ShiftAmt = Builder.CreateSExt(V: ShiftAmt, DestTy: Builder.getInt64Ty());
25102 }
25103
25104 Value *Result;
25105
25106 // Must pass the shift amount needed to sign extend the loaded value prior
25107 // to performing a signed comparison for min/max. ShiftAmt is the number of
25108 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
25109 // is the number of bits to left+right shift the value in order to
25110 // sign-extend.
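  // For example, an i8 min/max on RV32 with the byte in bits [7:0]
  // (ShiftAmt == 0) passes 32 - 8 - 0 == 24, i.e. shift left and then
  // arithmetic-shift right by 24 to sign-extend the byte.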
25111 if (AI->getOperation() == AtomicRMWInst::Min ||
25112 AI->getOperation() == AtomicRMWInst::Max) {
25113 const DataLayout &DL = AI->getDataLayout();
25114 unsigned ValWidth =
25115 DL.getTypeStoreSizeInBits(Ty: AI->getValOperand()->getType());
25116 Value *SextShamt =
25117 Builder.CreateSub(LHS: Builder.getIntN(N: XLen, C: XLen - ValWidth), RHS: ShiftAmt);
25118 Result = Builder.CreateCall(Callee: LrwOpScwLoop,
25119 Args: {AlignedAddr, Incr, Mask, SextShamt, Ordering});
25120 } else {
25121 Result =
25122 Builder.CreateCall(Callee: LrwOpScwLoop, Args: {AlignedAddr, Incr, Mask, Ordering});
25123 }
25124
25125 if (XLen == 64)
25126 Result = Builder.CreateTrunc(V: Result, DestTy: Builder.getInt32Ty());
25127 return Result;
25128}
25129
25130TargetLowering::AtomicExpansionKind
25131RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
25132 const AtomicCmpXchgInst *CI) const {
25133   // Don't expand forced atomics; we want to have __sync libcalls instead.
25134 if (Subtarget.hasForcedAtomics())
25135 return AtomicExpansionKind::None;
25136
25137 unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
25138 if (!(Subtarget.hasStdExtZabha() && Subtarget.hasStdExtZacas()) &&
25139 (Size == 8 || Size == 16))
25140 return AtomicExpansionKind::MaskedIntrinsic;
25141 return AtomicExpansionKind::None;
25142}
25143
25144Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
25145 IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
25146 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
25147 unsigned XLen = Subtarget.getXLen();
25148 Value *Ordering = Builder.getIntN(N: XLen, C: static_cast<uint64_t>(Ord));
25149 Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg;
25150 if (XLen == 64) {
25151 CmpVal = Builder.CreateSExt(V: CmpVal, DestTy: Builder.getInt64Ty());
25152 NewVal = Builder.CreateSExt(V: NewVal, DestTy: Builder.getInt64Ty());
25153 Mask = Builder.CreateSExt(V: Mask, DestTy: Builder.getInt64Ty());
25154 }
25155 Type *Tys[] = {Builder.getIntNTy(N: XLen), AlignedAddr->getType()};
25156 Value *Result = Builder.CreateIntrinsic(
25157 ID: CmpXchgIntrID, Types: Tys, Args: {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
25158 if (XLen == 64)
25159 Result = Builder.CreateTrunc(V: Result, DestTy: Builder.getInt32Ty());
25160 return Result;
25161}
25162
25163bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(SDValue Extend,
25164 EVT DataVT) const {
25165 // We have indexed loads for all supported EEW types. Indices are always
25166 // zero extended.
25167 return Extend.getOpcode() == ISD::ZERO_EXTEND &&
25168 isTypeLegal(VT: Extend.getValueType()) &&
25169 isTypeLegal(VT: Extend.getOperand(i: 0).getValueType()) &&
25170 Extend.getOperand(i: 0).getValueType().getVectorElementType() != MVT::i1;
25171}
25172
25173bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
25174 EVT VT) const {
25175 if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
25176 return false;
25177
25178 switch (FPVT.getSimpleVT().SimpleTy) {
25179 case MVT::f16:
25180 return Subtarget.hasStdExtZfhmin();
25181 case MVT::f32:
25182 return Subtarget.hasStdExtF();
25183 case MVT::f64:
25184 return Subtarget.hasStdExtD();
25185 default:
25186 return false;
25187 }
25188}
25189
25190unsigned RISCVTargetLowering::getJumpTableEncoding() const {
25191   // If we are using the small code model, we can reduce the size of a jump
25192   // table entry to 4 bytes.
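  // (The default non-PIC encoding is a pointer-sized block address, i.e.
  // 8 bytes on RV64; with the small code model the whole program is assumed
  // to be linked within a 32-bit addressable range, so a 32-bit absolute
  // entry is enough.)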
25193 if (Subtarget.is64Bit() && !isPositionIndependent() &&
25194 getTargetMachine().getCodeModel() == CodeModel::Small) {
25195 return MachineJumpTableInfo::EK_Custom32;
25196 }
25197 return TargetLowering::getJumpTableEncoding();
25198}
25199
25200const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
25201 const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
25202 unsigned uid, MCContext &Ctx) const {
25203 assert(Subtarget.is64Bit() && !isPositionIndependent() &&
25204 getTargetMachine().getCodeModel() == CodeModel::Small);
25205 return MCSymbolRefExpr::create(Symbol: MBB->getSymbol(), Ctx);
25206}
25207
25208bool RISCVTargetLowering::isVScaleKnownToBeAPowerOfTwo() const {
25209 // We define vscale to be VLEN/RVVBitsPerBlock. VLEN is always a power
25210 // of two >= 64, and RVVBitsPerBlock is 64. Thus, vscale must be
25211 // a power of two as well.
25212 // FIXME: This doesn't work for zve32, but that's already broken
25213 // elsewhere for the same reason.
25214 assert(Subtarget.getRealMinVLen() >= 64 && "zve32* unsupported");
25215 static_assert(RISCV::RVVBitsPerBlock == 64,
25216 "RVVBitsPerBlock changed, audit needed");
25217 return true;
25218}
25219
25220bool RISCVTargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
25221 SDValue &Offset,
25222 ISD::MemIndexedMode &AM,
25223 SelectionDAG &DAG) const {
25224 // Target does not support indexed loads.
25225 if (!Subtarget.hasVendorXTHeadMemIdx())
25226 return false;
25227
25228 if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)
25229 return false;
25230
25231 Base = Op->getOperand(Num: 0);
25232 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Op->getOperand(Num: 1))) {
25233 int64_t RHSC = RHS->getSExtValue();
25234 if (Op->getOpcode() == ISD::SUB)
25235 RHSC = -(uint64_t)RHSC;
25236
25237 // The constants that can be encoded in the THeadMemIdx instructions
25238 // are of the form (sign_extend(imm5) << imm2).
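// For example, an offset of 120 (15 << 3) or -128 (-16 << 3) is encodable,
// while 17 is not: it does not fit in an imm5 unshifted and is not evenly
// divisible by any of the remaining shift amounts.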
25239 bool isLegalIndexedOffset = false;
25240 for (unsigned i = 0; i < 4; i++)
25241 if (isInt<5>(x: RHSC >> i) && ((RHSC % (1LL << i)) == 0)) {
25242 isLegalIndexedOffset = true;
25243 break;
25244 }
25245
25246 if (!isLegalIndexedOffset)
25247 return false;
25248
25249 Offset = Op->getOperand(Num: 1);
25250 return true;
25251 }
25252
25253 return false;
25254}
25255
25256bool RISCVTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
25257 SDValue &Offset,
25258 ISD::MemIndexedMode &AM,
25259 SelectionDAG &DAG) const {
25260 EVT VT;
25261 SDValue Ptr;
25262 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) {
25263 VT = LD->getMemoryVT();
25264 Ptr = LD->getBasePtr();
25265 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Val: N)) {
25266 VT = ST->getMemoryVT();
25267 Ptr = ST->getBasePtr();
25268 } else
25269 return false;
25270
25271 if (!getIndexedAddressParts(Op: Ptr.getNode(), Base, Offset, AM, DAG))
25272 return false;
25273
25274 AM = ISD::PRE_INC;
25275 return true;
25276}
25277
25278bool RISCVTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
25279 SDValue &Base,
25280 SDValue &Offset,
25281 ISD::MemIndexedMode &AM,
25282 SelectionDAG &DAG) const {
25283 if (Subtarget.hasVendorXCVmem() && !Subtarget.is64Bit()) {
25284 if (Op->getOpcode() != ISD::ADD)
25285 return false;
25286
25287 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(Val: N))
25288 Base = LS->getBasePtr();
25289 else
25290 return false;
25291
25292 if (Base == Op->getOperand(Num: 0))
25293 Offset = Op->getOperand(Num: 1);
25294 else if (Base == Op->getOperand(Num: 1))
25295 Offset = Op->getOperand(Num: 0);
25296 else
25297 return false;
25298
25299 AM = ISD::POST_INC;
25300 return true;
25301 }
25302
25303 EVT VT;
25304 SDValue Ptr;
25305 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) {
25306 VT = LD->getMemoryVT();
25307 Ptr = LD->getBasePtr();
25308 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Val: N)) {
25309 VT = ST->getMemoryVT();
25310 Ptr = ST->getBasePtr();
25311 } else
25312 return false;
25313
25314 if (!getIndexedAddressParts(Op, Base, Offset, AM, DAG))
25315 return false;
25316 // Post-indexing updates the base, so it's not a valid transform
25317 // if that's not the same as the load's pointer.
25318 if (Ptr != Base)
25319 return false;
25320
25321 AM = ISD::POST_INC;
25322 return true;
25323}
25324
25325bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
25326 EVT VT) const {
25327 EVT SVT = VT.getScalarType();
25328
25329 if (!SVT.isSimple())
25330 return false;
25331
25332 switch (SVT.getSimpleVT().SimpleTy) {
25333 case MVT::f16:
25334 return VT.isVector() ? Subtarget.hasVInstructionsF16()
25335 : Subtarget.hasStdExtZfhOrZhinx();
25336 case MVT::f32:
25337 return Subtarget.hasStdExtFOrZfinx();
25338 case MVT::f64:
25339 return Subtarget.hasStdExtDOrZdinx();
25340 default:
25341 break;
25342 }
25343
25344 return false;
25345}
25346
25347ISD::NodeType RISCVTargetLowering::getExtendForAtomicCmpSwapArg() const {
25348 // Zacas will use amocas.w which does not require extension.
25349 return Subtarget.hasStdExtZacas() ? ISD::ANY_EXTEND : ISD::SIGN_EXTEND;
25350}
25351
25352ISD::NodeType RISCVTargetLowering::getExtendForAtomicRMWArg(unsigned Op) const {
25353 // Zaamo will use amo<op>.w which does not require extension.
25354 if (Subtarget.hasStdExtZaamo() || Subtarget.hasForcedAtomics())
25355 return ISD::ANY_EXTEND;
25356
25357 // Zalrsc pseudo expansions with comparison require sign-extension.
25358 assert(Subtarget.hasStdExtZalrsc());
25359 switch (Op) {
25360 case ISD::ATOMIC_LOAD_MIN:
25361 case ISD::ATOMIC_LOAD_MAX:
25362 case ISD::ATOMIC_LOAD_UMIN:
25363 case ISD::ATOMIC_LOAD_UMAX:
25364 return ISD::SIGN_EXTEND;
25365 default:
25366 break;
25367 }
25368 return ISD::ANY_EXTEND;
25369}
25370
25371Register RISCVTargetLowering::getExceptionPointerRegister(
25372 const Constant *PersonalityFn) const {
25373 return RISCV::X10;
25374}
25375
25376Register RISCVTargetLowering::getExceptionSelectorRegister(
25377 const Constant *PersonalityFn) const {
25378 return RISCV::X11;
25379}
25380
25381bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
25382 // Return false to suppress unnecessary extensions if a libcall argument or
25383 // return value is a float narrower than XLEN on a soft-FP ABI.
25384 if (Subtarget.isSoftFPABI() && (Type.isFloatingPoint() && !Type.isVector() &&
25385 Type.getSizeInBits() < Subtarget.getXLen()))
25386 return false;
25387
25388 return true;
25389}
25390
25391bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(Type *Ty,
25392 bool IsSigned) const {
25393 if (Subtarget.is64Bit() && Ty->isIntegerTy(Bitwidth: 32))
25394 return true;
25395
25396 return IsSigned;
25397}
25398
25399bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
25400 SDValue C) const {
25401 // Check integral scalar types.
25402 if (!VT.isScalarInteger())
25403 return false;
25404
25405 // Omit the optimization if the subtarget has the Zmmul extension and the
25406 // data size exceeds XLen.
25407 const bool HasZmmul = Subtarget.hasStdExtZmmul();
25408 if (HasZmmul && VT.getSizeInBits() > Subtarget.getXLen())
25409 return false;
25410
25411 auto *ConstNode = cast<ConstantSDNode>(Val&: C);
25412 const APInt &Imm = ConstNode->getAPIntValue();
25413
25414 // Don't do this if the Xqciac extension is enabled and Imm is a simm12.
25415 if (Subtarget.hasVendorXqciac() && Imm.isSignedIntN(N: 12))
25416 return false;
25417
25418 // Break the MUL into a SLLI and an ADD/SUB.
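// For example, x * 65 becomes (x << 6) + x, and x * 63 becomes (x << 6) - x.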
25419 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
25420 (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
25421 return true;
25422
25423 // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
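// For example, x * 4098 can be lowered as SH1ADD(x, SLLI(x, 12)), i.e.
// (x << 1) + (x << 12).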
25424 if (Subtarget.hasShlAdd(ShAmt: 3) && !Imm.isSignedIntN(N: 12) &&
25425 ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
25426 (Imm - 8).isPowerOf2()))
25427 return true;
25428
25429 // Break the MUL into two SLLI instructions and an ADD/SUB if Imm would
25430 // otherwise need a LUI/ADDI pair.
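// For example, x * 3072 (3 << 10) can be lowered as (x << 11) + (x << 10).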
25431 if (!Imm.isSignedIntN(N: 12) && Imm.countr_zero() < 12 &&
25432 ConstNode->hasOneUse()) {
25433 APInt ImmS = Imm.ashr(ShiftAmt: Imm.countr_zero());
25434 if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
25435 (1 - ImmS).isPowerOf2())
25436 return true;
25437 }
25438
25439 return false;
25440}
25441
25442bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
25443 SDValue ConstNode) const {
25444 // Let the DAGCombiner decide for vectors.
25445 EVT VT = AddNode.getValueType();
25446 if (VT.isVector())
25447 return true;
25448
25449 // Let the DAGCombiner decide for larger types.
25450 if (VT.getScalarSizeInBits() > Subtarget.getXLen())
25451 return true;
25452
25453 // It is worse if c1 is simm12 while c1*c2 is not.
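// For example, with c1 = 5 and c2 = 1000, folding (mul (add x, 5), 1000) into
// (add (mul x, 1000), 5000) would require materializing 5000, which no longer
// fits in a simm12.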
25454 ConstantSDNode *C1Node = cast<ConstantSDNode>(Val: AddNode.getOperand(i: 1));
25455 ConstantSDNode *C2Node = cast<ConstantSDNode>(Val&: ConstNode);
25456 const APInt &C1 = C1Node->getAPIntValue();
25457 const APInt &C2 = C2Node->getAPIntValue();
25458 if (C1.isSignedIntN(N: 12) && !(C1 * C2).isSignedIntN(N: 12))
25459 return false;
25460
25461 // Default to true and let the DAGCombiner decide.
25462 return true;
25463}
25464
25465bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
25466 EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
25467 unsigned *Fast) const {
25468 if (!VT.isVector() || Subtarget.enablePExtSIMDCodeGen()) {
25469 if (Fast)
25470 *Fast = Subtarget.enableUnalignedScalarMem();
25471 return Subtarget.enableUnalignedScalarMem();
25472 }
25473
25474 // All vector implementations must support element alignment.
25475 EVT ElemVT = VT.getVectorElementType();
25476 if (Alignment >= ElemVT.getStoreSize()) {
25477 if (Fast)
25478 *Fast = 1;
25479 return true;
25480 }
25481
25482 // Note: We lower an unmasked unaligned vector access to an equally sized
25483 // e8 element type access. Given this, we effectively support all unmasked
25484 // misaligned accesses. TODO: Work through the codegen implications of
25485 // allowing such accesses to be formed, and considered fast.
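// For example, an unaligned v4i32 access can be lowered as a v16i8 access of
// the same 16 bytes.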
25486 if (Fast)
25487 *Fast = Subtarget.enableUnalignedVectorMem();
25488 return Subtarget.enableUnalignedVectorMem();
25489}
25490
25491EVT RISCVTargetLowering::getOptimalMemOpType(
25492 LLVMContext &Context, const MemOp &Op,
25493 const AttributeList &FuncAttributes) const {
25494 if (!Subtarget.hasVInstructions())
25495 return MVT::Other;
25496
25497 if (FuncAttributes.hasFnAttr(Kind: Attribute::NoImplicitFloat))
25498 return MVT::Other;
25499
25500 // We use LMUL1 memory operations here for a non-obvious reason. Our caller
25501 // has an expansion threshold, and we want the number of hardware memory
25502 // operations to correspond roughly to that threshold. LMUL>1 operations
25503 // are typically expanded linearly internally, and thus correspond to more
25504 // than one actual memory operation. Note that store merging and load
25505 // combining will typically form larger LMUL operations from the LMUL1
25506 // operations emitted here, and that's okay because combining isn't
25507 // introducing new memory operations; it's just merging existing ones.
25508 // NOTE: We limit to 1024 bytes to avoid creating an invalid MVT.
25509 const unsigned MinVLenInBytes =
25510 std::min(a: Subtarget.getRealMinVLen() / 8, b: 1024U);
25511
25512 if (Op.size() < MinVLenInBytes)
25513 // TODO: Figure out short memops. For the moment, do the default thing
25514 // which ends up using scalar sequences.
25515 return MVT::Other;
25516
25517 // If the minimum VLEN is not larger than RISCV::RVVBitsPerBlock, we don't
25518 // support fixed vectors.
25519 if (MinVLenInBytes <= RISCV::RVVBytesPerBlock)
25520 return MVT::Other;
25521
25522 // Prefer i8 for non-zero memset as it allows us to avoid materializing
25523 // a large scalar constant and instead use vmv.v.x/i to do the
25524 // broadcast. For everything else, prefer ELenVT to minimize VL and thus
25525 // maximize the chance we can encode the size in the vsetvli.
25526 MVT ELenVT = MVT::getIntegerVT(BitWidth: Subtarget.getELen());
25527 MVT PreferredVT = (Op.isMemset() && !Op.isZeroMemset()) ? MVT::i8 : ELenVT;
25528
25529 // Do we have sufficient alignment for our preferred VT? If not, revert
25530 // to the largest size allowed by our alignment criteria.
25531 if (PreferredVT != MVT::i8 && !Subtarget.enableUnalignedVectorMem()) {
25532 Align RequiredAlign(PreferredVT.getStoreSize());
25533 if (Op.isFixedDstAlign())
25534 RequiredAlign = std::min(a: RequiredAlign, b: Op.getDstAlign());
25535 if (Op.isMemcpy())
25536 RequiredAlign = std::min(a: RequiredAlign, b: Op.getSrcAlign());
25537 PreferredVT = MVT::getIntegerVT(BitWidth: RequiredAlign.value() * 8);
25538 }
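// For example, with ELEN=64 and a minimum VLEN of 128 bits, a sufficiently
// aligned memcpy of at least 16 bytes would use v2i64 here, while a non-zero
// memset of that size would use v16i8.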
25539 return MVT::getVectorVT(VT: PreferredVT, NumElements: MinVLenInBytes / PreferredVT.getStoreSize());
25540}
25541
25542bool RISCVTargetLowering::splitValueIntoRegisterParts(
25543 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
25544 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
25545 bool IsABIRegCopy = CC.has_value();
25546 EVT ValueVT = Val.getValueType();
25547
25548 MVT PairVT = Subtarget.is64Bit() ? MVT::i128 : MVT::i64;
25549 if ((ValueVT == PairVT ||
25550 (!Subtarget.is64Bit() && Subtarget.hasStdExtZdinx() &&
25551 ValueVT == MVT::f64)) &&
25552 NumParts == 1 && PartVT == MVT::Untyped) {
25553 // Pairs in inline assembly, f64 in inline assembly on rv32_zdinx
25554 MVT XLenVT = Subtarget.getXLenVT();
25555 if (ValueVT == MVT::f64)
25556 Val = DAG.getBitcast(VT: MVT::i64, V: Val);
25557 auto [Lo, Hi] = DAG.SplitScalar(N: Val, DL, LoVT: XLenVT, HiVT: XLenVT);
25558 // Always creating an MVT::Untyped part, so always use
25559 // RISCVISD::BuildGPRPair.
25560 Parts[0] = DAG.getNode(Opcode: RISCVISD::BuildGPRPair, DL, VT: PartVT, N1: Lo, N2: Hi);
25561 return true;
25562 }
25563
25564 if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
25565 PartVT == MVT::f32) {
25566 // Cast the [b]f16 to i16, any-extend to i32, pad the upper bits with ones
25567 // to form a NaN box, and bitcast the result to f32.
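// For example, the f16 value 1.0 (bit pattern 0x3C00) becomes the NaN-boxed
// f32 bit pattern 0xFFFF3C00.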
25568 Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::i16, Operand: Val);
25569 Val = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::i32, Operand: Val);
25570 Val = DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: Val,
25571 N2: DAG.getConstant(Val: 0xFFFF0000, DL, VT: MVT::i32));
25572 Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: PartVT, Operand: Val);
25573 Parts[0] = Val;
25574 return true;
25575 }
25576
25577 if (ValueVT.isRISCVVectorTuple() && PartVT.isRISCVVectorTuple()) {
25578#ifndef NDEBUG
25579 unsigned ValNF = ValueVT.getRISCVVectorTupleNumFields();
25580 [[maybe_unused]] unsigned ValLMUL =
25581 divideCeil(ValueVT.getSizeInBits().getKnownMinValue(),
25582 ValNF * RISCV::RVVBitsPerBlock);
25583 unsigned PartNF = PartVT.getRISCVVectorTupleNumFields();
25584 [[maybe_unused]] unsigned PartLMUL =
25585 divideCeil(PartVT.getSizeInBits().getKnownMinValue(),
25586 PartNF * RISCV::RVVBitsPerBlock);
25587 assert(ValNF == PartNF && ValLMUL == PartLMUL &&
25588 "RISC-V vector tuple type only accepts same register class type "
25589 "TUPLE_INSERT");
25590#endif
25591
25592 Val = DAG.getNode(Opcode: RISCVISD::TUPLE_INSERT, DL, VT: PartVT, N1: DAG.getUNDEF(VT: PartVT),
25593 N2: Val, N3: DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32));
25594 Parts[0] = Val;
25595 return true;
25596 }
25597
25598 if (ValueVT.isFixedLengthVector() && PartVT.isScalableVector()) {
25599 ValueVT = getContainerForFixedLengthVector(VT: ValueVT.getSimpleVT());
25600 Val = convertToScalableVector(VT: ValueVT, V: Val, DAG, Subtarget);
25601
25602 LLVMContext &Context = *DAG.getContext();
25603 EVT ValueEltVT = ValueVT.getVectorElementType();
25604 EVT PartEltVT = PartVT.getVectorElementType();
25605 unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinValue();
25606 unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinValue();
25607 if (PartVTBitSize % ValueVTBitSize == 0) {
25608 assert(PartVTBitSize >= ValueVTBitSize);
25609 // If the element types differ, first widen to a vector that has ValueVT's
25610 // element type but PartVT's size, then bitcast to PartVT.
25611 // For example, to copy a <vscale x 1 x i8> value into <vscale x 4 x i16>,
25612 // we widen <vscale x 1 x i8> to <vscale x 8 x i8> with an
25613 // insert_subvector, and then we can bitcast the result to
25614 // <vscale x 4 x i16>.
25615 if (ValueEltVT != PartEltVT) {
25616 if (PartVTBitSize > ValueVTBitSize) {
25617 unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
25618 assert(Count != 0 && "The number of elements should not be zero.");
25619 EVT SameEltTypeVT =
25620 EVT::getVectorVT(Context, VT: ValueEltVT, NumElements: Count, /*IsScalable=*/true);
25621 Val = DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT: SameEltTypeVT), SubVec: Val, Idx: 0);
25622 }
25623 Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: PartVT, Operand: Val);
25624 } else {
25625 Val = DAG.getInsertSubvector(DL, Vec: DAG.getUNDEF(VT: PartVT), SubVec: Val, Idx: 0);
25626 }
25627 Parts[0] = Val;
25628 return true;
25629 }
25630 }
25631
25632 return false;
25633}
25634
25635SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
25636 SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
25637 MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
25638 bool IsABIRegCopy = CC.has_value();
25639
25640 MVT PairVT = Subtarget.is64Bit() ? MVT::i128 : MVT::i64;
25641 if ((ValueVT == PairVT ||
25642 (!Subtarget.is64Bit() && Subtarget.hasStdExtZdinx() &&
25643 ValueVT == MVT::f64)) &&
25644 NumParts == 1 && PartVT == MVT::Untyped) {
25645 // Pairs in inline assembly, f64 in inline assembly on rv32_zdinx
25646 MVT XLenVT = Subtarget.getXLenVT();
25647
25648 SDValue Val = Parts[0];
25649 // Always starting with an MVT::Untyped part, so always use
25650 // RISCVISD::SplitGPRPair
25651 Val = DAG.getNode(Opcode: RISCVISD::SplitGPRPair, DL, VTList: DAG.getVTList(VT1: XLenVT, VT2: XLenVT),
25652 N: Val);
25653 Val = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: PairVT, N1: Val.getValue(R: 0),
25654 N2: Val.getValue(R: 1));
25655 if (ValueVT == MVT::f64)
25656 Val = DAG.getBitcast(VT: ValueVT, V: Val);
25657 return Val;
25658 }
25659
25660 if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
25661 PartVT == MVT::f32) {
25662 SDValue Val = Parts[0];
25663
25664 // Cast the f32 to i32, truncate to i16, and cast back to [b]f16.
25665 Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::i32, Operand: Val);
25666 Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::i16, Operand: Val);
25667 Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: ValueVT, Operand: Val);
25668 return Val;
25669 }
25670
25671 if (ValueVT.isFixedLengthVector() && PartVT.isScalableVector()) {
25672 LLVMContext &Context = *DAG.getContext();
25673 SDValue Val = Parts[0];
25674 EVT ValueEltVT = ValueVT.getVectorElementType();
25675 EVT PartEltVT = PartVT.getVectorElementType();
25676
25677 unsigned ValueVTBitSize =
25678 getContainerForFixedLengthVector(VT: ValueVT.getSimpleVT())
25679 .getSizeInBits()
25680 .getKnownMinValue();
25681
25682 unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinValue();
25683 if (PartVTBitSize % ValueVTBitSize == 0) {
25684 assert(PartVTBitSize >= ValueVTBitSize);
25685 EVT SameEltTypeVT = ValueVT;
25686 // If the element types differ, first bitcast the part to a vector with
25687 // ValueVT's element type.
25688 // For example, to copy a <vscale x 1 x i8> value out of
25689 // <vscale x 4 x i16>, we first bitcast <vscale x 4 x i16> to
25690 // <vscale x 8 x i8>, and then we can extract the desired
25691 // <vscale x 1 x i8> subvector.
25692 if (ValueEltVT != PartEltVT) {
25693 unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
25694 assert(Count != 0 && "The number of elements should not be zero.");
25695 SameEltTypeVT =
25696 EVT::getVectorVT(Context, VT: ValueEltVT, NumElements: Count, /*IsScalable=*/true);
25697 Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: SameEltTypeVT, Operand: Val);
25698 }
25699 if (ValueVT.isFixedLengthVector())
25700 Val = convertFromScalableVector(VT: ValueVT, V: Val, DAG, Subtarget);
25701 else
25702 Val = DAG.getExtractSubvector(DL, VT: ValueVT, Vec: Val, Idx: 0);
25703 return Val;
25704 }
25705 }
25706 return SDValue();
25707}
25708
25709bool RISCVTargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
25710 // When aggressively optimizing for code size, we prefer to use a div
25711 // instruction, as it is usually smaller than the alternative sequence.
25712 // TODO: Add vector division?
25713 bool OptSize = Attr.hasFnAttr(Kind: Attribute::MinSize);
25714 return OptSize && !VT.isVector() &&
25715 VT.getSizeInBits() <= getMaxDivRemBitWidthSupported();
25716}
25717
25718bool RISCVTargetLowering::preferScalarizeSplat(SDNode *N) const {
25719 // Scalarizing zero_extend and sign_extend splats might prevent matching
25720 // widening instructions in some situations.
25721 unsigned Opc = N->getOpcode();
25722 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND)
25723 return false;
25724 return true;
25725}
25726
25727static Value *useTpOffset(IRBuilderBase &IRB, unsigned Offset) {
25728 Module *M = IRB.GetInsertBlock()->getModule();
25729 Function *ThreadPointerFunc = Intrinsic::getOrInsertDeclaration(
25730 M, id: Intrinsic::thread_pointer, Tys: IRB.getPtrTy());
25731 return IRB.CreateConstGEP1_32(Ty: IRB.getInt8Ty(),
25732 Ptr: IRB.CreateCall(Callee: ThreadPointerFunc), Idx0: Offset);
25733}
25734
25735Value *RISCVTargetLowering::getIRStackGuard(
25736 IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const {
25737 // Fuchsia provides a fixed TLS slot for the stack cookie.
25738 // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
25739 if (Subtarget.isTargetFuchsia())
25740 return useTpOffset(IRB, Offset: -0x10);
25741
25742 // Android provides a fixed TLS slot for the stack cookie. See the definition
25743 // of TLS_SLOT_STACK_GUARD in
25744 // https://android.googlesource.com/platform/bionic/+/main/libc/platform/bionic/tls_defines.h
25745 if (Subtarget.isTargetAndroid())
25746 return useTpOffset(IRB, Offset: -0x18);
25747
25748 Module *M = IRB.GetInsertBlock()->getModule();
25749
25750 if (M->getStackProtectorGuard() == "tls") {
25751 // Users must specify the offset explicitly
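// (e.g. set via a front-end option such as clang's
// -mstack-protector-guard-offset=).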
25752 int Offset = M->getStackProtectorGuardOffset();
25753 return useTpOffset(IRB, Offset);
25754 }
25755
25756 return TargetLowering::getIRStackGuard(IRB, Libcalls);
25757}
25758
25759bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
25760 Align Alignment) const {
25761 if (!Subtarget.hasVInstructions())
25762 return false;
25763
25764 // Only support fixed vectors if we know the minimum vector size.
25765 if (DataType.isFixedLengthVector() && !Subtarget.useRVVForFixedLengthVectors())
25766 return false;
25767
25768 EVT ScalarType = DataType.getScalarType();
25769 if (!isLegalElementTypeForRVV(ScalarTy: ScalarType))
25770 return false;
25771
25772 if (!Subtarget.enableUnalignedVectorMem() &&
25773 Alignment < ScalarType.getStoreSize())
25774 return false;
25775
25776 return true;
25777}
25778
25779bool RISCVTargetLowering::isLegalFirstFaultLoad(EVT DataType,
25780 Align Alignment) const {
25781 if (!Subtarget.hasVInstructions())
25782 return false;
25783
25784 EVT ScalarType = DataType.getScalarType();
25785 if (!isLegalElementTypeForRVV(ScalarTy: ScalarType))
25786 return false;
25787
25788 if (!Subtarget.enableUnalignedVectorMem() &&
25789 Alignment < ScalarType.getStoreSize())
25790 return false;
25791
25792 return true;
25793}
25794
25795MachineInstr *
25796RISCVTargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
25797 MachineBasicBlock::instr_iterator &MBBI,
25798 const TargetInstrInfo *TII) const {
25799 assert(MBBI->isCall() && MBBI->getCFIType() &&
25800 "Invalid call instruction for a KCFI check");
25801 assert(is_contained({RISCV::PseudoCALLIndirect, RISCV::PseudoTAILIndirect},
25802 MBBI->getOpcode()));
25803
25804 MachineOperand &Target = MBBI->getOperand(i: 0);
25805 Target.setIsRenamable(false);
25806
25807 return BuildMI(BB&: MBB, I: MBBI, MIMD: MBBI->getDebugLoc(), MCID: TII->get(Opcode: RISCV::KCFI_CHECK))
25808 .addReg(RegNo: Target.getReg())
25809 .addImm(Val: MBBI->getCFIType())
25810 .getInstr();
25811}
25812
25813#define GET_REGISTER_MATCHER
25814#include "RISCVGenAsmMatcher.inc"
25815
25816Register
25817RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
25818 const MachineFunction &MF) const {
25819 Register Reg = MatchRegisterAltName(Name: RegName);
25820 if (!Reg)
25821 Reg = MatchRegisterName(Name: RegName);
25822 if (!Reg)
25823 return Reg;
25824
25825 BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
25826 if (!ReservedRegs.test(Idx: Reg) && !Subtarget.isRegisterReservedByUser(i: Reg))
25827 reportFatalUsageError(reason: Twine("Trying to obtain non-reserved register \"" +
25828 StringRef(RegName) + "\"."));
25829 return Reg;
25830}
25831
25832MachineMemOperand::Flags
25833RISCVTargetLowering::getTargetMMOFlags(const Instruction &I) const {
25834 const MDNode *NontemporalInfo = I.getMetadata(KindID: LLVMContext::MD_nontemporal);
25835
25836 if (NontemporalInfo == nullptr)
25837 return MachineMemOperand::MONone;
25838
25839 // 1 (the default domain value) behaves the same as __RISCV_NTLH_ALL
25840 // 2 -> __RISCV_NTLH_INNERMOST_PRIVATE
25841 // 3 -> __RISCV_NTLH_ALL_PRIVATE
25842 // 4 -> __RISCV_NTLH_INNERMOST_SHARED
25843 // 5 -> __RISCV_NTLH_ALL
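// For example, a riscv-nontemporal-domain constant of 3
// (__RISCV_NTLH_ALL_PRIVATE) maps to level 3 - 2 = 1 below and sets only
// MONontemporalBit0.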
25844 int NontemporalLevel = 5;
25845 const MDNode *RISCVNontemporalInfo =
25846 I.getMetadata(Kind: "riscv-nontemporal-domain");
25847 if (RISCVNontemporalInfo != nullptr)
25848 NontemporalLevel =
25849 cast<ConstantInt>(
25850 Val: cast<ConstantAsMetadata>(Val: RISCVNontemporalInfo->getOperand(I: 0))
25851 ->getValue())
25852 ->getZExtValue();
25853
25854 assert((1 <= NontemporalLevel && NontemporalLevel <= 5) &&
25855 "RISC-V target doesn't support this non-temporal domain.");
25856
25857 NontemporalLevel -= 2;
25858 MachineMemOperand::Flags Flags = MachineMemOperand::MONone;
25859 if (NontemporalLevel & 0b1)
25860 Flags |= MONontemporalBit0;
25861 if (NontemporalLevel & 0b10)
25862 Flags |= MONontemporalBit1;
25863
25864 return Flags;
25865}
25866
25867MachineMemOperand::Flags
25868RISCVTargetLowering::getTargetMMOFlags(const MemSDNode &Node) const {
25869
25870 MachineMemOperand::Flags NodeFlags = Node.getMemOperand()->getFlags();
25871 MachineMemOperand::Flags TargetFlags = MachineMemOperand::MONone;
25872 TargetFlags |= (NodeFlags & MONontemporalBit0);
25873 TargetFlags |= (NodeFlags & MONontemporalBit1);
25874 return TargetFlags;
25875}
25876
25877bool RISCVTargetLowering::areTwoSDNodeTargetMMOFlagsMergeable(
25878 const MemSDNode &NodeX, const MemSDNode &NodeY) const {
25879 return getTargetMMOFlags(Node: NodeX) == getTargetMMOFlags(Node: NodeY);
25880}
25881
25882bool RISCVTargetLowering::isCtpopFast(EVT VT) const {
25883 if (VT.isVector()) {
25884 EVT SVT = VT.getVectorElementType();
25885 // If the element type is legal we can use cpop.v if it is enabled.
25886 if (isLegalElementTypeForRVV(ScalarTy: SVT))
25887 return Subtarget.hasStdExtZvbb();
25888 // Don't consider it fast if the type needs to be legalized or scalarized.
25889 return false;
25890 }
25891
25892 return Subtarget.hasCPOPLike() && (VT == MVT::i32 || VT == MVT::i64);
25893}
25894
25895unsigned RISCVTargetLowering::getCustomCtpopCost(EVT VT,
25896 ISD::CondCode Cond) const {
25897 return isCtpopFast(VT) ? 0 : 1;
25898}
25899
25900bool RISCVTargetLowering::shouldInsertFencesForAtomic(
25901 const Instruction *I) const {
25902 if (Subtarget.hasStdExtZalasr()) {
25903 if (Subtarget.hasStdExtZtso()) {
25904 // Zalasr + TSO means that atomic_load_acquire and atomic_store_release
25905 // should be lowered to plain load/store. The easiest way to do this is
25906 // to say we should insert fences for them, and the fence insertion code
25907 // will then simply not insert any fences.
25908 auto *LI = dyn_cast<LoadInst>(Val: I);
25909 auto *SI = dyn_cast<StoreInst>(Val: I);
25910 if ((LI &&
25911 (LI->getOrdering() == AtomicOrdering::SequentiallyConsistent)) ||
25912 (SI &&
25913 (SI->getOrdering() == AtomicOrdering::SequentiallyConsistent))) {
25914 // This is a seq_cst load or store, which needs a .aq or .rl, so we
25915 // shouldn't try to insert fences for it.
25916 return false;
25917 }
25918 // Otherwise, this is a TSO instruction that isn't a seq_cst load/store.
25919 return isa<LoadInst>(Val: I) || isa<StoreInst>(Val: I);
25920 }
25921 return false;
25922 }
25923 // Note that one specific case requires fence insertion for an
25924 // AtomicCmpXchgInst but is handled via the RISCVZacasABIFix pass rather
25925 // than this hook due to limitations in the interface here.
25926 return isa<LoadInst>(Val: I) || isa<StoreInst>(Val: I);
25927}
25928
25929bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
25930
25931 // GISel support is in progress or complete for these opcodes.
25932 unsigned Op = Inst.getOpcode();
25933 if (Op == Instruction::Add || Op == Instruction::Sub ||
25934 Op == Instruction::And || Op == Instruction::Or ||
25935 Op == Instruction::Xor || Op == Instruction::InsertElement ||
25936 Op == Instruction::ShuffleVector || Op == Instruction::Load ||
25937 Op == Instruction::Freeze || Op == Instruction::Store)
25938 return false;
25939
25940 if (auto *II = dyn_cast<IntrinsicInst>(Val: &Inst)) {
25941 // Mark RVV intrinsic as supported.
25942 if (RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID: II->getIntrinsicID())) {
25943 // GISel doesn't support tuple types yet. It also doesn't support returning
25944 // a struct containing a scalable vector like vleff.
25945 if (Inst.getType()->isRISCVVectorTupleTy() ||
25946 Inst.getType()->isStructTy())
25947 return true;
25948
25949 for (unsigned i = 0; i < II->arg_size(); ++i)
25950 if (II->getArgOperand(i)->getType()->isRISCVVectorTupleTy())
25951 return true;
25952
25953 return false;
25954 }
25955 if (II->getIntrinsicID() == Intrinsic::vector_extract)
25956 return false;
25957 }
25958
25959 if (Inst.getType()->isScalableTy())
25960 return true;
25961
25962 for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
25963 if (Inst.getOperand(i)->getType()->isScalableTy() &&
25964 !isa<ReturnInst>(Val: &Inst))
25965 return true;
25966
25967 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val: &Inst)) {
25968 if (AI->getAllocatedType()->isScalableTy())
25969 return true;
25970 }
25971
25972 return false;
25973}
25974
25975SDValue
25976RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
25977 SelectionDAG &DAG,
25978 SmallVectorImpl<SDNode *> &Created) const {
25979 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
25980 if (isIntDivCheap(VT: N->getValueType(ResNo: 0), Attr))
25981 return SDValue(N, 0); // Lower SDIV as SDIV
25982
25983 // Only perform this transform if short forward branch opt is supported.
25984 if (!Subtarget.hasShortForwardBranchIALU())
25985 return SDValue();
25986 EVT VT = N->getValueType(ResNo: 0);
25987 if (!(VT == MVT::i32 || (VT == MVT::i64 && Subtarget.is64Bit())))
25988 return SDValue();
25989
25990 // Ensure 2**k-1 < 2048 so that we can just emit a single addi/addiw.
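// For example, a divide by 8 only needs a conditional add of 7 (2**3 - 1)
// before the arithmetic shift, which fits in a single addi immediate.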
25991 if (Divisor.sgt(RHS: 2048) || Divisor.slt(RHS: -2048))
25992 return SDValue();
25993 return TargetLowering::buildSDIVPow2WithCMov(N, Divisor, DAG, Created);
25994}
25995
25996bool RISCVTargetLowering::shouldFoldSelectWithSingleBitTest(
25997 EVT VT, const APInt &AndMask) const {
25998 if (Subtarget.hasCZEROLike() || Subtarget.hasVendorXTHeadCondMov())
25999 return !Subtarget.hasBEXTILike() && AndMask.ugt(RHS: 1024);
26000 return TargetLowering::shouldFoldSelectWithSingleBitTest(VT, AndMask);
26001}
26002
26003unsigned RISCVTargetLowering::getMinimumJumpTableEntries() const {
26004 return Subtarget.getMinimumJumpTableEntries();
26005}
26006
26007SDValue RISCVTargetLowering::expandIndirectJTBranch(const SDLoc &dl,
26008 SDValue Value, SDValue Addr,
26009 int JTI,
26010 SelectionDAG &DAG) const {
26011 if (Subtarget.hasStdExtZicfilp()) {
26012 // When Zicfilp is enabled, we need to use a software-guarded branch for the
26013 // jump table branch.
26014 SDValue Chain = Value;
26015 // Jump table debug info is only needed if CodeView is enabled.
26016 if (DAG.getTarget().getTargetTriple().isOSBinFormatCOFF())
26017 Chain = DAG.getJumpTableDebugInfo(JTI, Chain, DL: dl);
26018 return DAG.getNode(Opcode: RISCVISD::SW_GUARDED_BRIND, DL: dl, VT: MVT::Other, N1: Chain, N2: Addr);
26019 }
26020 return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, JTI, DAG);
26021}
26022
26023 // If an output pattern produces multiple instructions, tablegen may pick an
26024 // arbitrary type from an instruction's destination register class to use for the
26025// VT of that MachineSDNode. This VT may be used to look up the representative
26026// register class. If the type isn't legal, the default implementation will
26027// not find a register class.
26028//
26029// Some integer types smaller than XLen are listed in the GPR register class to
26030// support isel patterns for GISel, but are not legal in SelectionDAG. The
26031// arbitrary type tablegen picks may be one of these smaller types.
26032//
26033// f16 and bf16 are both valid for the FPR16 or GPRF16 register class. It's
26034// possible for tablegen to pick bf16 as the arbitrary type for an f16 pattern.
26035std::pair<const TargetRegisterClass *, uint8_t>
26036RISCVTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
26037 MVT VT) const {
26038 switch (VT.SimpleTy) {
26039 default:
26040 break;
26041 case MVT::i8:
26042 case MVT::i16:
26043 case MVT::i32:
26044 return TargetLowering::findRepresentativeClass(TRI, VT: Subtarget.getXLenVT());
26045 case MVT::bf16:
26046 case MVT::f16:
26047 return TargetLowering::findRepresentativeClass(TRI, VT: MVT::f32);
26048 }
26049
26050 return TargetLowering::findRepresentativeClass(TRI, VT);
26051}
26052
26053namespace llvm::RISCVVIntrinsicsTable {
26054
26055#define GET_RISCVVIntrinsicsTable_IMPL
26056#include "RISCVGenSearchableTables.inc"
26057
26058} // namespace llvm::RISCVVIntrinsicsTable
26059
26060bool RISCVTargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
26061
26062 // If the function specifically requests inline stack probes, emit them.
26063 if (MF.getFunction().hasFnAttribute(Kind: "probe-stack"))
26064 return MF.getFunction().getFnAttribute(Kind: "probe-stack").getValueAsString() ==
26065 "inline-asm";
26066
26067 return false;
26068}
26069
26070unsigned RISCVTargetLowering::getStackProbeSize(const MachineFunction &MF,
26071 Align StackAlign) const {
26072 // The default stack probe size is 4096 if the function has no
26073 // stack-probe-size attribute.
26074 const Function &Fn = MF.getFunction();
26075 unsigned StackProbeSize =
26076 Fn.getFnAttributeAsParsedInteger(Kind: "stack-probe-size", Default: 4096);
26077 // Round down to the stack alignment.
26078 StackProbeSize = alignDown(Value: StackProbeSize, Align: StackAlign.value());
26079 return StackProbeSize ? StackProbeSize : StackAlign.value();
26080}
26081
26082SDValue RISCVTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,
26083 SelectionDAG &DAG) const {
26084 MachineFunction &MF = DAG.getMachineFunction();
26085 if (!hasInlineStackProbe(MF))
26086 return SDValue();
26087
26088 MVT XLenVT = Subtarget.getXLenVT();
26089 // Get the inputs.
26090 SDValue Chain = Op.getOperand(i: 0);
26091 SDValue Size = Op.getOperand(i: 1);
26092
26093 MaybeAlign Align =
26094 cast<ConstantSDNode>(Val: Op.getOperand(i: 2))->getMaybeAlignValue();
26095 SDLoc dl(Op);
26096 EVT VT = Op.getValueType();
26097
26098 // Construct the new SP value in a GPR.
26099 SDValue SP = DAG.getCopyFromReg(Chain, dl, Reg: RISCV::X2, VT: XLenVT);
26100 Chain = SP.getValue(R: 1);
26101 SP = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: XLenVT, N1: SP, N2: Size);
26102 if (Align)
26103 SP = DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: SP.getValue(R: 0),
26104 N2: DAG.getSignedConstant(Val: -Align->value(), DL: dl, VT));
26105
26106 // Set the real SP to the new value with a probing loop.
26107 Chain = DAG.getNode(Opcode: RISCVISD::PROBED_ALLOCA, DL: dl, VT: MVT::Other, N1: Chain, N2: SP);
26108 return DAG.getMergeValues(Ops: {SP, Chain}, dl);
26109}
26110
26111MachineBasicBlock *
26112RISCVTargetLowering::emitDynamicProbedAlloc(MachineInstr &MI,
26113 MachineBasicBlock *MBB) const {
26114 MachineFunction &MF = *MBB->getParent();
26115 MachineBasicBlock::iterator MBBI = MI.getIterator();
26116 DebugLoc DL = MBB->findDebugLoc(MBBI);
26117 Register TargetReg = MI.getOperand(i: 0).getReg();
26118
26119 const RISCVInstrInfo *TII = Subtarget.getInstrInfo();
26120 bool IsRV64 = Subtarget.is64Bit();
26121 Align StackAlign = Subtarget.getFrameLowering()->getStackAlign();
26122 const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
26123 uint64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign);
26124
26125 MachineFunction::iterator MBBInsertPoint = std::next(x: MBB->getIterator());
26126 MachineBasicBlock *LoopTestMBB =
26127 MF.CreateMachineBasicBlock(BB: MBB->getBasicBlock());
26128 MF.insert(MBBI: MBBInsertPoint, MBB: LoopTestMBB);
26129 MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(BB: MBB->getBasicBlock());
26130 MF.insert(MBBI: MBBInsertPoint, MBB: ExitMBB);
26131 Register SPReg = RISCV::X2;
26132 Register ScratchReg =
26133 MF.getRegInfo().createVirtualRegister(RegClass: &RISCV::GPRRegClass);
26134
26135 // ScratchReg = ProbeSize
26136 TII->movImm(MBB&: *MBB, MBBI, DL, DstReg: ScratchReg, Val: ProbeSize, Flag: MachineInstr::NoFlags);
26137
26138 // LoopTest:
26139 // SUB SP, SP, ProbeSize
26140 BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::SUB), DestReg: SPReg)
26141 .addReg(RegNo: SPReg)
26142 .addReg(RegNo: ScratchReg);
26143
26144 // s[d|w] zero, 0(sp)
26145 BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL,
26146 MCID: TII->get(Opcode: IsRV64 ? RISCV::SD : RISCV::SW))
26147 .addReg(RegNo: RISCV::X0)
26148 .addReg(RegNo: SPReg)
26149 .addImm(Val: 0);
26150
26151 // BLT TargetReg, SP, LoopTest
26152 BuildMI(BB&: *LoopTestMBB, I: LoopTestMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::BLT))
26153 .addReg(RegNo: TargetReg)
26154 .addReg(RegNo: SPReg)
26155 .addMBB(MBB: LoopTestMBB);
26156
26157 // Adjust with: MV SP, TargetReg.
26158 BuildMI(BB&: *ExitMBB, I: ExitMBB->end(), MIMD: DL, MCID: TII->get(Opcode: RISCV::ADDI), DestReg: SPReg)
26159 .addReg(RegNo: TargetReg)
26160 .addImm(Val: 0);
26161
26162 ExitMBB->splice(Where: ExitMBB->end(), Other: MBB, From: std::next(x: MBBI), To: MBB->end());
26163 ExitMBB->transferSuccessorsAndUpdatePHIs(FromMBB: MBB);
26164
26165 LoopTestMBB->addSuccessor(Succ: ExitMBB);
26166 LoopTestMBB->addSuccessor(Succ: LoopTestMBB);
26167 MBB->addSuccessor(Succ: LoopTestMBB);
26168
26169 MI.eraseFromParent();
26170 MF.getInfo<RISCVMachineFunctionInfo>()->setDynamicAllocation();
26171 return ExitMBB->begin()->getParent();
26172}
26173
26174ArrayRef<MCPhysReg> RISCVTargetLowering::getRoundingControlRegisters() const {
26175 if (Subtarget.hasStdExtFOrZfinx()) {
26176 static const MCPhysReg RCRegs[] = {RISCV::FRM, RISCV::FFLAGS};
26177 return RCRegs;
26178 }
26179 return {};
26180}
26181
26182bool RISCVTargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
26183 EVT VT = Y.getValueType();
26184
26185 if (VT.isVector())
26186 return false;
26187
26188 return VT.getSizeInBits() <= Subtarget.getXLen();
26189}
26190
26191bool RISCVTargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
26192 SDValue N1) const {
26193 if (!N0.hasOneUse())
26194 return false;
26195
26196 // Avoid reassociating expressions that can be lowered to vector
26197 // multiply accumulate (i.e. add (mul x, y), z)
26198 if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::MUL &&
26199 (N0.getValueType().isVector() && Subtarget.hasVInstructions()))
26200 return false;
26201
26202 return true;
26203}
26204