//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
                cl::desc("Enable / disable ARM interworking (for debugging only)"),
                cl::init(true));

static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
    cl::desc("Maximum interleave factor for MVE VLDn to generate."),
    cl::init(2));

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

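// Re-canonicalize a sub-i32 integer argument value for CMSE handling:
// truncate the incoming i32 to the argument's declared type, then sign- or
// zero-extend it back to i32 according to the argument's extension flags.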
static SDValue handleCMSEValue(const SDValue &Value, const ISD::InputArg &Arg,
                               SelectionDAG &DAG, const SDLoc &DL) {
  assert(Arg.ArgVT.isScalarInteger());
  assert(Arg.ArgVT.bitsLT(MVT::i32));
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, Arg.ArgVT, Value);
  SDValue Ext =
      DAG.getNode(Arg.Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
                  MVT::i32, Trunc);
  return Ext;
}

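// Register the operation actions shared by every NEON vector type: promote
// loads/stores to PromotedLdStVT where needed, custom-lower the lane,
// shuffle and shift operations, and expand the operations (division,
// remainder, select, ...) that NEON has no native support for.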
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
  setOperationAction(ISD::SDIVREM, VT, Expand);
  setOperationAction(ISD::UDIVREM, VT, Expand);

  if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::ABDS, ISD::ABDU, ISD::SMIN, ISD::SMAX,
                        ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
  if (!VT.isFloatingPoint())
    for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
      setOperationAction(Opcode, VT, Legal);
}

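// 64-bit NEON vector types live in D registers; their loads and stores are
// promoted to f64.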
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64);
}

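// 128-bit NEON vector types live in Q registers (pairs of D registers); their
// loads and stores are promoted to v2f64.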
void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64);
}

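// Mark every operation on VT as Expand, then re-legalize the handful of
// operations that are always trivially supported.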
void ARMTargetLowering::setAllExpand(MVT VT) {
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, VT, Expand);

  // We support these really simple operations even on types where all
  // the actual arithmetic has to be broken down into simpler
  // operations or turned into library calls.
  setOperationAction(ISD::BITCAST, VT, Legal);
  setOperationAction(ISD::LOAD, VT, Legal);
  setOperationAction(ISD::STORE, VT, Legal);
  setOperationAction(ISD::UNDEF, VT, Legal);
}

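// Apply Action to all extending load kinds (any-, zero- and sign-extending)
// with result type From and memory type To.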
void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
  setLoadExtAction(ISD::EXTLOAD, From, To, Action);
  setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
  setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
}

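// Register the MVE vector types and their legalize actions. HasMVEFP selects
// whether the floating-point vector operations are available natively or must
// be expanded.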
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
    setOperationAction(ISD::ABS, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::CTLZ, VT, Legal);
    setOperationAction(ISD::CTTZ, VT, Custom);
    setOperationAction(ISD::BITREVERSE, VT, Legal);
    setOperationAction(ISD::BSWAP, VT, Legal);
    setOperationAction(ISD::SADDSAT, VT, Legal);
    setOperationAction(ISD::UADDSAT, VT, Legal);
    setOperationAction(ISD::SSUBSAT, VT, Legal);
    setOperationAction(ISD::USUBSAT, VT, Legal);
    setOperationAction(ISD::ABDS, VT, Legal);
    setOperationAction(ISD::ABDU, VT, Legal);
    setOperationAction(ISD::AVGFLOORS, VT, Legal);
    setOperationAction(ISD::AVGFLOORU, VT, Legal);
    setOperationAction(ISD::AVGCEILS, VT, Legal);
    setOperationAction(ISD::AVGCEILU, VT, Legal);

    // No native support for these.
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    // Vector reductions
    setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Custom);
    setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
    setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    } else {
      setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
    }

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    if (!HasMVEFP)
      setAllExpand(VT);

    // These are legal or custom whether we have MVE.fp or not
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(),
                       Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }

    if (HasMVEFP) {
      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);
      setOperationAction(ISD::FROUND, VT, Legal);
      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

      // No native support for these.
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FTAN, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FEXP10, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
    }
  }

  // Custom Expand smaller than legal vector reductions to prevent false zero
  // items being added.
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v2f16, Custom);

  // We 'support' these types up to bitcast/load/store level, regardless of
  // MVE integer-only / float support. Only doing FP data processing on the FP
  // vector types is inhibited at integer-only level.
  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setAllExpand(VT);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VSELECT, VT, Legal);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  }
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

  // We can do bitwise operations on v2i64 vectors
  setOperationAction(ISD::AND, MVT::v2i64, Legal);
  setOperationAction(ISD::OR, MVT::v2i64, Legal);
  setOperationAction(ISD::XOR, MVT::v2i64, Legal);

  // It is legal to extload from v4i8 to v4i16 or v4i32.
  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal);

  // Some truncating stores are legal too.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);

  // Pre and Post inc on these are legal, given the correct extends
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  // Predicate types
  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1, MVT::v2i1};
  for (auto VT : pTypes) {
    addRegisterClass(VT, &ARM::VCCRRegClass);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
    setOperationAction(ISD::TRUNCATE, VT, Custom);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v2i1, Expand);
  setOperationAction(ISD::AND, MVT::v2i1, Expand);
  setOperationAction(ISD::OR, MVT::v2i1, Expand);
  setOperationAction(ISD::XOR, MVT::v2i1, Expand);
  setOperationAction(ISD::SINT_TO_FP, MVT::v2i1, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::v2i1, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Expand);

  setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
  setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
  setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
  setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
  setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
  setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
  setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
}

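// Set up the register classes, legalize actions, libcall configuration and
// DAG combines for the ARM backend, based on the subtarget features and the
// target ABI.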
ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
      !Subtarget->isTargetWatchOS() && !Subtarget->isTargetDriverKit()) {
    bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
    for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
                            IsHFTarget ? CallingConv::ARM_AAPCS_VFP
                                       : CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
        { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },

        // Double-precision floating-point arithmetic.
        { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },

        // Single-precision comparisons.
        { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE },
        { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE },
        { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE },
        { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE },
        { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE },
        { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE },
        { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },

        // Double-precision comparisons.
        { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE },
        { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE },
        { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE },
        { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE },
        { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE },
        { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE },
        { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },

        // Floating-point to integer conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },

        // Conversions between floating types.
        { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID },
        { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID },

        // Integer to floating-point conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        // FIXME: There appears to be some naming inconsistency in ARM libgcc:
        // e.g., __floatunsidf vs. __floatunssidfvfp.
        { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
        { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  // RTLIB
  if (Subtarget->isAAPCS_ABI() &&
      (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
       Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }

    // EABI dependent RTLIB
    if (TM.Options.EABIVersion == EABI::EABI4 ||
        TM.Options.EABIVersion == EABI::EABI5) {
      static const struct {
        const RTLIB::Libcall Op;
        const char *const Name;
        const CallingConv::ID CC;
        const ISD::CondCode Cond;
      } MemOpsLibraryCalls[] = {
        // Memory operations
        // RTABI chapter 4.3.4
        { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      };

      for (const auto &LC : MemOpsLibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->isTargetMachO() &&
      !(Subtarget->isTargetIOS() &&
        Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  // The half <-> float conversion functions are always soft-float on
  // non-watchos platforms, but are needed for some targets which use a
  // hard-float calling convention by default.
  if (!Subtarget->isTargetWatchABI()) {
    if (Subtarget->isAAPCS_ABI()) {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
    } else {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
    }
  }

  // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
  // a __gnu_ prefix (which is the default).
  if (Subtarget->isTargetAEABI()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);

    setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);

    if (!Subtarget->hasVFP2Base())
      setAllExpand(MVT::f32);
    if (!Subtarget->hasFP64())
      setAllExpand(MVT::f64);
  }

  if (Subtarget->hasFullFP16()) {
    addRegisterClass(MVT::f16, &ARM::HPRRegClass);
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
  }

  if (Subtarget->hasBF16()) {
    addRegisterClass(MVT::bf16, &ARM::HPRRegClass);
    setAllExpand(MVT::bf16);
    if (!Subtarget->hasFullFP16())
      setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
  }

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      addAllExtLoads(VT, InnerVT, Expand);
    }

    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());

  // Combine low-overhead loop intrinsics so that we can lower i1 types.
  if (Subtarget->hasLOB()) {
    setTargetDAGCombine({ISD::BRCOND, ISD::BR_CC});
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);
    }

    if (Subtarget->hasBF16()) {
      addQRTypeForNEON(MVT::v8bf16);
      addDRTypeForNEON(MVT::v4bf16);
    }
  }

  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // none of Neon, MVE or VFP supports any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a way when "copysign" appears in DAG with vector
    // operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FTAN, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP10, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);
  }

  if (Subtarget->hasNEON()) {
    // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively
    // supported for v4f32.
    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FTAN, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP10, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FTAN, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP10, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have a
    // FP_TO_[SU]INT instruction with a narrower destination than source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v1i64, Custom);
    setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    // NEON does not have single instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
    }

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4Base()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine({ISD::SHL, ISD::SRL, ISD::SRA, ISD::FP_TO_SINT,
                         ISD::FP_TO_UINT, ISD::FMUL, ISD::LOAD});

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }

    for (auto VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,
                    MVT::v4i32}) {
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
    }
  }

1031 | if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) { |
1032 | setTargetDAGCombine( |
1033 | {ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, ISD::INSERT_SUBVECTOR, |
1034 | ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, |
1035 | ISD::SIGN_EXTEND_INREG, ISD::STORE, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, |
1036 | ISD::ANY_EXTEND, ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN, |
1037 | ISD::INTRINSIC_VOID, ISD::VECREDUCE_ADD, ISD::ADD, ISD::BITCAST}); |
1038 | } |
1039 | if (Subtarget->hasMVEIntegerOps()) { |
1040 | setTargetDAGCombine({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX, |
1041 | ISD::FP_EXTEND, ISD::SELECT, ISD::SELECT_CC, |
1042 | ISD::SETCC}); |
1043 | } |
1044 | if (Subtarget->hasMVEFloatOps()) { |
1045 | setTargetDAGCombine(ISD::FADD); |
1046 | } |
1047 | |
1048 | if (!Subtarget->hasFP64()) { |
1049 | // When targeting a floating-point unit with only single-precision |
1050 | // operations, f64 is legal for the few double-precision instructions which |
1051 | // are present. However, no double-precision operations other than moves, |
1052 | // loads and stores are provided by the hardware. |
1053 | setOperationAction(Op: ISD::FADD, VT: MVT::f64, Action: Expand); |
1054 | setOperationAction(Op: ISD::FSUB, VT: MVT::f64, Action: Expand); |
1055 | setOperationAction(Op: ISD::FMUL, VT: MVT::f64, Action: Expand); |
1056 | setOperationAction(Op: ISD::FMA, VT: MVT::f64, Action: Expand); |
1057 | setOperationAction(Op: ISD::FDIV, VT: MVT::f64, Action: Expand); |
1058 | setOperationAction(Op: ISD::FREM, VT: MVT::f64, Action: Expand); |
1059 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f64, Action: Expand); |
1060 | setOperationAction(Op: ISD::FGETSIGN, VT: MVT::f64, Action: Expand); |
1061 | setOperationAction(Op: ISD::FNEG, VT: MVT::f64, Action: Expand); |
1062 | setOperationAction(Op: ISD::FABS, VT: MVT::f64, Action: Expand); |
1063 | setOperationAction(Op: ISD::FSQRT, VT: MVT::f64, Action: Expand); |
1064 | setOperationAction(Op: ISD::FSIN, VT: MVT::f64, Action: Expand); |
1065 | setOperationAction(Op: ISD::FCOS, VT: MVT::f64, Action: Expand); |
1066 | setOperationAction(Op: ISD::FPOW, VT: MVT::f64, Action: Expand); |
1067 | setOperationAction(Op: ISD::FLOG, VT: MVT::f64, Action: Expand); |
1068 | setOperationAction(Op: ISD::FLOG2, VT: MVT::f64, Action: Expand); |
1069 | setOperationAction(Op: ISD::FLOG10, VT: MVT::f64, Action: Expand); |
1070 | setOperationAction(Op: ISD::FEXP, VT: MVT::f64, Action: Expand); |
1071 | setOperationAction(Op: ISD::FEXP2, VT: MVT::f64, Action: Expand); |
1072 | setOperationAction(Op: ISD::FEXP10, VT: MVT::f64, Action: Expand); |
1073 | setOperationAction(Op: ISD::FCEIL, VT: MVT::f64, Action: Expand); |
1074 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::f64, Action: Expand); |
1075 | setOperationAction(Op: ISD::FRINT, VT: MVT::f64, Action: Expand); |
1076 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::f64, Action: Expand); |
1077 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::f64, Action: Expand); |
1078 | setOperationAction(Op: ISD::SINT_TO_FP, VT: MVT::i32, Action: Custom); |
1079 | setOperationAction(Op: ISD::UINT_TO_FP, VT: MVT::i32, Action: Custom); |
1080 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::i32, Action: Custom); |
1081 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::i32, Action: Custom); |
1082 | setOperationAction(Op: ISD::FP_TO_SINT, VT: MVT::f64, Action: Custom); |
1083 | setOperationAction(Op: ISD::FP_TO_UINT, VT: MVT::f64, Action: Custom); |
1084 | setOperationAction(Op: ISD::FP_ROUND, VT: MVT::f32, Action: Custom); |
1085 | setOperationAction(Op: ISD::STRICT_FP_TO_SINT, VT: MVT::i32, Action: Custom); |
1086 | setOperationAction(Op: ISD::STRICT_FP_TO_UINT, VT: MVT::i32, Action: Custom); |
1087 | setOperationAction(Op: ISD::STRICT_FP_TO_SINT, VT: MVT::f64, Action: Custom); |
1088 | setOperationAction(Op: ISD::STRICT_FP_TO_UINT, VT: MVT::f64, Action: Custom); |
1089 | setOperationAction(Op: ISD::STRICT_FP_ROUND, VT: MVT::f32, Action: Custom); |
1090 | } |
1091 | |
1092 | if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) { |
1093 | setOperationAction(Op: ISD::FP_EXTEND, VT: MVT::f64, Action: Custom); |
1094 | setOperationAction(Op: ISD::STRICT_FP_EXTEND, VT: MVT::f64, Action: Custom); |
1095 | if (Subtarget->hasFullFP16()) { |
1096 | setOperationAction(Op: ISD::FP_ROUND, VT: MVT::f16, Action: Custom); |
1097 | setOperationAction(Op: ISD::STRICT_FP_ROUND, VT: MVT::f16, Action: Custom); |
1098 | } |
1099 | } |
1100 | |
1101 | if (!Subtarget->hasFP16()) { |
1102 | setOperationAction(Op: ISD::FP_EXTEND, VT: MVT::f32, Action: Custom); |
1103 | setOperationAction(Op: ISD::STRICT_FP_EXTEND, VT: MVT::f32, Action: Custom); |
1104 | } |
1105 | |
1106 | computeRegisterProperties(TRI: Subtarget->getRegisterInfo()); |
1107 | |
1108 | // ARM does not have floating-point extending loads. |
1109 | for (MVT VT : MVT::fp_valuetypes()) { |
1110 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::f32, Action: Expand); |
1111 | setLoadExtAction(ExtType: ISD::EXTLOAD, ValVT: VT, MemVT: MVT::f16, Action: Expand); |
1112 | } |
1113 | |
1114 | // ... or truncating stores |
1115 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f32, Action: Expand); |
1116 | setTruncStoreAction(ValVT: MVT::f32, MemVT: MVT::f16, Action: Expand); |
1117 | setTruncStoreAction(ValVT: MVT::f64, MemVT: MVT::f16, Action: Expand); |
1118 | |
1119 | // ARM does not have i1 sign extending load. |
1120 | for (MVT VT : MVT::integer_valuetypes()) |
1121 | setLoadExtAction(ExtType: ISD::SEXTLOAD, ValVT: VT, MemVT: MVT::i1, Action: Promote); |
1122 | |
1123 | // ARM supports all 4 flavors of integer indexed load / store. |
1124 | if (!Subtarget->isThumb1Only()) { |
1125 | for (unsigned im = (unsigned)ISD::PRE_INC; |
1126 | im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { |
1127 | setIndexedLoadAction(IdxModes: im, VT: MVT::i1, Action: Legal); |
1128 | setIndexedLoadAction(IdxModes: im, VT: MVT::i8, Action: Legal); |
1129 | setIndexedLoadAction(IdxModes: im, VT: MVT::i16, Action: Legal); |
1130 | setIndexedLoadAction(IdxModes: im, VT: MVT::i32, Action: Legal); |
1131 | setIndexedStoreAction(IdxModes: im, VT: MVT::i1, Action: Legal); |
1132 | setIndexedStoreAction(IdxModes: im, VT: MVT::i8, Action: Legal); |
1133 | setIndexedStoreAction(IdxModes: im, VT: MVT::i16, Action: Legal); |
1134 | setIndexedStoreAction(IdxModes: im, VT: MVT::i32, Action: Legal); |
1135 | } |
1136 | } else { |
1137 | // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}. |
1138 | setIndexedLoadAction(IdxModes: ISD::POST_INC, VT: MVT::i32, Action: Legal); |
1139 | setIndexedStoreAction(IdxModes: ISD::POST_INC, VT: MVT::i32, Action: Legal); |
1140 | } |
1141 | |
1142 | setOperationAction(Op: ISD::SADDO, VT: MVT::i32, Action: Custom); |
1143 | setOperationAction(Op: ISD::UADDO, VT: MVT::i32, Action: Custom); |
1144 | setOperationAction(Op: ISD::SSUBO, VT: MVT::i32, Action: Custom); |
1145 | setOperationAction(Op: ISD::USUBO, VT: MVT::i32, Action: Custom); |
1146 | |
1147 | setOperationAction(Op: ISD::UADDO_CARRY, VT: MVT::i32, Action: Custom); |
1148 | setOperationAction(Op: ISD::USUBO_CARRY, VT: MVT::i32, Action: Custom); |
1149 | if (Subtarget->hasDSP()) { |
1150 | setOperationAction(Op: ISD::SADDSAT, VT: MVT::i8, Action: Custom); |
1151 | setOperationAction(Op: ISD::SSUBSAT, VT: MVT::i8, Action: Custom); |
1152 | setOperationAction(Op: ISD::SADDSAT, VT: MVT::i16, Action: Custom); |
1153 | setOperationAction(Op: ISD::SSUBSAT, VT: MVT::i16, Action: Custom); |
1154 | setOperationAction(Op: ISD::UADDSAT, VT: MVT::i8, Action: Custom); |
1155 | setOperationAction(Op: ISD::USUBSAT, VT: MVT::i8, Action: Custom); |
1156 | setOperationAction(Op: ISD::UADDSAT, VT: MVT::i16, Action: Custom); |
1157 | setOperationAction(Op: ISD::USUBSAT, VT: MVT::i16, Action: Custom); |
1158 | } |
1159 | if (Subtarget->hasBaseDSP()) { |
1160 | setOperationAction(Op: ISD::SADDSAT, VT: MVT::i32, Action: Legal); |
1161 | setOperationAction(Op: ISD::SSUBSAT, VT: MVT::i32, Action: Legal); |
1162 | } |
1163 | |
1164 | // i64 operation support. |
1165 | setOperationAction(Op: ISD::MUL, VT: MVT::i64, Action: Expand); |
1166 | setOperationAction(Op: ISD::MULHU, VT: MVT::i32, Action: Expand); |
1167 | if (Subtarget->isThumb1Only()) { |
1168 | setOperationAction(Op: ISD::UMUL_LOHI, VT: MVT::i32, Action: Expand); |
1169 | setOperationAction(Op: ISD::SMUL_LOHI, VT: MVT::i32, Action: Expand); |
1170 | } |
1171 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() |
1172 | || (Subtarget->isThumb2() && !Subtarget->hasDSP())) |
1173 | setOperationAction(Op: ISD::MULHS, VT: MVT::i32, Action: Expand); |
1174 | |
1175 | setOperationAction(Op: ISD::SHL_PARTS, VT: MVT::i32, Action: Custom); |
1176 | setOperationAction(Op: ISD::SRA_PARTS, VT: MVT::i32, Action: Custom); |
1177 | setOperationAction(Op: ISD::SRL_PARTS, VT: MVT::i32, Action: Custom); |
1178 | setOperationAction(Op: ISD::SRL, VT: MVT::i64, Action: Custom); |
1179 | setOperationAction(Op: ISD::SRA, VT: MVT::i64, Action: Custom); |
1180 | setOperationAction(Op: ISD::INTRINSIC_VOID, VT: MVT::Other, Action: Custom); |
1181 | setOperationAction(Op: ISD::INTRINSIC_WO_CHAIN, VT: MVT::i64, Action: Custom); |
1182 | setOperationAction(Op: ISD::LOAD, VT: MVT::i64, Action: Custom); |
1183 | setOperationAction(Op: ISD::STORE, VT: MVT::i64, Action: Custom); |
1184 | |
1185 | // MVE lowers 64-bit shifts to lsll and lsrl, |
1186 | // assuming that ISD::SRL and SRA of i64 are already marked Custom. |
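    | // (The MVE long shifts lsll, lsrl and asrl operate on a 64-bit value held |
    | // in a pair of 32-bit GPRs.) |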
1187 | if (Subtarget->hasMVEIntegerOps()) |
1188 | setOperationAction(Op: ISD::SHL, VT: MVT::i64, Action: Custom); |
1189 | |
1190 | // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1. |
1191 | if (Subtarget->isThumb1Only()) { |
1192 | setOperationAction(Op: ISD::SHL_PARTS, VT: MVT::i32, Action: Expand); |
1193 | setOperationAction(Op: ISD::SRA_PARTS, VT: MVT::i32, Action: Expand); |
1194 | setOperationAction(Op: ISD::SRL_PARTS, VT: MVT::i32, Action: Expand); |
1195 | } |
1196 | |
1197 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) |
1198 | setOperationAction(Op: ISD::BITREVERSE, VT: MVT::i32, Action: Legal); |
1199 | |
1200 | // ARM does not have ROTL. |
1201 | setOperationAction(Op: ISD::ROTL, VT: MVT::i32, Action: Expand); |
1202 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
1203 | setOperationAction(Op: ISD::ROTL, VT, Action: Expand); |
1204 | setOperationAction(Op: ISD::ROTR, VT, Action: Expand); |
1205 | } |
1206 | setOperationAction(Op: ISD::CTTZ, VT: MVT::i32, Action: Custom); |
1207 | setOperationAction(Op: ISD::CTPOP, VT: MVT::i32, Action: Expand); |
1208 | if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) { |
1209 | setOperationAction(Op: ISD::CTLZ, VT: MVT::i32, Action: Expand); |
1210 | setOperationAction(Op: ISD::CTLZ_ZERO_UNDEF, VT: MVT::i32, Action: LibCall); |
1211 | } |
1212 | |
1213 | // @llvm.readcyclecounter requires the Performance Monitors extension. |
1214 | // Default to the 0 expansion on unsupported platforms. |
1215 | // FIXME: Technically there are older ARM CPUs that have |
1216 | // implementation-specific ways of obtaining this information. |
1217 | if (Subtarget->hasPerfMon()) |
1218 | setOperationAction(Op: ISD::READCYCLECOUNTER, VT: MVT::i64, Action: Custom); |
1219 | |
1220 | // Only ARMv6 has BSWAP. |
1221 | if (!Subtarget->hasV6Ops()) |
1222 | setOperationAction(Op: ISD::BSWAP, VT: MVT::i32, Action: Expand); |
1223 | |
1224 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
1225 | : Subtarget->hasDivideInARMMode(); |
1226 | if (!hasDivide) { |
1227 | // These are expanded into libcalls if the CPU doesn't have a HW divider. |
1228 | setOperationAction(Op: ISD::SDIV, VT: MVT::i32, Action: LibCall); |
1229 | setOperationAction(Op: ISD::UDIV, VT: MVT::i32, Action: LibCall); |
1230 | } |
1231 | |
1232 | if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) { |
1233 | setOperationAction(Op: ISD::SDIV, VT: MVT::i32, Action: Custom); |
1234 | setOperationAction(Op: ISD::UDIV, VT: MVT::i32, Action: Custom); |
1235 | |
1236 | setOperationAction(Op: ISD::SDIV, VT: MVT::i64, Action: Custom); |
1237 | setOperationAction(Op: ISD::UDIV, VT: MVT::i64, Action: Custom); |
1238 | } |
1239 | |
1240 | setOperationAction(Op: ISD::SREM, VT: MVT::i32, Action: Expand); |
1241 | setOperationAction(Op: ISD::UREM, VT: MVT::i32, Action: Expand); |
1242 | |
1243 | // Register based DivRem for AEABI (RTABI 4.2) |
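    | // The RTABI divmod helpers (__aeabi_idivmod, __aeabi_uidivmod and their |
    | // 64-bit variants) return the quotient and remainder together in registers |
    | // (r0/r1, or r0-r1/r2-r3 for the 64-bit forms), so a combined DivRem can |
    | // be lowered to a single call. |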
1244 | if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || |
1245 | Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || |
1246 | Subtarget->isTargetWindows()) { |
1247 | setOperationAction(Op: ISD::SREM, VT: MVT::i64, Action: Custom); |
1248 | setOperationAction(Op: ISD::UREM, VT: MVT::i64, Action: Custom); |
1249 | HasStandaloneRem = false; |
1250 | |
1251 | if (Subtarget->isTargetWindows()) { |
1252 | const struct { |
1253 | const RTLIB::Libcall Op; |
1254 | const char * const Name; |
1255 | const CallingConv::ID CC; |
1256 | } LibraryCalls[] = { |
1257 | { .Op: RTLIB::SDIVREM_I8, .Name: "__rt_sdiv" , .CC: CallingConv::ARM_AAPCS }, |
1258 | { .Op: RTLIB::SDIVREM_I16, .Name: "__rt_sdiv" , .CC: CallingConv::ARM_AAPCS }, |
1259 | { .Op: RTLIB::SDIVREM_I32, .Name: "__rt_sdiv" , .CC: CallingConv::ARM_AAPCS }, |
1260 | { .Op: RTLIB::SDIVREM_I64, .Name: "__rt_sdiv64" , .CC: CallingConv::ARM_AAPCS }, |
1261 | |
1262 | { .Op: RTLIB::UDIVREM_I8, .Name: "__rt_udiv" , .CC: CallingConv::ARM_AAPCS }, |
1263 | { .Op: RTLIB::UDIVREM_I16, .Name: "__rt_udiv" , .CC: CallingConv::ARM_AAPCS }, |
1264 | { .Op: RTLIB::UDIVREM_I32, .Name: "__rt_udiv" , .CC: CallingConv::ARM_AAPCS }, |
1265 | { .Op: RTLIB::UDIVREM_I64, .Name: "__rt_udiv64" , .CC: CallingConv::ARM_AAPCS }, |
1266 | }; |
1267 | |
1268 | for (const auto &LC : LibraryCalls) { |
1269 | setLibcallName(Call: LC.Op, Name: LC.Name); |
1270 | setLibcallCallingConv(Call: LC.Op, CC: LC.CC); |
1271 | } |
1272 | } else { |
1273 | const struct { |
1274 | const RTLIB::Libcall Op; |
1275 | const char * const Name; |
1276 | const CallingConv::ID CC; |
1277 | } LibraryCalls[] = { |
1278 | { .Op: RTLIB::SDIVREM_I8, .Name: "__aeabi_idivmod" , .CC: CallingConv::ARM_AAPCS }, |
1279 | { .Op: RTLIB::SDIVREM_I16, .Name: "__aeabi_idivmod" , .CC: CallingConv::ARM_AAPCS }, |
1280 | { .Op: RTLIB::SDIVREM_I32, .Name: "__aeabi_idivmod" , .CC: CallingConv::ARM_AAPCS }, |
1281 | { .Op: RTLIB::SDIVREM_I64, .Name: "__aeabi_ldivmod" , .CC: CallingConv::ARM_AAPCS }, |
1282 | |
1283 | { .Op: RTLIB::UDIVREM_I8, .Name: "__aeabi_uidivmod" , .CC: CallingConv::ARM_AAPCS }, |
1284 | { .Op: RTLIB::UDIVREM_I16, .Name: "__aeabi_uidivmod" , .CC: CallingConv::ARM_AAPCS }, |
1285 | { .Op: RTLIB::UDIVREM_I32, .Name: "__aeabi_uidivmod" , .CC: CallingConv::ARM_AAPCS }, |
1286 | { .Op: RTLIB::UDIVREM_I64, .Name: "__aeabi_uldivmod" , .CC: CallingConv::ARM_AAPCS }, |
1287 | }; |
1288 | |
1289 | for (const auto &LC : LibraryCalls) { |
1290 | setLibcallName(Call: LC.Op, Name: LC.Name); |
1291 | setLibcallCallingConv(Call: LC.Op, CC: LC.CC); |
1292 | } |
1293 | } |
1294 | |
1295 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i32, Action: Custom); |
1296 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i32, Action: Custom); |
1297 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i64, Action: Custom); |
1298 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i64, Action: Custom); |
1299 | } else { |
1300 | setOperationAction(Op: ISD::SDIVREM, VT: MVT::i32, Action: Expand); |
1301 | setOperationAction(Op: ISD::UDIVREM, VT: MVT::i32, Action: Expand); |
1302 | } |
1303 | |
1304 | setOperationAction(Op: ISD::GlobalAddress, VT: MVT::i32, Action: Custom); |
1305 | setOperationAction(Op: ISD::ConstantPool, VT: MVT::i32, Action: Custom); |
1306 | setOperationAction(Op: ISD::GlobalTLSAddress, VT: MVT::i32, Action: Custom); |
1307 | setOperationAction(Op: ISD::BlockAddress, VT: MVT::i32, Action: Custom); |
1308 | |
1309 | setOperationAction(Op: ISD::TRAP, VT: MVT::Other, Action: Legal); |
1310 | setOperationAction(Op: ISD::DEBUGTRAP, VT: MVT::Other, Action: Legal); |
1311 | |
1312 | // Use the default implementation. |
1313 | setOperationAction(Op: ISD::VASTART, VT: MVT::Other, Action: Custom); |
1314 | setOperationAction(Op: ISD::VAARG, VT: MVT::Other, Action: Expand); |
1315 | setOperationAction(Op: ISD::VACOPY, VT: MVT::Other, Action: Expand); |
1316 | setOperationAction(Op: ISD::VAEND, VT: MVT::Other, Action: Expand); |
1317 | setOperationAction(Op: ISD::STACKSAVE, VT: MVT::Other, Action: Expand); |
1318 | setOperationAction(Op: ISD::STACKRESTORE, VT: MVT::Other, Action: Expand); |
1319 | |
1320 | if (Subtarget->isTargetWindows()) |
1321 | setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVT::i32, Action: Custom); |
1322 | else |
1323 | setOperationAction(Op: ISD::DYNAMIC_STACKALLOC, VT: MVT::i32, Action: Expand); |
1324 | |
1325 | // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use |
1326 | // the default expansion. |
1327 | InsertFencesForAtomic = false; |
1328 | if (Subtarget->hasAnyDataBarrier() && |
1329 | (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) { |
1330 | // ATOMIC_FENCE needs custom lowering; the others should have been expanded |
1331 | // to ldrex/strex loops already. |
1332 | setOperationAction(Op: ISD::ATOMIC_FENCE, VT: MVT::Other, Action: Custom); |
1333 | if (!Subtarget->isThumb() || !Subtarget->isMClass()) |
1334 | setOperationAction(Op: ISD::ATOMIC_CMP_SWAP, VT: MVT::i64, Action: Custom); |
1335 | |
1336 | // On v8, we have particularly efficient implementations of atomic fences |
1337 | // if they can be combined with nearby atomic loads and stores. |
1338 | if (!Subtarget->hasAcquireRelease() || |
1339 | getTargetMachine().getOptLevel() == CodeGenOptLevel::None) { |
1340 | // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc. |
1341 | InsertFencesForAtomic = true; |
1342 | } |
1343 | } else { |
1344 | // If there's anything we can use as a barrier, go through custom lowering |
1345 | // for ATOMIC_FENCE. |
1346 | // If the target has DMB in Thumb mode, fences can be inserted. |
1347 | if (Subtarget->hasDataBarrier()) |
1348 | InsertFencesForAtomic = true; |
1349 | |
1350 | setOperationAction(Op: ISD::ATOMIC_FENCE, VT: MVT::Other, |
1351 | Action: Subtarget->hasAnyDataBarrier() ? Custom : Expand); |
1352 | |
1353 | // Mark them all as LibCall so that they are always lowered to libcalls. |
1354 | setOperationAction(Op: ISD::ATOMIC_CMP_SWAP, VT: MVT::i32, Action: LibCall); |
1355 | setOperationAction(Op: ISD::ATOMIC_SWAP, VT: MVT::i32, Action: LibCall); |
1356 | setOperationAction(Op: ISD::ATOMIC_LOAD_ADD, VT: MVT::i32, Action: LibCall); |
1357 | setOperationAction(Op: ISD::ATOMIC_LOAD_SUB, VT: MVT::i32, Action: LibCall); |
1358 | setOperationAction(Op: ISD::ATOMIC_LOAD_AND, VT: MVT::i32, Action: LibCall); |
1359 | setOperationAction(Op: ISD::ATOMIC_LOAD_OR, VT: MVT::i32, Action: LibCall); |
1360 | setOperationAction(Op: ISD::ATOMIC_LOAD_XOR, VT: MVT::i32, Action: LibCall); |
1361 | setOperationAction(Op: ISD::ATOMIC_LOAD_NAND, VT: MVT::i32, Action: LibCall); |
1362 | setOperationAction(Op: ISD::ATOMIC_LOAD_MIN, VT: MVT::i32, Action: LibCall); |
1363 | setOperationAction(Op: ISD::ATOMIC_LOAD_MAX, VT: MVT::i32, Action: LibCall); |
1364 | setOperationAction(Op: ISD::ATOMIC_LOAD_UMIN, VT: MVT::i32, Action: LibCall); |
1365 | setOperationAction(Op: ISD::ATOMIC_LOAD_UMAX, VT: MVT::i32, Action: LibCall); |
1366 | // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the |
1367 | // Unordered/Monotonic case. |
1368 | if (!InsertFencesForAtomic) { |
1369 | setOperationAction(Op: ISD::ATOMIC_LOAD, VT: MVT::i32, Action: Custom); |
1370 | setOperationAction(Op: ISD::ATOMIC_STORE, VT: MVT::i32, Action: Custom); |
1371 | } |
1372 | } |
1373 | |
1374 | // Compute supported atomic widths. |
1375 | if (Subtarget->isTargetLinux() || |
1376 | (!Subtarget->isMClass() && Subtarget->hasV6Ops())) { |
1377 | // For targets where __sync_* routines are reliably available, we use them |
1378 | // if necessary. |
1379 | // |
1380 | // ARM Linux always supports 64-bit atomics through kernel-assisted atomic |
1381 | // routines (kernel 3.1 or later). FIXME: Not with compiler-rt? |
1382 | // |
1383 | // ARMv6 targets have native instructions in ARM mode. For Thumb mode, |
1384 | // such targets should provide __sync_* routines, which use the ARM mode |
1385 | // instructions. (ARMv6 doesn't have dmb, but it has an equivalent |
1386 | // encoding; see ARMISD::MEMBARRIER_MCR.) |
1387 | setMaxAtomicSizeInBitsSupported(64); |
1388 | } else if ((Subtarget->isMClass() && Subtarget->hasV8MBaselineOps()) || |
1389 | Subtarget->hasForced32BitAtomics()) { |
1390 | // Cortex-M cores (other than Cortex-M0) have 32-bit atomics. |
1391 | setMaxAtomicSizeInBitsSupported(32); |
1392 | } else { |
1393 | // We can't assume anything about other targets; just use libatomic |
1394 | // routines. |
1395 | setMaxAtomicSizeInBitsSupported(0); |
1396 | } |
1397 | |
1398 | setMaxDivRemBitWidthSupported(64); |
1399 | |
1400 | setOperationAction(Op: ISD::PREFETCH, VT: MVT::Other, Action: Custom); |
1401 | |
1402 | // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. |
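    | // Without them, SIGN_EXTEND_INREG of i8/i16 is expanded into a left shift |
    | // followed by an arithmetic right shift. |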
1403 | if (!Subtarget->hasV6Ops()) { |
1404 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i16, Action: Expand); |
1405 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i8, Action: Expand); |
1406 | } |
1407 | setOperationAction(Op: ISD::SIGN_EXTEND_INREG, VT: MVT::i1, Action: Expand); |
1408 | |
1409 | if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() && |
1410 | !Subtarget->isThumb1Only()) { |
1411 | // Turn f64->i64 into VMOVRRD and i64->f64 into VMOVDRR |
1412 | // iff the target supports VFP2. |
1413 | setOperationAction(Op: ISD::BITCAST, VT: MVT::i64, Action: Custom); |
1414 | setOperationAction(Op: ISD::GET_ROUNDING, VT: MVT::i32, Action: Custom); |
1415 | setOperationAction(Op: ISD::SET_ROUNDING, VT: MVT::Other, Action: Custom); |
1416 | setOperationAction(Op: ISD::GET_FPENV, VT: MVT::i32, Action: Legal); |
1417 | setOperationAction(Op: ISD::SET_FPENV, VT: MVT::i32, Action: Legal); |
1418 | setOperationAction(Op: ISD::RESET_FPENV, VT: MVT::Other, Action: Legal); |
1419 | setOperationAction(Op: ISD::GET_FPMODE, VT: MVT::i32, Action: Legal); |
1420 | setOperationAction(Op: ISD::SET_FPMODE, VT: MVT::i32, Action: Custom); |
1421 | setOperationAction(Op: ISD::RESET_FPMODE, VT: MVT::Other, Action: Custom); |
1422 | } |
1423 | |
1424 | // We want to custom lower some of our intrinsics. |
1425 | setOperationAction(Op: ISD::INTRINSIC_WO_CHAIN, VT: MVT::Other, Action: Custom); |
1426 | setOperationAction(Op: ISD::EH_SJLJ_SETJMP, VT: MVT::i32, Action: Custom); |
1427 | setOperationAction(Op: ISD::EH_SJLJ_LONGJMP, VT: MVT::Other, Action: Custom); |
1428 | setOperationAction(Op: ISD::EH_SJLJ_SETUP_DISPATCH, VT: MVT::Other, Action: Custom); |
1429 | if (Subtarget->useSjLjEH()) |
1430 | setLibcallName(Call: RTLIB::UNWIND_RESUME, Name: "_Unwind_SjLj_Resume" ); |
1431 | |
1432 | setOperationAction(Op: ISD::SETCC, VT: MVT::i32, Action: Expand); |
1433 | setOperationAction(Op: ISD::SETCC, VT: MVT::f32, Action: Expand); |
1434 | setOperationAction(Op: ISD::SETCC, VT: MVT::f64, Action: Expand); |
1435 | setOperationAction(Op: ISD::SELECT, VT: MVT::i32, Action: Custom); |
1436 | setOperationAction(Op: ISD::SELECT, VT: MVT::f32, Action: Custom); |
1437 | setOperationAction(Op: ISD::SELECT, VT: MVT::f64, Action: Custom); |
1438 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::i32, Action: Custom); |
1439 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f32, Action: Custom); |
1440 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f64, Action: Custom); |
1441 | if (Subtarget->hasFullFP16()) { |
1442 | setOperationAction(Op: ISD::SETCC, VT: MVT::f16, Action: Expand); |
1443 | setOperationAction(Op: ISD::SELECT, VT: MVT::f16, Action: Custom); |
1444 | setOperationAction(Op: ISD::SELECT_CC, VT: MVT::f16, Action: Custom); |
1445 | } |
1446 | |
1447 | setOperationAction(Op: ISD::SETCCCARRY, VT: MVT::i32, Action: Custom); |
1448 | |
1449 | setOperationAction(Op: ISD::BRCOND, VT: MVT::Other, Action: Custom); |
1450 | setOperationAction(Op: ISD::BR_CC, VT: MVT::i32, Action: Custom); |
1451 | if (Subtarget->hasFullFP16()) |
1452 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f16, Action: Custom); |
1453 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f32, Action: Custom); |
1454 | setOperationAction(Op: ISD::BR_CC, VT: MVT::f64, Action: Custom); |
1455 | setOperationAction(Op: ISD::BR_JT, VT: MVT::Other, Action: Custom); |
1456 | |
1457 | // We don't support sin/cos/fmod/copysign/pow |
1458 | setOperationAction(Op: ISD::FSIN, VT: MVT::f64, Action: Expand); |
1459 | setOperationAction(Op: ISD::FSIN, VT: MVT::f32, Action: Expand); |
1460 | setOperationAction(Op: ISD::FCOS, VT: MVT::f32, Action: Expand); |
1461 | setOperationAction(Op: ISD::FCOS, VT: MVT::f64, Action: Expand); |
1462 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f64, Action: Expand); |
1463 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f32, Action: Expand); |
1464 | setOperationAction(Op: ISD::FREM, VT: MVT::f64, Action: Expand); |
1465 | setOperationAction(Op: ISD::FREM, VT: MVT::f32, Action: Expand); |
1466 | if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && |
1467 | !Subtarget->isThumb1Only()) { |
1468 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f64, Action: Custom); |
1469 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f32, Action: Custom); |
1470 | } |
1471 | setOperationAction(Op: ISD::FPOW, VT: MVT::f64, Action: Expand); |
1472 | setOperationAction(Op: ISD::FPOW, VT: MVT::f32, Action: Expand); |
1473 | |
1474 | if (!Subtarget->hasVFP4Base()) { |
1475 | setOperationAction(Op: ISD::FMA, VT: MVT::f64, Action: Expand); |
1476 | setOperationAction(Op: ISD::FMA, VT: MVT::f32, Action: Expand); |
1477 | } |
1478 | |
1479 | // Various VFP goodness |
1480 | if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { |
1481 | // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. |
1482 | if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) { |
1483 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f64, Action: Expand); |
1484 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f64, Action: Expand); |
1485 | } |
1486 | |
1487 | // fp16 is a special v7 extension that adds f16 <-> f32 conversions. |
1488 | if (!Subtarget->hasFP16()) { |
1489 | setOperationAction(Op: ISD::FP16_TO_FP, VT: MVT::f32, Action: Expand); |
1490 | setOperationAction(Op: ISD::FP_TO_FP16, VT: MVT::f32, Action: Expand); |
1491 | } |
1492 | |
1493 | // Strict floating-point comparisons need custom lowering. |
1494 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f16, Action: Custom); |
1495 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f16, Action: Custom); |
1496 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f32, Action: Custom); |
1497 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f32, Action: Custom); |
1498 | setOperationAction(Op: ISD::STRICT_FSETCC, VT: MVT::f64, Action: Custom); |
1499 | setOperationAction(Op: ISD::STRICT_FSETCCS, VT: MVT::f64, Action: Custom); |
1500 | } |
1501 | |
1502 | // Use __sincos_stret if available. |
1503 | if (getLibcallName(Call: RTLIB::SINCOS_STRET_F32) != nullptr && |
1504 | getLibcallName(Call: RTLIB::SINCOS_STRET_F64) != nullptr) { |
1505 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f64, Action: Custom); |
1506 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f32, Action: Custom); |
1507 | } |
1508 | |
1509 | // FP-ARMv8 implements a lot of rounding-like FP operations. |
1510 | if (Subtarget->hasFPARMv8Base()) { |
1511 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::f32, Action: Legal); |
1512 | setOperationAction(Op: ISD::FCEIL, VT: MVT::f32, Action: Legal); |
1513 | setOperationAction(Op: ISD::FROUND, VT: MVT::f32, Action: Legal); |
1514 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::f32, Action: Legal); |
1515 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::f32, Action: Legal); |
1516 | setOperationAction(Op: ISD::FRINT, VT: MVT::f32, Action: Legal); |
1517 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::f32, Action: Legal); |
1518 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::f32, Action: Legal); |
1519 | if (Subtarget->hasNEON()) { |
1520 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v2f32, Action: Legal); |
1521 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v2f32, Action: Legal); |
1522 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v4f32, Action: Legal); |
1523 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v4f32, Action: Legal); |
1524 | } |
1525 | |
1526 | if (Subtarget->hasFP64()) { |
1527 | setOperationAction(Op: ISD::FFLOOR, VT: MVT::f64, Action: Legal); |
1528 | setOperationAction(Op: ISD::FCEIL, VT: MVT::f64, Action: Legal); |
1529 | setOperationAction(Op: ISD::FROUND, VT: MVT::f64, Action: Legal); |
1530 | setOperationAction(Op: ISD::FTRUNC, VT: MVT::f64, Action: Legal); |
1531 | setOperationAction(Op: ISD::FNEARBYINT, VT: MVT::f64, Action: Legal); |
1532 | setOperationAction(Op: ISD::FRINT, VT: MVT::f64, Action: Legal); |
1533 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::f64, Action: Legal); |
1534 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::f64, Action: Legal); |
1535 | } |
1536 | } |
1537 | |
1538 | // FP16 operations often need to be promoted to call library functions. |
1539 | if (Subtarget->hasFullFP16()) { |
1540 | setOperationAction(Op: ISD::FREM, VT: MVT::f16, Action: Promote); |
1541 | setOperationAction(Op: ISD::FCOPYSIGN, VT: MVT::f16, Action: Expand); |
1542 | setOperationAction(Op: ISD::FSIN, VT: MVT::f16, Action: Promote); |
1543 | setOperationAction(Op: ISD::FCOS, VT: MVT::f16, Action: Promote); |
1544 | setOperationAction(Op: ISD::FTAN, VT: MVT::f16, Action: Promote); |
1545 | setOperationAction(Op: ISD::FSINCOS, VT: MVT::f16, Action: Promote); |
1546 | setOperationAction(Op: ISD::FPOWI, VT: MVT::f16, Action: Promote); |
1547 | setOperationAction(Op: ISD::FPOW, VT: MVT::f16, Action: Promote); |
1548 | setOperationAction(Op: ISD::FEXP, VT: MVT::f16, Action: Promote); |
1549 | setOperationAction(Op: ISD::FEXP2, VT: MVT::f16, Action: Promote); |
1550 | setOperationAction(Op: ISD::FEXP10, VT: MVT::f16, Action: Promote); |
1551 | setOperationAction(Op: ISD::FLOG, VT: MVT::f16, Action: Promote); |
1552 | setOperationAction(Op: ISD::FLOG10, VT: MVT::f16, Action: Promote); |
1553 | setOperationAction(Op: ISD::FLOG2, VT: MVT::f16, Action: Promote); |
1554 | |
1555 | setOperationAction(Op: ISD::FROUND, VT: MVT::f16, Action: Legal); |
1556 | } |
1557 | |
1558 | if (Subtarget->hasNEON()) { |
1559 | // vmin and vmax aren't available in a scalar form, so we can use |
1560 | // a NEON instruction with an undef lane instead. |
1561 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::f32, Action: Legal); |
1562 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::f32, Action: Legal); |
1563 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::f16, Action: Legal); |
1564 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::f16, Action: Legal); |
1565 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v2f32, Action: Legal); |
1566 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v2f32, Action: Legal); |
1567 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v4f32, Action: Legal); |
1568 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v4f32, Action: Legal); |
1569 | |
1570 | if (Subtarget->hasFullFP16()) { |
1571 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v4f16, Action: Legal); |
1572 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v4f16, Action: Legal); |
1573 | setOperationAction(Op: ISD::FMINNUM, VT: MVT::v8f16, Action: Legal); |
1574 | setOperationAction(Op: ISD::FMAXNUM, VT: MVT::v8f16, Action: Legal); |
1575 | |
1576 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v4f16, Action: Legal); |
1577 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v4f16, Action: Legal); |
1578 | setOperationAction(Op: ISD::FMINIMUM, VT: MVT::v8f16, Action: Legal); |
1579 | setOperationAction(Op: ISD::FMAXIMUM, VT: MVT::v8f16, Action: Legal); |
1580 | } |
1581 | } |
1582 | |
1583 | // On MSVC, both 32-bit and 64-bit, ldexpf(f32) is not defined. MinGW has |
1584 | // it, but it's just a wrapper around ldexp. |
1585 | if (Subtarget->isTargetWindows()) { |
1586 | for (ISD::NodeType Op : {ISD::FLDEXP, ISD::STRICT_FLDEXP, ISD::FFREXP}) |
1587 | if (isOperationExpand(Op, VT: MVT::f32)) |
1588 | setOperationAction(Op, VT: MVT::f32, Action: Promote); |
1589 | } |
1590 | |
1591 | // LegalizeDAG currently can't expand fp16 LDEXP/FREXP on targets where i16 |
1592 | // isn't legal. |
1593 | for (ISD::NodeType Op : {ISD::FLDEXP, ISD::STRICT_FLDEXP, ISD::FFREXP}) |
1594 | if (isOperationExpand(Op, VT: MVT::f16)) |
1595 | setOperationAction(Op, VT: MVT::f16, Action: Promote); |
1596 | |
1597 | // We have target-specific dag combine patterns for the following nodes: |
1598 | // ARMISD::VMOVRRD - No need to call setTargetDAGCombine |
1599 | setTargetDAGCombine( |
1600 | {ISD::ADD, ISD::SUB, ISD::MUL, ISD::AND, ISD::OR, ISD::XOR}); |
1601 | |
1602 | if (Subtarget->hasMVEIntegerOps()) |
1603 | setTargetDAGCombine(ISD::VSELECT); |
1604 | |
1605 | if (Subtarget->hasV6Ops()) |
1606 | setTargetDAGCombine(ISD::SRL); |
1607 | if (Subtarget->isThumb1Only()) |
1608 | setTargetDAGCombine(ISD::SHL); |
1609 | // Attempt to lower smin/smax to ssat/usat |
1610 | if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || |
1611 | Subtarget->isThumb2()) { |
1612 | setTargetDAGCombine({ISD::SMIN, ISD::SMAX}); |
1613 | } |
1614 | |
1615 | setStackPointerRegisterToSaveRestore(ARM::SP); |
1616 | |
1617 | if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || |
1618 | !Subtarget->hasVFP2Base() || Subtarget->hasMinSize()) |
1619 | setSchedulingPreference(Sched::RegPressure); |
1620 | else |
1621 | setSchedulingPreference(Sched::Hybrid); |
1622 | |
1623 | //// temporary - rewrite interface to use type |
1624 | MaxStoresPerMemset = 8; |
1625 | MaxStoresPerMemsetOptSize = 4; |
1626 | MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores |
1627 | MaxStoresPerMemcpyOptSize = 2; |
1628 | MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores |
1629 | MaxStoresPerMemmoveOptSize = 2; |
1630 | |
1631 | // On ARM arguments smaller than 4 bytes are extended, so all arguments |
1632 | // are at least 4 bytes aligned. |
1633 | setMinStackArgumentAlignment(Align(4)); |
1634 | |
1635 | // Prefer likely predicted branches to selects on out-of-order cores. |
1636 | PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); |
1637 | |
1638 | setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment())); |
1639 | setPrefFunctionAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment())); |
1640 | |
1641 | setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4)); |
1642 | } |
1643 | |
1644 | bool ARMTargetLowering::useSoftFloat() const { |
1645 | return Subtarget->useSoftFloat(); |
1646 | } |
1647 | |
1648 | // FIXME: It might make sense to define the representative register class as the |
1649 | // nearest super-register that has a non-null superset. For example, DPR_VFP2 is |
1650 | // a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently, |
1651 | // SPR's representative would be DPR_VFP2. This should work well if register |
1652 | // pressure tracking were modified such that a register use would increment the |
1653 | // pressure of the register class's representative and all of its super |
1654 | // classes' representatives transitively. We have not implemented this because |
1655 | // of the difficulty prior to coalescing of modeling operand register classes |
1656 | // due to the common occurrence of cross class copies and subregister insertions |
1657 | // and extractions. |
1658 | std::pair<const TargetRegisterClass *, uint8_t> |
1659 | ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, |
1660 | MVT VT) const { |
1661 | const TargetRegisterClass *RRC = nullptr; |
1662 | uint8_t Cost = 1; |
1663 | switch (VT.SimpleTy) { |
1664 | default: |
1665 | return TargetLowering::findRepresentativeClass(TRI, VT); |
1666 | // Use DPR as representative register class for all floating point |
1667 | // and vector types. Since there are 32 SPR registers and 32 DPR registers, |
1668 | // the cost is 1 for both f32 and f64. |
1669 | case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: |
1670 | case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: |
1671 | RRC = &ARM::DPRRegClass; |
1672 | // When NEON is used for SP, only half of the register file is available |
1673 | // because operations that define both SP and DP results will be constrained |
1674 | // to the VFP2 class (D0-D15). We currently model this constraint prior to |
1675 | // coalescing by double-counting the SP regs. See the FIXME above. |
1676 | if (Subtarget->useNEONForSinglePrecisionFP()) |
1677 | Cost = 2; |
1678 | break; |
1679 | case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: |
1680 | case MVT::v4f32: case MVT::v2f64: |
1681 | RRC = &ARM::DPRRegClass; |
1682 | Cost = 2; |
1683 | break; |
1684 | case MVT::v4i64: |
1685 | RRC = &ARM::DPRRegClass; |
1686 | Cost = 4; |
1687 | break; |
1688 | case MVT::v8i64: |
1689 | RRC = &ARM::DPRRegClass; |
1690 | Cost = 8; |
1691 | break; |
1692 | } |
1693 | return std::make_pair(x&: RRC, y&: Cost); |
1694 | } |
1695 | |
1696 | const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { |
1697 | #define MAKE_CASE(V) \ |
1698 | case V: \ |
1699 | return #V; |
1700 | switch ((ARMISD::NodeType)Opcode) { |
1701 | case ARMISD::FIRST_NUMBER: |
1702 | break; |
1703 | MAKE_CASE(ARMISD::Wrapper) |
1704 | MAKE_CASE(ARMISD::WrapperPIC) |
1705 | MAKE_CASE(ARMISD::WrapperJT) |
1706 | MAKE_CASE(ARMISD::COPY_STRUCT_BYVAL) |
1707 | MAKE_CASE(ARMISD::CALL) |
1708 | MAKE_CASE(ARMISD::CALL_PRED) |
1709 | MAKE_CASE(ARMISD::CALL_NOLINK) |
1710 | MAKE_CASE(ARMISD::tSECALL) |
1711 | MAKE_CASE(ARMISD::t2CALL_BTI) |
1712 | MAKE_CASE(ARMISD::BRCOND) |
1713 | MAKE_CASE(ARMISD::BR_JT) |
1714 | MAKE_CASE(ARMISD::BR2_JT) |
1715 | MAKE_CASE(ARMISD::RET_GLUE) |
1716 | MAKE_CASE(ARMISD::SERET_GLUE) |
1717 | MAKE_CASE(ARMISD::INTRET_GLUE) |
1718 | MAKE_CASE(ARMISD::PIC_ADD) |
1719 | MAKE_CASE(ARMISD::CMP) |
1720 | MAKE_CASE(ARMISD::CMN) |
1721 | MAKE_CASE(ARMISD::CMPZ) |
1722 | MAKE_CASE(ARMISD::CMPFP) |
1723 | MAKE_CASE(ARMISD::CMPFPE) |
1724 | MAKE_CASE(ARMISD::CMPFPw0) |
1725 | MAKE_CASE(ARMISD::CMPFPEw0) |
1726 | MAKE_CASE(ARMISD::BCC_i64) |
1727 | MAKE_CASE(ARMISD::FMSTAT) |
1728 | MAKE_CASE(ARMISD::CMOV) |
1729 | MAKE_CASE(ARMISD::SSAT) |
1730 | MAKE_CASE(ARMISD::USAT) |
1731 | MAKE_CASE(ARMISD::ASRL) |
1732 | MAKE_CASE(ARMISD::LSRL) |
1733 | MAKE_CASE(ARMISD::LSLL) |
1734 | MAKE_CASE(ARMISD::SRL_GLUE) |
1735 | MAKE_CASE(ARMISD::SRA_GLUE) |
1736 | MAKE_CASE(ARMISD::RRX) |
1737 | MAKE_CASE(ARMISD::ADDC) |
1738 | MAKE_CASE(ARMISD::ADDE) |
1739 | MAKE_CASE(ARMISD::SUBC) |
1740 | MAKE_CASE(ARMISD::SUBE) |
1741 | MAKE_CASE(ARMISD::LSLS) |
1742 | MAKE_CASE(ARMISD::VMOVRRD) |
1743 | MAKE_CASE(ARMISD::VMOVDRR) |
1744 | MAKE_CASE(ARMISD::VMOVhr) |
1745 | MAKE_CASE(ARMISD::VMOVrh) |
1746 | MAKE_CASE(ARMISD::VMOVSR) |
1747 | MAKE_CASE(ARMISD::EH_SJLJ_SETJMP) |
1748 | MAKE_CASE(ARMISD::EH_SJLJ_LONGJMP) |
1749 | MAKE_CASE(ARMISD::EH_SJLJ_SETUP_DISPATCH) |
1750 | MAKE_CASE(ARMISD::TC_RETURN) |
1751 | MAKE_CASE(ARMISD::THREAD_POINTER) |
1752 | MAKE_CASE(ARMISD::DYN_ALLOC) |
1753 | MAKE_CASE(ARMISD::MEMBARRIER_MCR) |
1754 | MAKE_CASE(ARMISD::PRELOAD) |
1755 | MAKE_CASE(ARMISD::LDRD) |
1756 | MAKE_CASE(ARMISD::STRD) |
1757 | MAKE_CASE(ARMISD::WIN__CHKSTK) |
1758 | MAKE_CASE(ARMISD::WIN__DBZCHK) |
1759 | MAKE_CASE(ARMISD::PREDICATE_CAST) |
1760 | MAKE_CASE(ARMISD::VECTOR_REG_CAST) |
1761 | MAKE_CASE(ARMISD::MVESEXT) |
1762 | MAKE_CASE(ARMISD::MVEZEXT) |
1763 | MAKE_CASE(ARMISD::MVETRUNC) |
1764 | MAKE_CASE(ARMISD::VCMP) |
1765 | MAKE_CASE(ARMISD::VCMPZ) |
1766 | MAKE_CASE(ARMISD::VTST) |
1767 | MAKE_CASE(ARMISD::VSHLs) |
1768 | MAKE_CASE(ARMISD::VSHLu) |
1769 | MAKE_CASE(ARMISD::VSHLIMM) |
1770 | MAKE_CASE(ARMISD::VSHRsIMM) |
1771 | MAKE_CASE(ARMISD::VSHRuIMM) |
1772 | MAKE_CASE(ARMISD::VRSHRsIMM) |
1773 | MAKE_CASE(ARMISD::VRSHRuIMM) |
1774 | MAKE_CASE(ARMISD::VRSHRNIMM) |
1775 | MAKE_CASE(ARMISD::VQSHLsIMM) |
1776 | MAKE_CASE(ARMISD::VQSHLuIMM) |
1777 | MAKE_CASE(ARMISD::VQSHLsuIMM) |
1778 | MAKE_CASE(ARMISD::VQSHRNsIMM) |
1779 | MAKE_CASE(ARMISD::VQSHRNuIMM) |
1780 | MAKE_CASE(ARMISD::VQSHRNsuIMM) |
1781 | MAKE_CASE(ARMISD::VQRSHRNsIMM) |
1782 | MAKE_CASE(ARMISD::VQRSHRNuIMM) |
1783 | MAKE_CASE(ARMISD::VQRSHRNsuIMM) |
1784 | MAKE_CASE(ARMISD::VSLIIMM) |
1785 | MAKE_CASE(ARMISD::VSRIIMM) |
1786 | MAKE_CASE(ARMISD::VGETLANEu) |
1787 | MAKE_CASE(ARMISD::VGETLANEs) |
1788 | MAKE_CASE(ARMISD::VMOVIMM) |
1789 | MAKE_CASE(ARMISD::VMVNIMM) |
1790 | MAKE_CASE(ARMISD::VMOVFPIMM) |
1791 | MAKE_CASE(ARMISD::VDUP) |
1792 | MAKE_CASE(ARMISD::VDUPLANE) |
1793 | MAKE_CASE(ARMISD::VEXT) |
1794 | MAKE_CASE(ARMISD::VREV64) |
1795 | MAKE_CASE(ARMISD::VREV32) |
1796 | MAKE_CASE(ARMISD::VREV16) |
1797 | MAKE_CASE(ARMISD::VZIP) |
1798 | MAKE_CASE(ARMISD::VUZP) |
1799 | MAKE_CASE(ARMISD::VTRN) |
1800 | MAKE_CASE(ARMISD::VTBL1) |
1801 | MAKE_CASE(ARMISD::VTBL2) |
1802 | MAKE_CASE(ARMISD::VMOVN) |
1803 | MAKE_CASE(ARMISD::VQMOVNs) |
1804 | MAKE_CASE(ARMISD::VQMOVNu) |
1805 | MAKE_CASE(ARMISD::VCVTN) |
1806 | MAKE_CASE(ARMISD::VCVTL) |
1807 | MAKE_CASE(ARMISD::VIDUP) |
1808 | MAKE_CASE(ARMISD::VMULLs) |
1809 | MAKE_CASE(ARMISD::VMULLu) |
1810 | MAKE_CASE(ARMISD::VQDMULH) |
1811 | MAKE_CASE(ARMISD::VADDVs) |
1812 | MAKE_CASE(ARMISD::VADDVu) |
1813 | MAKE_CASE(ARMISD::VADDVps) |
1814 | MAKE_CASE(ARMISD::VADDVpu) |
1815 | MAKE_CASE(ARMISD::VADDLVs) |
1816 | MAKE_CASE(ARMISD::VADDLVu) |
1817 | MAKE_CASE(ARMISD::VADDLVAs) |
1818 | MAKE_CASE(ARMISD::VADDLVAu) |
1819 | MAKE_CASE(ARMISD::VADDLVps) |
1820 | MAKE_CASE(ARMISD::VADDLVpu) |
1821 | MAKE_CASE(ARMISD::VADDLVAps) |
1822 | MAKE_CASE(ARMISD::VADDLVApu) |
1823 | MAKE_CASE(ARMISD::VMLAVs) |
1824 | MAKE_CASE(ARMISD::VMLAVu) |
1825 | MAKE_CASE(ARMISD::VMLAVps) |
1826 | MAKE_CASE(ARMISD::VMLAVpu) |
1827 | MAKE_CASE(ARMISD::VMLALVs) |
1828 | MAKE_CASE(ARMISD::VMLALVu) |
1829 | MAKE_CASE(ARMISD::VMLALVps) |
1830 | MAKE_CASE(ARMISD::VMLALVpu) |
1831 | MAKE_CASE(ARMISD::VMLALVAs) |
1832 | MAKE_CASE(ARMISD::VMLALVAu) |
1833 | MAKE_CASE(ARMISD::VMLALVAps) |
1834 | MAKE_CASE(ARMISD::VMLALVApu) |
1835 | MAKE_CASE(ARMISD::VMINVu) |
1836 | MAKE_CASE(ARMISD::VMINVs) |
1837 | MAKE_CASE(ARMISD::VMAXVu) |
1838 | MAKE_CASE(ARMISD::VMAXVs) |
1839 | MAKE_CASE(ARMISD::UMAAL) |
1840 | MAKE_CASE(ARMISD::UMLAL) |
1841 | MAKE_CASE(ARMISD::SMLAL) |
1842 | MAKE_CASE(ARMISD::SMLALBB) |
1843 | MAKE_CASE(ARMISD::SMLALBT) |
1844 | MAKE_CASE(ARMISD::SMLALTB) |
1845 | MAKE_CASE(ARMISD::SMLALTT) |
1846 | MAKE_CASE(ARMISD::SMULWB) |
1847 | MAKE_CASE(ARMISD::SMULWT) |
1848 | MAKE_CASE(ARMISD::SMLALD) |
1849 | MAKE_CASE(ARMISD::SMLALDX) |
1850 | MAKE_CASE(ARMISD::SMLSLD) |
1851 | MAKE_CASE(ARMISD::SMLSLDX) |
1852 | MAKE_CASE(ARMISD::SMMLAR) |
1853 | MAKE_CASE(ARMISD::SMMLSR) |
1854 | MAKE_CASE(ARMISD::QADD16b) |
1855 | MAKE_CASE(ARMISD::QSUB16b) |
1856 | MAKE_CASE(ARMISD::QADD8b) |
1857 | MAKE_CASE(ARMISD::QSUB8b) |
1858 | MAKE_CASE(ARMISD::UQADD16b) |
1859 | MAKE_CASE(ARMISD::UQSUB16b) |
1860 | MAKE_CASE(ARMISD::UQADD8b) |
1861 | MAKE_CASE(ARMISD::UQSUB8b) |
1862 | MAKE_CASE(ARMISD::BUILD_VECTOR) |
1863 | MAKE_CASE(ARMISD::BFI) |
1864 | MAKE_CASE(ARMISD::VORRIMM) |
1865 | MAKE_CASE(ARMISD::VBICIMM) |
1866 | MAKE_CASE(ARMISD::VBSP) |
1867 | MAKE_CASE(ARMISD::MEMCPY) |
1868 | MAKE_CASE(ARMISD::VLD1DUP) |
1869 | MAKE_CASE(ARMISD::VLD2DUP) |
1870 | MAKE_CASE(ARMISD::VLD3DUP) |
1871 | MAKE_CASE(ARMISD::VLD4DUP) |
1872 | MAKE_CASE(ARMISD::VLD1_UPD) |
1873 | MAKE_CASE(ARMISD::VLD2_UPD) |
1874 | MAKE_CASE(ARMISD::VLD3_UPD) |
1875 | MAKE_CASE(ARMISD::VLD4_UPD) |
1876 | MAKE_CASE(ARMISD::VLD1x2_UPD) |
1877 | MAKE_CASE(ARMISD::VLD1x3_UPD) |
1878 | MAKE_CASE(ARMISD::VLD1x4_UPD) |
1879 | MAKE_CASE(ARMISD::VLD2LN_UPD) |
1880 | MAKE_CASE(ARMISD::VLD3LN_UPD) |
1881 | MAKE_CASE(ARMISD::VLD4LN_UPD) |
1882 | MAKE_CASE(ARMISD::VLD1DUP_UPD) |
1883 | MAKE_CASE(ARMISD::VLD2DUP_UPD) |
1884 | MAKE_CASE(ARMISD::VLD3DUP_UPD) |
1885 | MAKE_CASE(ARMISD::VLD4DUP_UPD) |
1886 | MAKE_CASE(ARMISD::VST1_UPD) |
1887 | MAKE_CASE(ARMISD::VST2_UPD) |
1888 | MAKE_CASE(ARMISD::VST3_UPD) |
1889 | MAKE_CASE(ARMISD::VST4_UPD) |
1890 | MAKE_CASE(ARMISD::VST1x2_UPD) |
1891 | MAKE_CASE(ARMISD::VST1x3_UPD) |
1892 | MAKE_CASE(ARMISD::VST1x4_UPD) |
1893 | MAKE_CASE(ARMISD::VST2LN_UPD) |
1894 | MAKE_CASE(ARMISD::VST3LN_UPD) |
1895 | MAKE_CASE(ARMISD::VST4LN_UPD) |
1896 | MAKE_CASE(ARMISD::WLS) |
1897 | MAKE_CASE(ARMISD::WLSSETUP) |
1898 | MAKE_CASE(ARMISD::LE) |
1899 | MAKE_CASE(ARMISD::LOOP_DEC) |
1900 | MAKE_CASE(ARMISD::CSINV) |
1901 | MAKE_CASE(ARMISD::CSNEG) |
1902 | MAKE_CASE(ARMISD::CSINC) |
1903 | MAKE_CASE(ARMISD::MEMCPYLOOP) |
1904 | MAKE_CASE(ARMISD::MEMSETLOOP) |
1905 | #undef MAKE_CASE |
1906 | } |
1907 | return nullptr; |
1908 | } |
1909 | |
1910 | EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, |
1911 | EVT VT) const { |
1912 | if (!VT.isVector()) |
1913 | return getPointerTy(DL); |
1914 | |
1915 | // MVE has a predicate register. |
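    | // Vector comparisons therefore produce vNi1 predicate types (held in VPR) |
    | // rather than the usual widened integer vectors. |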
1916 | if ((Subtarget->hasMVEIntegerOps() && |
1917 | (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || |
1918 | VT == MVT::v16i8)) || |
1919 | (Subtarget->hasMVEFloatOps() && |
1920 | (VT == MVT::v2f64 || VT == MVT::v4f32 || VT == MVT::v8f16))) |
1921 | return MVT::getVectorVT(VT: MVT::i1, EC: VT.getVectorElementCount()); |
1922 | return VT.changeVectorElementTypeToInteger(); |
1923 | } |
1924 | |
1925 | /// getRegClassFor - Return the register class that should be used for the |
1926 | /// specified value type. |
1927 | const TargetRegisterClass * |
1928 | ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { |
1929 | (void)isDivergent; |
1930 | // Map v4i64 to QQ registers but do not make the type legal. Similarly map |
1931 | // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to |
1932 | // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive |
1933 | // MVE Q registers. |
1934 | if (Subtarget->hasNEON()) { |
1935 | if (VT == MVT::v4i64) |
1936 | return &ARM::QQPRRegClass; |
1937 | if (VT == MVT::v8i64) |
1938 | return &ARM::QQQQPRRegClass; |
1939 | } |
1940 | if (Subtarget->hasMVEIntegerOps()) { |
1941 | if (VT == MVT::v4i64) |
1942 | return &ARM::MQQPRRegClass; |
1943 | if (VT == MVT::v8i64) |
1944 | return &ARM::MQQQQPRRegClass; |
1945 | } |
1946 | return TargetLowering::getRegClassFor(VT); |
1947 | } |
1948 | |
1949 | // memcpy and other memory intrinsics typically try to use LDM/STM if the |
1950 | // source/dest is aligned and the copy size is large enough. We therefore want |
1951 | // to align such objects passed to memory intrinsics. |
1952 | bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, |
1953 | Align &PrefAlign) const { |
1954 | if (!isa<MemIntrinsic>(Val: CI)) |
1955 | return false; |
1956 | MinSize = 8; |
1957 | // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1 |
1958 | // cycle faster than 4-byte aligned LDM. |
1959 | PrefAlign = |
1960 | (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? Align(8) : Align(4)); |
1961 | return true; |
1962 | } |
1963 | |
1964 | // Create a fast isel object. |
1965 | FastISel * |
1966 | ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, |
1967 | const TargetLibraryInfo *libInfo) const { |
1968 | return ARM::createFastISel(funcInfo, libInfo); |
1969 | } |
1970 | |
1971 | Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { |
1972 | unsigned NumVals = N->getNumValues(); |
1973 | if (!NumVals) |
1974 | return Sched::RegPressure; |
1975 | |
1976 | for (unsigned i = 0; i != NumVals; ++i) { |
1977 | EVT VT = N->getValueType(ResNo: i); |
1978 | if (VT == MVT::Glue || VT == MVT::Other) |
1979 | continue; |
1980 | if (VT.isFloatingPoint() || VT.isVector()) |
1981 | return Sched::ILP; |
1982 | } |
1983 | |
1984 | if (!N->isMachineOpcode()) |
1985 | return Sched::RegPressure; |
1986 | |
1987 | // Loads are scheduled for latency even if the instruction itinerary |
1988 | // is not available. |
1989 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
1990 | const MCInstrDesc &MCID = TII->get(Opcode: N->getMachineOpcode()); |
1991 | |
1992 | if (MCID.getNumDefs() == 0) |
1993 | return Sched::RegPressure; |
1994 | if (!Itins->isEmpty() && |
1995 | Itins->getOperandCycle(ItinClassIndx: MCID.getSchedClass(), OperandIdx: 0) > 2U) |
1996 | return Sched::ILP; |
1997 | |
1998 | return Sched::RegPressure; |
1999 | } |
2000 | |
2001 | //===----------------------------------------------------------------------===// |
2002 | // Lowering Code |
2003 | //===----------------------------------------------------------------------===// |
2004 | |
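    | // Helpers that match a logical right shift, an arithmetic right shift, or a |
    | // left shift by a constant 16. |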
2005 | static bool isSRL16(const SDValue &Op) { |
2006 | if (Op.getOpcode() != ISD::SRL) |
2007 | return false; |
2008 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
2009 | return Const->getZExtValue() == 16; |
2010 | return false; |
2011 | } |
2012 | |
2013 | static bool isSRA16(const SDValue &Op) { |
2014 | if (Op.getOpcode() != ISD::SRA) |
2015 | return false; |
2016 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
2017 | return Const->getZExtValue() == 16; |
2018 | return false; |
2019 | } |
2020 | |
2021 | static bool isSHL16(const SDValue &Op) { |
2022 | if (Op.getOpcode() != ISD::SHL) |
2023 | return false; |
2024 | if (auto Const = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1))) |
2025 | return Const->getZExtValue() == 16; |
2026 | return false; |
2027 | } |
2028 | |
2029 | // Check for a signed 16-bit value. We special case SRA because it makes it |
2030 | // simpler when also looking for SRAs that aren't sign extending a |
2031 | // smaller value. Without the check, we'd need to take extra care with |
2032 | // checking order for some operations. |
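    | // A 32-bit value that fits in a signed 16-bit range has its top 17 bits all |
    | // equal to the sign bit, which is what the ComputeNumSignBits test below |
    | // relies on. |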
2033 | static bool isS16(const SDValue &Op, SelectionDAG &DAG) { |
2034 | if (isSRA16(Op)) |
2035 | return isSHL16(Op: Op.getOperand(i: 0)); |
2036 | return DAG.ComputeNumSignBits(Op) == 17; |
2037 | } |
2038 | |
2039 | /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC |
2040 | static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { |
2041 | switch (CC) { |
2042 | default: llvm_unreachable("Unknown condition code!" ); |
2043 | case ISD::SETNE: return ARMCC::NE; |
2044 | case ISD::SETEQ: return ARMCC::EQ; |
2045 | case ISD::SETGT: return ARMCC::GT; |
2046 | case ISD::SETGE: return ARMCC::GE; |
2047 | case ISD::SETLT: return ARMCC::LT; |
2048 | case ISD::SETLE: return ARMCC::LE; |
2049 | case ISD::SETUGT: return ARMCC::HI; |
2050 | case ISD::SETUGE: return ARMCC::HS; |
2051 | case ISD::SETULT: return ARMCC::LO; |
2052 | case ISD::SETULE: return ARMCC::LS; |
2053 | } |
2054 | } |
2055 | |
2056 | /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. |
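    | /// Some conditions (e.g. ordered-not-equal or unordered-equal) have no |
    | /// single ARM condition code, so a second condition is returned in CondCode2 |
    | /// (ARMCC::AL when unused) for the caller to test as well. |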
2057 | static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
2058 | ARMCC::CondCodes &CondCode2) { |
2059 | CondCode2 = ARMCC::AL; |
2060 | switch (CC) { |
2061 | default: llvm_unreachable("Unknown FP condition!" ); |
2062 | case ISD::SETEQ: |
2063 | case ISD::SETOEQ: CondCode = ARMCC::EQ; break; |
2064 | case ISD::SETGT: |
2065 | case ISD::SETOGT: CondCode = ARMCC::GT; break; |
2066 | case ISD::SETGE: |
2067 | case ISD::SETOGE: CondCode = ARMCC::GE; break; |
2068 | case ISD::SETOLT: CondCode = ARMCC::MI; break; |
2069 | case ISD::SETOLE: CondCode = ARMCC::LS; break; |
2070 | case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; |
2071 | case ISD::SETO: CondCode = ARMCC::VC; break; |
2072 | case ISD::SETUO: CondCode = ARMCC::VS; break; |
2073 | case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; |
2074 | case ISD::SETUGT: CondCode = ARMCC::HI; break; |
2075 | case ISD::SETUGE: CondCode = ARMCC::PL; break; |
2076 | case ISD::SETLT: |
2077 | case ISD::SETULT: CondCode = ARMCC::LT; break; |
2078 | case ISD::SETLE: |
2079 | case ISD::SETULE: CondCode = ARMCC::LE; break; |
2080 | case ISD::SETNE: |
2081 | case ISD::SETUNE: CondCode = ARMCC::NE; break; |
2082 | } |
2083 | } |
2084 | |
2085 | //===----------------------------------------------------------------------===// |
2086 | // Calling Convention Implementation |
2087 | //===----------------------------------------------------------------------===// |
2088 | |
2089 | /// getEffectiveCallingConv - Get the effective calling convention, taking into |
2090 | /// account presence of floating point hardware and calling convention |
2091 | /// limitations, such as support for variadic functions. |
2092 | CallingConv::ID |
2093 | ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, |
2094 | bool isVarArg) const { |
2095 | switch (CC) { |
2096 | default: |
2097 | report_fatal_error(reason: "Unsupported calling convention" ); |
2098 | case CallingConv::ARM_AAPCS: |
2099 | case CallingConv::ARM_APCS: |
2100 | case CallingConv::GHC: |
2101 | case CallingConv::CFGuard_Check: |
2102 | return CC; |
2103 | case CallingConv::PreserveMost: |
2104 | return CallingConv::PreserveMost; |
2105 | case CallingConv::PreserveAll: |
2106 | return CallingConv::PreserveAll; |
2107 | case CallingConv::ARM_AAPCS_VFP: |
2108 | case CallingConv::Swift: |
2109 | case CallingConv::SwiftTail: |
2110 | return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; |
2111 | case CallingConv::C: |
2112 | case CallingConv::Tail: |
2113 | if (!Subtarget->isAAPCS_ABI()) |
2114 | return CallingConv::ARM_APCS; |
2115 | else if (Subtarget->hasFPRegs() && !Subtarget->isThumb1Only() && |
2116 | getTargetMachine().Options.FloatABIType == FloatABI::Hard && |
2117 | !isVarArg) |
2118 | return CallingConv::ARM_AAPCS_VFP; |
2119 | else |
2120 | return CallingConv::ARM_AAPCS; |
2121 | case CallingConv::Fast: |
2122 | case CallingConv::CXX_FAST_TLS: |
2123 | if (!Subtarget->isAAPCS_ABI()) { |
2124 | if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg) |
2125 | return CallingConv::Fast; |
2126 | return CallingConv::ARM_APCS; |
2127 | } else if (Subtarget->hasVFP2Base() && |
2128 | !Subtarget->isThumb1Only() && !isVarArg) |
2129 | return CallingConv::ARM_AAPCS_VFP; |
2130 | else |
2131 | return CallingConv::ARM_AAPCS; |
2132 | } |
2133 | } |
2134 | |
2135 | CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC, |
2136 | bool isVarArg) const { |
2137 | return CCAssignFnForNode(CC, Return: false, isVarArg); |
2138 | } |
2139 | |
2140 | CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC, |
2141 | bool isVarArg) const { |
2142 | return CCAssignFnForNode(CC, Return: true, isVarArg); |
2143 | } |
2144 | |
2145 | /// CCAssignFnForNode - Selects the correct CCAssignFn for the given |
2146 | /// CallingConvention. |
2147 | CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, |
2148 | bool Return, |
2149 | bool isVarArg) const { |
2150 | switch (getEffectiveCallingConv(CC, isVarArg)) { |
2151 | default: |
2152 | report_fatal_error(reason: "Unsupported calling convention" ); |
2153 | case CallingConv::ARM_APCS: |
2154 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); |
2155 | case CallingConv::ARM_AAPCS: |
2156 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
2157 | case CallingConv::ARM_AAPCS_VFP: |
2158 | return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); |
2159 | case CallingConv::Fast: |
2160 | return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); |
2161 | case CallingConv::GHC: |
2162 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC); |
2163 | case CallingConv::PreserveMost: |
2164 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
2165 | case CallingConv::PreserveAll: |
2166 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
2167 | case CallingConv::CFGuard_Check: |
2168 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check); |
2169 | } |
2170 | } |
2171 | |
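    | // MoveToHPR - Reinterpret a value held in the wider location type LocVT as |
    | // a half-precision value of type ValVT: bitcast to the matching integer |
    | // type, then either move it into an FP16 register with VMOVhr (full FP16) |
    | // or truncate and bitcast (otherwise). |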
2172 | SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, |
2173 | MVT LocVT, MVT ValVT, SDValue Val) const { |
2174 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), |
2175 | Operand: Val); |
2176 | if (Subtarget->hasFullFP16()) { |
2177 | Val = DAG.getNode(Opcode: ARMISD::VMOVhr, DL: dl, VT: ValVT, Operand: Val); |
2178 | } else { |
2179 | Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, |
2180 | VT: MVT::getIntegerVT(BitWidth: ValVT.getSizeInBits()), Operand: Val); |
2181 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: ValVT, Operand: Val); |
2182 | } |
2183 | return Val; |
2184 | } |
2185 | |
2186 | SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, |
2187 | MVT LocVT, MVT ValVT, |
2188 | SDValue Val) const { |
2189 | if (Subtarget->hasFullFP16()) { |
2190 | Val = DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, |
2191 | VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), Operand: Val); |
2192 | } else { |
2193 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, |
2194 | VT: MVT::getIntegerVT(BitWidth: ValVT.getSizeInBits()), Operand: Val); |
2195 | Val = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, |
2196 | VT: MVT::getIntegerVT(BitWidth: LocVT.getSizeInBits()), Operand: Val); |
2197 | } |
2198 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: LocVT, Operand: Val); |
2199 | } |
2200 | |
2201 | /// LowerCallResult - Lower the result values of a call into the |
2202 | /// appropriate copies out of appropriate physical registers. |
2203 | SDValue ARMTargetLowering::LowerCallResult( |
2204 | SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg, |
2205 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
2206 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, |
2207 | SDValue ThisVal, bool isCmseNSCall) const { |
2208 | // Assign locations to each value returned by this call. |
2209 | SmallVector<CCValAssign, 16> RVLocs; |
2210 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
2211 | *DAG.getContext()); |
2212 | CCInfo.AnalyzeCallResult(Ins, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
2213 | |
2214 | // Copy all of the result registers out of their specified physreg. |
2215 | for (unsigned i = 0; i != RVLocs.size(); ++i) { |
2216 | CCValAssign VA = RVLocs[i]; |
2217 | |
2218 | // Pass 'this' value directly from the argument to return value, to avoid |
2219 | // reg unit interference |
2220 | if (i == 0 && isThisReturn) { |
2221 | assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && |
2222 | "unexpected return calling convention register assignment" ); |
2223 | InVals.push_back(Elt: ThisVal); |
2224 | continue; |
2225 | } |
2226 | |
2227 | SDValue Val; |
2228 | if (VA.needsCustom() && |
2229 | (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) { |
2230 | // Handle f64 or half of a v2f64. |
2231 | SDValue Lo = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, |
2232 | Glue: InGlue); |
2233 | Chain = Lo.getValue(R: 1); |
2234 | InGlue = Lo.getValue(R: 2); |
2235 | VA = RVLocs[++i]; // skip ahead to next loc |
2236 | SDValue Hi = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, |
2237 | Glue: InGlue); |
2238 | Chain = Hi.getValue(R: 1); |
2239 | InGlue = Hi.getValue(R: 2); |
2240 | if (!Subtarget->isLittle()) |
2241 | std::swap (a&: Lo, b&: Hi); |
2242 | Val = DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
2243 | |
2244 | if (VA.getLocVT() == MVT::v2f64) { |
2245 | SDValue Vec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: MVT::v2f64); |
2246 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Vec, N2: Val, |
2247 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
2248 | |
2249 | VA = RVLocs[++i]; // skip ahead to next loc |
2250 | Lo = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, Glue: InGlue); |
2251 | Chain = Lo.getValue(R: 1); |
2252 | InGlue = Lo.getValue(R: 2); |
2253 | VA = RVLocs[++i]; // skip ahead to next loc |
2254 | Hi = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: MVT::i32, Glue: InGlue); |
2255 | Chain = Hi.getValue(R: 1); |
2256 | InGlue = Hi.getValue(R: 2); |
2257 | if (!Subtarget->isLittle()) |
2258 | std::swap (a&: Lo, b&: Hi); |
2259 | Val = DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
2260 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Vec, N2: Val, |
2261 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
2262 | } |
2263 | } else { |
2264 | Val = DAG.getCopyFromReg(Chain, dl, Reg: VA.getLocReg(), VT: VA.getLocVT(), |
2265 | Glue: InGlue); |
2266 | Chain = Val.getValue(R: 1); |
2267 | InGlue = Val.getValue(R: 2); |
2268 | } |
2269 | |
2270 | switch (VA.getLocInfo()) { |
2271 | default: llvm_unreachable("Unknown loc info!" ); |
2272 | case CCValAssign::Full: break; |
2273 | case CCValAssign::BCvt: |
2274 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getValVT(), Operand: Val); |
2275 | break; |
2276 | } |
2277 | |
2278 | // f16 arguments have their size extended to 4 bytes and passed as if they |
2279 | // had been copied to the LSBs of a 32-bit register. |
2280 | // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI) |
2281 | if (VA.needsCustom() && |
2282 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) |
2283 | Val = MoveToHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val); |
2284 | |
2285 | // On CMSE Non-secure Calls, call results (returned values) whose bitwidth |
2286 | // is less than 32 bits must be sign- or zero-extended after the call for |
2287 | // security reasons. Although the ABI mandates an extension done by the |
2288 | // callee, the latter cannot be trusted to follow the rules of the ABI. |
2289 | const ISD::InputArg &Arg = Ins[VA.getValNo()]; |
2290 | if (isCmseNSCall && Arg.ArgVT.isScalarInteger() && |
2291 | VA.getLocVT().isScalarInteger() && Arg.ArgVT.bitsLT(VT: MVT::i32)) |
2292 | Val = handleCMSEValue(Value: Val, Arg, DAG, DL: dl); |
2293 | |
2294 | InVals.push_back(Elt: Val); |
2295 | } |
2296 | |
2297 | return Chain; |
2298 | } |
2299 | |
2300 | std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg( |
2301 | const SDLoc &dl, SelectionDAG &DAG, const CCValAssign &VA, SDValue StackPtr, |
2302 | bool IsTailCall, int SPDiff) const { |
2303 | SDValue DstAddr; |
2304 | MachinePointerInfo DstInfo; |
2305 | int32_t Offset = VA.getLocMemOffset(); |
2306 | MachineFunction &MF = DAG.getMachineFunction(); |
2307 | |
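     | // Descriptive note: for tail calls the outgoing argument is addressed via a
     | // fixed stack object in the caller's own argument area (biased by SPDiff),
     | // whereas for normal calls it is simply SP plus the location's offset.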
2308 | if (IsTailCall) { |
2309 | Offset += SPDiff; |
2310 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
2311 | int Size = VA.getLocVT().getFixedSizeInBits() / 8; |
2312 | int FI = MF.getFrameInfo().CreateFixedObject(Size, SPOffset: Offset, IsImmutable: true); |
2313 | DstAddr = DAG.getFrameIndex(FI, VT: PtrVT); |
2314 | DstInfo = |
2315 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI); |
2316 | } else { |
2317 | SDValue PtrOff = DAG.getIntPtrConstant(Val: Offset, DL: dl); |
2318 | DstAddr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: getPointerTy(DL: DAG.getDataLayout()), |
2319 | N1: StackPtr, N2: PtrOff); |
2320 | DstInfo = |
2321 | MachinePointerInfo::getStack(MF&: DAG.getMachineFunction(), Offset); |
2322 | } |
2323 | |
2324 | return std::make_pair(x&: DstAddr, y&: DstInfo); |
2325 | } |
2326 | |
2327 | void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, |
2328 | SDValue Chain, SDValue &Arg, |
2329 | RegsToPassVector &RegsToPass, |
2330 | CCValAssign &VA, CCValAssign &NextVA, |
2331 | SDValue &StackPtr, |
2332 | SmallVectorImpl<SDValue> &MemOpChains, |
2333 | bool IsTailCall, |
2334 | int SPDiff) const { |
2335 | SDValue fmrrd = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
2336 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Arg); |
2337 | unsigned id = Subtarget->isLittle() ? 0 : 1; |
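     | // VMOVRRD result 0 is the low word and result 1 the high word of the f64;
     | // on big-endian targets the two are swapped so the first register receives
     | // the high word, as AAPCS requires for doublewords.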
2338 | RegsToPass.push_back(Elt: std::make_pair(x: VA.getLocReg(), y: fmrrd.getValue(R: id))); |
2339 | |
2340 | if (NextVA.isRegLoc()) |
2341 | RegsToPass.push_back(Elt: std::make_pair(x: NextVA.getLocReg(), y: fmrrd.getValue(R: 1-id))); |
2342 | else { |
2343 | assert(NextVA.isMemLoc()); |
2344 | if (!StackPtr.getNode()) |
2345 | StackPtr = DAG.getCopyFromReg(Chain, dl, Reg: ARM::SP, |
2346 | VT: getPointerTy(DL: DAG.getDataLayout())); |
2347 | |
2348 | SDValue DstAddr; |
2349 | MachinePointerInfo DstInfo; |
2350 | std::tie(args&: DstAddr, args&: DstInfo) = |
2351 | computeAddrForCallArg(dl, DAG, VA: NextVA, StackPtr, IsTailCall, SPDiff); |
2352 | MemOpChains.push_back( |
2353 | Elt: DAG.getStore(Chain, dl, Val: fmrrd.getValue(R: 1 - id), Ptr: DstAddr, PtrInfo: DstInfo)); |
2354 | } |
2355 | } |
2356 | |
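     | // canGuaranteeTCO - Whether tail-call optimization is guaranteed for this
     | // calling convention. Roughly: tailcc and swifttailcc always guarantee it,
     | // while fastcc only does so under -tailcallopt (GuaranteedTailCallOpt).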
2357 | static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) { |
2358 | return (CC == CallingConv::Fast && GuaranteeTailCalls) || |
2359 | CC == CallingConv::Tail || CC == CallingConv::SwiftTail; |
2360 | } |
2361 | |
2362 | /// LowerCall - Lowering a call into a callseq_start <- |
2363 | /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
2364 | /// nodes. |
2365 | SDValue |
2366 | ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, |
2367 | SmallVectorImpl<SDValue> &InVals) const { |
2368 | SelectionDAG &DAG = CLI.DAG; |
2369 | SDLoc &dl = CLI.DL; |
2370 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
2371 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
2372 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
2373 | SDValue Chain = CLI.Chain; |
2374 | SDValue Callee = CLI.Callee; |
2375 | bool &isTailCall = CLI.IsTailCall; |
2376 | CallingConv::ID CallConv = CLI.CallConv; |
2377 | bool doesNotRet = CLI.DoesNotReturn; |
2378 | bool isVarArg = CLI.IsVarArg; |
2379 | |
2380 | MachineFunction &MF = DAG.getMachineFunction(); |
2381 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
2382 | MachineFunction::CallSiteInfo CSInfo; |
2383 | bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); |
2384 | bool isThisReturn = false; |
2385 | bool isCmseNSCall = false; |
2386 | bool isSibCall = false; |
2387 | bool PreferIndirect = false; |
2388 | bool GuardWithBTI = false; |
2389 | |
2390 | // Analyze operands of the call, assigning locations to each operand. |
2391 | SmallVector<CCValAssign, 16> ArgLocs; |
2392 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
2393 | *DAG.getContext()); |
2394 | CCInfo.AnalyzeCallOperands(Outs, Fn: CCAssignFnForCall(CC: CallConv, isVarArg)); |
2395 | |
2396 | // Lower 'returns_twice' calls to a pseudo-instruction. |
2397 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Kind: Attribute::ReturnsTwice) && |
2398 | !Subtarget->noBTIAtReturnTwice()) |
2399 | GuardWithBTI = AFI->branchTargetEnforcement(); |
2400 | |
2401 | // Determine whether this is a non-secure function call. |
2402 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Kind: "cmse_nonsecure_call" )) |
2403 | isCmseNSCall = true; |
2404 | |
2405 | // Disable tail calls if they're not supported. |
2406 | if (!Subtarget->supportsTailCall()) |
2407 | isTailCall = false; |
2408 | |
2409 | // For both the non-secure calls and the returns from a CMSE entry function, |
2410 | // the function needs to do some extra work after the call, or before the
2411 | // return, respectively, thus it cannot end with a tail call
2412 | if (isCmseNSCall || AFI->isCmseNSEntryFunction()) |
2413 | isTailCall = false; |
2414 | |
2415 | if (isa<GlobalAddressSDNode>(Val: Callee)) { |
2416 | // If we're optimizing for minimum size and the function is called three or |
2417 | // more times in this block, we can improve codesize by calling indirectly |
2418 | // as BLXr has a 16-bit encoding. |
2419 | auto *GV = cast<GlobalAddressSDNode>(Val&: Callee)->getGlobal(); |
2420 | if (CLI.CB) { |
2421 | auto *BB = CLI.CB->getParent(); |
2422 | PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() && |
2423 | count_if(Range: GV->users(), P: [&BB](const User *U) { |
2424 | return isa<Instruction>(Val: U) && |
2425 | cast<Instruction>(Val: U)->getParent() == BB; |
2426 | }) > 2; |
2427 | } |
2428 | } |
2429 | if (isTailCall) { |
2430 | // Check if it's really possible to do a tail call. |
2431 | isTailCall = |
2432 | IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs, isIndirect: PreferIndirect); |
2433 | |
2434 | if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt && |
2435 | CallConv != CallingConv::Tail && CallConv != CallingConv::SwiftTail) |
2436 | isSibCall = true; |
2437 | |
2438 | // We don't support GuaranteedTailCallOpt for ARM, only automatically |
2439 | // detected sibcalls. |
2440 | if (isTailCall) |
2441 | ++NumTailCalls; |
2442 | } |
2443 | |
2444 | if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall()) |
2445 | report_fatal_error(reason: "failed to perform tail call elimination on a call " |
2446 | "site marked musttail" ); |
2447 | |
2448 | // Get a count of how many bytes are to be pushed on the stack. |
2449 | unsigned NumBytes = CCInfo.getStackSize(); |
2450 | |
2451 | // SPDiff is the byte offset of the call's argument area from the callee's. |
2452 | // Stores to callee stack arguments will be placed in FixedStackSlots offset |
2453 | // by this amount for a tail call. In a sibling call it must be 0 because the |
2454 | // caller will deallocate the entire stack and the callee still expects its |
2455 | // arguments to begin at SP+0. Completely unused for non-tail calls. |
2456 | int SPDiff = 0; |
2457 | |
2458 | if (isTailCall && !isSibCall) { |
2459 | auto FuncInfo = MF.getInfo<ARMFunctionInfo>(); |
2460 | unsigned NumReusableBytes = FuncInfo->getArgumentStackSize(); |
2461 | |
2462 | // Since callee will pop argument stack as a tail call, we must keep the |
2463 | // popped size 16-byte aligned. |
2464 | Align StackAlign = DAG.getDataLayout().getStackAlignment(); |
2465 | NumBytes = alignTo(Size: NumBytes, A: StackAlign); |
2466 | |
2467 | // SPDiff will be negative if this tail call requires more space than we |
2468 | // would automatically have in our incoming argument space. Positive if we |
2469 | // can actually shrink the stack. |
2470 | SPDiff = NumReusableBytes - NumBytes; |
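     | // Worked example (hypothetical sizes): if the caller received 8 bytes of
     | // stack arguments (NumReusableBytes == 8) and this tail call needs 24
     | // bytes (NumBytes == 24), SPDiff is -16 and those extra 16 bytes must be
     | // reserved via setArgRegsSaveSize below.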
2471 | |
2472 | // If this call requires more stack than we have available from |
2473 | // LowerFormalArguments, tell FrameLowering to reserve space for it. |
2474 | if (SPDiff < 0 && AFI->getArgRegsSaveSize() < (unsigned)-SPDiff) |
2475 | AFI->setArgRegsSaveSize(-SPDiff); |
2476 | } |
2477 | |
2478 | if (isSibCall) { |
2479 | // For sibling tail calls, memory operands are available in our caller's stack. |
2480 | NumBytes = 0; |
2481 | } else { |
2482 | // Adjust the stack pointer for the new arguments... |
2483 | // These operations are automatically eliminated by the prolog/epilog pass |
2484 | Chain = DAG.getCALLSEQ_START(Chain, InSize: isTailCall ? 0 : NumBytes, OutSize: 0, DL: dl); |
2485 | } |
2486 | |
2487 | SDValue StackPtr = |
2488 | DAG.getCopyFromReg(Chain, dl, Reg: ARM::SP, VT: getPointerTy(DL: DAG.getDataLayout())); |
2489 | |
2490 | RegsToPassVector RegsToPass; |
2491 | SmallVector<SDValue, 8> MemOpChains; |
2492 | |
2493 | // During a tail call, stores to the argument area must happen after all of |
2494 | // the function's incoming arguments have been loaded because they may alias. |
2495 | // This is done by folding in a TokenFactor from LowerFormalArguments, but |
2496 | // there's no point in doing so repeatedly so this tracks whether that's |
2497 | // happened yet. |
2498 | bool AfterFormalArgLoads = false; |
2499 | |
2500 | // Walk the register/memloc assignments, inserting copies/loads. In the case |
2501 | // of tail call optimization, arguments are handled later. |
2502 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); |
2503 | i != e; |
2504 | ++i, ++realArgIdx) { |
2505 | CCValAssign &VA = ArgLocs[i]; |
2506 | SDValue Arg = OutVals[realArgIdx]; |
2507 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; |
2508 | bool isByVal = Flags.isByVal(); |
2509 | |
2510 | // Promote the value if needed. |
2511 | switch (VA.getLocInfo()) { |
2512 | default: llvm_unreachable("Unknown loc info!" ); |
2513 | case CCValAssign::Full: break; |
2514 | case CCValAssign::SExt: |
2515 | Arg = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
2516 | break; |
2517 | case CCValAssign::ZExt: |
2518 | Arg = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
2519 | break; |
2520 | case CCValAssign::AExt: |
2521 | Arg = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
2522 | break; |
2523 | case CCValAssign::BCvt: |
2524 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
2525 | break; |
2526 | } |
2527 | |
2528 | if (isTailCall && VA.isMemLoc() && !AfterFormalArgLoads) { |
2529 | Chain = DAG.getStackArgumentTokenFactor(Chain); |
2530 | AfterFormalArgLoads = true; |
2531 | } |
2532 | |
2533 | // f16 arguments have their size extended to 4 bytes and passed as if they |
2534 | // had been copied to the LSBs of a 32-bit register. |
2535 | // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI) |
2536 | if (VA.needsCustom() && |
2537 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) { |
2538 | Arg = MoveFromHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: Arg); |
2539 | } else { |
2540 | // f16 arguments could have been extended prior to argument lowering. |
2541 | // Mask such arguments if this is a CMSE nonsecure call.
2542 | auto ArgVT = Outs[realArgIdx].ArgVT; |
2543 | if (isCmseNSCall && (ArgVT == MVT::f16)) { |
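     | // With an f16 argument in an i32 location, MaskValue below is 0xFFFF, so
     | // any stale upper bits left by an earlier extension are cleared before
     | // the non-secure call.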
2544 | auto LocBits = VA.getLocVT().getSizeInBits(); |
2545 | auto MaskValue = APInt::getLowBitsSet(numBits: LocBits, loBitsSet: ArgVT.getSizeInBits()); |
2546 | SDValue Mask = |
2547 | DAG.getConstant(Val: MaskValue, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits)); |
2548 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), Operand: Arg); |
2549 | Arg = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), N1: Arg, N2: Mask); |
2550 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
2551 | } |
2552 | } |
2553 | |
2554 | // f64 and v2f64 might be passed in i32 pairs and must be split into pieces |
2555 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { |
2556 | SDValue Op0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
2557 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
2558 | SDValue Op1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
2559 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
2560 | |
2561 | PassF64ArgInRegs(dl, DAG, Chain, Arg&: Op0, RegsToPass, VA, NextVA&: ArgLocs[++i], |
2562 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
2563 | |
2564 | VA = ArgLocs[++i]; // skip ahead to next loc |
2565 | if (VA.isRegLoc()) { |
2566 | PassF64ArgInRegs(dl, DAG, Chain, Arg&: Op1, RegsToPass, VA, NextVA&: ArgLocs[++i], |
2567 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
2568 | } else { |
2569 | assert(VA.isMemLoc()); |
2570 | SDValue DstAddr; |
2571 | MachinePointerInfo DstInfo; |
2572 | std::tie(args&: DstAddr, args&: DstInfo) = |
2573 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
2574 | MemOpChains.push_back(Elt: DAG.getStore(Chain, dl, Val: Op1, Ptr: DstAddr, PtrInfo: DstInfo)); |
2575 | } |
2576 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { |
2577 | PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, NextVA&: ArgLocs[++i], |
2578 | StackPtr, MemOpChains, IsTailCall: isTailCall, SPDiff); |
2579 | } else if (VA.isRegLoc()) { |
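     | // Illustrative IR (hypothetical): for "call ptr @setter(ptr returned %this)"
     | // the 'returned' attribute guarantees r0 still holds %this after the call,
     | // so the check below lets LowerCallResult reuse that value directly.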
2580 | if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && |
2581 | Outs[0].VT == MVT::i32) { |
2582 | assert(VA.getLocVT() == MVT::i32 && |
2583 | "unexpected calling convention register assignment" ); |
2584 | assert(!Ins.empty() && Ins[0].VT == MVT::i32 && |
2585 | "unexpected use of 'returned'" ); |
2586 | isThisReturn = true; |
2587 | } |
2588 | const TargetOptions &Options = DAG.getTarget().Options; |
2589 | if (Options.EmitCallSiteInfo) |
2590 | CSInfo.ArgRegPairs.emplace_back(Args: VA.getLocReg(), Args&: i); |
2591 | RegsToPass.push_back(Elt: std::make_pair(x: VA.getLocReg(), y&: Arg)); |
2592 | } else if (isByVal) { |
2593 | assert(VA.isMemLoc()); |
2594 | unsigned offset = 0; |
2595 | |
2596 | // True if this byval aggregate will be split between registers |
2597 | // and memory. |
2598 | unsigned ByValArgsCount = CCInfo.getInRegsParamsCount(); |
2599 | unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed(); |
2600 | |
2601 | if (CurByValIdx < ByValArgsCount) { |
2602 | |
2603 | unsigned RegBegin, RegEnd; |
2604 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: CurByValIdx, BeginReg&: RegBegin, EndReg&: RegEnd); |
2605 | |
2606 | EVT PtrVT = |
2607 | DAG.getTargetLoweringInfo().getPointerTy(DL: DAG.getDataLayout()); |
2608 | unsigned int i, j; |
2609 | for (i = 0, j = RegBegin; j < RegEnd; i++, j++) { |
2610 | SDValue Const = DAG.getConstant(Val: 4*i, DL: dl, VT: MVT::i32); |
2611 | SDValue AddArg = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: Arg, N2: Const); |
2612 | SDValue Load = |
2613 | DAG.getLoad(VT: PtrVT, dl, Chain, Ptr: AddArg, PtrInfo: MachinePointerInfo(), |
2614 | Alignment: DAG.InferPtrAlign(Ptr: AddArg)); |
2615 | MemOpChains.push_back(Elt: Load.getValue(R: 1)); |
2616 | RegsToPass.push_back(Elt: std::make_pair(x&: j, y&: Load)); |
2617 | } |
2618 | |
2619 | // If the parameter size exceeds the register area, the "offset" value
2620 | // helps us calculate the stack slot for the remaining part properly.
2621 | offset = RegEnd - RegBegin; |
2622 | |
2623 | CCInfo.nextInRegsParam(); |
2624 | } |
2625 | |
2626 | if (Flags.getByValSize() > 4*offset) { |
2627 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
2628 | SDValue Dst; |
2629 | MachinePointerInfo DstInfo; |
2630 | std::tie(args&: Dst, args&: DstInfo) = |
2631 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
2632 | SDValue SrcOffset = DAG.getIntPtrConstant(Val: 4*offset, DL: dl); |
2633 | SDValue Src = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: Arg, N2: SrcOffset); |
2634 | SDValue SizeNode = DAG.getConstant(Val: Flags.getByValSize() - 4*offset, DL: dl, |
2635 | VT: MVT::i32); |
2636 | SDValue AlignNode = |
2637 | DAG.getConstant(Val: Flags.getNonZeroByValAlign().value(), DL: dl, VT: MVT::i32); |
2638 | |
2639 | SDVTList VTs = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
2640 | SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode}; |
2641 | MemOpChains.push_back(Elt: DAG.getNode(Opcode: ARMISD::COPY_STRUCT_BYVAL, DL: dl, VTList: VTs, |
2642 | Ops)); |
2643 | } |
2644 | } else { |
2645 | assert(VA.isMemLoc()); |
2646 | SDValue DstAddr; |
2647 | MachinePointerInfo DstInfo; |
2648 | std::tie(args&: DstAddr, args&: DstInfo) = |
2649 | computeAddrForCallArg(dl, DAG, VA, StackPtr, IsTailCall: isTailCall, SPDiff); |
2650 | |
2651 | SDValue Store = DAG.getStore(Chain, dl, Val: Arg, Ptr: DstAddr, PtrInfo: DstInfo); |
2652 | MemOpChains.push_back(Elt: Store); |
2653 | } |
2654 | } |
2655 | |
2656 | if (!MemOpChains.empty()) |
2657 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: MemOpChains); |
2658 | |
2659 | // Build a sequence of copy-to-reg nodes chained together with token chain |
2660 | // and flag operands which copy the outgoing args into the appropriate regs. |
2661 | SDValue InGlue; |
2662 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { |
2663 | Chain = DAG.getCopyToReg(Chain, dl, Reg: RegsToPass[i].first, |
2664 | N: RegsToPass[i].second, Glue: InGlue); |
2665 | InGlue = Chain.getValue(R: 1); |
2666 | } |
2667 | |
2668 | // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every |
2669 | // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol |
2670 | // node so that legalize doesn't hack it. |
2671 | bool isDirect = false; |
2672 | |
2673 | const TargetMachine &TM = getTargetMachine(); |
2674 | const GlobalValue *GVal = nullptr; |
2675 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) |
2676 | GVal = G->getGlobal(); |
2677 | bool isStub = !TM.shouldAssumeDSOLocal(GV: GVal) && Subtarget->isTargetMachO(); |
2678 | |
2679 | bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); |
2680 | bool isLocalARMFunc = false; |
2681 | auto PtrVt = getPointerTy(DL: DAG.getDataLayout()); |
2682 | |
2683 | if (Subtarget->genLongCalls()) { |
2684 | assert((!isPositionIndependent() || Subtarget->isTargetWindows()) && |
2685 | "long-calls codegen is not position independent!" ); |
2686 | // Handle a global address or an external symbol. If it's not one of |
2687 | // those, the target's already in a register, so we don't need to do |
2688 | // anything extra. |
2689 | if (isa<GlobalAddressSDNode>(Val: Callee)) { |
2690 | if (Subtarget->genExecuteOnly()) { |
2691 | if (Subtarget->useMovt()) |
2692 | ++NumMovwMovt; |
2693 | Callee = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt, |
2694 | Operand: DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt)); |
2695 | } else { |
2696 | // Create a constant pool entry for the callee address |
2697 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
2698 | ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create( |
2699 | C: GVal, ID: ARMPCLabelIndex, Kind: ARMCP::CPValue, PCAdj: 0); |
2700 | |
2701 | // Get the address of the callee into a register |
2702 | SDValue Addr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
2703 | Addr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Addr); |
2704 | Callee = DAG.getLoad( |
2705 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Addr, |
2706 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
2707 | } |
2708 | } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) { |
2709 | const char *Sym = S->getSymbol(); |
2710 | |
2711 | if (Subtarget->genExecuteOnly()) { |
2712 | if (Subtarget->useMovt()) |
2713 | ++NumMovwMovt; |
2714 | Callee = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt, |
2715 | Operand: DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt)); |
2716 | } else { |
2717 | // Create a constant pool entry for the callee address |
2718 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
2719 | ARMConstantPoolValue *CPV = ARMConstantPoolSymbol::Create( |
2720 | C&: *DAG.getContext(), s: Sym, ID: ARMPCLabelIndex, PCAdj: 0); |
2721 | |
2722 | // Get the address of the callee into a register |
2723 | SDValue Addr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
2724 | Addr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Addr); |
2725 | Callee = DAG.getLoad( |
2726 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Addr, |
2727 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
2728 | } |
2729 | } |
2730 | } else if (isa<GlobalAddressSDNode>(Val: Callee)) { |
2731 | if (!PreferIndirect) { |
2732 | isDirect = true; |
2733 | bool isDef = GVal->isStrongDefinitionForLinker(); |
2734 | |
2735 | // ARM call to a local ARM function is predicable. |
2736 | isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking); |
2737 | // tBX takes a register source operand. |
2738 | if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
2739 | assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?" ); |
2740 | Callee = DAG.getNode( |
2741 | Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVt, |
2742 | Operand: DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, offset: 0, TargetFlags: ARMII::MO_NONLAZY)); |
2743 | Callee = DAG.getLoad( |
2744 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: Callee, |
2745 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction()), Alignment: MaybeAlign(), |
2746 | MMOFlags: MachineMemOperand::MODereferenceable | |
2747 | MachineMemOperand::MOInvariant); |
2748 | } else if (Subtarget->isTargetCOFF()) { |
2749 | assert(Subtarget->isTargetWindows() && |
2750 | "Windows is the only supported COFF target" ); |
2751 | unsigned TargetFlags = ARMII::MO_NO_FLAG; |
2752 | if (GVal->hasDLLImportStorageClass()) |
2753 | TargetFlags = ARMII::MO_DLLIMPORT; |
2754 | else if (!TM.shouldAssumeDSOLocal(GV: GVal)) |
2755 | TargetFlags = ARMII::MO_COFFSTUB; |
2756 | Callee = DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, /*offset=*/0, |
2757 | TargetFlags); |
2758 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) |
2759 | Callee = |
2760 | DAG.getLoad(VT: PtrVt, dl, Chain: DAG.getEntryNode(), |
2761 | Ptr: DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVt, Operand: Callee), |
2762 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
2763 | } else { |
2764 | Callee = DAG.getTargetGlobalAddress(GV: GVal, DL: dl, VT: PtrVt, offset: 0, TargetFlags: 0); |
2765 | } |
2766 | } |
2767 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Val&: Callee)) { |
2768 | isDirect = true; |
2769 | // tBX takes a register source operand. |
2770 | const char *Sym = S->getSymbol(); |
2771 | if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
2772 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
2773 | ARMConstantPoolValue *CPV = |
2774 | ARMConstantPoolSymbol::Create(C&: *DAG.getContext(), s: Sym, |
2775 | ID: ARMPCLabelIndex, PCAdj: 4); |
2776 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVt, Align: Align(4)); |
2777 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
2778 | Callee = DAG.getLoad( |
2779 | VT: PtrVt, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
2780 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
2781 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
2782 | Callee = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVt, N1: Callee, N2: PICLabel); |
2783 | } else { |
2784 | Callee = DAG.getTargetExternalSymbol(Sym, VT: PtrVt, TargetFlags: 0); |
2785 | } |
2786 | } |
2787 | |
2788 | if (isCmseNSCall) { |
2789 | assert(!isARMFunc && !isDirect && |
2790 | "Cannot handle call to ARM function or direct call" ); |
2791 | if (NumBytes > 0) { |
2792 | DiagnosticInfoUnsupported Diag(DAG.getMachineFunction().getFunction(), |
2793 | "call to non-secure function would " |
2794 | "require passing arguments on stack" , |
2795 | dl.getDebugLoc()); |
2796 | DAG.getContext()->diagnose(DI: Diag); |
2797 | } |
2798 | if (isStructRet) { |
2799 | DiagnosticInfoUnsupported Diag( |
2800 | DAG.getMachineFunction().getFunction(), |
2801 | "call to non-secure function would return value through pointer" , |
2802 | dl.getDebugLoc()); |
2803 | DAG.getContext()->diagnose(DI: Diag); |
2804 | } |
2805 | } |
2806 | |
2807 | // FIXME: handle tail calls differently. |
2808 | unsigned CallOpc; |
2809 | if (Subtarget->isThumb()) { |
2810 | if (GuardWithBTI) |
2811 | CallOpc = ARMISD::t2CALL_BTI; |
2812 | else if (isCmseNSCall) |
2813 | CallOpc = ARMISD::tSECALL; |
2814 | else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) |
2815 | CallOpc = ARMISD::CALL_NOLINK; |
2816 | else |
2817 | CallOpc = ARMISD::CALL; |
2818 | } else { |
2819 | if (!isDirect && !Subtarget->hasV5TOps()) |
2820 | CallOpc = ARMISD::CALL_NOLINK; |
2821 | else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && |
2822 | // Emit regular call when code size is the priority |
2823 | !Subtarget->hasMinSize()) |
2824 | // "mov lr, pc; b _foo" to avoid confusing the RSP |
2825 | CallOpc = ARMISD::CALL_NOLINK; |
2826 | else |
2827 | CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; |
2828 | } |
2829 | |
2830 | // We don't usually want to end the call-sequence here because we would tidy |
2831 | // the frame up *after* the call, however in the ABI-changing tail-call case |
2832 | // we've carefully laid out the parameters so that when sp is reset they'll be |
2833 | // in the correct location. |
2834 | if (isTailCall && !isSibCall) { |
2835 | Chain = DAG.getCALLSEQ_END(Chain, Size1: 0, Size2: 0, Glue: InGlue, DL: dl); |
2836 | InGlue = Chain.getValue(R: 1); |
2837 | } |
2838 | |
2839 | std::vector<SDValue> Ops; |
2840 | Ops.push_back(x: Chain); |
2841 | Ops.push_back(x: Callee); |
2842 | |
2843 | if (isTailCall) { |
2844 | Ops.push_back(x: DAG.getTargetConstant(Val: SPDiff, DL: dl, VT: MVT::i32)); |
2845 | } |
2846 | |
2847 | // Add argument registers to the end of the list so that they are known live |
2848 | // into the call. |
2849 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) |
2850 | Ops.push_back(x: DAG.getRegister(Reg: RegsToPass[i].first, |
2851 | VT: RegsToPass[i].second.getValueType())); |
2852 | |
2853 | // Add a register mask operand representing the call-preserved registers. |
2854 | const uint32_t *Mask; |
2855 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
2856 | if (isThisReturn) { |
2857 | // For 'this' returns, use the R0-preserving mask if applicable |
2858 | Mask = ARI->getThisReturnPreservedMask(MF, CallConv); |
2859 | if (!Mask) { |
2860 | // Set isThisReturn to false if the calling convention is not one that |
2861 | // allows 'returned' to be modeled in this way, so LowerCallResult does |
2862 | // not try to pass 'this' straight through |
2863 | isThisReturn = false; |
2864 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
2865 | } |
2866 | } else |
2867 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
2868 | |
2869 | assert(Mask && "Missing call preserved mask for calling convention" ); |
2870 | Ops.push_back(x: DAG.getRegisterMask(RegMask: Mask)); |
2871 | |
2872 | if (InGlue.getNode()) |
2873 | Ops.push_back(x: InGlue); |
2874 | |
2875 | SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
2876 | if (isTailCall) { |
2877 | MF.getFrameInfo().setHasTailCall(); |
2878 | SDValue Ret = DAG.getNode(Opcode: ARMISD::TC_RETURN, DL: dl, VTList: NodeTys, Ops); |
2879 | DAG.addNoMergeSiteInfo(Node: Ret.getNode(), NoMerge: CLI.NoMerge); |
2880 | DAG.addCallSiteInfo(Node: Ret.getNode(), CallInfo: std::move(CSInfo)); |
2881 | return Ret; |
2882 | } |
2883 | |
2884 | // Returns a chain and a flag for retval copy to use. |
2885 | Chain = DAG.getNode(Opcode: CallOpc, DL: dl, VTList: NodeTys, Ops); |
2886 | DAG.addNoMergeSiteInfo(Node: Chain.getNode(), NoMerge: CLI.NoMerge); |
2887 | InGlue = Chain.getValue(R: 1); |
2888 | DAG.addCallSiteInfo(Node: Chain.getNode(), CallInfo: std::move(CSInfo)); |
2889 | |
2890 | // If we're guaranteeing tail-calls will be honoured, the callee must |
2891 | // pop its own argument stack on return. But this call is *not* a tail call so |
2892 | // we need to undo that after it returns to restore the status-quo. |
2893 | bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt; |
2894 | uint64_t CalleePopBytes = |
2895 | canGuaranteeTCO(CC: CallConv, GuaranteeTailCalls: TailCallOpt) ? alignTo(Value: NumBytes, Align: 16) : -1ULL; |
2896 | |
2897 | Chain = DAG.getCALLSEQ_END(Chain, Size1: NumBytes, Size2: CalleePopBytes, Glue: InGlue, DL: dl); |
2898 | if (!Ins.empty()) |
2899 | InGlue = Chain.getValue(R: 1); |
2900 | |
2901 | // Handle result values, copying them out of physregs into vregs that we |
2902 | // return. |
2903 | return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG, |
2904 | InVals, isThisReturn, |
2905 | ThisVal: isThisReturn ? OutVals[0] : SDValue(), isCmseNSCall); |
2906 | } |
2907 | |
2908 | /// HandleByVal - Every parameter *after* a byval parameter is passed |
2909 | /// on the stack. Remember the next parameter register to allocate, |
2910 | /// and then confiscate the rest of the parameter registers to ensure
2911 | /// this. |
2912 | void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size, |
2913 | Align Alignment) const { |
2914 | // Byval (as with any stack) slots are always at least 4 byte aligned. |
2915 | Alignment = std::max(a: Alignment, b: Align(4)); |
2916 | |
2917 | unsigned Reg = State->AllocateReg(Regs: GPRArgRegs); |
2918 | if (!Reg) |
2919 | return; |
2920 | |
2921 | unsigned AlignInRegs = Alignment.value() / 4; |
2922 | unsigned Waste = (ARM::R4 - Reg) % AlignInRegs; |
2923 | for (unsigned i = 0; i < Waste; ++i) |
2924 | Reg = State->AllocateReg(Regs: GPRArgRegs); |
2925 | |
2926 | if (!Reg) |
2927 | return; |
2928 | |
2929 | unsigned Excess = 4 * (ARM::R4 - Reg); |
2930 | |
2931 | // Special case when NSAA != SP and the parameter size is greater than the
2932 | // size of all remaining GPR regs. In that case we can't split the
2933 | // parameter; we must send it to the stack. We also must set NCRN to R4,
2934 | // so all remaining registers are wasted.
2935 | const unsigned NSAAOffset = State->getStackSize(); |
2936 | if (NSAAOffset != 0 && Size > Excess) { |
2937 | while (State->AllocateReg(Regs: GPRArgRegs)) |
2938 | ; |
2939 | return; |
2940 | } |
2941 | |
2942 | // The first register for the byval parameter is the first register that
2943 | // wasn't allocated before this method call, i.e. "Reg".
2944 | // If the parameter is small enough to fit in the range [Reg, r4), then the
2945 | // end (one past the last) register is Reg + param-size-in-regs; otherwise
2946 | // the parameter is split between registers and stack, and the end register
2947 | // is r4 in that case.
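     | // Illustrative examples (assuming no stack arguments yet, NSAAOffset == 0):
     | // an 8-byte byval whose first free register is r2 occupies r2-r3 with
     | // ByValRegEnd == r4, while a 16-byte byval starting at r3 keeps only its
     | // first word in r3 and sends the remaining 12 bytes to the stack.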
2948 | unsigned ByValRegBegin = Reg; |
2949 | unsigned ByValRegEnd = std::min<unsigned>(a: Reg + Size / 4, b: ARM::R4); |
2950 | State->addInRegsParamInfo(RegBegin: ByValRegBegin, RegEnd: ByValRegEnd); |
2951 | // Note: the first register was already allocated at the beginning of this
2952 | // function, so allocate the remaining registers we need.
2953 | for (unsigned i = Reg + 1; i != ByValRegEnd; ++i) |
2954 | State->AllocateReg(Regs: GPRArgRegs); |
2955 | // A byval parameter that is split between registers and memory needs its |
2956 | // size truncated here. |
2957 | // In the case where the entire structure fits in registers, we set the |
2958 | // size in memory to zero. |
2959 | Size = std::max<int>(a: Size - Excess, b: 0); |
2960 | } |
2961 | |
2962 | /// MatchingStackOffset - Return true if the given stack call argument is |
2963 | /// already available in the same position (relatively) of the caller's |
2964 | /// incoming argument stack. |
2965 | static |
2966 | bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, |
2967 | MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, |
2968 | const TargetInstrInfo *TII) { |
2969 | unsigned Bytes = Arg.getValueSizeInBits() / 8; |
2970 | int FI = std::numeric_limits<int>::max(); |
2971 | if (Arg.getOpcode() == ISD::CopyFromReg) { |
2972 | Register VR = cast<RegisterSDNode>(Val: Arg.getOperand(i: 1))->getReg(); |
2973 | if (!VR.isVirtual()) |
2974 | return false; |
2975 | MachineInstr *Def = MRI->getVRegDef(Reg: VR); |
2976 | if (!Def) |
2977 | return false; |
2978 | if (!Flags.isByVal()) { |
2979 | if (!TII->isLoadFromStackSlot(MI: *Def, FrameIndex&: FI)) |
2980 | return false; |
2981 | } else { |
2982 | return false; |
2983 | } |
2984 | } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Val&: Arg)) { |
2985 | if (Flags.isByVal()) |
2986 | // ByVal argument is passed in as a pointer but it's now being |
2987 | // dereferenced. e.g. |
2988 | // define @foo(%struct.X* %A) { |
2989 | // tail call @bar(%struct.X* byval %A) |
2990 | // } |
2991 | return false; |
2992 | SDValue Ptr = Ld->getBasePtr(); |
2993 | FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Val&: Ptr); |
2994 | if (!FINode) |
2995 | return false; |
2996 | FI = FINode->getIndex(); |
2997 | } else |
2998 | return false; |
2999 | |
3000 | assert(FI != std::numeric_limits<int>::max()); |
3001 | if (!MFI.isFixedObjectIndex(ObjectIdx: FI)) |
3002 | return false; |
3003 | return Offset == MFI.getObjectOffset(ObjectIdx: FI) && Bytes == MFI.getObjectSize(ObjectIdx: FI); |
3004 | } |
3005 | |
3006 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible |
3007 | /// for tail call optimization. Targets which want to do tail call |
3008 | /// optimization should implement this function. Note that this function also |
3009 | /// processes musttail calls, so when this function returns false on a valid |
3010 | /// musttail call, a fatal backend error occurs. |
3011 | bool ARMTargetLowering::IsEligibleForTailCallOptimization( |
3012 | TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo, |
3013 | SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const { |
3014 | CallingConv::ID CalleeCC = CLI.CallConv; |
3015 | SDValue Callee = CLI.Callee; |
3016 | bool isVarArg = CLI.IsVarArg; |
3017 | const SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
3018 | const SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
3019 | const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
3020 | const SelectionDAG &DAG = CLI.DAG; |
3021 | MachineFunction &MF = DAG.getMachineFunction(); |
3022 | const Function &CallerF = MF.getFunction(); |
3023 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
3024 | |
3025 | assert(Subtarget->supportsTailCall()); |
3026 | |
3027 | // Indirect tail calls cannot be optimized for Thumb1 if the args |
3028 | // to the call take up r0-r3. The reason is that there are no legal registers |
3029 | // left to hold the pointer to the function to be called. |
3030 | // Similarly, if the function uses return address sign and authentication, |
3031 | // r12 is needed to hold the PAC and is not available to hold the callee |
3032 | // address. |
3033 | if (Outs.size() >= 4 && |
3034 | (!isa<GlobalAddressSDNode>(Val: Callee.getNode()) || isIndirect)) { |
3035 | if (Subtarget->isThumb1Only()) |
3036 | return false; |
3037 | // Conservatively assume the function spills LR. |
3038 | if (MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress(SpillsLR: true)) |
3039 | return false; |
3040 | } |
3041 | |
3042 | // Look for obvious safe cases to perform tail call optimization that do not |
3043 | // require ABI changes. This is what gcc calls sibcall. |
3044 | |
3045 | // Exception-handling functions need a special set of instructions to indicate |
3046 | // a return to the hardware. Tail-calling another function would probably |
3047 | // break this. |
3048 | if (CallerF.hasFnAttribute(Kind: "interrupt" )) |
3049 | return false; |
3050 | |
3051 | if (canGuaranteeTCO(CC: CalleeCC, GuaranteeTailCalls: getTargetMachine().Options.GuaranteedTailCallOpt)) |
3052 | return CalleeCC == CallerCC; |
3053 | |
3054 | // Also avoid sibcall optimization if either caller or callee uses struct |
3055 | // return semantics. |
3056 | bool isCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); |
3057 | bool isCallerStructRet = MF.getFunction().hasStructRetAttr(); |
3058 | if (isCalleeStructRet || isCallerStructRet) |
3059 | return false; |
3060 | |
3061 | // Externally-defined functions with weak linkage should not be |
3062 | // tail-called on ARM when the OS does not support dynamic |
3063 | // pre-emption of symbols, as the AAELF spec requires normal calls |
3064 | // to undefined weak functions to be replaced with a NOP or jump to the |
3065 | // next instruction. The behaviour of branch instructions in this |
3066 | // situation (as used for tail calls) is implementation-defined, so we |
3067 | // cannot rely on the linker replacing the tail call with a return. |
3068 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Val&: Callee)) { |
3069 | const GlobalValue *GV = G->getGlobal(); |
3070 | const Triple &TT = getTargetMachine().getTargetTriple(); |
3071 | if (GV->hasExternalWeakLinkage() && |
3072 | (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) |
3073 | return false; |
3074 | } |
3075 | |
3076 | // Check that the call results are passed in the same way. |
3077 | LLVMContext &C = *DAG.getContext(); |
3078 | if (!CCState::resultsCompatible( |
3079 | CalleeCC: getEffectiveCallingConv(CC: CalleeCC, isVarArg), |
3080 | CallerCC: getEffectiveCallingConv(CC: CallerCC, isVarArg: CallerF.isVarArg()), MF, C, Ins, |
3081 | CalleeFn: CCAssignFnForReturn(CC: CalleeCC, isVarArg), |
3082 | CallerFn: CCAssignFnForReturn(CC: CallerCC, isVarArg: CallerF.isVarArg()))) |
3083 | return false; |
3084 | // The callee has to preserve all registers the caller needs to preserve. |
3085 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
3086 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
3087 | if (CalleeCC != CallerCC) { |
3088 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); |
3089 | if (!TRI->regmaskSubsetEqual(mask0: CallerPreserved, mask1: CalleePreserved)) |
3090 | return false; |
3091 | } |
3092 | |
3093 | // If Caller's vararg or byval argument has been split between registers and |
3094 | // stack, do not perform tail call, since part of the argument is in caller's |
3095 | // local frame. |
3096 | const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>(); |
3097 | if (AFI_Caller->getArgRegsSaveSize()) |
3098 | return false; |
3099 | |
3100 | // If the callee takes no arguments then go on to check the results of the |
3101 | // call. |
3102 | if (!Outs.empty()) { |
3103 | if (CCInfo.getStackSize()) { |
3104 | // Check if the arguments are already laid out in the right way as |
3105 | // the caller's fixed stack objects. |
3106 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
3107 | const MachineRegisterInfo *MRI = &MF.getRegInfo(); |
3108 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
3109 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); |
3110 | i != e; |
3111 | ++i, ++realArgIdx) { |
3112 | CCValAssign &VA = ArgLocs[i]; |
3113 | EVT RegVT = VA.getLocVT(); |
3114 | SDValue Arg = OutVals[realArgIdx]; |
3115 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; |
3116 | if (VA.getLocInfo() == CCValAssign::Indirect) |
3117 | return false; |
3118 | if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) { |
3119 | // f64 and vector types are split into multiple registers or |
3120 | // register/stack-slot combinations. The types will not match |
3121 | // the registers; give up on memory f64 refs until we figure |
3122 | // out what to do about this. |
3123 | if (!VA.isRegLoc()) |
3124 | return false; |
3125 | if (!ArgLocs[++i].isRegLoc()) |
3126 | return false; |
3127 | if (RegVT == MVT::v2f64) { |
3128 | if (!ArgLocs[++i].isRegLoc()) |
3129 | return false; |
3130 | if (!ArgLocs[++i].isRegLoc()) |
3131 | return false; |
3132 | } |
3133 | } else if (!VA.isRegLoc()) { |
3134 | if (!MatchingStackOffset(Arg, Offset: VA.getLocMemOffset(), Flags, |
3135 | MFI, MRI, TII)) |
3136 | return false; |
3137 | } |
3138 | } |
3139 | } |
3140 | |
3141 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
3142 | if (!parametersInCSRMatch(MRI, CallerPreservedMask: CallerPreserved, ArgLocs, OutVals)) |
3143 | return false; |
3144 | } |
3145 | |
3146 | return true; |
3147 | } |
3148 | |
3149 | bool |
3150 | ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, |
3151 | MachineFunction &MF, bool isVarArg, |
3152 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
3153 | LLVMContext &Context) const { |
3154 | SmallVector<CCValAssign, 16> RVLocs; |
3155 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); |
3156 | return CCInfo.CheckReturn(Outs, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
3157 | } |
3158 | |
3159 | static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, |
3160 | const SDLoc &DL, SelectionDAG &DAG) { |
3161 | const MachineFunction &MF = DAG.getMachineFunction(); |
3162 | const Function &F = MF.getFunction(); |
3163 | |
3164 | StringRef IntKind = F.getFnAttribute(Kind: "interrupt" ).getValueAsString(); |
3165 | |
3166 | // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset |
3167 | // version of the "preferred return address". These offsets affect the return |
3168 | // instruction if this is a return from PL1 without hypervisor extensions. |
3169 | // IRQ/FIQ: +4 "subs pc, lr, #4" |
3170 | // SWI: 0 "subs pc, lr, #0" |
3171 | // ABORT: +4 "subs pc, lr, #4" |
3172 | // UNDEF: +4/+2 "subs pc, lr, #0" |
3173 | // UNDEF varies depending on whether the exception came from ARM or Thumb
3174 | // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0. |
3175 | |
3176 | int64_t LROffset; |
3177 | if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || |
3178 | IntKind == "ABORT" ) |
3179 | LROffset = 4; |
3180 | else if (IntKind == "SWI" || IntKind == "UNDEF" ) |
3181 | LROffset = 0; |
3182 | else |
3183 | report_fatal_error(reason: "Unsupported interrupt attribute. If present, value " |
3184 | "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF" ); |
3185 | |
3186 | RetOps.insert(I: RetOps.begin() + 1, |
3187 | Elt: DAG.getConstant(Val: LROffset, DL, VT: MVT::i32, isTarget: false)); |
3188 | |
3189 | return DAG.getNode(Opcode: ARMISD::INTRET_GLUE, DL, VT: MVT::Other, Ops: RetOps); |
3190 | } |
3191 | |
3192 | SDValue |
3193 | ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
3194 | bool isVarArg, |
3195 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
3196 | const SmallVectorImpl<SDValue> &OutVals, |
3197 | const SDLoc &dl, SelectionDAG &DAG) const { |
3198 | // CCValAssign - represent the assignment of the return value to a location. |
3199 | SmallVector<CCValAssign, 16> RVLocs; |
3200 | |
3201 | // CCState - Info about the registers and stack slots. |
3202 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
3203 | *DAG.getContext()); |
3204 | |
3205 | // Analyze outgoing return values. |
3206 | CCInfo.AnalyzeReturn(Outs, Fn: CCAssignFnForReturn(CC: CallConv, isVarArg)); |
3207 | |
3208 | SDValue Glue; |
3209 | SmallVector<SDValue, 4> RetOps; |
3210 | RetOps.push_back(Elt: Chain); // Operand #0 = Chain (updated below) |
3211 | bool isLittleEndian = Subtarget->isLittle(); |
3212 | |
3213 | MachineFunction &MF = DAG.getMachineFunction(); |
3214 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
3215 | AFI->setReturnRegsCount(RVLocs.size()); |
3216 | |
3217 | // Report error if cmse entry function returns structure through first ptr arg. |
3218 | if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) { |
3219 | // Note: using an empty SDLoc(), as the first line of the function is a |
3220 | // better place to report than the last line. |
3221 | DiagnosticInfoUnsupported Diag( |
3222 | DAG.getMachineFunction().getFunction(), |
3223 | "secure entry function would return value through pointer" , |
3224 | SDLoc().getDebugLoc()); |
3225 | DAG.getContext()->diagnose(DI: Diag); |
3226 | } |
3227 | |
3228 | // Copy the result values into the output registers. |
3229 | for (unsigned i = 0, realRVLocIdx = 0; |
3230 | i != RVLocs.size(); |
3231 | ++i, ++realRVLocIdx) { |
3232 | CCValAssign &VA = RVLocs[i]; |
3233 | assert(VA.isRegLoc() && "Can only return in registers!" ); |
3234 | |
3235 | SDValue Arg = OutVals[realRVLocIdx]; |
3236 | bool ReturnF16 = false; |
3237 | |
3238 | if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) { |
3239 | // Half-precision return values can be returned like this: |
3240 | // |
3241 | // t11 f16 = fadd ... |
3242 | // t12: i16 = bitcast t11 |
3243 | // t13: i32 = zero_extend t12 |
3244 | // t14: f32 = bitcast t13 <~~~~~~~ Arg |
3245 | // |
3246 | // to avoid code generation for bitcasts, we simply set Arg to the node |
3247 | // that produces the f16 value, t11 in this case. |
3248 | // |
3249 | if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) { |
3250 | SDValue ZE = Arg.getOperand(i: 0); |
3251 | if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) { |
3252 | SDValue BC = ZE.getOperand(i: 0); |
3253 | if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) { |
3254 | Arg = BC.getOperand(i: 0); |
3255 | ReturnF16 = true; |
3256 | } |
3257 | } |
3258 | } |
3259 | } |
3260 | |
3261 | switch (VA.getLocInfo()) { |
3262 | default: llvm_unreachable("Unknown loc info!" ); |
3263 | case CCValAssign::Full: break; |
3264 | case CCValAssign::BCvt: |
3265 | if (!ReturnF16) |
3266 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
3267 | break; |
3268 | } |
3269 | |
3270 | // Mask f16 arguments if this is a CMSE nonsecure entry. |
3271 | auto RetVT = Outs[realRVLocIdx].ArgVT; |
3272 | if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) { |
3273 | if (VA.needsCustom() && VA.getValVT() == MVT::f16) { |
3274 | Arg = MoveFromHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: Arg); |
3275 | } else { |
3276 | auto LocBits = VA.getLocVT().getSizeInBits(); |
3277 | auto MaskValue = APInt::getLowBitsSet(numBits: LocBits, loBitsSet: RetVT.getSizeInBits()); |
3278 | SDValue Mask = |
3279 | DAG.getConstant(Val: MaskValue, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits)); |
3280 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), Operand: Arg); |
3281 | Arg = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::getIntegerVT(BitWidth: LocBits), N1: Arg, N2: Mask); |
3282 | Arg = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getLocVT(), Operand: Arg); |
3283 | } |
3284 | } |
3285 | |
3286 | if (VA.needsCustom() && |
3287 | (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) { |
3288 | if (VA.getLocVT() == MVT::v2f64) { |
3289 | // Extract the first half and return it in two registers. |
3290 | SDValue Half = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
3291 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
3292 | SDValue HalfGPRs = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
3293 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Half); |
3294 | |
3295 | Chain = |
3296 | DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
3297 | N: HalfGPRs.getValue(R: isLittleEndian ? 0 : 1), Glue); |
3298 | Glue = Chain.getValue(R: 1); |
3299 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
3300 | VA = RVLocs[++i]; // skip ahead to next loc |
3301 | Chain = |
3302 | DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
3303 | N: HalfGPRs.getValue(R: isLittleEndian ? 1 : 0), Glue); |
3304 | Glue = Chain.getValue(R: 1); |
3305 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
3306 | VA = RVLocs[++i]; // skip ahead to next loc |
3307 | |
3308 | // Extract the 2nd half and fall through to handle it as an f64 value. |
3309 | Arg = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, N1: Arg, |
3310 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
3311 | } |
3312 | // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is |
3313 | // available. |
3314 | SDValue fmrrd = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
3315 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Arg); |
3316 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
3317 | N: fmrrd.getValue(R: isLittleEndian ? 0 : 1), Glue); |
3318 | Glue = Chain.getValue(R: 1); |
3319 | RetOps.push_back(Elt: DAG.getRegister(Reg: VA.getLocReg(), VT: VA.getLocVT())); |
3320 | VA = RVLocs[++i]; // skip ahead to next loc |
3321 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), |
3322 | N: fmrrd.getValue(R: isLittleEndian ? 1 : 0), Glue); |
3323 | } else |
3324 | Chain = DAG.getCopyToReg(Chain, dl, Reg: VA.getLocReg(), N: Arg, Glue); |
3325 | |
3326 | // Guarantee that all emitted copies are stuck together via glue, so they
3327 | // cannot be scheduled apart.
3328 | Glue = Chain.getValue(R: 1); |
3329 | RetOps.push_back(Elt: DAG.getRegister( |
3330 | Reg: VA.getLocReg(), VT: ReturnF16 ? Arg.getValueType() : VA.getLocVT())); |
3331 | } |
3332 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
3333 | const MCPhysReg *I = |
3334 | TRI->getCalleeSavedRegsViaCopy(MF: &DAG.getMachineFunction()); |
3335 | if (I) { |
3336 | for (; *I; ++I) { |
3337 | if (ARM::GPRRegClass.contains(Reg: *I)) |
3338 | RetOps.push_back(Elt: DAG.getRegister(Reg: *I, VT: MVT::i32)); |
3339 | else if (ARM::DPRRegClass.contains(Reg: *I)) |
3340 | RetOps.push_back(Elt: DAG.getRegister(Reg: *I, VT: MVT::getFloatingPointVT(BitWidth: 64))); |
3341 | else |
3342 | llvm_unreachable("Unexpected register class in CSRsViaCopy!" ); |
3343 | } |
3344 | } |
3345 | |
3346 | // Update chain and glue. |
3347 | RetOps[0] = Chain; |
3348 | if (Glue.getNode()) |
3349 | RetOps.push_back(Elt: Glue); |
3350 | |
3351 | // CPUs which aren't M-class use a special sequence to return from |
3352 | // exceptions (roughly, any instruction setting pc and cpsr simultaneously, |
3353 | // though we use "subs pc, lr, #N"). |
3354 | // |
3355 | // M-class CPUs actually use a normal return sequence with a special |
3356 | // (hardware-provided) value in LR, so the normal code path works. |
3357 | if (DAG.getMachineFunction().getFunction().hasFnAttribute(Kind: "interrupt" ) && |
3358 | !Subtarget->isMClass()) { |
3359 | if (Subtarget->isThumb1Only()) |
3360 | report_fatal_error(reason: "interrupt attribute is not supported in Thumb1" ); |
3361 | return LowerInterruptReturn(RetOps, DL: dl, DAG); |
3362 | } |
3363 | |
3364 | ARMISD::NodeType RetNode = AFI->isCmseNSEntryFunction() ? ARMISD::SERET_GLUE : |
3365 | ARMISD::RET_GLUE; |
3366 | return DAG.getNode(Opcode: RetNode, DL: dl, VT: MVT::Other, Ops: RetOps); |
3367 | } |
3368 | |
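// Roughly: returns true if the only (transitive) users of N's value are the
// CopyToReg / VMOVRRD / BITCAST nodes feeding a RET_GLUE or INTRET_GLUE node,
// i.e. the value is consumed by the return alone. On success, Chain is updated
// to the chain just before those copies so the producing call can be emitted
// as a tail call.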
3369 | bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { |
3370 | if (N->getNumValues() != 1) |
3371 | return false; |
3372 | if (!N->hasNUsesOfValue(NUses: 1, Value: 0)) |
3373 | return false; |
3374 | |
3375 | SDValue TCChain = Chain; |
3376 | SDNode *Copy = *N->use_begin(); |
3377 | if (Copy->getOpcode() == ISD::CopyToReg) { |
3378 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
3379 | // perform a tail call. |
3380 | if (Copy->getOperand(Num: Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
3381 | return false; |
3382 | TCChain = Copy->getOperand(Num: 0); |
3383 | } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { |
3384 | SDNode *VMov = Copy; |
3385 | // f64 returned in a pair of GPRs. |
3386 | SmallPtrSet<SDNode*, 2> Copies; |
3387 | for (SDNode *U : VMov->uses()) { |
3388 | if (U->getOpcode() != ISD::CopyToReg) |
3389 | return false; |
3390 | Copies.insert(Ptr: U); |
3391 | } |
3392 | if (Copies.size() > 2) |
3393 | return false; |
3394 | |
3395 | for (SDNode *U : VMov->uses()) { |
3396 | SDValue UseChain = U->getOperand(Num: 0); |
3397 | if (Copies.count(Ptr: UseChain.getNode())) |
3398 | // Second CopyToReg |
3399 | Copy = U; |
3400 | else { |
3401 | // We are at the top of this chain. |
3402 | // If the copy has a glue operand, we conservatively assume it |
3403 | // isn't safe to perform a tail call. |
3404 | if (U->getOperand(Num: U->getNumOperands() - 1).getValueType() == MVT::Glue) |
3405 | return false; |
3406 | // First CopyToReg |
3407 | TCChain = UseChain; |
3408 | } |
3409 | } |
3410 | } else if (Copy->getOpcode() == ISD::BITCAST) { |
3411 | // f32 returned in a single GPR. |
3412 | if (!Copy->hasOneUse()) |
3413 | return false; |
3414 | Copy = *Copy->use_begin(); |
3415 | if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(NUses: 1, Value: 0)) |
3416 | return false; |
3417 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
3418 | // perform a tail call. |
3419 | if (Copy->getOperand(Num: Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
3420 | return false; |
3421 | TCChain = Copy->getOperand(Num: 0); |
3422 | } else { |
3423 | return false; |
3424 | } |
3425 | |
3426 | bool HasRet = false; |
3427 | for (const SDNode *U : Copy->uses()) { |
3428 | if (U->getOpcode() != ARMISD::RET_GLUE && |
3429 | U->getOpcode() != ARMISD::INTRET_GLUE) |
3430 | return false; |
3431 | HasRet = true; |
3432 | } |
3433 | |
3434 | if (!HasRet) |
3435 | return false; |
3436 | |
3437 | Chain = TCChain; |
3438 | return true; |
3439 | } |
3440 | |
3441 | bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
3442 | if (!Subtarget->supportsTailCall()) |
3443 | return false; |
3444 | |
3445 | if (!CI->isTailCall()) |
3446 | return false; |
3447 | |
3448 | return true; |
3449 | } |
3450 | |
// Writing a 64-bit value, so we need to split it into two 32-bit values first
// and pass the low and high parts through.
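// A sketch of the transformation performed below:
//   write_register(<reg>, i64 %v)
//     -> WRITE_REGISTER chain, <reg>, lo32(%v), hi32(%v)
// where lo32/hi32 are the two halves produced by SplitScalar.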
3453 | static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { |
3454 | SDLoc DL(Op); |
3455 | SDValue WriteValue = Op->getOperand(Num: 2); |
3456 | |
3457 | // This function is only supposed to be called for i64 type argument. |
3458 | assert(WriteValue.getValueType() == MVT::i64 |
3459 | && "LowerWRITE_REGISTER called for non-i64 type argument." ); |
3460 | |
3461 | SDValue Lo, Hi; |
3462 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: WriteValue, DL, LoVT: MVT::i32, HiVT: MVT::i32); |
3463 | SDValue Ops[] = { Op->getOperand(Num: 0), Op->getOperand(Num: 1), Lo, Hi }; |
3464 | return DAG.getNode(Opcode: ISD::WRITE_REGISTER, DL, VT: MVT::Other, Ops); |
3465 | } |
3466 | |
3467 | // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as |
3468 | // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is |
3469 | // one of the above mentioned nodes. It has to be wrapped because otherwise |
3470 | // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only |
3471 | // be used to form addressing mode. These wrapped nodes will be selected |
3472 | // into MOVi. |
3473 | SDValue ARMTargetLowering::LowerConstantPool(SDValue Op, |
3474 | SelectionDAG &DAG) const { |
3475 | EVT PtrVT = Op.getValueType(); |
3476 | // FIXME there is no actual debug info here |
3477 | SDLoc dl(Op); |
3478 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Val&: Op); |
3479 | SDValue Res; |
3480 | |
3481 | // When generating execute-only code Constant Pools must be promoted to the |
3482 | // global data section. It's a bit ugly that we can't share them across basic |
  // blocks, but this way we guarantee that execute-only behaves correctly with
3484 | // position-independent addressing modes. |
3485 | if (Subtarget->genExecuteOnly()) { |
3486 | auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); |
3487 | auto T = const_cast<Type*>(CP->getType()); |
3488 | auto C = const_cast<Constant*>(CP->getConstVal()); |
3489 | auto M = const_cast<Module*>(DAG.getMachineFunction(). |
3490 | getFunction().getParent()); |
3491 | auto GV = new GlobalVariable( |
3492 | *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, |
3493 | Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + |
3494 | Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + |
3495 | Twine(AFI->createPICLabelUId()) |
3496 | ); |
3497 | SDValue GA = DAG.getTargetGlobalAddress(GV: dyn_cast<GlobalValue>(Val: GV), |
3498 | DL: dl, VT: PtrVT); |
3499 | return LowerGlobalAddress(Op: GA, DAG); |
3500 | } |
3501 | |
3502 | // The 16-bit ADR instruction can only encode offsets that are multiples of 4, |
3503 | // so we need to align to at least 4 bytes when we don't have 32-bit ADR. |
3504 | Align CPAlign = CP->getAlign(); |
3505 | if (Subtarget->isThumb1Only()) |
3506 | CPAlign = std::max(a: CPAlign, b: Align(4)); |
3507 | if (CP->isMachineConstantPoolEntry()) |
3508 | Res = |
3509 | DAG.getTargetConstantPool(C: CP->getMachineCPVal(), VT: PtrVT, Align: CPAlign); |
3510 | else |
3511 | Res = DAG.getTargetConstantPool(C: CP->getConstVal(), VT: PtrVT, Align: CPAlign); |
3512 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Res); |
3513 | } |
3514 | |
3515 | unsigned ARMTargetLowering::getJumpTableEncoding() const { |
3516 | // If we don't have a 32-bit pc-relative branch instruction then the jump |
3517 | // table consists of block addresses. Usually this is inline, but for |
3518 | // execute-only it must be placed out-of-line. |
3519 | if (Subtarget->genExecuteOnly() && !Subtarget->hasV8MBaselineOps()) |
3520 | return MachineJumpTableInfo::EK_BlockAddress; |
3521 | return MachineJumpTableInfo::EK_Inline; |
3522 | } |
3523 | |
3524 | SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, |
3525 | SelectionDAG &DAG) const { |
3526 | MachineFunction &MF = DAG.getMachineFunction(); |
3527 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
3528 | unsigned ARMPCLabelIndex = 0; |
3529 | SDLoc DL(Op); |
3530 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
3531 | const BlockAddress *BA = cast<BlockAddressSDNode>(Val&: Op)->getBlockAddress(); |
3532 | SDValue CPAddr; |
3533 | bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI(); |
3534 | if (!IsPositionIndependent) { |
3535 | CPAddr = DAG.getTargetConstantPool(C: BA, VT: PtrVT, Align: Align(4)); |
3536 | } else { |
3537 | unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; |
3538 | ARMPCLabelIndex = AFI->createPICLabelUId(); |
3539 | ARMConstantPoolValue *CPV = |
3540 | ARMConstantPoolConstant::Create(C: BA, ID: ARMPCLabelIndex, |
3541 | Kind: ARMCP::CPBlockAddress, PCAdj); |
3542 | CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
3543 | } |
3544 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, Operand: CPAddr); |
3545 | SDValue Result = DAG.getLoad( |
3546 | VT: PtrVT, dl: DL, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
3547 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
3548 | if (!IsPositionIndependent) |
3549 | return Result; |
3550 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL, VT: MVT::i32); |
3551 | return DAG.getNode(Opcode: ARMISD::PIC_ADD, DL, VT: PtrVT, N1: Result, N2: PICLabel); |
3552 | } |
3553 | |
3554 | /// Convert a TLS address reference into the correct sequence of loads |
3555 | /// and calls to compute the variable's address for Darwin, and return an |
3556 | /// SDValue containing the final node. |
3557 | |
3558 | /// Darwin only has one TLS scheme which must be capable of dealing with the |
3559 | /// fully general situation, in the worst case. This means: |
3560 | /// + "extern __thread" declaration. |
3561 | /// + Defined in a possibly unknown dynamic library. |
3562 | /// |
3563 | /// The general system is that each __thread variable has a [3 x i32] descriptor |
3564 | /// which contains information used by the runtime to calculate the address. The |
3565 | /// only part of this the compiler needs to know about is the first word, which |
3566 | /// contains a function pointer that must be called with the address of the |
3567 | /// entire descriptor in "r0". |
3568 | /// |
3569 | /// Since this descriptor may be in a different unit, in general access must |
3570 | /// proceed along the usual ARM rules. A common sequence to produce is: |
3571 | /// |
3572 | /// movw rT1, :lower16:_var$non_lazy_ptr |
3573 | /// movt rT1, :upper16:_var$non_lazy_ptr |
3574 | /// ldr r0, [rT1] |
3575 | /// ldr rT2, [r0] |
3576 | /// blx rT2 |
3577 | /// [...address now in r0...] |
3578 | SDValue |
3579 | ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op, |
3580 | SelectionDAG &DAG) const { |
3581 | assert(Subtarget->isTargetDarwin() && |
3582 | "This function expects a Darwin target" ); |
3583 | SDLoc DL(Op); |
3584 | |
  // The first step is to get the address of the actual global symbol. This is
  // where the TLS descriptor lives.
3587 | SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG); |
3588 | |
3589 | // The first entry in the descriptor is a function pointer that we must call |
3590 | // to obtain the address of the variable. |
3591 | SDValue Chain = DAG.getEntryNode(); |
3592 | SDValue FuncTLVGet = DAG.getLoad( |
3593 | VT: MVT::i32, dl: DL, Chain, Ptr: DescAddr, |
3594 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction()), Alignment: Align(4), |
3595 | MMOFlags: MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable | |
3596 | MachineMemOperand::MOInvariant); |
3597 | Chain = FuncTLVGet.getValue(R: 1); |
3598 | |
3599 | MachineFunction &F = DAG.getMachineFunction(); |
3600 | MachineFrameInfo &MFI = F.getFrameInfo(); |
3601 | MFI.setAdjustsStack(true); |
3602 | |
3603 | // TLS calls preserve all registers except those that absolutely must be |
3604 | // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be |
3605 | // silly). |
3606 | auto TRI = |
3607 | getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo(); |
3608 | auto ARI = static_cast<const ARMRegisterInfo *>(TRI); |
3609 | const uint32_t *Mask = ARI->getTLSCallPreservedMask(MF: DAG.getMachineFunction()); |
3610 | |
3611 | // Finally, we can make the call. This is just a degenerate version of a |
  // normal ARM call node: r0 takes the address of the descriptor, and
3613 | // returns the address of the variable in this thread. |
3614 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::R0, N: DescAddr, Glue: SDValue()); |
3615 | Chain = |
3616 | DAG.getNode(Opcode: ARMISD::CALL, DL, VTList: DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue), |
3617 | N1: Chain, N2: FuncTLVGet, N3: DAG.getRegister(Reg: ARM::R0, VT: MVT::i32), |
3618 | N4: DAG.getRegisterMask(RegMask: Mask), N5: Chain.getValue(R: 1)); |
3619 | return DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::R0, VT: MVT::i32, Glue: Chain.getValue(R: 1)); |
3620 | } |
3621 | |
3622 | SDValue |
3623 | ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, |
3624 | SelectionDAG &DAG) const { |
3625 | assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering" ); |
3626 | |
3627 | SDValue Chain = DAG.getEntryNode(); |
3628 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
3629 | SDLoc DL(Op); |
3630 | |
3631 | // Load the current TEB (thread environment block) |
3632 | SDValue Ops[] = {Chain, |
3633 | DAG.getTargetConstant(Val: Intrinsic::arm_mrc, DL, VT: MVT::i32), |
3634 | DAG.getTargetConstant(Val: 15, DL, VT: MVT::i32), |
3635 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
3636 | DAG.getTargetConstant(Val: 13, DL, VT: MVT::i32), |
3637 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
3638 | DAG.getTargetConstant(Val: 2, DL, VT: MVT::i32)}; |
3639 | SDValue CurrentTEB = DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, |
3640 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops); |
3641 | |
3642 | SDValue TEB = CurrentTEB.getValue(R: 0); |
3643 | Chain = CurrentTEB.getValue(R: 1); |
3644 | |
3645 | // Load the ThreadLocalStoragePointer from the TEB |
3646 | // A pointer to the TLS array is located at offset 0x2c from the TEB. |
3647 | SDValue TLSArray = |
3648 | DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TEB, N2: DAG.getIntPtrConstant(Val: 0x2c, DL)); |
3649 | TLSArray = DAG.getLoad(VT: PtrVT, dl: DL, Chain, Ptr: TLSArray, PtrInfo: MachinePointerInfo()); |
3650 | |
  // The pointer to the thread's TLS data area is found at the TLS index,
  // scaled by 4, into the TLS array.
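  // Putting the pieces together, the address is computed roughly as:
  //   ThreadLocalStoragePointer = *(TEB + 0x2c)
  //   TlsBase = ThreadLocalStoragePointer[_tls_index]
  //   Address = TlsBase + <SECREL offset of the variable>
  // (a sketch of the sequence built below, not additional functionality).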
3653 | |
3654 | // Load the TLS index from the C runtime |
3655 | SDValue TLSIndex = |
3656 | DAG.getTargetExternalSymbol(Sym: "_tls_index" , VT: PtrVT, TargetFlags: ARMII::MO_NO_FLAG); |
3657 | TLSIndex = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, Operand: TLSIndex); |
3658 | TLSIndex = DAG.getLoad(VT: PtrVT, dl: DL, Chain, Ptr: TLSIndex, PtrInfo: MachinePointerInfo()); |
3659 | |
3660 | SDValue Slot = DAG.getNode(Opcode: ISD::SHL, DL, VT: PtrVT, N1: TLSIndex, |
3661 | N2: DAG.getConstant(Val: 2, DL, VT: MVT::i32)); |
3662 | SDValue TLS = DAG.getLoad(VT: PtrVT, dl: DL, Chain, |
3663 | Ptr: DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TLSArray, N2: Slot), |
3664 | PtrInfo: MachinePointerInfo()); |
3665 | |
3666 | // Get the offset of the start of the .tls section (section base) |
3667 | const auto *GA = cast<GlobalAddressSDNode>(Val&: Op); |
3668 | auto *CPV = ARMConstantPoolConstant::Create(GV: GA->getGlobal(), Modifier: ARMCP::SECREL); |
3669 | SDValue Offset = DAG.getLoad( |
3670 | VT: PtrVT, dl: DL, Chain, |
3671 | Ptr: DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: MVT::i32, |
3672 | Operand: DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4))), |
3673 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
3674 | |
3675 | return DAG.getNode(Opcode: ISD::ADD, DL, VT: PtrVT, N1: TLS, N2: Offset); |
3676 | } |
3677 | |
3678 | // Lower ISD::GlobalTLSAddress using the "general dynamic" model |
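// Roughly, the sequence built here is (ARM-mode sketch; Thumb uses a different
// pc-relative adjustment):
//   ldr  r0, .LCPI          @ constant pool: var(tlsgd) with a pc-relative adjust
// .LPC:
//   add  r0, pc, r0
//   bl   __tls_get_addr     @ returns the variable's address in r0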
3679 | SDValue |
3680 | ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, |
3681 | SelectionDAG &DAG) const { |
3682 | SDLoc dl(GA); |
3683 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
3684 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
3685 | MachineFunction &MF = DAG.getMachineFunction(); |
3686 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
3687 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
3688 | ARMConstantPoolValue *CPV = |
3689 | ARMConstantPoolConstant::Create(C: GA->getGlobal(), ID: ARMPCLabelIndex, |
3690 | Kind: ARMCP::CPValue, PCAdj, Modifier: ARMCP::TLSGD, AddCurrentAddress: true); |
3691 | SDValue Argument = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
3692 | Argument = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Argument); |
3693 | Argument = DAG.getLoad( |
3694 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Argument, |
3695 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
3696 | SDValue Chain = Argument.getValue(R: 1); |
3697 | |
3698 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
3699 | Argument = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Argument, N2: PICLabel); |
3700 | |
3701 | // call __tls_get_addr. |
3702 | ArgListTy Args; |
3703 | ArgListEntry Entry; |
3704 | Entry.Node = Argument; |
3705 | Entry.Ty = (Type *) Type::getInt32Ty(C&: *DAG.getContext()); |
3706 | Args.push_back(x: Entry); |
3707 | |
3708 | // FIXME: is there useful debug info available here? |
3709 | TargetLowering::CallLoweringInfo CLI(DAG); |
3710 | CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( |
3711 | CC: CallingConv::C, ResultType: Type::getInt32Ty(C&: *DAG.getContext()), |
3712 | Target: DAG.getExternalSymbol(Sym: "__tls_get_addr" , VT: PtrVT), ArgsList: std::move(Args)); |
3713 | |
3714 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
3715 | return CallResult.first; |
3716 | } |
3717 | |
3718 | // Lower ISD::GlobalTLSAddress using the "initial exec" or |
3719 | // "local exec" model. |
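// In both cases the result is computed as
//   address = thread_pointer + offset
// where, roughly, the offset is loaded through a GOT entry reached via the
// constant pool (initial exec) or taken from a TPOFF constant-pool entry
// (local exec).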
3720 | SDValue |
3721 | ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, |
3722 | SelectionDAG &DAG, |
3723 | TLSModel::Model model) const { |
3724 | const GlobalValue *GV = GA->getGlobal(); |
3725 | SDLoc dl(GA); |
3726 | SDValue Offset; |
3727 | SDValue Chain = DAG.getEntryNode(); |
3728 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
3729 | // Get the Thread Pointer |
3730 | SDValue ThreadPointer = DAG.getNode(Opcode: ARMISD::THREAD_POINTER, DL: dl, VT: PtrVT); |
3731 | |
3732 | if (model == TLSModel::InitialExec) { |
3733 | MachineFunction &MF = DAG.getMachineFunction(); |
3734 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
3735 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
3736 | // Initial exec model. |
3737 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
3738 | ARMConstantPoolValue *CPV = |
3739 | ARMConstantPoolConstant::Create(C: GA->getGlobal(), ID: ARMPCLabelIndex, |
3740 | Kind: ARMCP::CPValue, PCAdj, Modifier: ARMCP::GOTTPOFF, |
3741 | AddCurrentAddress: true); |
3742 | Offset = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
3743 | Offset = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Offset); |
3744 | Offset = DAG.getLoad( |
3745 | VT: PtrVT, dl, Chain, Ptr: Offset, |
3746 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
3747 | Chain = Offset.getValue(R: 1); |
3748 | |
3749 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
3750 | Offset = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Offset, N2: PICLabel); |
3751 | |
3752 | Offset = DAG.getLoad( |
3753 | VT: PtrVT, dl, Chain, Ptr: Offset, |
3754 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
3755 | } else { |
3756 | // local exec model |
3757 | assert(model == TLSModel::LocalExec); |
3758 | ARMConstantPoolValue *CPV = |
3759 | ARMConstantPoolConstant::Create(GV, Modifier: ARMCP::TPOFF); |
3760 | Offset = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
3761 | Offset = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: Offset); |
3762 | Offset = DAG.getLoad( |
3763 | VT: PtrVT, dl, Chain, Ptr: Offset, |
3764 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
3765 | } |
3766 | |
3767 | // The address of the thread local variable is the add of the thread |
3768 | // pointer with the offset of the variable. |
3769 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: ThreadPointer, N2: Offset); |
3770 | } |
3771 | |
3772 | SDValue |
3773 | ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { |
3774 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Val&: Op); |
3775 | if (DAG.getTarget().useEmulatedTLS()) |
3776 | return LowerToTLSEmulatedModel(GA, DAG); |
3777 | |
3778 | if (Subtarget->isTargetDarwin()) |
3779 | return LowerGlobalTLSAddressDarwin(Op, DAG); |
3780 | |
3781 | if (Subtarget->isTargetWindows()) |
3782 | return LowerGlobalTLSAddressWindows(Op, DAG); |
3783 | |
3784 | // TODO: implement the "local dynamic" model |
3785 | assert(Subtarget->isTargetELF() && "Only ELF implemented here" ); |
3786 | TLSModel::Model model = getTargetMachine().getTLSModel(GV: GA->getGlobal()); |
3787 | |
3788 | switch (model) { |
3789 | case TLSModel::GeneralDynamic: |
3790 | case TLSModel::LocalDynamic: |
3791 | return LowerToTLSGeneralDynamicModel(GA, DAG); |
3792 | case TLSModel::InitialExec: |
3793 | case TLSModel::LocalExec: |
3794 | return LowerToTLSExecModels(GA, DAG, model); |
3795 | } |
3796 | llvm_unreachable("bogus TLS model" ); |
3797 | } |
3798 | |
3799 | /// Return true if all users of V are within function F, looking through |
3800 | /// ConstantExprs. |
3801 | static bool allUsersAreInFunction(const Value *V, const Function *F) { |
3802 | SmallVector<const User*,4> Worklist(V->users()); |
3803 | while (!Worklist.empty()) { |
3804 | auto *U = Worklist.pop_back_val(); |
3805 | if (isa<ConstantExpr>(Val: U)) { |
3806 | append_range(C&: Worklist, R: U->users()); |
3807 | continue; |
3808 | } |
3809 | |
3810 | auto *I = dyn_cast<Instruction>(Val: U); |
3811 | if (!I || I->getParent()->getParent() != F) |
3812 | return false; |
3813 | } |
3814 | return true; |
3815 | } |
3816 | |
3817 | static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, |
3818 | const GlobalValue *GV, SelectionDAG &DAG, |
3819 | EVT PtrVT, const SDLoc &dl) { |
3820 | // If we're creating a pool entry for a constant global with unnamed address, |
3821 | // and the global is small enough, we can emit it inline into the constant pool |
3822 | // to save ourselves an indirection. |
3823 | // |
3824 | // This is a win if the constant is only used in one function (so it doesn't |
3825 | // need to be duplicated) or duplicating the constant wouldn't increase code |
3826 | // size (implying the constant is no larger than 4 bytes). |
3827 | const Function &F = DAG.getMachineFunction().getFunction(); |
3828 | |
  // We rely on this decision to inline being idempotent and unrelated to the
3830 | // use-site. We know that if we inline a variable at one use site, we'll |
3831 | // inline it elsewhere too (and reuse the constant pool entry). Fast-isel |
  // doesn't know about this optimization, so bail out if it's enabled;
  // otherwise we could decide to inline here (and thus never emit the GV)
  // while fast-isel generated code still requires the GV.
3835 | if (!EnableConstpoolPromotion || |
3836 | DAG.getMachineFunction().getTarget().Options.EnableFastISel) |
3837 | return SDValue(); |
3838 | |
3839 | auto *GVar = dyn_cast<GlobalVariable>(Val: GV); |
3840 | if (!GVar || !GVar->hasInitializer() || |
3841 | !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() || |
3842 | !GVar->hasLocalLinkage()) |
3843 | return SDValue(); |
3844 | |
3845 | // If we inline a value that contains relocations, we move the relocations |
3846 | // from .data to .text. This is not allowed in position-independent code. |
3847 | auto *Init = GVar->getInitializer(); |
3848 | if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) && |
3849 | Init->needsDynamicRelocation()) |
3850 | return SDValue(); |
3851 | |
3852 | // The constant islands pass can only really deal with alignment requests |
  // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
  // any type with alignment requirements greater than 4 bytes. We can also
  // only promote constants that are multiples of 4 bytes in size, or are
  // paddable to a multiple of 4. Currently we only try to pad constants
  // that are strings, for simplicity.
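  // For example, a 6-byte constant string would get RequiredPadding == 2 and
  // be zero-padded to 8 bytes before being placed in the constant pool (see
  // the padding loop below).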
3858 | auto *CDAInit = dyn_cast<ConstantDataArray>(Val: Init); |
3859 | unsigned Size = DAG.getDataLayout().getTypeAllocSize(Ty: Init->getType()); |
3860 | Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GV: GVar); |
3861 | unsigned RequiredPadding = 4 - (Size % 4); |
3862 | bool PaddingPossible = |
3863 | RequiredPadding == 4 || (CDAInit && CDAInit->isString()); |
3864 | if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize || |
3865 | Size == 0) |
3866 | return SDValue(); |
3867 | |
3868 | unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding); |
3869 | MachineFunction &MF = DAG.getMachineFunction(); |
3870 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
3871 | |
3872 | // We can't bloat the constant pool too much, else the ConstantIslands pass |
3873 | // may fail to converge. If we haven't promoted this global yet (it may have |
  // multiple uses), and promoting it would increase the constant pool size
  // (Size > 4), ensure we have space to do so up to MaxTotal.
3876 | if (!AFI->getGlobalsPromotedToConstantPool().count(Ptr: GVar) && Size > 4) |
3877 | if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >= |
3878 | ConstpoolPromotionMaxTotal) |
3879 | return SDValue(); |
3880 | |
3881 | // This is only valid if all users are in a single function; we can't clone |
3882 | // the constant in general. The LLVM IR unnamed_addr allows merging |
3883 | // constants, but not cloning them. |
3884 | // |
3885 | // We could potentially allow cloning if we could prove all uses of the |
3886 | // constant in the current function don't care about the address, like |
3887 | // printf format strings. But that isn't implemented for now. |
3888 | if (!allUsersAreInFunction(V: GVar, F: &F)) |
3889 | return SDValue(); |
3890 | |
3891 | // We're going to inline this global. Pad it out if needed. |
3892 | if (RequiredPadding != 4) { |
3893 | StringRef S = CDAInit->getAsString(); |
3894 | |
3895 | SmallVector<uint8_t,16> V(S.size()); |
3896 | std::copy(S.bytes_begin(), S.bytes_end(), V.begin()); |
3897 | while (RequiredPadding--) |
3898 | V.push_back(Elt: 0); |
3899 | Init = ConstantDataArray::get(Context&: *DAG.getContext(), Elts&: V); |
3900 | } |
3901 | |
3902 | auto CPVal = ARMConstantPoolConstant::Create(GV: GVar, Initializer: Init); |
3903 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPVal, VT: PtrVT, Align: Align(4)); |
3904 | if (!AFI->getGlobalsPromotedToConstantPool().count(Ptr: GVar)) { |
3905 | AFI->markGlobalAsPromotedToConstantPool(GV: GVar); |
3906 | AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() + |
3907 | PaddedSize - 4); |
3908 | } |
3909 | ++NumConstpoolPromoted; |
3910 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
3911 | } |
3912 | |
3913 | bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const { |
3914 | if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(Val: GV)) |
3915 | if (!(GV = GA->getAliaseeObject())) |
3916 | return false; |
3917 | if (const auto *V = dyn_cast<GlobalVariable>(Val: GV)) |
3918 | return V->isConstant(); |
3919 | return isa<Function>(Val: GV); |
3920 | } |
3921 | |
3922 | SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op, |
3923 | SelectionDAG &DAG) const { |
3924 | switch (Subtarget->getTargetTriple().getObjectFormat()) { |
3925 | default: llvm_unreachable("unknown object format" ); |
3926 | case Triple::COFF: |
3927 | return LowerGlobalAddressWindows(Op, DAG); |
3928 | case Triple::ELF: |
3929 | return LowerGlobalAddressELF(Op, DAG); |
3930 | case Triple::MachO: |
3931 | return LowerGlobalAddressDarwin(Op, DAG); |
3932 | } |
3933 | } |
3934 | |
3935 | SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, |
3936 | SelectionDAG &DAG) const { |
3937 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
3938 | SDLoc dl(Op); |
3939 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
3940 | bool IsRO = isReadOnly(GV); |
3941 | |
  // Only call promoteToConstantPool if we are not generating an execute-only
  // (XO) text section.
3943 | if (GV->isDSOLocal() && !Subtarget->genExecuteOnly()) |
3944 | if (SDValue V = promoteToConstantPool(TLI: this, GV, DAG, PtrVT, dl)) |
3945 | return V; |
3946 | |
3947 | if (isPositionIndependent()) { |
3948 | SDValue G = DAG.getTargetGlobalAddress( |
3949 | GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: GV->isDSOLocal() ? 0 : ARMII::MO_GOT); |
3950 | SDValue Result = DAG.getNode(Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVT, Operand: G); |
3951 | if (!GV->isDSOLocal()) |
3952 | Result = |
3953 | DAG.getLoad(VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Result, |
3954 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
3955 | return Result; |
3956 | } else if (Subtarget->isROPI() && IsRO) { |
3957 | // PC-relative. |
3958 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT); |
3959 | SDValue Result = DAG.getNode(Opcode: ARMISD::WrapperPIC, DL: dl, VT: PtrVT, Operand: G); |
3960 | return Result; |
3961 | } else if (Subtarget->isRWPI() && !IsRO) { |
3962 | // SB-relative. |
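    // With movw/movt this becomes, roughly:
    //   movw rT, :lower16:var(sbrel)
    //   movt rT, :upper16:var(sbrel)
    //   add  rD, r9, rT           @ r9 is the static base (SB)
    // Otherwise the sbrel offset is loaded from a literal pool instead.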
3963 | SDValue RelAddr; |
3964 | if (Subtarget->useMovt()) { |
3965 | ++NumMovwMovt; |
3966 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: ARMII::MO_SBREL); |
3967 | RelAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVT, Operand: G); |
3968 | } else { // use literal pool for address constant |
3969 | ARMConstantPoolValue *CPV = |
3970 | ARMConstantPoolConstant::Create(GV, Modifier: ARMCP::SBREL); |
3971 | SDValue CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
3972 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
3973 | RelAddr = DAG.getLoad( |
3974 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
3975 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
3976 | } |
3977 | SDValue SB = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: ARM::R9, VT: PtrVT); |
3978 | SDValue Result = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: SB, N2: RelAddr); |
3979 | return Result; |
3980 | } |
3981 | |
3982 | // If we have T2 ops, we can materialize the address directly via movt/movw |
  // pair. This is always cheaper. If we need to generate execute-only code and
  // we only have Thumb1 available, we can't use a constant pool and are forced
  // to use immediate relocations.
3986 | if (Subtarget->useMovt() || Subtarget->genExecuteOnly()) { |
3987 | if (Subtarget->useMovt()) |
3988 | ++NumMovwMovt; |
3989 | // FIXME: Once remat is capable of dealing with instructions with register |
3990 | // operands, expand this into two nodes. |
3991 | return DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: PtrVT, |
3992 | Operand: DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT)); |
3993 | } else { |
3994 | SDValue CPAddr = DAG.getTargetConstantPool(C: GV, VT: PtrVT, Align: Align(4)); |
3995 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
3996 | return DAG.getLoad( |
3997 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
3998 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
3999 | } |
4000 | } |
4001 | |
4002 | SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, |
4003 | SelectionDAG &DAG) const { |
4004 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
4005 | "ROPI/RWPI not currently supported for Darwin" ); |
4006 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
4007 | SDLoc dl(Op); |
4008 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
4009 | |
4010 | if (Subtarget->useMovt()) |
4011 | ++NumMovwMovt; |
4012 | |
4013 | // FIXME: Once remat is capable of dealing with instructions with register |
4014 | // operands, expand this into multiple nodes |
4015 | unsigned Wrapper = |
4016 | isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper; |
4017 | |
4018 | SDValue G = DAG.getTargetGlobalAddress(GV, DL: dl, VT: PtrVT, offset: 0, TargetFlags: ARMII::MO_NONLAZY); |
4019 | SDValue Result = DAG.getNode(Opcode: Wrapper, DL: dl, VT: PtrVT, Operand: G); |
4020 | |
4021 | if (Subtarget->isGVIndirectSymbol(GV)) |
4022 | Result = DAG.getLoad(VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: Result, |
4023 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
4024 | return Result; |
4025 | } |
4026 | |
4027 | SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, |
4028 | SelectionDAG &DAG) const { |
4029 | assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported" ); |
4030 | assert(Subtarget->useMovt() && |
4031 | "Windows on ARM expects to use movw/movt" ); |
4032 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
4033 | "ROPI/RWPI not currently supported for Windows" ); |
4034 | |
4035 | const TargetMachine &TM = getTargetMachine(); |
4036 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Val&: Op)->getGlobal(); |
4037 | ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG; |
4038 | if (GV->hasDLLImportStorageClass()) |
4039 | TargetFlags = ARMII::MO_DLLIMPORT; |
4040 | else if (!TM.shouldAssumeDSOLocal(GV)) |
4041 | TargetFlags = ARMII::MO_COFFSTUB; |
4042 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
4043 | SDValue Result; |
4044 | SDLoc DL(Op); |
4045 | |
4046 | ++NumMovwMovt; |
4047 | |
4048 | // FIXME: Once remat is capable of dealing with instructions with register |
4049 | // operands, expand this into two nodes. |
4050 | Result = DAG.getNode(Opcode: ARMISD::Wrapper, DL, VT: PtrVT, |
4051 | Operand: DAG.getTargetGlobalAddress(GV, DL, VT: PtrVT, /*offset=*/0, |
4052 | TargetFlags)); |
4053 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) |
4054 | Result = DAG.getLoad(VT: PtrVT, dl: DL, Chain: DAG.getEntryNode(), Ptr: Result, |
4055 | PtrInfo: MachinePointerInfo::getGOT(MF&: DAG.getMachineFunction())); |
4056 | return Result; |
4057 | } |
4058 | |
4059 | SDValue |
4060 | ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { |
4061 | SDLoc dl(Op); |
4062 | SDValue Val = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
4063 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_SETJMP, DL: dl, |
4064 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), N1: Op.getOperand(i: 0), |
4065 | N2: Op.getOperand(i: 1), N3: Val); |
4066 | } |
4067 | |
4068 | SDValue |
4069 | ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { |
4070 | SDLoc dl(Op); |
4071 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_LONGJMP, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
4072 | N2: Op.getOperand(i: 1), N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
4073 | } |
4074 | |
4075 | SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, |
4076 | SelectionDAG &DAG) const { |
4077 | SDLoc dl(Op); |
4078 | return DAG.getNode(Opcode: ARMISD::EH_SJLJ_SETUP_DISPATCH, DL: dl, VT: MVT::Other, |
4079 | Operand: Op.getOperand(i: 0)); |
4080 | } |
4081 | |
4082 | SDValue ARMTargetLowering::LowerINTRINSIC_VOID( |
4083 | SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const { |
4084 | unsigned IntNo = |
4085 | Op.getConstantOperandVal(i: Op.getOperand(i: 0).getValueType() == MVT::Other); |
4086 | switch (IntNo) { |
4087 | default: |
4088 | return SDValue(); // Don't custom lower most intrinsics. |
4089 | case Intrinsic::arm_gnu_eabi_mcount: { |
4090 | MachineFunction &MF = DAG.getMachineFunction(); |
4091 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
4092 | SDLoc dl(Op); |
4093 | SDValue Chain = Op.getOperand(i: 0); |
4094 | // call "\01__gnu_mcount_nc" |
4095 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
4096 | const uint32_t *Mask = |
4097 | ARI->getCallPreservedMask(MF: DAG.getMachineFunction(), CallingConv::C); |
4098 | assert(Mask && "Missing call preserved mask for calling convention" ); |
4099 | // Mark LR an implicit live-in. |
4100 | Register Reg = MF.addLiveIn(PReg: ARM::LR, RC: getRegClassFor(VT: MVT::i32)); |
4101 | SDValue ReturnAddress = |
4102 | DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg, VT: PtrVT); |
4103 | constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue}; |
4104 | SDValue Callee = |
4105 | DAG.getTargetExternalSymbol(Sym: "\01__gnu_mcount_nc" , VT: PtrVT, TargetFlags: 0); |
4106 | SDValue RegisterMask = DAG.getRegisterMask(RegMask: Mask); |
4107 | if (Subtarget->isThumb()) |
4108 | return SDValue( |
4109 | DAG.getMachineNode( |
4110 | Opcode: ARM::tBL_PUSHLR, dl, ResultTys, |
4111 | Ops: {ReturnAddress, DAG.getTargetConstant(Val: ARMCC::AL, DL: dl, VT: PtrVT), |
4112 | DAG.getRegister(Reg: 0, VT: PtrVT), Callee, RegisterMask, Chain}), |
4113 | 0); |
4114 | return SDValue( |
4115 | DAG.getMachineNode(Opcode: ARM::BL_PUSHLR, dl, ResultTys, |
4116 | Ops: {ReturnAddress, Callee, RegisterMask, Chain}), |
4117 | 0); |
4118 | } |
4119 | } |
4120 | } |
4121 | |
4122 | SDValue |
4123 | ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, |
4124 | const ARMSubtarget *Subtarget) const { |
4125 | unsigned IntNo = Op.getConstantOperandVal(i: 0); |
4126 | SDLoc dl(Op); |
4127 | switch (IntNo) { |
4128 | default: return SDValue(); // Don't custom lower most intrinsics. |
4129 | case Intrinsic::thread_pointer: { |
4130 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
4131 | return DAG.getNode(Opcode: ARMISD::THREAD_POINTER, DL: dl, VT: PtrVT); |
4132 | } |
4133 | case Intrinsic::arm_cls: { |
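    // The expansion below uses the identity (for 32-bit x):
    //   cls(x) == clz(((x ^ (x >> 31)) << 1) | 1)
    // x ^ (x >> 31) maps negative values to their bitwise complement, so bit 31
    // of the result is always clear; the "<< 1 | 1" then removes that bit from
    // the count and keeps the clz operand non-zero. E.g. cls(0) == clz(1) == 31.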
4134 | const SDValue &Operand = Op.getOperand(i: 1); |
4135 | const EVT VTy = Op.getValueType(); |
4136 | SDValue SRA = |
4137 | DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: VTy, N1: Operand, N2: DAG.getConstant(Val: 31, DL: dl, VT: VTy)); |
4138 | SDValue XOR = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: VTy, N1: SRA, N2: Operand); |
4139 | SDValue SHL = |
4140 | DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: VTy, N1: XOR, N2: DAG.getConstant(Val: 1, DL: dl, VT: VTy)); |
4141 | SDValue OR = |
4142 | DAG.getNode(Opcode: ISD::OR, DL: dl, VT: VTy, N1: SHL, N2: DAG.getConstant(Val: 1, DL: dl, VT: VTy)); |
4143 | SDValue Result = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: OR); |
4144 | return Result; |
4145 | } |
4146 | case Intrinsic::arm_cls64: { |
4147 | // cls(x) = if cls(hi(x)) != 31 then cls(hi(x)) |
4148 | // else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x))) |
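    // Worked example of the formula above: for x = 0x0000000000000001 we get
    // cls(hi) == 31 and hi == 0, so the result is 31 + clz(lo) = 31 + 31 = 62,
    // i.e. 62 bits after the sign bit are identical to it.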
4149 | const SDValue &Operand = Op.getOperand(i: 1); |
4150 | const EVT VTy = Op.getValueType(); |
4151 | SDValue Lo, Hi; |
4152 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Operand, DL: dl, LoVT: VTy, HiVT: VTy); |
4153 | SDValue Constant0 = DAG.getConstant(Val: 0, DL: dl, VT: VTy); |
4154 | SDValue Constant1 = DAG.getConstant(Val: 1, DL: dl, VT: VTy); |
4155 | SDValue Constant31 = DAG.getConstant(Val: 31, DL: dl, VT: VTy); |
4156 | SDValue SRAHi = DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: VTy, N1: Hi, N2: Constant31); |
4157 | SDValue XORHi = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: VTy, N1: SRAHi, N2: Hi); |
4158 | SDValue SHLHi = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: VTy, N1: XORHi, N2: Constant1); |
4159 | SDValue ORHi = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: VTy, N1: SHLHi, N2: Constant1); |
4160 | SDValue CLSHi = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: ORHi); |
4161 | SDValue CheckLo = |
4162 | DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: CLSHi, RHS: Constant31, Cond: ISD::CondCode::SETEQ); |
4163 | SDValue HiIsZero = |
4164 | DAG.getSetCC(DL: dl, VT: MVT::i1, LHS: Hi, RHS: Constant0, Cond: ISD::CondCode::SETEQ); |
4165 | SDValue AdjustedLo = |
4166 | DAG.getSelect(DL: dl, VT: VTy, Cond: HiIsZero, LHS: Lo, RHS: DAG.getNOT(DL: dl, Val: Lo, VT: VTy)); |
4167 | SDValue CLZAdjustedLo = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT: VTy, Operand: AdjustedLo); |
4168 | SDValue Result = |
4169 | DAG.getSelect(DL: dl, VT: VTy, Cond: CheckLo, |
4170 | LHS: DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: VTy, N1: CLZAdjustedLo, N2: Constant31), RHS: CLSHi); |
4171 | return Result; |
4172 | } |
4173 | case Intrinsic::eh_sjlj_lsda: { |
4174 | MachineFunction &MF = DAG.getMachineFunction(); |
4175 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
4176 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
4177 | EVT PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
4178 | SDValue CPAddr; |
4179 | bool IsPositionIndependent = isPositionIndependent(); |
4180 | unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; |
4181 | ARMConstantPoolValue *CPV = |
4182 | ARMConstantPoolConstant::Create(C: &MF.getFunction(), ID: ARMPCLabelIndex, |
4183 | Kind: ARMCP::CPLSDA, PCAdj); |
4184 | CPAddr = DAG.getTargetConstantPool(C: CPV, VT: PtrVT, Align: Align(4)); |
4185 | CPAddr = DAG.getNode(Opcode: ARMISD::Wrapper, DL: dl, VT: MVT::i32, Operand: CPAddr); |
4186 | SDValue Result = DAG.getLoad( |
4187 | VT: PtrVT, dl, Chain: DAG.getEntryNode(), Ptr: CPAddr, |
4188 | PtrInfo: MachinePointerInfo::getConstantPool(MF&: DAG.getMachineFunction())); |
4189 | |
4190 | if (IsPositionIndependent) { |
4191 | SDValue PICLabel = DAG.getConstant(Val: ARMPCLabelIndex, DL: dl, VT: MVT::i32); |
4192 | Result = DAG.getNode(Opcode: ARMISD::PIC_ADD, DL: dl, VT: PtrVT, N1: Result, N2: PICLabel); |
4193 | } |
4194 | return Result; |
4195 | } |
4196 | case Intrinsic::arm_neon_vabs: |
4197 | return DAG.getNode(Opcode: ISD::ABS, DL: SDLoc(Op), VT: Op.getValueType(), |
4198 | Operand: Op.getOperand(i: 1)); |
4199 | case Intrinsic::arm_neon_vabds: |
4200 | if (Op.getValueType().isInteger()) |
4201 | return DAG.getNode(Opcode: ISD::ABDS, DL: SDLoc(Op), VT: Op.getValueType(), |
4202 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
4203 | return SDValue(); |
4204 | case Intrinsic::arm_neon_vabdu: |
4205 | return DAG.getNode(Opcode: ISD::ABDU, DL: SDLoc(Op), VT: Op.getValueType(), |
4206 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
4207 | case Intrinsic::arm_neon_vmulls: |
4208 | case Intrinsic::arm_neon_vmullu: { |
4209 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) |
4210 | ? ARMISD::VMULLs : ARMISD::VMULLu; |
4211 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
4212 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
4213 | } |
4214 | case Intrinsic::arm_neon_vminnm: |
4215 | case Intrinsic::arm_neon_vmaxnm: { |
4216 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm) |
4217 | ? ISD::FMINNUM : ISD::FMAXNUM; |
4218 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
4219 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
4220 | } |
4221 | case Intrinsic::arm_neon_vminu: |
4222 | case Intrinsic::arm_neon_vmaxu: { |
4223 | if (Op.getValueType().isFloatingPoint()) |
4224 | return SDValue(); |
4225 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu) |
4226 | ? ISD::UMIN : ISD::UMAX; |
4227 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
4228 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
4229 | } |
4230 | case Intrinsic::arm_neon_vmins: |
4231 | case Intrinsic::arm_neon_vmaxs: { |
4232 | // v{min,max}s is overloaded between signed integers and floats. |
4233 | if (!Op.getValueType().isFloatingPoint()) { |
4234 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
4235 | ? ISD::SMIN : ISD::SMAX; |
4236 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
4237 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
4238 | } |
4239 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
4240 | ? ISD::FMINIMUM : ISD::FMAXIMUM; |
4241 | return DAG.getNode(Opcode: NewOpc, DL: SDLoc(Op), VT: Op.getValueType(), |
4242 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
4243 | } |
4244 | case Intrinsic::arm_neon_vtbl1: |
4245 | return DAG.getNode(Opcode: ARMISD::VTBL1, DL: SDLoc(Op), VT: Op.getValueType(), |
4246 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2)); |
4247 | case Intrinsic::arm_neon_vtbl2: |
4248 | return DAG.getNode(Opcode: ARMISD::VTBL2, DL: SDLoc(Op), VT: Op.getValueType(), |
4249 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
4250 | case Intrinsic::arm_mve_pred_i2v: |
4251 | case Intrinsic::arm_mve_pred_v2i: |
4252 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: SDLoc(Op), VT: Op.getValueType(), |
4253 | Operand: Op.getOperand(i: 1)); |
4254 | case Intrinsic::arm_mve_vreinterpretq: |
4255 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: SDLoc(Op), VT: Op.getValueType(), |
4256 | Operand: Op.getOperand(i: 1)); |
4257 | case Intrinsic::arm_mve_lsll: |
4258 | return DAG.getNode(Opcode: ARMISD::LSLL, DL: SDLoc(Op), VTList: Op->getVTList(), |
4259 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
4260 | case Intrinsic::arm_mve_asrl: |
4261 | return DAG.getNode(Opcode: ARMISD::ASRL, DL: SDLoc(Op), VTList: Op->getVTList(), |
4262 | N1: Op.getOperand(i: 1), N2: Op.getOperand(i: 2), N3: Op.getOperand(i: 3)); |
4263 | } |
4264 | } |
4265 | |
4266 | static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, |
4267 | const ARMSubtarget *Subtarget) { |
4268 | SDLoc dl(Op); |
4269 | auto SSID = static_cast<SyncScope::ID>(Op.getConstantOperandVal(i: 2)); |
4270 | if (SSID == SyncScope::SingleThread) |
4271 | return Op; |
4272 | |
4273 | if (!Subtarget->hasDataBarrier()) { |
    // Some ARMv6 CPUs can support data barriers with an mcr instruction.
4275 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
4276 | // here. |
4277 | assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && |
4278 | "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!" ); |
4279 | return DAG.getNode(Opcode: ARMISD::MEMBARRIER_MCR, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
4280 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
4281 | } |
4282 | |
4283 | AtomicOrdering Ord = |
4284 | static_cast<AtomicOrdering>(Op.getConstantOperandVal(i: 1)); |
4285 | ARM_MB::MemBOpt Domain = ARM_MB::ISH; |
4286 | if (Subtarget->isMClass()) { |
4287 | // Only a full system barrier exists in the M-class architectures. |
4288 | Domain = ARM_MB::SY; |
4289 | } else if (Subtarget->preferISHSTBarriers() && |
4290 | Ord == AtomicOrdering::Release) { |
4291 | // Swift happens to implement ISHST barriers in a way that's compatible with |
4292 | // Release semantics but weaker than ISH so we'd be fools not to use |
4293 | // it. Beware: other processors probably don't! |
4294 | Domain = ARM_MB::ISHST; |
4295 | } |
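  // The node below is selected to a "dmb" with the chosen option, e.g.
  // (roughly) "dmb ish" on an A/R-class core, "dmb sy" on M-class, or
  // "dmb ishst" for a release-only fence on cores that prefer ISHST barriers.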
4296 | |
4297 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
4298 | N2: DAG.getConstant(Val: Intrinsic::arm_dmb, DL: dl, VT: MVT::i32), |
4299 | N3: DAG.getConstant(Val: Domain, DL: dl, VT: MVT::i32)); |
4300 | } |
4301 | |
4302 | static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, |
4303 | const ARMSubtarget *Subtarget) { |
  // Pre-v5TE ARM and Thumb1 do not have preload instructions.
4305 | if (!(Subtarget->isThumb2() || |
4306 | (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) |
4307 | // Just preserve the chain. |
4308 | return Op.getOperand(i: 0); |
4309 | |
4310 | SDLoc dl(Op); |
4311 | unsigned isRead = ~Op.getConstantOperandVal(i: 2) & 1; |
4312 | if (!isRead && |
4313 | (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) |
4314 | // ARMv7 with MP extension has PLDW. |
4315 | return Op.getOperand(i: 0); |
4316 | |
4317 | unsigned isData = Op.getConstantOperandVal(i: 4); |
4318 | if (Subtarget->isThumb()) { |
4319 | // Invert the bits. |
4320 | isRead = ~isRead & 1; |
4321 | isData = ~isData & 1; |
4322 | } |
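  // The ARMISD::PRELOAD node built below is then matched to a concrete
  // instruction, which (roughly) ends up as PLD for a data read, PLDW for a
  // data write (ARMv7 with the MP extension), or PLI for an instruction
  // prefetch.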
4323 | |
4324 | return DAG.getNode(Opcode: ARMISD::PRELOAD, DL: dl, VT: MVT::Other, N1: Op.getOperand(i: 0), |
4325 | N2: Op.getOperand(i: 1), N3: DAG.getConstant(Val: isRead, DL: dl, VT: MVT::i32), |
4326 | N4: DAG.getConstant(Val: isData, DL: dl, VT: MVT::i32)); |
4327 | } |
4328 | |
4329 | static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { |
4330 | MachineFunction &MF = DAG.getMachineFunction(); |
4331 | ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); |
4332 | |
4333 | // vastart just stores the address of the VarArgsFrameIndex slot into the |
4334 | // memory location argument. |
4335 | SDLoc dl(Op); |
4336 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DL: DAG.getDataLayout()); |
4337 | SDValue FR = DAG.getFrameIndex(FI: FuncInfo->getVarArgsFrameIndex(), VT: PtrVT); |
4338 | const Value *SV = cast<SrcValueSDNode>(Val: Op.getOperand(i: 2))->getValue(); |
4339 | return DAG.getStore(Chain: Op.getOperand(i: 0), dl, Val: FR, Ptr: Op.getOperand(i: 1), |
4340 | PtrInfo: MachinePointerInfo(SV)); |
4341 | } |
4342 | |
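// Roughly: reassemble an f64 formal argument that the calling convention has
// split across two i32 locations (two GPRs, or a GPR plus a stack slot) by
// copying/loading both halves and combining them with VMOVDRR, swapping the
// halves on big-endian targets.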
4343 | SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, |
4344 | CCValAssign &NextVA, |
4345 | SDValue &Root, |
4346 | SelectionDAG &DAG, |
4347 | const SDLoc &dl) const { |
4348 | MachineFunction &MF = DAG.getMachineFunction(); |
4349 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
4350 | |
4351 | const TargetRegisterClass *RC; |
4352 | if (AFI->isThumb1OnlyFunction()) |
4353 | RC = &ARM::tGPRRegClass; |
4354 | else |
4355 | RC = &ARM::GPRRegClass; |
4356 | |
4357 | // Transform the arguments stored in physical registers into virtual ones. |
4358 | Register Reg = MF.addLiveIn(PReg: VA.getLocReg(), RC); |
4359 | SDValue ArgValue = DAG.getCopyFromReg(Chain: Root, dl, Reg, VT: MVT::i32); |
4360 | |
4361 | SDValue ArgValue2; |
4362 | if (NextVA.isMemLoc()) { |
4363 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
4364 | int FI = MFI.CreateFixedObject(Size: 4, SPOffset: NextVA.getLocMemOffset(), IsImmutable: true); |
4365 | |
4366 | // Create load node to retrieve arguments from the stack. |
4367 | SDValue FIN = DAG.getFrameIndex(FI, VT: getPointerTy(DL: DAG.getDataLayout())); |
4368 | ArgValue2 = DAG.getLoad( |
4369 | VT: MVT::i32, dl, Chain: Root, Ptr: FIN, |
4370 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI)); |
4371 | } else { |
4372 | Reg = MF.addLiveIn(PReg: NextVA.getLocReg(), RC); |
4373 | ArgValue2 = DAG.getCopyFromReg(Chain: Root, dl, Reg, VT: MVT::i32); |
4374 | } |
4375 | if (!Subtarget->isLittle()) |
4376 | std::swap (a&: ArgValue, b&: ArgValue2); |
4377 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: ArgValue, N2: ArgValue2); |
4378 | } |
4379 | |
4380 | // The remaining GPRs hold either the beginning of variable-argument |
4381 | // data, or the beginning of an aggregate passed by value (usually |
4382 | // byval). Either way, we allocate stack slots adjacent to the data |
4383 | // provided by our caller, and store the unallocated registers there. |
4384 | // If this is a variadic function, the va_list pointer will begin with |
4385 | // these values; otherwise, this reassembles a (byval) structure that |
4386 | // was split between registers and memory. |
4387 | // Return: The frame index registers were stored into. |
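// For example (a sketch): in a variadic function where r0-r1 are taken by
// named arguments, r2 and r3 are stored to two stack slots placed directly
// below the incoming stack arguments, so va_arg can walk from the saved
// registers into the memory arguments through one contiguous region.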
4388 | int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, |
4389 | const SDLoc &dl, SDValue &Chain, |
4390 | const Value *OrigArg, |
4391 | unsigned InRegsParamRecordIdx, |
4392 | int ArgOffset, unsigned ArgSize) const { |
  // Currently, two use-cases are possible:
  // Case #1. Non-var-args function, and we meet the first byval parameter.
  //          Set up the first unallocated register as the first byval register;
  //          eat all remaining registers
  //          (these two actions are performed by the HandleByVal method).
  //          Then, here, we initialize the stack frame with
  //          "store-reg" instructions.
  // Case #2. Var-args function that doesn't contain byval parameters.
  //          The same: eat all remaining unallocated registers,
  //          initialize the stack frame.
4403 | |
4404 | MachineFunction &MF = DAG.getMachineFunction(); |
4405 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
4406 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
4407 | unsigned RBegin, REnd; |
4408 | if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { |
4409 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: InRegsParamRecordIdx, BeginReg&: RBegin, EndReg&: REnd); |
4410 | } else { |
4411 | unsigned RBeginIdx = CCInfo.getFirstUnallocated(Regs: GPRArgRegs); |
4412 | RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx]; |
4413 | REnd = ARM::R4; |
4414 | } |
4415 | |
4416 | if (REnd != RBegin) |
4417 | ArgOffset = -4 * (ARM::R4 - RBegin); |
4418 | |
4419 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
4420 | int FrameIndex = MFI.CreateFixedObject(Size: ArgSize, SPOffset: ArgOffset, IsImmutable: false); |
4421 | SDValue FIN = DAG.getFrameIndex(FI: FrameIndex, VT: PtrVT); |
4422 | |
4423 | SmallVector<SDValue, 4> MemOps; |
4424 | const TargetRegisterClass *RC = |
4425 | AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
4426 | |
4427 | for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) { |
4428 | Register VReg = MF.addLiveIn(PReg: Reg, RC); |
4429 | SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg: VReg, VT: MVT::i32); |
4430 | SDValue Store = DAG.getStore(Chain: Val.getValue(R: 1), dl, Val, Ptr: FIN, |
4431 | PtrInfo: MachinePointerInfo(OrigArg, 4 * i)); |
4432 | MemOps.push_back(Elt: Store); |
4433 | FIN = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: FIN, N2: DAG.getConstant(Val: 4, DL: dl, VT: PtrVT)); |
4434 | } |
4435 | |
4436 | if (!MemOps.empty()) |
4437 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, Ops: MemOps); |
4438 | return FrameIndex; |
4439 | } |
4440 | |
// Set up the stack frame that the va_list pointer will start from.
4442 | void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, |
4443 | const SDLoc &dl, SDValue &Chain, |
4444 | unsigned ArgOffset, |
4445 | unsigned TotalArgRegsSaveSize, |
4446 | bool ForceMutable) const { |
4447 | MachineFunction &MF = DAG.getMachineFunction(); |
4448 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
4449 | |
4450 | // Try to store any remaining integer argument regs |
4451 | // to their spots on the stack so that they may be loaded by dereferencing |
4452 | // the result of va_next. |
  // If there are no regs to be stored, just point the address after the last
  // argument passed via the stack.
4455 | int FrameIndex = StoreByValRegs( |
4456 | CCInfo, DAG, dl, Chain, OrigArg: nullptr, InRegsParamRecordIdx: CCInfo.getInRegsParamsCount(), |
4457 | ArgOffset: CCInfo.getStackSize(), ArgSize: std::max(a: 4U, b: TotalArgRegsSaveSize)); |
4458 | AFI->setVarArgsFrameIndex(FrameIndex); |
4459 | } |
4460 | |
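// When an f16/bf16 value is passed or returned in an f32 register, the two
// hooks below place the half-precision bits in the low 16 bits of the f32
// (bitcast to i16, any-extend to i32, bitcast to f32) and extract them again
// on the other side; the upper 16 bits are left unspecified.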
4461 | bool ARMTargetLowering::splitValueIntoRegisterParts( |
4462 | SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, |
4463 | unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const { |
4464 | EVT ValueVT = Val.getValueType(); |
4465 | if ((ValueVT == MVT::f16 || ValueVT == MVT::bf16) && PartVT == MVT::f32) { |
4466 | unsigned ValueBits = ValueVT.getSizeInBits(); |
4467 | unsigned PartBits = PartVT.getSizeInBits(); |
4468 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::getIntegerVT(BitWidth: ValueBits), Operand: Val); |
4469 | Val = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: MVT::getIntegerVT(BitWidth: PartBits), Operand: Val); |
4470 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: PartVT, Operand: Val); |
4471 | Parts[0] = Val; |
4472 | return true; |
4473 | } |
4474 | return false; |
4475 | } |
4476 | |
4477 | SDValue ARMTargetLowering::joinRegisterPartsIntoValue( |
4478 | SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, |
4479 | MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const { |
4480 | if ((ValueVT == MVT::f16 || ValueVT == MVT::bf16) && PartVT == MVT::f32) { |
4481 | unsigned ValueBits = ValueVT.getSizeInBits(); |
4482 | unsigned PartBits = PartVT.getSizeInBits(); |
4483 | SDValue Val = Parts[0]; |
4484 | |
4485 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::getIntegerVT(BitWidth: PartBits), Operand: Val); |
4486 | Val = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: MVT::getIntegerVT(BitWidth: ValueBits), Operand: Val); |
4487 | Val = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: ValueVT, Operand: Val); |
4488 | return Val; |
4489 | } |
4490 | return SDValue(); |
4491 | } |
4492 | |
4493 | SDValue ARMTargetLowering::LowerFormalArguments( |
4494 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
4495 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
4496 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
4497 | MachineFunction &MF = DAG.getMachineFunction(); |
4498 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
4499 | |
4500 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
4501 | |
4502 | // Assign locations to all of the incoming arguments. |
4503 | SmallVector<CCValAssign, 16> ArgLocs; |
4504 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
4505 | *DAG.getContext()); |
4506 | CCInfo.AnalyzeFormalArguments(Ins, Fn: CCAssignFnForCall(CC: CallConv, isVarArg)); |
4507 | |
4508 | Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); |
4509 | unsigned CurArgIdx = 0; |
4510 | |
// Initially ArgRegsSaveSize is zero.
// Then we increase this value each time we meet a byval parameter.
// We also increase this value in the case of a varargs function.
4514 | AFI->setArgRegsSaveSize(0); |
4515 | |
4516 | // Calculate the amount of stack space that we need to allocate to store |
4517 | // byval and variadic arguments that are passed in registers. |
4518 | // We need to know this before we allocate the first byval or variadic |
4519 | // argument, as they will be allocated a stack slot below the CFA (Canonical |
4520 | // Frame Address, the stack pointer at entry to the function). |
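// For example, in a variadic function whose fixed arguments occupy r0-r1,
// the remaining GPR argument registers r2-r3 must be given stack homes below
// the CFA, so the save size computed below would be 2 * 4 == 8 bytes.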
4521 | unsigned ArgRegBegin = ARM::R4; |
4522 | for (const CCValAssign &VA : ArgLocs) { |
4523 | if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount()) |
4524 | break; |
4525 | |
4526 | unsigned Index = VA.getValNo(); |
4527 | ISD::ArgFlagsTy Flags = Ins[Index].Flags; |
4528 | if (!Flags.isByVal()) |
4529 | continue; |
4530 | |
assert(VA.isMemLoc() && "unexpected byval pointer in reg");
4532 | unsigned RBegin, REnd; |
4533 | CCInfo.getInRegsParamInfo(InRegsParamRecordIndex: CCInfo.getInRegsParamsProcessed(), BeginReg&: RBegin, EndReg&: REnd); |
4534 | ArgRegBegin = std::min(a: ArgRegBegin, b: RBegin); |
4535 | |
4536 | CCInfo.nextInRegsParam(); |
4537 | } |
4538 | CCInfo.rewindByValRegsInfo(); |
4539 | |
4540 | int lastInsIndex = -1; |
4541 | if (isVarArg && MFI.hasVAStart()) { |
4542 | unsigned RegIdx = CCInfo.getFirstUnallocated(Regs: GPRArgRegs); |
4543 | if (RegIdx != std::size(GPRArgRegs)) |
4544 | ArgRegBegin = std::min(a: ArgRegBegin, b: (unsigned)GPRArgRegs[RegIdx]); |
4545 | } |
4546 | |
4547 | unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin); |
4548 | AFI->setArgRegsSaveSize(TotalArgRegsSaveSize); |
4549 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
4550 | |
4551 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
4552 | CCValAssign &VA = ArgLocs[i]; |
4553 | if (Ins[VA.getValNo()].isOrigArg()) { |
4554 | std::advance(i&: CurOrigArg, |
4555 | n: Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx); |
4556 | CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex(); |
4557 | } |
4558 | // Arguments stored in registers. |
4559 | if (VA.isRegLoc()) { |
4560 | EVT RegVT = VA.getLocVT(); |
4561 | SDValue ArgValue; |
4562 | |
4563 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { |
4564 | // f64 and vector types are split up into multiple registers or |
4565 | // combinations of registers and stack slots. |
4566 | SDValue ArgValue1 = |
4567 | GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
4568 | VA = ArgLocs[++i]; // skip ahead to next loc |
4569 | SDValue ArgValue2; |
4570 | if (VA.isMemLoc()) { |
4571 | int FI = MFI.CreateFixedObject(Size: 8, SPOffset: VA.getLocMemOffset(), IsImmutable: true); |
4572 | SDValue FIN = DAG.getFrameIndex(FI, VT: PtrVT); |
4573 | ArgValue2 = DAG.getLoad( |
4574 | VT: MVT::f64, dl, Chain, Ptr: FIN, |
4575 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI)); |
4576 | } else { |
4577 | ArgValue2 = GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
4578 | } |
4579 | ArgValue = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: MVT::v2f64); |
4580 | ArgValue = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: ArgValue, |
4581 | N2: ArgValue1, N3: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
4582 | ArgValue = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: ArgValue, |
4583 | N2: ArgValue2, N3: DAG.getIntPtrConstant(Val: 1, DL: dl)); |
4584 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { |
4585 | ArgValue = GetF64FormalArgument(VA, NextVA&: ArgLocs[++i], Root&: Chain, DAG, dl); |
4586 | } else { |
4587 | const TargetRegisterClass *RC; |
4588 | |
4589 | if (RegVT == MVT::f16 || RegVT == MVT::bf16) |
4590 | RC = &ARM::HPRRegClass; |
4591 | else if (RegVT == MVT::f32) |
4592 | RC = &ARM::SPRRegClass; |
4593 | else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 || |
4594 | RegVT == MVT::v4bf16) |
4595 | RC = &ARM::DPRRegClass; |
4596 | else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 || |
4597 | RegVT == MVT::v8bf16) |
4598 | RC = &ARM::QPRRegClass; |
4599 | else if (RegVT == MVT::i32) |
4600 | RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass |
4601 | : &ARM::GPRRegClass; |
4602 | else |
llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
4604 | |
4605 | // Transform the arguments in physical registers into virtual ones. |
4606 | Register Reg = MF.addLiveIn(PReg: VA.getLocReg(), RC); |
4607 | ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, VT: RegVT); |
4608 | |
4609 | // If this value is passed in r0 and has the returned attribute (e.g. |
4610 | // C++ 'structors), record this fact for later use. |
4611 | if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) { |
4612 | AFI->setPreservesR0(); |
4613 | } |
4614 | } |
4615 | |
4616 | // If this is an 8 or 16-bit value, it is really passed promoted |
4617 | // to 32 bits. Insert an assert[sz]ext to capture this, then |
4618 | // truncate to the right size. |
4619 | switch (VA.getLocInfo()) { |
default: llvm_unreachable("Unknown loc info!");
4621 | case CCValAssign::Full: break; |
4622 | case CCValAssign::BCvt: |
4623 | ArgValue = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VA.getValVT(), Operand: ArgValue); |
4624 | break; |
4625 | } |
4626 | |
// f16 arguments have their size extended to 4 bytes and are passed as if
// they had been copied to the LSBs of a 32-bit register.
// For that, each is passed extended to i32 (soft ABI) or to f32 (hard ABI).
4630 | if (VA.needsCustom() && |
4631 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) |
4632 | ArgValue = MoveToHPR(dl, DAG, LocVT: VA.getLocVT(), ValVT: VA.getValVT(), Val: ArgValue); |
4633 | |
4634 | // On CMSE Entry Functions, formal integer arguments whose bitwidth is |
4635 | // less than 32 bits must be sign- or zero-extended in the callee for |
4636 | // security reasons. Although the ABI mandates an extension done by the |
4637 | // caller, the latter cannot be trusted to follow the rules of the ABI. |
4638 | const ISD::InputArg &Arg = Ins[VA.getValNo()]; |
4639 | if (AFI->isCmseNSEntryFunction() && Arg.ArgVT.isScalarInteger() && |
4640 | RegVT.isScalarInteger() && Arg.ArgVT.bitsLT(VT: MVT::i32)) |
4641 | ArgValue = handleCMSEValue(Value: ArgValue, Arg, DAG, DL: dl); |
4642 | |
4643 | InVals.push_back(Elt: ArgValue); |
} else { // !VA.isRegLoc()
4645 | // Only arguments passed on the stack should make it here. |
4646 | assert(VA.isMemLoc()); |
assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
4648 | |
4649 | int index = VA.getValNo(); |
4650 | |
4651 | // Some Ins[] entries become multiple ArgLoc[] entries. |
4652 | // Process them only once. |
4653 | if (index != lastInsIndex) |
4654 | { |
4655 | ISD::ArgFlagsTy Flags = Ins[index].Flags; |
4656 | // FIXME: For now, all byval parameter objects are marked mutable. |
4657 | // This can be changed with more analysis. |
// In case of tail call optimization, mark all arguments mutable, since
// they could be overwritten by the lowering of arguments in case of a
// tail call.
4661 | if (Flags.isByVal()) { |
assert(Ins[index].isOrigArg() &&
       "Byval arguments cannot be implicit");
4664 | unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed(); |
4665 | |
4666 | int FrameIndex = StoreByValRegs( |
4667 | CCInfo, DAG, dl, Chain, OrigArg: &*CurOrigArg, InRegsParamRecordIdx: CurByValIndex, |
4668 | ArgOffset: VA.getLocMemOffset(), ArgSize: Flags.getByValSize()); |
4669 | InVals.push_back(Elt: DAG.getFrameIndex(FI: FrameIndex, VT: PtrVT)); |
4670 | CCInfo.nextInRegsParam(); |
4671 | } else { |
4672 | unsigned FIOffset = VA.getLocMemOffset(); |
4673 | int FI = MFI.CreateFixedObject(Size: VA.getLocVT().getSizeInBits()/8, |
4674 | SPOffset: FIOffset, IsImmutable: true); |
4675 | |
4676 | // Create load nodes to retrieve arguments from the stack. |
4677 | SDValue FIN = DAG.getFrameIndex(FI, VT: PtrVT); |
4678 | InVals.push_back(Elt: DAG.getLoad(VT: VA.getValVT(), dl, Chain, Ptr: FIN, |
4679 | PtrInfo: MachinePointerInfo::getFixedStack( |
4680 | MF&: DAG.getMachineFunction(), FI))); |
4681 | } |
4682 | lastInsIndex = index; |
4683 | } |
4684 | } |
4685 | } |
4686 | |
4687 | // varargs |
4688 | if (isVarArg && MFI.hasVAStart()) { |
4689 | VarArgStyleRegisters(CCInfo, DAG, dl, Chain, ArgOffset: CCInfo.getStackSize(), |
4690 | TotalArgRegsSaveSize); |
4691 | if (AFI->isCmseNSEntryFunction()) { |
4692 | DiagnosticInfoUnsupported Diag( |
DAG.getMachineFunction().getFunction(),
"secure entry function must not be variadic", dl.getDebugLoc());
4695 | DAG.getContext()->diagnose(DI: Diag); |
4696 | } |
4697 | } |
4698 | |
4699 | unsigned StackArgSize = CCInfo.getStackSize(); |
4700 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; |
4701 | if (canGuaranteeTCO(CC: CallConv, GuaranteeTailCalls: TailCallOpt)) { |
4702 | // The only way to guarantee a tail call is if the callee restores its |
4703 | // argument area, but it must also keep the stack aligned when doing so. |
4704 | const DataLayout &DL = DAG.getDataLayout(); |
4705 | StackArgSize = alignTo(Size: StackArgSize, A: DL.getStackAlignment()); |
4706 | |
4707 | AFI->setArgumentStackToRestore(StackArgSize); |
4708 | } |
4709 | AFI->setArgumentStackSize(StackArgSize); |
4710 | |
4711 | if (CCInfo.getStackSize() > 0 && AFI->isCmseNSEntryFunction()) { |
4712 | DiagnosticInfoUnsupported Diag( |
DAG.getMachineFunction().getFunction(),
"secure entry function requires arguments on stack", dl.getDebugLoc());
4715 | DAG.getContext()->diagnose(DI: Diag); |
4716 | } |
4717 | |
4718 | return Chain; |
4719 | } |
4720 | |
4721 | /// isFloatingPointZero - Return true if this is +0.0. |
4722 | static bool isFloatingPointZero(SDValue Op) { |
4723 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Val&: Op)) |
4724 | return CFP->getValueAPF().isPosZero(); |
4725 | else if (ISD::isEXTLoad(N: Op.getNode()) || ISD::isNON_EXTLoad(N: Op.getNode())) { |
4726 | // Maybe this has already been legalized into the constant pool? |
4727 | if (Op.getOperand(i: 1).getOpcode() == ARMISD::Wrapper) { |
4728 | SDValue WrapperOp = Op.getOperand(i: 1).getOperand(i: 0); |
4729 | if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Val&: WrapperOp)) |
4730 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Val: CP->getConstVal())) |
4731 | return CFP->getValueAPF().isPosZero(); |
4732 | } |
4733 | } else if (Op->getOpcode() == ISD::BITCAST && |
4734 | Op->getValueType(ResNo: 0) == MVT::f64) { |
4735 | // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64) |
4736 | // created by LowerConstantFP(). |
4737 | SDValue BitcastOp = Op->getOperand(Num: 0); |
4738 | if (BitcastOp->getOpcode() == ARMISD::VMOVIMM && |
4739 | isNullConstant(V: BitcastOp->getOperand(Num: 0))) |
4740 | return true; |
4741 | } |
4742 | return false; |
4743 | } |
4744 | |
/// Returns the appropriate ARM CMP (cmp) and corresponding condition code
/// for the given operands.
4747 | SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, |
4748 | SDValue &ARMcc, SelectionDAG &DAG, |
4749 | const SDLoc &dl) const { |
4750 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Val: RHS.getNode())) { |
4751 | unsigned C = RHSC->getZExtValue(); |
4752 | if (!isLegalICmpImmediate(Imm: (int32_t)C)) { |
4753 | // Constant does not fit, try adjusting it by one. |
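// For example, in ARM mode "x < 0x101" (SETLT) can be rewritten below as
// "x <= 0x100" (SETLE): 0x100 is an encodable modified immediate while
// 0x101 is not, so the adjusted compare avoids materialising the constant.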
4754 | switch (CC) { |
4755 | default: break; |
4756 | case ISD::SETLT: |
4757 | case ISD::SETGE: |
4758 | if (C != 0x80000000 && isLegalICmpImmediate(Imm: C-1)) { |
4759 | CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; |
4760 | RHS = DAG.getConstant(Val: C - 1, DL: dl, VT: MVT::i32); |
4761 | } |
4762 | break; |
4763 | case ISD::SETULT: |
4764 | case ISD::SETUGE: |
4765 | if (C != 0 && isLegalICmpImmediate(Imm: C-1)) { |
4766 | CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; |
4767 | RHS = DAG.getConstant(Val: C - 1, DL: dl, VT: MVT::i32); |
4768 | } |
4769 | break; |
4770 | case ISD::SETLE: |
4771 | case ISD::SETGT: |
4772 | if (C != 0x7fffffff && isLegalICmpImmediate(Imm: C+1)) { |
4773 | CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; |
4774 | RHS = DAG.getConstant(Val: C + 1, DL: dl, VT: MVT::i32); |
4775 | } |
4776 | break; |
4777 | case ISD::SETULE: |
4778 | case ISD::SETUGT: |
4779 | if (C != 0xffffffff && isLegalICmpImmediate(Imm: C+1)) { |
4780 | CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; |
4781 | RHS = DAG.getConstant(Val: C + 1, DL: dl, VT: MVT::i32); |
4782 | } |
4783 | break; |
4784 | } |
4785 | } |
4786 | } else if ((ARM_AM::getShiftOpcForNode(Opcode: LHS.getOpcode()) != ARM_AM::no_shift) && |
4787 | (ARM_AM::getShiftOpcForNode(Opcode: RHS.getOpcode()) == ARM_AM::no_shift)) { |
4788 | // In ARM and Thumb-2, the compare instructions can shift their second |
4789 | // operand. |
4790 | CC = ISD::getSetCCSwappedOperands(Operation: CC); |
4791 | std::swap(a&: LHS, b&: RHS); |
4792 | } |
4793 | |
4794 | // Thumb1 has very limited immediate modes, so turning an "and" into a |
4795 | // shift can save multiple instructions. |
4796 | // |
4797 | // If we have (x & C1), and C1 is an appropriate mask, we can transform it |
4798 | // into "((x << n) >> n)". But that isn't necessarily profitable on its |
4799 | // own. If it's the operand to an unsigned comparison with an immediate, |
4800 | // we can eliminate one of the shifts: we transform |
4801 | // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)". |
4802 | // |
4803 | // We avoid transforming cases which aren't profitable due to encoding |
4804 | // details: |
4805 | // |
4806 | // 1. C2 fits into the immediate field of a cmp, and the transformed version |
4807 | // would not; in that case, we're essentially trading one immediate load for |
4808 | // another. |
4809 | // 2. C1 is 255 or 65535, so we can use uxtb or uxth. |
4810 | // 3. C2 is zero; we have other code for this special case. |
4811 | // |
4812 | // FIXME: Figure out profitability for Thumb2; we usually can't save an |
4813 | // instruction, since the AND is always one instruction anyway, but we could |
4814 | // use narrow instructions in some cases. |
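// For example, on Thumb1 "(x & 0x7fffffff) == 100" becomes
// "(x << 1) == 200": the shift replaces an AND whose mask would otherwise
// have to be materialised, and 200 still fits the 8-bit cmp immediate.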
4815 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND && |
4816 | LHS->hasOneUse() && isa<ConstantSDNode>(Val: LHS.getOperand(i: 1)) && |
4817 | LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(Val: RHS) && |
4818 | !isSignedIntSetCC(Code: CC)) { |
4819 | unsigned Mask = LHS.getConstantOperandVal(i: 1); |
4820 | auto *RHSC = cast<ConstantSDNode>(Val: RHS.getNode()); |
4821 | uint64_t RHSV = RHSC->getZExtValue(); |
4822 | if (isMask_32(Value: Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) { |
4823 | unsigned ShiftBits = llvm::countl_zero(Val: Mask); |
4824 | if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) { |
4825 | SDValue ShiftAmt = DAG.getConstant(Val: ShiftBits, DL: dl, VT: MVT::i32); |
4826 | LHS = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, N1: LHS.getOperand(i: 0), N2: ShiftAmt); |
4827 | RHS = DAG.getConstant(Val: RHSV << ShiftBits, DL: dl, VT: MVT::i32); |
4828 | } |
4829 | } |
4830 | } |
4831 | |
4832 | // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a |
4833 | // single "lsls x, c+1". The shift sets the "C" and "Z" flags the same |
4834 | // way a cmp would. |
4835 | // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and |
4836 | // some tweaks to the heuristics for the previous and->shift transform. |
4837 | // FIXME: Optimize cases where the LHS isn't a shift. |
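// For example, with c == 2 the single "lsls r0, r0, #3" leaves bit 31 of
// (x<<2) in C and sets Z only when the remaining bits are zero, so the HI
// condition (C set, Z clear) is exactly "(x<<2) > 0x80000000U".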
4838 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL && |
4839 | isa<ConstantSDNode>(Val: RHS) && RHS->getAsZExtVal() == 0x80000000U && |
4840 | CC == ISD::SETUGT && isa<ConstantSDNode>(Val: LHS.getOperand(i: 1)) && |
4841 | LHS.getConstantOperandVal(i: 1) < 31) { |
4842 | unsigned ShiftAmt = LHS.getConstantOperandVal(i: 1) + 1; |
4843 | SDValue Shift = DAG.getNode(Opcode: ARMISD::LSLS, DL: dl, |
4844 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
4845 | N1: LHS.getOperand(i: 0), |
4846 | N2: DAG.getConstant(Val: ShiftAmt, DL: dl, VT: MVT::i32)); |
4847 | SDValue Chain = DAG.getCopyToReg(Chain: DAG.getEntryNode(), dl, Reg: ARM::CPSR, |
4848 | N: Shift.getValue(R: 1), Glue: SDValue()); |
4849 | ARMcc = DAG.getConstant(Val: ARMCC::HI, DL: dl, VT: MVT::i32); |
4850 | return Chain.getValue(R: 1); |
4851 | } |
4852 | |
4853 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
4854 | |
4855 | // If the RHS is a constant zero then the V (overflow) flag will never be |
4856 | // set. This can allow us to simplify GE to PL or LT to MI, which can be |
4857 | // simpler for other passes (like the peephole optimiser) to deal with. |
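// For example, "x >= 0" depends only on the sign bit once V is known to be
// clear, so GE (N == V) collapses to PL (N clear), and LT likewise to MI.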
4858 | if (isNullConstant(V: RHS)) { |
4859 | switch (CondCode) { |
4860 | default: break; |
4861 | case ARMCC::GE: |
4862 | CondCode = ARMCC::PL; |
4863 | break; |
4864 | case ARMCC::LT: |
4865 | CondCode = ARMCC::MI; |
4866 | break; |
4867 | } |
4868 | } |
4869 | |
4870 | ARMISD::NodeType CompareType; |
4871 | switch (CondCode) { |
4872 | default: |
4873 | CompareType = ARMISD::CMP; |
4874 | break; |
4875 | case ARMCC::EQ: |
4876 | case ARMCC::NE: |
4877 | // Uses only Z Flag |
4878 | CompareType = ARMISD::CMPZ; |
4879 | break; |
4880 | } |
4881 | ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
4882 | return DAG.getNode(Opcode: CompareType, DL: dl, VT: MVT::Glue, N1: LHS, N2: RHS); |
4883 | } |
4884 | |
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
4886 | SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, |
4887 | SelectionDAG &DAG, const SDLoc &dl, |
4888 | bool Signaling) const { |
4889 | assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64); |
4890 | SDValue Cmp; |
4891 | if (!isFloatingPointZero(Op: RHS)) |
4892 | Cmp = DAG.getNode(Opcode: Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP, |
4893 | DL: dl, VT: MVT::Glue, N1: LHS, N2: RHS); |
4894 | else |
4895 | Cmp = DAG.getNode(Opcode: Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0, |
4896 | DL: dl, VT: MVT::Glue, Operand: LHS); |
4897 | return DAG.getNode(Opcode: ARMISD::FMSTAT, DL: dl, VT: MVT::Glue, Operand: Cmp); |
4898 | } |
4899 | |
4900 | /// duplicateCmp - Glue values can have only one use, so this function |
4901 | /// duplicates a comparison node. |
4902 | SDValue |
4903 | ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { |
4904 | unsigned Opc = Cmp.getOpcode(); |
4905 | SDLoc DL(Cmp); |
4906 | if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) |
4907 | return DAG.getNode(Opcode: Opc, DL, VT: MVT::Glue, N1: Cmp.getOperand(i: 0),N2: Cmp.getOperand(i: 1)); |
4908 | |
assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
4910 | Cmp = Cmp.getOperand(i: 0); |
4911 | Opc = Cmp.getOpcode(); |
4912 | if (Opc == ARMISD::CMPFP) |
4913 | Cmp = DAG.getNode(Opcode: Opc, DL, VT: MVT::Glue, N1: Cmp.getOperand(i: 0),N2: Cmp.getOperand(i: 1)); |
4914 | else { |
assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
4916 | Cmp = DAG.getNode(Opcode: Opc, DL, VT: MVT::Glue, Operand: Cmp.getOperand(i: 0)); |
4917 | } |
4918 | return DAG.getNode(Opcode: ARMISD::FMSTAT, DL, VT: MVT::Glue, Operand: Cmp); |
4919 | } |
4920 | |
4921 | // This function returns three things: the arithmetic computation itself |
4922 | // (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The |
4923 | // comparison and the condition code define the case in which the arithmetic |
4924 | // computation *does not* overflow. |
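// For ISD::SADDO, for instance, Value is the plain ADD and OverflowCmp is a
// CMP of Value against LHS; together with ARMCC::VC (overflow clear) this
// describes the case in which the signed addition did not overflow.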
4925 | std::pair<SDValue, SDValue> |
4926 | ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, |
4927 | SDValue &ARMcc) const { |
assert(Op.getValueType() == MVT::i32 && "Unsupported value type");
4929 | |
4930 | SDValue Value, OverflowCmp; |
4931 | SDValue LHS = Op.getOperand(i: 0); |
4932 | SDValue RHS = Op.getOperand(i: 1); |
4933 | SDLoc dl(Op); |
4934 | |
4935 | // FIXME: We are currently always generating CMPs because we don't support |
4936 | // generating CMN through the backend. This is not as good as the natural |
4937 | // CMP case because it causes a register dependency and cannot be folded |
4938 | // later. |
4939 | |
4940 | switch (Op.getOpcode()) { |
4941 | default: |
llvm_unreachable("Unknown overflow instruction!");
4943 | case ISD::SADDO: |
4944 | ARMcc = DAG.getConstant(Val: ARMCC::VC, DL: dl, VT: MVT::i32); |
4945 | Value = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
4946 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: MVT::Glue, N1: Value, N2: LHS); |
4947 | break; |
4948 | case ISD::UADDO: |
4949 | ARMcc = DAG.getConstant(Val: ARMCC::HS, DL: dl, VT: MVT::i32); |
4950 | // We use ADDC here to correspond to its use in LowerUnsignedALUO. |
4951 | // We do not use it in the USUBO case as Value may not be used. |
4952 | Value = DAG.getNode(Opcode: ARMISD::ADDC, DL: dl, |
4953 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: MVT::i32), N1: LHS, N2: RHS) |
4954 | .getValue(R: 0); |
4955 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: MVT::Glue, N1: Value, N2: LHS); |
4956 | break; |
4957 | case ISD::SSUBO: |
4958 | ARMcc = DAG.getConstant(Val: ARMCC::VC, DL: dl, VT: MVT::i32); |
4959 | Value = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
4960 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: MVT::Glue, N1: LHS, N2: RHS); |
4961 | break; |
4962 | case ISD::USUBO: |
4963 | ARMcc = DAG.getConstant(Val: ARMCC::HS, DL: dl, VT: MVT::i32); |
4964 | Value = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: Op.getValueType(), N1: LHS, N2: RHS); |
4965 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: MVT::Glue, N1: LHS, N2: RHS); |
4966 | break; |
4967 | case ISD::UMULO: |
4968 | // We generate a UMUL_LOHI and then check if the high word is 0. |
4969 | ARMcc = DAG.getConstant(Val: ARMCC::EQ, DL: dl, VT: MVT::i32); |
4970 | Value = DAG.getNode(Opcode: ISD::UMUL_LOHI, DL: dl, |
4971 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: Op.getValueType()), |
4972 | N1: LHS, N2: RHS); |
4973 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: MVT::Glue, N1: Value.getValue(R: 1), |
4974 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
4975 | Value = Value.getValue(R: 0); // We only want the low 32 bits for the result. |
4976 | break; |
4977 | case ISD::SMULO: |
4978 | // We generate a SMUL_LOHI and then check if all the bits of the high word |
4979 | // are the same as the sign bit of the low word. |
4980 | ARMcc = DAG.getConstant(Val: ARMCC::EQ, DL: dl, VT: MVT::i32); |
4981 | Value = DAG.getNode(Opcode: ISD::SMUL_LOHI, DL: dl, |
4982 | VTList: DAG.getVTList(VT1: Op.getValueType(), VT2: Op.getValueType()), |
4983 | N1: LHS, N2: RHS); |
4984 | OverflowCmp = DAG.getNode(Opcode: ARMISD::CMP, DL: dl, VT: MVT::Glue, N1: Value.getValue(R: 1), |
4985 | N2: DAG.getNode(Opcode: ISD::SRA, DL: dl, VT: Op.getValueType(), |
4986 | N1: Value.getValue(R: 0), |
4987 | N2: DAG.getConstant(Val: 31, DL: dl, VT: MVT::i32))); |
4988 | Value = Value.getValue(R: 0); // We only want the low 32 bits for the result. |
4989 | break; |
4990 | } // switch (...) |
4991 | |
4992 | return std::make_pair(x&: Value, y&: OverflowCmp); |
4993 | } |
4994 | |
4995 | SDValue |
4996 | ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const { |
4997 | // Let legalize expand this if it isn't a legal type yet. |
4998 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT: Op.getValueType())) |
4999 | return SDValue(); |
5000 | |
5001 | SDValue Value, OverflowCmp; |
5002 | SDValue ARMcc; |
5003 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); |
5004 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
5005 | SDLoc dl(Op); |
5006 | // We use 0 and 1 as false and true values. |
5007 | SDValue TVal = DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32); |
5008 | SDValue FVal = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
5009 | EVT VT = Op.getValueType(); |
5010 | |
5011 | SDValue Overflow = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: TVal, N2: FVal, |
5012 | N3: ARMcc, N4: CCR, N5: OverflowCmp); |
5013 | |
5014 | SDVTList VTs = DAG.getVTList(VT1: Op.getValueType(), VT2: MVT::i32); |
5015 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: VTs, N1: Value, N2: Overflow); |
5016 | } |
5017 | |
5018 | static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, |
5019 | SelectionDAG &DAG) { |
5020 | SDLoc DL(BoolCarry); |
5021 | EVT CarryVT = BoolCarry.getValueType(); |
5022 | |
5023 | // This converts the boolean value carry into the carry flag by doing |
5024 | // ARMISD::SUBC Carry, 1 |
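// Subtracting 1 borrows exactly when the boolean is 0, and ARM's carry flag
// is the inverse of borrow, so after the SUBC the C flag equals the original
// 0/1 value.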
5025 | SDValue Carry = DAG.getNode(Opcode: ARMISD::SUBC, DL, |
5026 | VTList: DAG.getVTList(VT1: CarryVT, VT2: MVT::i32), |
5027 | N1: BoolCarry, N2: DAG.getConstant(Val: 1, DL, VT: CarryVT)); |
5028 | return Carry.getValue(R: 1); |
5029 | } |
5030 | |
5031 | static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, |
5032 | SelectionDAG &DAG) { |
5033 | SDLoc DL(Flags); |
5034 | |
// Now convert the carry flag into a boolean carry. We do this
// using ARMISD::ADDE 0, 0, Carry
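// ADDE computes 0 + 0 + C, so the i32 result is 0 or 1, mirroring the flag.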
5037 | return DAG.getNode(Opcode: ARMISD::ADDE, DL, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), |
5038 | N1: DAG.getConstant(Val: 0, DL, VT: MVT::i32), |
5039 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32), N3: Flags); |
5040 | } |
5041 | |
5042 | SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op, |
5043 | SelectionDAG &DAG) const { |
5044 | // Let legalize expand this if it isn't a legal type yet. |
5045 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT: Op.getValueType())) |
5046 | return SDValue(); |
5047 | |
5048 | SDValue LHS = Op.getOperand(i: 0); |
5049 | SDValue RHS = Op.getOperand(i: 1); |
5050 | SDLoc dl(Op); |
5051 | |
5052 | EVT VT = Op.getValueType(); |
5053 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
5054 | SDValue Value; |
5055 | SDValue Overflow; |
5056 | switch (Op.getOpcode()) { |
5057 | default: |
llvm_unreachable("Unknown overflow instruction!");
5059 | case ISD::UADDO: |
5060 | Value = DAG.getNode(Opcode: ARMISD::ADDC, DL: dl, VTList: VTs, N1: LHS, N2: RHS); |
5061 | // Convert the carry flag into a boolean value. |
5062 | Overflow = ConvertCarryFlagToBooleanCarry(Flags: Value.getValue(R: 1), VT, DAG); |
5063 | break; |
5064 | case ISD::USUBO: { |
5065 | Value = DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: VTs, N1: LHS, N2: RHS); |
5066 | // Convert the carry flag into a boolean value. |
5067 | Overflow = ConvertCarryFlagToBooleanCarry(Flags: Value.getValue(R: 1), VT, DAG); |
5068 | // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow |
5069 | // value. So compute 1 - C. |
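// For example, 3 - 5 borrows: SUBC leaves C clear, the ADDE above
// reconstructs 0, and 1 - 0 == 1 correctly reports the unsigned overflow.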
5070 | Overflow = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
5071 | N1: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32), N2: Overflow); |
5072 | break; |
5073 | } |
5074 | } |
5075 | |
5076 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: VTs, N1: Value, N2: Overflow); |
5077 | } |
5078 | |
5079 | static SDValue LowerADDSUBSAT(SDValue Op, SelectionDAG &DAG, |
5080 | const ARMSubtarget *Subtarget) { |
5081 | EVT VT = Op.getValueType(); |
5082 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP() || Subtarget->isThumb1Only()) |
5083 | return SDValue(); |
5084 | if (!VT.isSimple()) |
5085 | return SDValue(); |
5086 | |
5087 | unsigned NewOpcode; |
5088 | switch (VT.getSimpleVT().SimpleTy) { |
5089 | default: |
5090 | return SDValue(); |
5091 | case MVT::i8: |
5092 | switch (Op->getOpcode()) { |
5093 | case ISD::UADDSAT: |
5094 | NewOpcode = ARMISD::UQADD8b; |
5095 | break; |
5096 | case ISD::SADDSAT: |
5097 | NewOpcode = ARMISD::QADD8b; |
5098 | break; |
5099 | case ISD::USUBSAT: |
5100 | NewOpcode = ARMISD::UQSUB8b; |
5101 | break; |
5102 | case ISD::SSUBSAT: |
5103 | NewOpcode = ARMISD::QSUB8b; |
5104 | break; |
5105 | } |
5106 | break; |
5107 | case MVT::i16: |
5108 | switch (Op->getOpcode()) { |
5109 | case ISD::UADDSAT: |
5110 | NewOpcode = ARMISD::UQADD16b; |
5111 | break; |
5112 | case ISD::SADDSAT: |
5113 | NewOpcode = ARMISD::QADD16b; |
5114 | break; |
5115 | case ISD::USUBSAT: |
5116 | NewOpcode = ARMISD::UQSUB16b; |
5117 | break; |
5118 | case ISD::SSUBSAT: |
5119 | NewOpcode = ARMISD::QSUB16b; |
5120 | break; |
5121 | } |
5122 | break; |
5123 | } |
5124 | |
5125 | SDLoc dl(Op); |
5126 | SDValue Add = |
5127 | DAG.getNode(Opcode: NewOpcode, DL: dl, VT: MVT::i32, |
5128 | N1: DAG.getSExtOrTrunc(Op: Op->getOperand(Num: 0), DL: dl, VT: MVT::i32), |
5129 | N2: DAG.getSExtOrTrunc(Op: Op->getOperand(Num: 1), DL: dl, VT: MVT::i32)); |
5130 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT, Operand: Add); |
5131 | } |
5132 | |
5133 | SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
5134 | SDValue Cond = Op.getOperand(i: 0); |
5135 | SDValue SelectTrue = Op.getOperand(i: 1); |
5136 | SDValue SelectFalse = Op.getOperand(i: 2); |
5137 | SDLoc dl(Op); |
5138 | unsigned Opc = Cond.getOpcode(); |
5139 | |
5140 | if (Cond.getResNo() == 1 && |
5141 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
5142 | Opc == ISD::USUBO)) { |
5143 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT: Cond->getValueType(ResNo: 0))) |
5144 | return SDValue(); |
5145 | |
5146 | SDValue Value, OverflowCmp; |
5147 | SDValue ARMcc; |
5148 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: Cond, DAG, ARMcc); |
5149 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
5150 | EVT VT = Op.getValueType(); |
5151 | |
5152 | return getCMOV(dl, VT, FalseVal: SelectTrue, TrueVal: SelectFalse, ARMcc, CCR, |
5153 | Cmp: OverflowCmp, DAG); |
5154 | } |
5155 | |
5156 | // Convert: |
5157 | // |
5158 | // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) |
5159 | // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) |
5160 | // |
5161 | if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { |
5162 | const ConstantSDNode *CMOVTrue = |
5163 | dyn_cast<ConstantSDNode>(Val: Cond.getOperand(i: 0)); |
5164 | const ConstantSDNode *CMOVFalse = |
5165 | dyn_cast<ConstantSDNode>(Val: Cond.getOperand(i: 1)); |
5166 | |
5167 | if (CMOVTrue && CMOVFalse) { |
5168 | unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); |
5169 | unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); |
5170 | |
5171 | SDValue True; |
5172 | SDValue False; |
5173 | if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { |
5174 | True = SelectTrue; |
5175 | False = SelectFalse; |
5176 | } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { |
5177 | True = SelectFalse; |
5178 | False = SelectTrue; |
5179 | } |
5180 | |
5181 | if (True.getNode() && False.getNode()) { |
5182 | EVT VT = Op.getValueType(); |
5183 | SDValue ARMcc = Cond.getOperand(i: 2); |
5184 | SDValue CCR = Cond.getOperand(i: 3); |
5185 | SDValue Cmp = duplicateCmp(Cmp: Cond.getOperand(i: 4), DAG); |
5186 | assert(True.getValueType() == VT); |
5187 | return getCMOV(dl, VT, FalseVal: True, TrueVal: False, ARMcc, CCR, Cmp, DAG); |
5188 | } |
5189 | } |
5190 | } |
5191 | |
5192 | // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the |
5193 | // undefined bits before doing a full-word comparison with zero. |
5194 | Cond = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: Cond.getValueType(), N1: Cond, |
5195 | N2: DAG.getConstant(Val: 1, DL: dl, VT: Cond.getValueType())); |
5196 | |
5197 | return DAG.getSelectCC(DL: dl, LHS: Cond, |
5198 | RHS: DAG.getConstant(Val: 0, DL: dl, VT: Cond.getValueType()), |
5199 | True: SelectTrue, False: SelectFalse, Cond: ISD::SETNE); |
5200 | } |
5201 | |
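// A sketch of two simple cases: SETOGT keeps GT with no swaps, so
// "a > b ? t : f" compares (a, b) and selects t on GT; SETULT ends up as GE
// with the VSEL operands swapped, so "a < b or unordered ? t : f" compares
// (a, b) and selects f on GE, t otherwise.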
5202 | static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
5203 | bool &swpCmpOps, bool &swpVselOps) { |
5204 | // Start by selecting the GE condition code for opcodes that return true for |
5205 | // 'equality' |
5206 | if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || |
5207 | CC == ISD::SETULE || CC == ISD::SETGE || CC == ISD::SETLE) |
5208 | CondCode = ARMCC::GE; |
5209 | |
5210 | // and GT for opcodes that return false for 'equality'. |
5211 | else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || |
5212 | CC == ISD::SETULT || CC == ISD::SETGT || CC == ISD::SETLT) |
5213 | CondCode = ARMCC::GT; |
5214 | |
5215 | // Since we are constrained to GE/GT, if the opcode contains 'less', we need |
5216 | // to swap the compare operands. |
5217 | if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || |
5218 | CC == ISD::SETULT || CC == ISD::SETLE || CC == ISD::SETLT) |
5219 | swpCmpOps = true; |
5220 | |
5221 | // Both GT and GE are ordered comparisons, and return false for 'unordered'. |
5222 | // If we have an unordered opcode, we need to swap the operands to the VSEL |
5223 | // instruction (effectively negating the condition). |
5224 | // |
5225 | // This also has the effect of swapping which one of 'less' or 'greater' |
5226 | // returns true, so we also swap the compare operands. It also switches |
5227 | // whether we return true for 'equality', so we compensate by picking the |
5228 | // opposite condition code to our original choice. |
5229 | if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || |
5230 | CC == ISD::SETUGT) { |
5231 | swpCmpOps = !swpCmpOps; |
5232 | swpVselOps = !swpVselOps; |
5233 | CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; |
5234 | } |
5235 | |
5236 | // 'ordered' is 'anything but unordered', so use the VS condition code and |
5237 | // swap the VSEL operands. |
5238 | if (CC == ISD::SETO) { |
5239 | CondCode = ARMCC::VS; |
5240 | swpVselOps = true; |
5241 | } |
5242 | |
5243 | // 'unordered or not equal' is 'anything but equal', so use the EQ condition |
5244 | // code and swap the VSEL operands. Also do this if we don't care about the |
5245 | // unordered case. |
5246 | if (CC == ISD::SETUNE || CC == ISD::SETNE) { |
5247 | CondCode = ARMCC::EQ; |
5248 | swpVselOps = true; |
5249 | } |
5250 | } |
5251 | |
5252 | SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, |
5253 | SDValue TrueVal, SDValue ARMcc, SDValue CCR, |
5254 | SDValue Cmp, SelectionDAG &DAG) const { |
5255 | if (!Subtarget->hasFP64() && VT == MVT::f64) { |
5256 | FalseVal = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
5257 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: FalseVal); |
5258 | TrueVal = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
5259 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: TrueVal); |
5260 | |
5261 | SDValue TrueLow = TrueVal.getValue(R: 0); |
5262 | SDValue TrueHigh = TrueVal.getValue(R: 1); |
5263 | SDValue FalseLow = FalseVal.getValue(R: 0); |
5264 | SDValue FalseHigh = FalseVal.getValue(R: 1); |
5265 | |
5266 | SDValue Low = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: FalseLow, N2: TrueLow, |
5267 | N3: ARMcc, N4: CCR, N5: Cmp); |
5268 | SDValue High = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT: MVT::i32, N1: FalseHigh, N2: TrueHigh, |
5269 | N3: ARMcc, N4: CCR, N5: duplicateCmp(Cmp, DAG)); |
5270 | |
5271 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Low, N2: High); |
5272 | } else { |
5273 | return DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: FalseVal, N2: TrueVal, N3: ARMcc, N4: CCR, |
5274 | N5: Cmp); |
5275 | } |
5276 | } |
5277 | |
5278 | static bool isGTorGE(ISD::CondCode CC) { |
5279 | return CC == ISD::SETGT || CC == ISD::SETGE; |
5280 | } |
5281 | |
5282 | static bool isLTorLE(ISD::CondCode CC) { |
5283 | return CC == ISD::SETLT || CC == ISD::SETLE; |
5284 | } |
5285 | |
5286 | // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating. |
5287 | // All of these conditions (and their <= and >= counterparts) will do: |
5288 | // x < k ? k : x |
5289 | // x > k ? x : k |
5290 | // k < x ? x : k |
5291 | // k > x ? k : x |
5292 | static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, |
5293 | const SDValue TrueVal, const SDValue FalseVal, |
5294 | const ISD::CondCode CC, const SDValue K) { |
5295 | return (isGTorGE(CC) && |
5296 | ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) || |
5297 | (isLTorLE(CC) && |
5298 | ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))); |
5299 | } |
5300 | |
5301 | // Check if two chained conditionals could be converted into SSAT or USAT. |
5302 | // |
// SSAT can replace a set of two conditional selectors that bound a number to
// an interval of the form [~k, k] when k + 1 is a power of 2. Here are some
// examples:
5305 | // |
5306 | // x < -k ? -k : (x > k ? k : x) |
5307 | // x < -k ? -k : (x < k ? x : k) |
5308 | // x > -k ? (x > k ? k : x) : -k |
5309 | // x < k ? (x < -k ? -k : x) : k |
5310 | // etc. |
5311 | // |
5312 | // LLVM canonicalizes these to either a min(max()) or a max(min()) |
5313 | // pattern. This function tries to match one of these and will return a SSAT |
5314 | // node if successful. |
5315 | // |
// USAT works similarly to SSAT but bounds to the interval [0, k], where k + 1
// is a power of 2.
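// For example, "x < -128 ? -128 : (x > 127 ? 127 : x)" clamps x to
// [-128, 127] and can be emitted as a single signed saturate to 8 bits,
// while a clamp to [0, 255] maps to the corresponding unsigned saturate.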
5318 | static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) { |
5319 | EVT VT = Op.getValueType(); |
5320 | SDValue V1 = Op.getOperand(i: 0); |
5321 | SDValue K1 = Op.getOperand(i: 1); |
5322 | SDValue TrueVal1 = Op.getOperand(i: 2); |
5323 | SDValue FalseVal1 = Op.getOperand(i: 3); |
5324 | ISD::CondCode CC1 = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
5325 | |
5326 | const SDValue Op2 = isa<ConstantSDNode>(Val: TrueVal1) ? FalseVal1 : TrueVal1; |
5327 | if (Op2.getOpcode() != ISD::SELECT_CC) |
5328 | return SDValue(); |
5329 | |
5330 | SDValue V2 = Op2.getOperand(i: 0); |
5331 | SDValue K2 = Op2.getOperand(i: 1); |
5332 | SDValue TrueVal2 = Op2.getOperand(i: 2); |
5333 | SDValue FalseVal2 = Op2.getOperand(i: 3); |
5334 | ISD::CondCode CC2 = cast<CondCodeSDNode>(Val: Op2.getOperand(i: 4))->get(); |
5335 | |
5336 | SDValue V1Tmp = V1; |
5337 | SDValue V2Tmp = V2; |
5338 | |
5339 | // Check that the registers and the constants match a max(min()) or min(max()) |
5340 | // pattern |
5341 | if (V1Tmp != TrueVal1 || V2Tmp != TrueVal2 || K1 != FalseVal1 || |
5342 | K2 != FalseVal2 || |
5343 | !((isGTorGE(CC: CC1) && isLTorLE(CC: CC2)) || (isLTorLE(CC: CC1) && isGTorGE(CC: CC2)))) |
5344 | return SDValue(); |
5345 | |
5346 | // Check that the constant in the lower-bound check is |
5347 | // the opposite of the constant in the upper-bound check |
5348 | // in 1's complement. |
5349 | if (!isa<ConstantSDNode>(Val: K1) || !isa<ConstantSDNode>(Val: K2)) |
5350 | return SDValue(); |
5351 | |
5352 | int64_t Val1 = cast<ConstantSDNode>(Val&: K1)->getSExtValue(); |
5353 | int64_t Val2 = cast<ConstantSDNode>(Val&: K2)->getSExtValue(); |
5354 | int64_t PosVal = std::max(a: Val1, b: Val2); |
5355 | int64_t NegVal = std::min(a: Val1, b: Val2); |
5356 | |
5357 | if (!((Val1 > Val2 && isLTorLE(CC: CC1)) || (Val1 < Val2 && isLTorLE(CC: CC2))) || |
5358 | !isPowerOf2_64(Value: PosVal + 1)) |
5359 | return SDValue(); |
5360 | |
// Handle the difference between USAT (unsigned) and SSAT (signed)
// saturation.
// At this point, PosVal is guaranteed to be positive.
5364 | uint64_t K = PosVal; |
5365 | SDLoc dl(Op); |
5366 | if (Val1 == ~Val2) |
5367 | return DAG.getNode(Opcode: ARMISD::SSAT, DL: dl, VT, N1: V2Tmp, |
5368 | N2: DAG.getConstant(Val: llvm::countr_one(Value: K), DL: dl, VT)); |
5369 | if (NegVal == 0) |
5370 | return DAG.getNode(Opcode: ARMISD::USAT, DL: dl, VT, N1: V2Tmp, |
5371 | N2: DAG.getConstant(Val: llvm::countr_one(Value: K), DL: dl, VT)); |
5372 | |
5373 | return SDValue(); |
5374 | } |
5375 | |
5376 | // Check if a condition of the type x < k ? k : x can be converted into a |
5377 | // bit operation instead of conditional moves. |
5378 | // Currently this is allowed given: |
5379 | // - The conditions and values match up |
5380 | // - k is 0 or -1 (all ones) |
// This function will not check the last condition; that's up to the caller.
// It returns true if the transformation can be made, and in that case
// returns x in V, and k in SatK.
5384 | static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, |
5385 | SDValue &SatK) |
5386 | { |
5387 | SDValue LHS = Op.getOperand(i: 0); |
5388 | SDValue RHS = Op.getOperand(i: 1); |
5389 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
5390 | SDValue TrueVal = Op.getOperand(i: 2); |
5391 | SDValue FalseVal = Op.getOperand(i: 3); |
5392 | |
5393 | SDValue *K = isa<ConstantSDNode>(Val: LHS) ? &LHS : isa<ConstantSDNode>(Val: RHS) |
5394 | ? &RHS |
5395 | : nullptr; |
5396 | |
// No constant operand in the comparison, early out
5398 | if (!K) |
5399 | return false; |
5400 | |
5401 | SDValue KTmp = isa<ConstantSDNode>(Val: TrueVal) ? TrueVal : FalseVal; |
5402 | V = (KTmp == TrueVal) ? FalseVal : TrueVal; |
5403 | SDValue VTmp = (K && *K == LHS) ? RHS : LHS; |
5404 | |
// If the constant in the compare and in the select do not match, or the
// variable in each does not, early out
5407 | if (*K != KTmp || V != VTmp) |
5408 | return false; |
5409 | |
5410 | if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, K: *K)) { |
5411 | SatK = *K; |
5412 | return true; |
5413 | } |
5414 | |
5415 | return false; |
5416 | } |
5417 | |
5418 | bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const { |
5419 | if (VT == MVT::f32) |
5420 | return !Subtarget->hasVFP2Base(); |
5421 | if (VT == MVT::f64) |
5422 | return !Subtarget->hasFP64(); |
5423 | if (VT == MVT::f16) |
5424 | return !Subtarget->hasFullFP16(); |
5425 | return false; |
5426 | } |
5427 | |
5428 | SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { |
5429 | EVT VT = Op.getValueType(); |
5430 | SDLoc dl(Op); |
5431 | |
5432 | // Try to convert two saturating conditional selects into a single SSAT |
5433 | if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) |
5434 | if (SDValue SatValue = LowerSaturatingConditional(Op, DAG)) |
5435 | return SatValue; |
5436 | |
// Try to convert expressions of the form x < k ? k : x (and similar forms)
// into more efficient bit operations, which is possible when k is 0 or -1.
// On ARM and Thumb-2, which have a flexible operand 2, this results in a
// single instruction. On Thumb the shift and the bit operation will be two
// instructions.
// Only allow this transformation on full-width (32-bit) operations.
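// For example, "x < 0 ? 0 : x" becomes "x & ~(x >> 31)" and
// "x < -1 ? -1 : x" becomes "x | (x >> 31)", smearing the sign bit across
// the word instead of using a compare and a conditional move.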
5443 | SDValue LowerSatConstant; |
5444 | SDValue SatValue; |
5445 | if (VT == MVT::i32 && |
5446 | isLowerSaturatingConditional(Op, V&: SatValue, SatK&: LowerSatConstant)) { |
5447 | SDValue ShiftV = DAG.getNode(Opcode: ISD::SRA, DL: dl, VT, N1: SatValue, |
5448 | N2: DAG.getConstant(Val: 31, DL: dl, VT)); |
5449 | if (isNullConstant(V: LowerSatConstant)) { |
5450 | SDValue NotShiftV = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT, N1: ShiftV, |
5451 | N2: DAG.getAllOnesConstant(DL: dl, VT)); |
5452 | return DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: SatValue, N2: NotShiftV); |
5453 | } else if (isAllOnesConstant(V: LowerSatConstant)) |
5454 | return DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: SatValue, N2: ShiftV); |
5455 | } |
5456 | |
5457 | SDValue LHS = Op.getOperand(i: 0); |
5458 | SDValue RHS = Op.getOperand(i: 1); |
5459 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 4))->get(); |
5460 | SDValue TrueVal = Op.getOperand(i: 2); |
5461 | SDValue FalseVal = Op.getOperand(i: 3); |
5462 | ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(Val&: FalseVal); |
5463 | ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(Val&: TrueVal); |
5464 | |
5465 | if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal && |
5466 | LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) { |
5467 | unsigned TVal = CTVal->getZExtValue(); |
5468 | unsigned FVal = CFVal->getZExtValue(); |
5469 | unsigned Opcode = 0; |
5470 | |
5471 | if (TVal == ~FVal) { |
5472 | Opcode = ARMISD::CSINV; |
5473 | } else if (TVal == ~FVal + 1) { |
5474 | Opcode = ARMISD::CSNEG; |
5475 | } else if (TVal + 1 == FVal) { |
5476 | Opcode = ARMISD::CSINC; |
5477 | } else if (TVal == FVal + 1) { |
5478 | Opcode = ARMISD::CSINC; |
5479 | std::swap(a&: TrueVal, b&: FalseVal); |
5480 | std::swap(a&: TVal, b&: FVal); |
5481 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
5482 | } |
5483 | |
5484 | if (Opcode) { |
// If one of the constants is cheaper than the other, materialise the
// cheaper one and let the csel generate the other.
5487 | if (Opcode != ARMISD::CSINC && |
5488 | HasLowerConstantMaterializationCost(Val1: FVal, Val2: TVal, Subtarget)) { |
5489 | std::swap(a&: TrueVal, b&: FalseVal); |
5490 | std::swap(a&: TVal, b&: FVal); |
5491 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
5492 | } |
5493 | |
// Attempt to use ZR, checking whether TVal is 0 and possibly inverting the
// condition to get there. CSINC is not invertible like the other two
// (~(~a) == a, -(-a) == a, but (a+1)+1 != a).
5497 | if (FVal == 0 && Opcode != ARMISD::CSINC) { |
5498 | std::swap(a&: TrueVal, b&: FalseVal); |
5499 | std::swap(a&: TVal, b&: FVal); |
5500 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
5501 | } |
5502 | |
5503 | // Drops F's value because we can get it by inverting/negating TVal. |
5504 | FalseVal = TrueVal; |
5505 | |
5506 | SDValue ARMcc; |
5507 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
5508 | EVT VT = TrueVal.getValueType(); |
5509 | return DAG.getNode(Opcode, DL: dl, VT, N1: TrueVal, N2: FalseVal, N3: ARMcc, N4: Cmp); |
5510 | } |
5511 | } |
5512 | |
5513 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
5514 | DAG.getTargetLoweringInfo().softenSetCCOperands( |
5515 | DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS); |
5516 | |
5517 | // If softenSetCCOperands only returned one value, we should compare it to |
5518 | // zero. |
5519 | if (!RHS.getNode()) { |
5520 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
5521 | CC = ISD::SETNE; |
5522 | } |
5523 | } |
5524 | |
5525 | if (LHS.getValueType() == MVT::i32) { |
5526 | // Try to generate VSEL on ARMv8. |
5527 | // The VSEL instruction can't use all the usual ARM condition |
5528 | // codes: it only has two bits to select the condition code, so it's |
5529 | // constrained to use only GE, GT, VS and EQ. |
5530 | // |
5531 | // To implement all the various ISD::SETXXX opcodes, we sometimes need to |
5532 | // swap the operands of the previous compare instruction (effectively |
5533 | // inverting the compare condition, swapping 'less' and 'greater') and |
5534 | // sometimes need to swap the operands to the VSEL (which inverts the |
5535 | // condition in the sense of firing whenever the previous condition didn't) |
5536 | if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 || |
5537 | TrueVal.getValueType() == MVT::f32 || |
5538 | TrueVal.getValueType() == MVT::f64)) { |
5539 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
5540 | if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || |
5541 | CondCode == ARMCC::VC || CondCode == ARMCC::NE) { |
5542 | CC = ISD::getSetCCInverse(Operation: CC, Type: LHS.getValueType()); |
5543 | std::swap(a&: TrueVal, b&: FalseVal); |
5544 | } |
5545 | } |
5546 | |
5547 | SDValue ARMcc; |
5548 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
5549 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
// Choose GE over PL, which vsel does not support
5551 | if (ARMcc->getAsZExtVal() == ARMCC::PL) |
5552 | ARMcc = DAG.getConstant(Val: ARMCC::GE, DL: dl, VT: MVT::i32); |
5553 | return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); |
5554 | } |
5555 | |
5556 | ARMCC::CondCodes CondCode, CondCode2; |
5557 | FPCCToARMCC(CC, CondCode, CondCode2); |
5558 | |
5559 | // Normalize the fp compare. If RHS is zero we prefer to keep it there so we |
5560 | // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we |
5561 | // must use VSEL (limited condition codes), due to not having conditional f16 |
5562 | // moves. |
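// For example, an f32 compare against +0.0 can use the zero-comparing form
// of the compare (roughly "vcmp.f32 Sd, #0.0") instead of first
// materialising a zero in a register.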
5563 | if (Subtarget->hasFPARMv8Base() && |
5564 | !(isFloatingPointZero(Op: RHS) && TrueVal.getValueType() != MVT::f16) && |
5565 | (TrueVal.getValueType() == MVT::f16 || |
5566 | TrueVal.getValueType() == MVT::f32 || |
5567 | TrueVal.getValueType() == MVT::f64)) { |
5568 | bool swpCmpOps = false; |
5569 | bool swpVselOps = false; |
5570 | checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); |
5571 | |
5572 | if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || |
5573 | CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { |
5574 | if (swpCmpOps) |
5575 | std::swap(a&: LHS, b&: RHS); |
5576 | if (swpVselOps) |
5577 | std::swap(a&: TrueVal, b&: FalseVal); |
5578 | } |
5579 | } |
5580 | |
5581 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
5582 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); |
5583 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
5584 | SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); |
5585 | if (CondCode2 != ARMCC::AL) { |
5586 | SDValue ARMcc2 = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
5587 | // FIXME: Needs another CMP because flag can have but one use. |
5588 | SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); |
5589 | Result = getCMOV(dl, VT, FalseVal: Result, TrueVal, ARMcc: ARMcc2, CCR, Cmp: Cmp2, DAG); |
5590 | } |
5591 | return Result; |
5592 | } |
5593 | |
5594 | /// canChangeToInt - Given the fp compare operand, return true if it is suitable |
5595 | /// to morph to an integer compare sequence. |
5596 | static bool canChangeToInt(SDValue Op, bool &SeenZero, |
5597 | const ARMSubtarget *Subtarget) { |
5598 | SDNode *N = Op.getNode(); |
5599 | if (!N->hasOneUse()) |
5600 | // Otherwise it requires moving the value from fp to integer registers. |
5601 | return false; |
5602 | if (!N->getNumValues()) |
5603 | return false; |
5604 | EVT VT = Op.getValueType(); |
5605 | if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) |
5606 | // f32 case is generally profitable. f64 case only makes sense when vcmpe + |
5607 | // vmrs are very slow, e.g. cortex-a8. |
5608 | return false; |
5609 | |
5610 | if (isFloatingPointZero(Op)) { |
5611 | SeenZero = true; |
5612 | return true; |
5613 | } |
5614 | return ISD::isNormalLoad(N); |
5615 | } |
5616 | |
5617 | static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { |
5618 | if (isFloatingPointZero(Op)) |
5619 | return DAG.getConstant(Val: 0, DL: SDLoc(Op), VT: MVT::i32); |
5620 | |
5621 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Val&: Op)) |
5622 | return DAG.getLoad(VT: MVT::i32, dl: SDLoc(Op), Chain: Ld->getChain(), Ptr: Ld->getBasePtr(), |
5623 | PtrInfo: Ld->getPointerInfo(), Alignment: Ld->getAlign(), |
5624 | MMOFlags: Ld->getMemOperand()->getFlags()); |
5625 | |
llvm_unreachable("Unknown VFP cmp argument!");
5627 | } |
5628 | |
5629 | static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, |
5630 | SDValue &RetVal1, SDValue &RetVal2) { |
5631 | SDLoc dl(Op); |
5632 | |
5633 | if (isFloatingPointZero(Op)) { |
5634 | RetVal1 = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
5635 | RetVal2 = DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32); |
5636 | return; |
5637 | } |
5638 | |
5639 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Val&: Op)) { |
5640 | SDValue Ptr = Ld->getBasePtr(); |
5641 | RetVal1 = |
5642 | DAG.getLoad(VT: MVT::i32, dl, Chain: Ld->getChain(), Ptr, PtrInfo: Ld->getPointerInfo(), |
5643 | Alignment: Ld->getAlign(), MMOFlags: Ld->getMemOperand()->getFlags()); |
5644 | |
5645 | EVT PtrType = Ptr.getValueType(); |
5646 | SDValue NewPtr = DAG.getNode(Opcode: ISD::ADD, DL: dl, |
5647 | VT: PtrType, N1: Ptr, N2: DAG.getConstant(Val: 4, DL: dl, VT: PtrType)); |
5648 | RetVal2 = DAG.getLoad(VT: MVT::i32, dl, Chain: Ld->getChain(), Ptr: NewPtr, |
5649 | PtrInfo: Ld->getPointerInfo().getWithOffset(O: 4), |
5650 | Alignment: commonAlignment(A: Ld->getAlign(), Offset: 4), |
5651 | MMOFlags: Ld->getMemOperand()->getFlags()); |
5652 | return; |
5653 | } |
5654 | |
llvm_unreachable("Unknown VFP cmp argument!");
5656 | } |
5657 | |
5658 | /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some |
5659 | /// f32 and even f64 comparisons to integer ones. |
5660 | SDValue |
5661 | ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { |
5662 | SDValue Chain = Op.getOperand(i: 0); |
5663 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 1))->get(); |
5664 | SDValue LHS = Op.getOperand(i: 2); |
5665 | SDValue RHS = Op.getOperand(i: 3); |
5666 | SDValue Dest = Op.getOperand(i: 4); |
5667 | SDLoc dl(Op); |
5668 | |
5669 | bool LHSSeenZero = false; |
5670 | bool LHSOk = canChangeToInt(Op: LHS, SeenZero&: LHSSeenZero, Subtarget); |
5671 | bool RHSSeenZero = false; |
5672 | bool RHSOk = canChangeToInt(Op: RHS, SeenZero&: RHSSeenZero, Subtarget); |
5673 | if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { |
5674 | // If unsafe fp math optimization is enabled and there are no other uses of |
5675 | // the CMP operands, and the condition code is EQ or NE, we can optimize it |
5676 | // to an integer comparison. |
5677 | if (CC == ISD::SETOEQ) |
5678 | CC = ISD::SETEQ; |
5679 | else if (CC == ISD::SETUNE) |
5680 | CC = ISD::SETNE; |
5681 | |
5682 | SDValue Mask = DAG.getConstant(Val: 0x7fffffff, DL: dl, VT: MVT::i32); |
5683 | SDValue ARMcc; |
5684 | if (LHS.getValueType() == MVT::f32) { |
5685 | LHS = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
5686 | N1: bitcastf32Toi32(Op: LHS, DAG), N2: Mask); |
5687 | RHS = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
5688 | N1: bitcastf32Toi32(Op: RHS, DAG), N2: Mask); |
5689 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
5690 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
5691 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, |
5692 | N1: Chain, N2: Dest, N3: ARMcc, N4: CCR, N5: Cmp); |
5693 | } |
5694 | |
5695 | SDValue LHS1, LHS2; |
5696 | SDValue RHS1, RHS2; |
5697 | expandf64Toi32(Op: LHS, DAG, RetVal1&: LHS1, RetVal2&: LHS2); |
5698 | expandf64Toi32(Op: RHS, DAG, RetVal1&: RHS1, RetVal2&: RHS2); |
5699 | LHS2 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: LHS2, N2: Mask); |
5700 | RHS2 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: RHS2, N2: Mask); |
5701 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
5702 | ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
5703 | SDVTList VTList = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
5704 | SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; |
5705 | return DAG.getNode(Opcode: ARMISD::BCC_i64, DL: dl, VTList, Ops); |
5706 | } |
5707 | |
5708 | return SDValue(); |
5709 | } |
5710 | |
5711 | SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { |
5712 | SDValue Chain = Op.getOperand(i: 0); |
5713 | SDValue Cond = Op.getOperand(i: 1); |
5714 | SDValue Dest = Op.getOperand(i: 2); |
5715 | SDLoc dl(Op); |
5716 | |
5717 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
5718 | // instruction. |
5719 | unsigned Opc = Cond.getOpcode(); |
5720 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
5721 | !Subtarget->isThumb1Only(); |
5722 | if (Cond.getResNo() == 1 && |
5723 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
5724 | Opc == ISD::USUBO || OptimizeMul)) { |
5725 | // Only lower legal XALUO ops. |
5726 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT: Cond->getValueType(ResNo: 0))) |
5727 | return SDValue(); |
5728 | |
5729 | // The actual operation with overflow check. |
5730 | SDValue Value, OverflowCmp; |
5731 | SDValue ARMcc; |
5732 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: Cond, DAG, ARMcc); |
5733 | |
5734 | // Reverse the condition code. |
5735 | ARMCC::CondCodes CondCode = |
5736 | (ARMCC::CondCodes)cast<const ConstantSDNode>(Val&: ARMcc)->getZExtValue(); |
5737 | CondCode = ARMCC::getOppositeCondition(CC: CondCode); |
5738 | ARMcc = DAG.getConstant(Val: CondCode, DL: SDLoc(ARMcc), VT: MVT::i32); |
5739 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
5740 | |
5741 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, N4: CCR, |
5742 | N5: OverflowCmp); |
5743 | } |
5744 | |
5745 | return SDValue(); |
5746 | } |
5747 | |
5748 | SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { |
5749 | SDValue Chain = Op.getOperand(i: 0); |
5750 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 1))->get(); |
5751 | SDValue LHS = Op.getOperand(i: 2); |
5752 | SDValue RHS = Op.getOperand(i: 3); |
5753 | SDValue Dest = Op.getOperand(i: 4); |
5754 | SDLoc dl(Op); |
5755 | |
5756 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
5757 | DAG.getTargetLoweringInfo().softenSetCCOperands( |
5758 | DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS); |
5759 | |
5760 | // If softenSetCCOperands only returned one value, we should compare it to |
5761 | // zero. |
5762 | if (!RHS.getNode()) { |
5763 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
5764 | CC = ISD::SETNE; |
5765 | } |
5766 | } |
5767 | |
5768 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
5769 | // instruction. |
5770 | unsigned Opc = LHS.getOpcode(); |
5771 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
5772 | !Subtarget->isThumb1Only(); |
5773 | if (LHS.getResNo() == 1 && (isOneConstant(V: RHS) || isNullConstant(V: RHS)) && |
5774 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
5775 | Opc == ISD::USUBO || OptimizeMul) && |
5776 | (CC == ISD::SETEQ || CC == ISD::SETNE)) { |
5777 | // Only lower legal XALUO ops. |
5778 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT: LHS->getValueType(ResNo: 0))) |
5779 | return SDValue(); |
5780 | |
5781 | // The actual operation with overflow check. |
5782 | SDValue Value, OverflowCmp; |
5783 | SDValue ARMcc; |
5784 | std::tie(args&: Value, args&: OverflowCmp) = getARMXALUOOp(Op: LHS.getValue(R: 0), DAG, ARMcc); |
5785 | |
5786 | if ((CC == ISD::SETNE) != isOneConstant(V: RHS)) { |
5787 | // Reverse the condition code. |
5788 | ARMCC::CondCodes CondCode = |
5789 | (ARMCC::CondCodes)cast<const ConstantSDNode>(Val&: ARMcc)->getZExtValue(); |
5790 | CondCode = ARMCC::getOppositeCondition(CC: CondCode); |
5791 | ARMcc = DAG.getConstant(Val: CondCode, DL: SDLoc(ARMcc), VT: MVT::i32); |
5792 | } |
5793 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
5794 | |
5795 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, N1: Chain, N2: Dest, N3: ARMcc, N4: CCR, |
5796 | N5: OverflowCmp); |
5797 | } |
5798 | |
5799 | if (LHS.getValueType() == MVT::i32) { |
5800 | SDValue ARMcc; |
5801 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
5802 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
5803 | return DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VT: MVT::Other, |
5804 | N1: Chain, N2: Dest, N3: ARMcc, N4: CCR, N5: Cmp); |
5805 | } |
5806 | |
5807 | if (getTargetMachine().Options.UnsafeFPMath && |
5808 | (CC == ISD::SETEQ || CC == ISD::SETOEQ || |
5809 | CC == ISD::SETNE || CC == ISD::SETUNE)) { |
5810 | if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) |
5811 | return Result; |
5812 | } |
5813 | |
5814 | ARMCC::CondCodes CondCode, CondCode2; |
5815 | FPCCToARMCC(CC, CondCode, CondCode2); |
5816 | |
5817 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
5818 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); |
5819 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
5820 | SDVTList VTList = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
5821 | SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; |
5822 | SDValue Res = DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VTList, Ops); |
5823 | if (CondCode2 != ARMCC::AL) { |
5824 | ARMcc = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
5825 | SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(R: 1) }; |
5826 | Res = DAG.getNode(Opcode: ARMISD::BRCOND, DL: dl, VTList, Ops); |
5827 | } |
5828 | return Res; |
5829 | } |
5830 | |
5831 | SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { |
5832 | SDValue Chain = Op.getOperand(i: 0); |
5833 | SDValue Table = Op.getOperand(i: 1); |
5834 | SDValue Index = Op.getOperand(i: 2); |
5835 | SDLoc dl(Op); |
5836 | |
5837 | EVT PTy = getPointerTy(DL: DAG.getDataLayout()); |
5838 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Val&: Table); |
5839 | SDValue JTI = DAG.getTargetJumpTable(JTI: JT->getIndex(), VT: PTy); |
5840 | Table = DAG.getNode(Opcode: ARMISD::WrapperJT, DL: dl, VT: MVT::i32, Operand: JTI); |
5841 | Index = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT: PTy, N1: Index, N2: DAG.getConstant(Val: 4, DL: dl, VT: PTy)); |
5842 | SDValue Addr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PTy, N1: Table, N2: Index); |
5843 | if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) { |
5844 | // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump table |
5845 | // which does another jump to the destination. This also makes it easier |
5846 | // to translate it to TBB / TBH later (Thumb2 only). |
5847 | // FIXME: This might not work if the function is extremely large. |
5848 | return DAG.getNode(Opcode: ARMISD::BR2_JT, DL: dl, VT: MVT::Other, N1: Chain, |
5849 | N2: Addr, N3: Op.getOperand(i: 2), N4: JTI); |
5850 | } |
5851 | if (isPositionIndependent() || Subtarget->isROPI()) { |
5852 | Addr = |
5853 | DAG.getLoad(VT: (EVT)MVT::i32, dl, Chain, Ptr: Addr, |
5854 | PtrInfo: MachinePointerInfo::getJumpTable(MF&: DAG.getMachineFunction())); |
5855 | Chain = Addr.getValue(R: 1); |
5856 | Addr = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PTy, N1: Table, N2: Addr); |
5857 | return DAG.getNode(Opcode: ARMISD::BR_JT, DL: dl, VT: MVT::Other, N1: Chain, N2: Addr, N3: JTI); |
5858 | } else { |
5859 | Addr = |
5860 | DAG.getLoad(VT: PTy, dl, Chain, Ptr: Addr, |
5861 | PtrInfo: MachinePointerInfo::getJumpTable(MF&: DAG.getMachineFunction())); |
5862 | Chain = Addr.getValue(R: 1); |
5863 | return DAG.getNode(Opcode: ARMISD::BR_JT, DL: dl, VT: MVT::Other, N1: Chain, N2: Addr, N3: JTI); |
5864 | } |
5865 | } |
5866 | |
5867 | static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { |
5868 | EVT VT = Op.getValueType(); |
5869 | SDLoc dl(Op); |
5870 | |
5871 | if (Op.getValueType().getVectorElementType() == MVT::i32) { |
5872 | if (Op.getOperand(i: 0).getValueType().getVectorElementType() == MVT::f32) |
5873 | return Op; |
5874 | return DAG.UnrollVectorOp(N: Op.getNode()); |
5875 | } |
5876 | |
5877 | const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16(); |
5878 | |
5879 | EVT NewTy; |
5880 | const EVT OpTy = Op.getOperand(i: 0).getValueType(); |
5881 | if (OpTy == MVT::v4f32) |
5882 | NewTy = MVT::v4i32; |
5883 | else if (OpTy == MVT::v4f16 && HasFullFP16) |
5884 | NewTy = MVT::v4i16; |
5885 | else if (OpTy == MVT::v8f16 && HasFullFP16) |
5886 | NewTy = MVT::v8i16; |
5887 | else |
5888 | llvm_unreachable("Invalid type for custom lowering!" ); |
5889 | |
5890 | if (VT != MVT::v4i16 && VT != MVT::v8i16) |
5891 | return DAG.UnrollVectorOp(N: Op.getNode()); |
5892 | |
5893 | Op = DAG.getNode(Opcode: Op.getOpcode(), DL: dl, VT: NewTy, Operand: Op.getOperand(i: 0)); |
5894 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT, Operand: Op); |
5895 | } |
5896 | |
5897 | SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { |
5898 | EVT VT = Op.getValueType(); |
5899 | if (VT.isVector()) |
5900 | return LowerVectorFP_TO_INT(Op, DAG); |
5901 | |
5902 | bool IsStrict = Op->isStrictFPOpcode(); |
5903 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
5904 | |
5905 | if (isUnsupportedFloatingType(VT: SrcVal.getValueType())) { |
5906 | RTLIB::Libcall LC; |
5907 | if (Op.getOpcode() == ISD::FP_TO_SINT || |
5908 | Op.getOpcode() == ISD::STRICT_FP_TO_SINT) |
5909 | LC = RTLIB::getFPTOSINT(OpVT: SrcVal.getValueType(), |
5910 | RetVT: Op.getValueType()); |
5911 | else |
5912 | LC = RTLIB::getFPTOUINT(OpVT: SrcVal.getValueType(), |
5913 | RetVT: Op.getValueType()); |
5914 | SDLoc Loc(Op); |
5915 | MakeLibCallOptions CallOptions; |
5916 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
5917 | SDValue Result; |
5918 | std::tie(args&: Result, args&: Chain) = makeLibCall(DAG, LC, RetVT: Op.getValueType(), Ops: SrcVal, |
5919 | CallOptions, dl: Loc, Chain); |
5920 | return IsStrict ? DAG.getMergeValues(Ops: {Result, Chain}, dl: Loc) : Result; |
5921 | } |
5922 | |
5923 | // FIXME: Remove this when we have strict fp instruction selection patterns |
5924 | if (IsStrict) { |
5925 | SDLoc Loc(Op); |
5926 | SDValue Result = |
5927 | DAG.getNode(Opcode: Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT |
5928 | : ISD::FP_TO_UINT, |
5929 | DL: Loc, VT: Op.getValueType(), Operand: SrcVal); |
5930 | return DAG.getMergeValues(Ops: {Result, Op.getOperand(i: 0)}, dl: Loc); |
5931 | } |
5932 | |
5933 | return Op; |
5934 | } |
5935 | |
5936 | static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, |
5937 | const ARMSubtarget *Subtarget) { |
5938 | EVT VT = Op.getValueType(); |
5939 | EVT ToVT = cast<VTSDNode>(Val: Op.getOperand(i: 1))->getVT(); |
5940 | EVT FromVT = Op.getOperand(i: 0).getValueType(); |
5941 | |
5942 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f32) |
5943 | return Op; |
5944 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f64 && |
5945 | Subtarget->hasFP64()) |
5946 | return Op; |
5947 | if (VT == MVT::i32 && ToVT == MVT::i32 && FromVT == MVT::f16 && |
5948 | Subtarget->hasFullFP16()) |
5949 | return Op; |
5950 | if (VT == MVT::v4i32 && ToVT == MVT::i32 && FromVT == MVT::v4f32 && |
5951 | Subtarget->hasMVEFloatOps()) |
5952 | return Op; |
5953 | if (VT == MVT::v8i16 && ToVT == MVT::i16 && FromVT == MVT::v8f16 && |
5954 | Subtarget->hasMVEFloatOps()) |
5955 | return Op; |
5956 | |
5957 | if (FromVT != MVT::v4f32 && FromVT != MVT::v8f16) |
5958 | return SDValue(); |
5959 | |
5960 | SDLoc DL(Op); |
5961 | bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT; |
5962 | unsigned BW = ToVT.getScalarSizeInBits() - IsSigned; |
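// For example, a signed saturate to i8 (ToVT == i8) gives BW == 7, so the
// converted value is clamped to [-(1 << 7), (1 << 7) - 1] == [-128, 127];
// the unsigned form keeps BW == 8 and only clamps from above at 255.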
5963 | SDValue CVT = DAG.getNode(Opcode: Op.getOpcode(), DL, VT, N1: Op.getOperand(i: 0), |
5964 | N2: DAG.getValueType(VT.getScalarType())); |
5965 | SDValue Max = DAG.getNode(Opcode: IsSigned ? ISD::SMIN : ISD::UMIN, DL, VT, N1: CVT, |
5966 | N2: DAG.getConstant(Val: (1 << BW) - 1, DL, VT)); |
5967 | if (IsSigned) |
5968 | Max = DAG.getNode(Opcode: ISD::SMAX, DL, VT, N1: Max, |
5969 | N2: DAG.getConstant(Val: -(1 << BW), DL, VT)); |
5970 | return Max; |
5971 | } |
5972 | |
5973 | static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { |
5974 | EVT VT = Op.getValueType(); |
5975 | SDLoc dl(Op); |
5976 | |
5977 | if (Op.getOperand(i: 0).getValueType().getVectorElementType() == MVT::i32) { |
5978 | if (VT.getVectorElementType() == MVT::f32) |
5979 | return Op; |
5980 | return DAG.UnrollVectorOp(N: Op.getNode()); |
5981 | } |
5982 | |
5983 | assert((Op.getOperand(0).getValueType() == MVT::v4i16 || |
5984 | Op.getOperand(0).getValueType() == MVT::v8i16) && |
5985 | "Invalid type for custom lowering!" ); |
5986 | |
5987 | const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16(); |
5988 | |
5989 | EVT DestVecType; |
5990 | if (VT == MVT::v4f32) |
5991 | DestVecType = MVT::v4i32; |
5992 | else if (VT == MVT::v4f16 && HasFullFP16) |
5993 | DestVecType = MVT::v4i16; |
5994 | else if (VT == MVT::v8f16 && HasFullFP16) |
5995 | DestVecType = MVT::v8i16; |
5996 | else |
5997 | return DAG.UnrollVectorOp(N: Op.getNode()); |
5998 | |
5999 | unsigned CastOpc; |
6000 | unsigned Opc; |
6001 | switch (Op.getOpcode()) { |
6002 | default: llvm_unreachable("Invalid opcode!" ); |
6003 | case ISD::SINT_TO_FP: |
6004 | CastOpc = ISD::SIGN_EXTEND; |
6005 | Opc = ISD::SINT_TO_FP; |
6006 | break; |
6007 | case ISD::UINT_TO_FP: |
6008 | CastOpc = ISD::ZERO_EXTEND; |
6009 | Opc = ISD::UINT_TO_FP; |
6010 | break; |
6011 | } |
6012 | |
6013 | Op = DAG.getNode(Opcode: CastOpc, DL: dl, VT: DestVecType, Operand: Op.getOperand(i: 0)); |
6014 | return DAG.getNode(Opcode: Opc, DL: dl, VT, Operand: Op); |
6015 | } |
6016 | |
6017 | SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { |
6018 | EVT VT = Op.getValueType(); |
6019 | if (VT.isVector()) |
6020 | return LowerVectorINT_TO_FP(Op, DAG); |
6021 | if (isUnsupportedFloatingType(VT)) { |
6022 | RTLIB::Libcall LC; |
6023 | if (Op.getOpcode() == ISD::SINT_TO_FP) |
6024 | LC = RTLIB::getSINTTOFP(OpVT: Op.getOperand(i: 0).getValueType(), |
6025 | RetVT: Op.getValueType()); |
6026 | else |
6027 | LC = RTLIB::getUINTTOFP(OpVT: Op.getOperand(i: 0).getValueType(), |
6028 | RetVT: Op.getValueType()); |
6029 | MakeLibCallOptions CallOptions; |
6030 | return makeLibCall(DAG, LC, RetVT: Op.getValueType(), Ops: Op.getOperand(i: 0), |
6031 | CallOptions, dl: SDLoc(Op)).first; |
6032 | } |
6033 | |
6034 | return Op; |
6035 | } |
6036 | |
6037 | SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { |
// Implement fcopysign by transferring the sign bit of operand 1 into
// operand 0, either with a NEON bit-select or with integer mask-and-or
// operations on the scalar bit pattern.
6039 | SDValue Tmp0 = Op.getOperand(i: 0); |
6040 | SDValue Tmp1 = Op.getOperand(i: 1); |
6041 | SDLoc dl(Op); |
6042 | EVT VT = Op.getValueType(); |
6043 | EVT SrcVT = Tmp1.getValueType(); |
6044 | bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || |
6045 | Tmp0.getOpcode() == ARMISD::VMOVDRR; |
6046 | bool UseNEON = !InGPR && Subtarget->hasNEON(); |
6047 | |
6048 | if (UseNEON) { |
6049 | // Use VBSL to copy the sign bit. |
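// The nodes built below compute (Tmp1 & Mask) | (Tmp0 & ~Mask) with Mask
// covering only the sign bit, i.e. the VBSL-style bit-select mentioned
// above: take the sign from Tmp1 and everything else from Tmp0.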
6050 | unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode: 0x6, Val: 0x80); |
6051 | SDValue Mask = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v2i32, |
6052 | Operand: DAG.getTargetConstant(Val: EncodedVal, DL: dl, VT: MVT::i32)); |
6053 | EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; |
6054 | if (VT == MVT::f64) |
6055 | Mask = DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT: OpVT, |
6056 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Mask), |
6057 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
6058 | else /*if (VT == MVT::f32)*/ |
6059 | Tmp0 = DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT: MVT::v2f32, Operand: Tmp0); |
6060 | if (SrcVT == MVT::f32) { |
6061 | Tmp1 = DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT: MVT::v2f32, Operand: Tmp1); |
6062 | if (VT == MVT::f64) |
6063 | Tmp1 = DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT: OpVT, |
6064 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp1), |
6065 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
6066 | } else if (VT == MVT::f32) |
6067 | Tmp1 = DAG.getNode(Opcode: ARMISD::VSHRuIMM, DL: dl, VT: MVT::v1i64, |
6068 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v1i64, Operand: Tmp1), |
6069 | N2: DAG.getConstant(Val: 32, DL: dl, VT: MVT::i32)); |
6070 | Tmp0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp0); |
6071 | Tmp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: Tmp1); |
6072 | |
6073 | SDValue AllOnes = DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0xff), |
6074 | DL: dl, VT: MVT::i32); |
6075 | AllOnes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v8i8, Operand: AllOnes); |
6076 | SDValue MaskNot = DAG.getNode(Opcode: ISD::XOR, DL: dl, VT: OpVT, N1: Mask, |
6077 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: OpVT, Operand: AllOnes)); |
6078 | |
6079 | SDValue Res = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: OpVT, |
6080 | N1: DAG.getNode(Opcode: ISD::AND, DL: dl, VT: OpVT, N1: Tmp1, N2: Mask), |
6081 | N2: DAG.getNode(Opcode: ISD::AND, DL: dl, VT: OpVT, N1: Tmp0, N2: MaskNot)); |
6082 | if (VT == MVT::f32) { |
6083 | Res = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v2f32, Operand: Res); |
6084 | Res = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, N1: Res, |
6085 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
6086 | } else { |
6087 | Res = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Res); |
6088 | } |
6089 | |
6090 | return Res; |
6091 | } |
6092 | |
6093 | // Bitcast operand 1 to i32. |
6094 | if (SrcVT == MVT::f64) |
6095 | Tmp1 = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
6096 | N: Tmp1).getValue(R: 1); |
6097 | Tmp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Tmp1); |
6098 | |
6099 | // Or in the signbit with integer operations. |
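// i.e. copysign(Tmp0, Tmp1) == bitcast((bits(Tmp0) & 0x7fffffff) |
//                                      (bits(Tmp1) & 0x80000000)); for f64,
// only the high word of Tmp0 is masked and recombined with VMOVDRR.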
6100 | SDValue Mask1 = DAG.getConstant(Val: 0x80000000, DL: dl, VT: MVT::i32); |
6101 | SDValue Mask2 = DAG.getConstant(Val: 0x7fffffff, DL: dl, VT: MVT::i32); |
6102 | Tmp1 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: Tmp1, N2: Mask1); |
6103 | if (VT == MVT::f32) { |
6104 | Tmp0 = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, |
6105 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Tmp0), N2: Mask2); |
6106 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f32, |
6107 | Operand: DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: Tmp0, N2: Tmp1)); |
6108 | } |
6109 | |
6110 | // f64: Or the high part with signbit and then combine two parts. |
6111 | Tmp0 = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
6112 | N: Tmp0); |
6113 | SDValue Lo = Tmp0.getValue(R: 0); |
6114 | SDValue Hi = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: Tmp0.getValue(R: 1), N2: Mask2); |
6115 | Hi = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: MVT::i32, N1: Hi, N2: Tmp1); |
6116 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi); |
6117 | } |
6118 | |
6119 | SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ |
6120 | MachineFunction &MF = DAG.getMachineFunction(); |
6121 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
6122 | MFI.setReturnAddressIsTaken(true); |
6123 | |
6124 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) |
6125 | return SDValue(); |
6126 | |
6127 | EVT VT = Op.getValueType(); |
6128 | SDLoc dl(Op); |
6129 | unsigned Depth = Op.getConstantOperandVal(i: 0); |
6130 | if (Depth) { |
6131 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); |
6132 | SDValue Offset = DAG.getConstant(Val: 4, DL: dl, VT: MVT::i32); |
6133 | return DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(), |
6134 | Ptr: DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: FrameAddr, N2: Offset), |
6135 | PtrInfo: MachinePointerInfo()); |
6136 | } |
6137 | |
6138 | // Return LR, which contains the return address. Mark it an implicit live-in. |
6139 | Register Reg = MF.addLiveIn(PReg: ARM::LR, RC: getRegClassFor(VT: MVT::i32)); |
6140 | return DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg, VT); |
6141 | } |
6142 | |
6143 | SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { |
6144 | const ARMBaseRegisterInfo &ARI = |
6145 | *static_cast<const ARMBaseRegisterInfo*>(RegInfo); |
6146 | MachineFunction &MF = DAG.getMachineFunction(); |
6147 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
6148 | MFI.setFrameAddressIsTaken(true); |
6149 | |
6150 | EVT VT = Op.getValueType(); |
6151 | SDLoc dl(Op); // FIXME probably not meaningful |
6152 | unsigned Depth = Op.getConstantOperandVal(i: 0); |
6153 | Register FrameReg = ARI.getFrameRegister(MF); |
6154 | SDValue FrameAddr = DAG.getCopyFromReg(Chain: DAG.getEntryNode(), dl, Reg: FrameReg, VT); |
6155 | while (Depth--) |
6156 | FrameAddr = DAG.getLoad(VT, dl, Chain: DAG.getEntryNode(), Ptr: FrameAddr, |
6157 | PtrInfo: MachinePointerInfo()); |
6158 | return FrameAddr; |
6159 | } |
6160 | |
6161 | // FIXME? Maybe this could be a TableGen attribute on some registers and |
6162 | // this table could be generated automatically from RegInfo. |
6163 | Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT, |
6164 | const MachineFunction &MF) const { |
6165 | Register Reg = StringSwitch<unsigned>(RegName) |
6166 | .Case(S: "sp" , Value: ARM::SP) |
6167 | .Default(Value: 0); |
6168 | if (Reg) |
6169 | return Reg; |
6170 | report_fatal_error(reason: Twine("Invalid register name \"" |
+ StringRef(RegName) + "\"."));
6172 | } |
6173 | |
// The result is a 64-bit value, so split it into two 32-bit values and
// return them as a pair of values.
6176 | static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results, |
6177 | SelectionDAG &DAG) { |
6178 | SDLoc DL(N); |
6179 | |
6180 | // This function is only supposed to be called for i64 type destination. |
6181 | assert(N->getValueType(0) == MVT::i64 |
6182 | && "ExpandREAD_REGISTER called for non-i64 type result." ); |
6183 | |
6184 | SDValue Read = DAG.getNode(Opcode: ISD::READ_REGISTER, DL, |
6185 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32, VT3: MVT::Other), |
6186 | N1: N->getOperand(Num: 0), |
6187 | N2: N->getOperand(Num: 1)); |
6188 | |
6189 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: Read.getValue(R: 0), |
6190 | N2: Read.getValue(R: 1))); |
6191 | Results.push_back(Elt: Read.getOperand(i: 0)); |
6192 | } |
6193 | |
6194 | /// \p BC is a bitcast that is about to be turned into a VMOVDRR. |
6195 | /// When \p DstVT, the destination type of \p BC, is on the vector |
6196 | /// register bank and the source of bitcast, \p Op, operates on the same bank, |
6197 | /// it might be possible to combine them, such that everything stays on the |
6198 | /// vector register bank. |
/// \return The node that would replace \p BC, if the combine
/// is possible.
6201 | static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, |
6202 | SelectionDAG &DAG) { |
6203 | SDValue Op = BC->getOperand(Num: 0); |
6204 | EVT DstVT = BC->getValueType(ResNo: 0); |
6205 | |
6206 | // The only vector instruction that can produce a scalar (remember, |
6207 | // since the bitcast was about to be turned into VMOVDRR, the source |
6208 | // type is i64) from a vector is EXTRACT_VECTOR_ELT. |
6209 | // Moreover, we can do this combine only if there is one use. |
6210 | // Finally, if the destination type is not a vector, there is not |
6211 | // much point on forcing everything on the vector bank. |
6212 | if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
6213 | !Op.hasOneUse()) |
6214 | return SDValue(); |
6215 | |
6216 | // If the index is not constant, we will introduce an additional |
6217 | // multiply that will stick. |
6218 | // Give up in that case. |
6219 | ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1)); |
6220 | if (!Index) |
6221 | return SDValue(); |
6222 | unsigned DstNumElt = DstVT.getVectorNumElements(); |
6223 | |
6224 | // Compute the new index. |
6225 | const APInt &APIntIndex = Index->getAPIntValue(); |
6226 | APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt); |
6227 | NewIndex *= APIntIndex; |
6228 | // Check if the new constant index fits into i32. |
6229 | if (NewIndex.getBitWidth() > 32) |
6230 | return SDValue(); |
6231 | |
6232 | // vMTy bitcast(i64 extractelt vNi64 src, i32 index) -> |
6233 | // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M) |
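// For example (illustrative):
//   v2f32 (bitcast (i64 (extractelt v2i64 %src, 1)))
//     -> v2f32 (extract_subvector (v4f32 (bitcast %src)), 2)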
6234 | SDLoc dl(Op); |
SDValue ExtractSrc = Op.getOperand(i: 0);
6236 | EVT VecVT = EVT::getVectorVT( |
6237 | Context&: *DAG.getContext(), VT: DstVT.getScalarType(), |
6238 | NumElements: ExtractSrc.getValueType().getVectorNumElements() * DstNumElt); |
6239 | SDValue BitCast = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: ExtractSrc); |
6240 | return DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DstVT, N1: BitCast, |
6241 | N2: DAG.getConstant(Val: NewIndex.getZExtValue(), DL: dl, VT: MVT::i32)); |
6242 | } |
6243 | |
6244 | /// ExpandBITCAST - If the target supports VFP, this function is called to |
6245 | /// expand a bit convert where either the source or destination type is i64 to |
6246 | /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 |
6247 | /// operand type is illegal (e.g., v2f32 for a target that doesn't support |
6248 | /// vectors), since the legalizer won't know what to do with that. |
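/// For example, (f64 (bitcast (i64 %x))) is lowered via
/// (ARMISD::VMOVDRR lo(%x), hi(%x)), and the f64 -> i64 direction becomes an
/// ARMISD::VMOVRRD whose two i32 results are merged with a BUILD_PAIR.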
6249 | SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG, |
6250 | const ARMSubtarget *Subtarget) const { |
6251 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
6252 | SDLoc dl(N); |
6253 | SDValue Op = N->getOperand(Num: 0); |
6254 | |
6255 | // This function is only supposed to be called for i16 and i64 types, either |
6256 | // as the source or destination of the bit convert. |
6257 | EVT SrcVT = Op.getValueType(); |
6258 | EVT DstVT = N->getValueType(ResNo: 0); |
6259 | |
6260 | if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) && |
6261 | (DstVT == MVT::f16 || DstVT == MVT::bf16)) |
6262 | return MoveToHPR(dl: SDLoc(N), DAG, LocVT: MVT::i32, ValVT: DstVT.getSimpleVT(), |
6263 | Val: DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: SDLoc(N), VT: MVT::i32, Operand: Op)); |
6264 | |
6265 | if ((DstVT == MVT::i16 || DstVT == MVT::i32) && |
6266 | (SrcVT == MVT::f16 || SrcVT == MVT::bf16)) |
6267 | return DAG.getNode( |
6268 | Opcode: ISD::TRUNCATE, DL: SDLoc(N), VT: DstVT, |
6269 | Operand: MoveFromHPR(dl: SDLoc(N), DAG, LocVT: MVT::i32, ValVT: SrcVT.getSimpleVT(), Val: Op)); |
6270 | |
6271 | if (!(SrcVT == MVT::i64 || DstVT == MVT::i64)) |
6272 | return SDValue(); |
6273 | |
6274 | // Turn i64->f64 into VMOVDRR. |
6275 | if (SrcVT == MVT::i64 && TLI.isTypeLegal(VT: DstVT)) { |
6276 | // Do not force values to GPRs (this is what VMOVDRR does for the inputs) |
6277 | // if we can combine the bitcast with its source. |
6278 | if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(BC: N, DAG)) |
6279 | return Val; |
6280 | SDValue Lo, Hi; |
6281 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Op, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
6282 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: DstVT, |
6283 | Operand: DAG.getNode(Opcode: ARMISD::VMOVDRR, DL: dl, VT: MVT::f64, N1: Lo, N2: Hi)); |
6284 | } |
6285 | |
6286 | // Turn f64->i64 into VMOVRRD. |
6287 | if (DstVT == MVT::i64 && TLI.isTypeLegal(VT: SrcVT)) { |
6288 | SDValue Cvt; |
6289 | if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() && |
6290 | SrcVT.getVectorNumElements() > 1) |
6291 | Cvt = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
6292 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
6293 | N: DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT: SrcVT, Operand: Op)); |
6294 | else |
6295 | Cvt = DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, |
6296 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N: Op); |
6297 | // Merge the pieces into a single i64 value. |
6298 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Cvt, N2: Cvt.getValue(R: 1)); |
6299 | } |
6300 | |
6301 | return SDValue(); |
6302 | } |
6303 | |
6304 | /// getZeroVector - Returns a vector of specified type with all zero elements. |
6305 | /// Zero vectors are used to represent vector negation and in those cases |
6306 | /// will be implemented with the NEON VNEG instruction. However, VNEG does |
6307 | /// not support i64 elements, so sometimes the zero vectors will need to be |
6308 | /// explicitly constructed. Regardless, use a canonical VMOV to create the |
6309 | /// zero vector. |
6310 | static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) { |
6311 | assert(VT.isVector() && "Expected a vector type" ); |
6312 | // The canonical modified immediate encoding of a zero vector is....0! |
6313 | SDValue EncodedVal = DAG.getTargetConstant(Val: 0, DL: dl, VT: MVT::i32); |
6314 | EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
6315 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: VmovVT, Operand: EncodedVal); |
6316 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vmov); |
6317 | } |
6318 | |
/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
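/// Sketch of the expansion, selected with a CMOV on (ShAmt - 32) >= 0, where
/// ">>" on ShOpHi is arithmetic for SRA_PARTS and logical for SRL_PARTS:
///   ShAmt < 32:  Lo = (ShOpLo lshr ShAmt) | (ShOpHi << (32 - ShAmt)),
///                Hi = ShOpHi >> ShAmt
///   ShAmt >= 32: Lo = ShOpHi >> (ShAmt - 32),
///                Hi = ShOpHi >> 31 (SRA) or 0 (SRL)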
6321 | SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, |
6322 | SelectionDAG &DAG) const { |
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
6324 | EVT VT = Op.getValueType(); |
6325 | unsigned VTBits = VT.getSizeInBits(); |
6326 | SDLoc dl(Op); |
6327 | SDValue ShOpLo = Op.getOperand(i: 0); |
6328 | SDValue ShOpHi = Op.getOperand(i: 1); |
6329 | SDValue ShAmt = Op.getOperand(i: 2); |
6330 | SDValue ARMcc; |
6331 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
6332 | unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; |
6333 | |
6334 | assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); |
6335 | |
6336 | SDValue RevShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
6337 | N1: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32), N2: ShAmt); |
6338 | SDValue Tmp1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: ShOpLo, N2: ShAmt); |
SDValue ExtraShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, N1: ShAmt,
6340 | N2: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32)); |
6341 | SDValue Tmp2 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpHi, N2: RevShAmt); |
6342 | SDValue LoSmallShift = DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: Tmp1, N2: Tmp2); |
6343 | SDValue LoBigShift = DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, N2: ExtraShAmt); |
6344 | SDValue CmpLo = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
6345 | CC: ISD::SETGE, ARMcc, DAG, dl); |
6346 | SDValue Lo = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LoSmallShift, N2: LoBigShift, |
6347 | N3: ARMcc, N4: CCR, N5: CmpLo); |
6348 | |
6349 | SDValue HiSmallShift = DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, N2: ShAmt); |
6350 | SDValue HiBigShift = Opc == ISD::SRA |
6351 | ? DAG.getNode(Opcode: Opc, DL: dl, VT, N1: ShOpHi, |
6352 | N2: DAG.getConstant(Val: VTBits - 1, DL: dl, VT)) |
6353 | : DAG.getConstant(Val: 0, DL: dl, VT); |
6354 | SDValue CmpHi = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
6355 | CC: ISD::SETGE, ARMcc, DAG, dl); |
6356 | SDValue Hi = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: HiSmallShift, N2: HiBigShift, |
6357 | N3: ARMcc, N4: CCR, N5: CmpHi); |
6358 | |
6359 | SDValue Ops[2] = { Lo, Hi }; |
6360 | return DAG.getMergeValues(Ops, dl); |
6361 | } |
6362 | |
/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
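/// Sketch of the expansion, selected with a CMOV on (ShAmt - 32) >= 0:
///   ShAmt < 32:  Hi = (ShOpHi << ShAmt) | (ShOpLo lshr (32 - ShAmt)),
///                Lo = ShOpLo << ShAmt
///   ShAmt >= 32: Hi = ShOpLo << (ShAmt - 32),  Lo = 0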
6365 | SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, |
6366 | SelectionDAG &DAG) const { |
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
6368 | EVT VT = Op.getValueType(); |
6369 | unsigned VTBits = VT.getSizeInBits(); |
6370 | SDLoc dl(Op); |
6371 | SDValue ShOpLo = Op.getOperand(i: 0); |
6372 | SDValue ShOpHi = Op.getOperand(i: 1); |
6373 | SDValue ShAmt = Op.getOperand(i: 2); |
6374 | SDValue ARMcc; |
6375 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
6376 | |
6377 | assert(Op.getOpcode() == ISD::SHL_PARTS); |
6378 | SDValue RevShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
6379 | N1: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32), N2: ShAmt); |
6380 | SDValue Tmp1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: ShOpLo, N2: RevShAmt); |
6381 | SDValue Tmp2 = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpHi, N2: ShAmt); |
6382 | SDValue HiSmallShift = DAG.getNode(Opcode: ISD::OR, DL: dl, VT, N1: Tmp1, N2: Tmp2); |
6383 | |
SDValue ExtraShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, N1: ShAmt,
6385 | N2: DAG.getConstant(Val: VTBits, DL: dl, VT: MVT::i32)); |
6386 | SDValue HiBigShift = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpLo, N2: ExtraShAmt); |
6387 | SDValue CmpHi = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
6388 | CC: ISD::SETGE, ARMcc, DAG, dl); |
6389 | SDValue Hi = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: HiSmallShift, N2: HiBigShift, |
6390 | N3: ARMcc, N4: CCR, N5: CmpHi); |
6391 | |
6392 | SDValue CmpLo = getARMCmp(LHS: ExtraShAmt, RHS: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
6393 | CC: ISD::SETGE, ARMcc, DAG, dl); |
6394 | SDValue LoSmallShift = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: ShOpLo, N2: ShAmt); |
6395 | SDValue Lo = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LoSmallShift, |
6396 | N2: DAG.getConstant(Val: 0, DL: dl, VT), N3: ARMcc, N4: CCR, N5: CmpLo); |
6397 | |
6398 | SDValue Ops[2] = { Lo, Hi }; |
6399 | return DAG.getMergeValues(Ops, dl); |
6400 | } |
6401 | |
6402 | SDValue ARMTargetLowering::LowerGET_ROUNDING(SDValue Op, |
6403 | SelectionDAG &DAG) const { |
6404 | // The rounding mode is in bits 23:22 of the FPSCR. |
6405 | // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 |
// The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
6407 | // so that the shift + and get folded into a bitfield extract. |
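// For example, FPSCR[23:22] == 0b11 (round towards zero) yields
// ((3 + 1) & 3) == 0, which is the FLT_ROUNDS encoding for "toward zero".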
6408 | SDLoc dl(Op); |
6409 | SDValue Chain = Op.getOperand(i: 0); |
6410 | SDValue Ops[] = {Chain, |
6411 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL: dl, VT: MVT::i32)}; |
6412 | |
6413 | SDValue FPSCR = |
6414 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL: dl, ResultTys: {MVT::i32, MVT::Other}, Ops); |
6415 | Chain = FPSCR.getValue(R: 1); |
6416 | SDValue FltRounds = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i32, N1: FPSCR, |
6417 | N2: DAG.getConstant(Val: 1U << 22, DL: dl, VT: MVT::i32)); |
6418 | SDValue RMODE = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: FltRounds, |
6419 | N2: DAG.getConstant(Val: 22, DL: dl, VT: MVT::i32)); |
6420 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: MVT::i32, N1: RMODE, |
6421 | N2: DAG.getConstant(Val: 3, DL: dl, VT: MVT::i32)); |
6422 | return DAG.getMergeValues(Ops: {And, Chain}, dl); |
6423 | } |
6424 | |
6425 | SDValue ARMTargetLowering::LowerSET_ROUNDING(SDValue Op, |
6426 | SelectionDAG &DAG) const { |
6427 | SDLoc DL(Op); |
6428 | SDValue Chain = Op->getOperand(Num: 0); |
6429 | SDValue RMValue = Op->getOperand(Num: 1); |
6430 | |
6431 | // The rounding mode is in bits 23:22 of the FPSCR. |
6432 | // The llvm.set.rounding argument value to ARM rounding mode value mapping |
6433 | // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is |
6434 | // ((arg - 1) & 3) << 22). |
6435 | // |
6436 | // It is expected that the argument of llvm.set.rounding is within the |
6437 | // segment [0, 3], so NearestTiesToAway (4) is not handled here. It is |
6438 | // responsibility of the code generated llvm.set.rounding to ensure this |
6439 | // condition. |
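// For example, llvm.set.rounding(0) ("toward zero") computes
// ((0 - 1) & 3) == 3, the ARM RZ encoding, which is then shifted into
// FPSCR[23:22] below.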
6440 | |
6441 | // Calculate new value of FPSCR[23:22]. |
6442 | RMValue = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, N1: RMValue, |
6443 | N2: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
6444 | RMValue = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: RMValue, |
6445 | N2: DAG.getConstant(Val: 0x3, DL, VT: MVT::i32)); |
6446 | RMValue = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: RMValue, |
6447 | N2: DAG.getConstant(Val: ARM::RoundingBitsPos, DL, VT: MVT::i32)); |
6448 | |
6449 | // Get current value of FPSCR. |
6450 | SDValue Ops[] = {Chain, |
6451 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
6452 | SDValue FPSCR = |
6453 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
6454 | Chain = FPSCR.getValue(R: 1); |
6455 | FPSCR = FPSCR.getValue(R: 0); |
6456 | |
6457 | // Put new rounding mode into FPSCR[23:22]. |
6458 | const unsigned RMMask = ~(ARM::Rounding::rmMask << ARM::RoundingBitsPos); |
6459 | FPSCR = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
6460 | N2: DAG.getConstant(Val: RMMask, DL, VT: MVT::i32)); |
6461 | FPSCR = DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: FPSCR, N2: RMValue); |
6462 | SDValue Ops2[] = { |
6463 | Chain, DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), FPSCR}; |
6464 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
6465 | } |
6466 | |
6467 | SDValue ARMTargetLowering::LowerSET_FPMODE(SDValue Op, |
6468 | SelectionDAG &DAG) const { |
6469 | SDLoc DL(Op); |
6470 | SDValue Chain = Op->getOperand(Num: 0); |
6471 | SDValue Mode = Op->getOperand(Num: 1); |
6472 | |
6473 | // Generate nodes to build: |
6474 | // FPSCR = (FPSCR & FPStatusBits) | (Mode & ~FPStatusBits) |
6475 | SDValue Ops[] = {Chain, |
6476 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
6477 | SDValue FPSCR = |
6478 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
6479 | Chain = FPSCR.getValue(R: 1); |
6480 | FPSCR = FPSCR.getValue(R: 0); |
6481 | |
6482 | SDValue FPSCRMasked = |
6483 | DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
6484 | N2: DAG.getConstant(Val: ARM::FPStatusBits, DL, VT: MVT::i32)); |
6485 | SDValue InputMasked = |
6486 | DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: Mode, |
6487 | N2: DAG.getConstant(Val: ~ARM::FPStatusBits, DL, VT: MVT::i32)); |
6488 | FPSCR = DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: FPSCRMasked, N2: InputMasked); |
6489 | |
6490 | SDValue Ops2[] = { |
6491 | Chain, DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), FPSCR}; |
6492 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
6493 | } |
6494 | |
6495 | SDValue ARMTargetLowering::LowerRESET_FPMODE(SDValue Op, |
6496 | SelectionDAG &DAG) const { |
6497 | SDLoc DL(Op); |
6498 | SDValue Chain = Op->getOperand(Num: 0); |
6499 | |
6500 | // To get the default FP mode all control bits are cleared: |
6501 | // FPSCR = FPSCR & (FPStatusBits | FPReservedBits) |
6502 | SDValue Ops[] = {Chain, |
6503 | DAG.getConstant(Val: Intrinsic::arm_get_fpscr, DL, VT: MVT::i32)}; |
6504 | SDValue FPSCR = |
6505 | DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, ResultTys: {MVT::i32, MVT::Other}, Ops); |
6506 | Chain = FPSCR.getValue(R: 1); |
6507 | FPSCR = FPSCR.getValue(R: 0); |
6508 | |
6509 | SDValue FPSCRMasked = DAG.getNode( |
6510 | Opcode: ISD::AND, DL, VT: MVT::i32, N1: FPSCR, |
6511 | N2: DAG.getConstant(Val: ARM::FPStatusBits | ARM::FPReservedBits, DL, VT: MVT::i32)); |
6512 | SDValue Ops2[] = {Chain, |
6513 | DAG.getConstant(Val: Intrinsic::arm_set_fpscr, DL, VT: MVT::i32), |
6514 | FPSCRMasked}; |
6515 | return DAG.getNode(Opcode: ISD::INTRINSIC_VOID, DL, VT: MVT::Other, Ops: Ops2); |
6516 | } |
6517 | |
6518 | static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, |
6519 | const ARMSubtarget *ST) { |
6520 | SDLoc dl(N); |
6521 | EVT VT = N->getValueType(ResNo: 0); |
6522 | if (VT.isVector() && ST->hasNEON()) { |
6523 | |
6524 | // Compute the least significant set bit: LSB = X & -X |
6525 | SDValue X = N->getOperand(Num: 0); |
6526 | SDValue NX = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: getZeroVector(VT, DAG, dl), N2: X); |
6527 | SDValue LSB = DAG.getNode(Opcode: ISD::AND, DL: dl, VT, N1: X, N2: NX); |
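// For example, X = 0b0110100 gives LSB = 0b0000100, and
// ctpop(LSB - 1) == ctpop(0b0000011) == 2 == cttz(X).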
6528 | |
6529 | EVT ElemTy = VT.getVectorElementType(); |
6530 | |
6531 | if (ElemTy == MVT::i8) { |
6532 | // Compute with: cttz(x) = ctpop(lsb - 1) |
6533 | SDValue One = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
6534 | Operand: DAG.getTargetConstant(Val: 1, DL: dl, VT: ElemTy)); |
6535 | SDValue Bits = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LSB, N2: One); |
6536 | return DAG.getNode(Opcode: ISD::CTPOP, DL: dl, VT, Operand: Bits); |
6537 | } |
6538 | |
6539 | if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) && |
6540 | (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) { |
6541 | // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0 |
6542 | unsigned NumBits = ElemTy.getSizeInBits(); |
6543 | SDValue WidthMinus1 = |
6544 | DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
6545 | Operand: DAG.getTargetConstant(Val: NumBits - 1, DL: dl, VT: ElemTy)); |
6546 | SDValue CTLZ = DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: LSB); |
6547 | return DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: WidthMinus1, N2: CTLZ); |
6548 | } |
6549 | |
6550 | // Compute with: cttz(x) = ctpop(lsb - 1) |
6551 | |
6552 | // Compute LSB - 1. |
6553 | SDValue Bits; |
6554 | if (ElemTy == MVT::i64) { |
6555 | // Load constant 0xffff'ffff'ffff'ffff to register. |
6556 | SDValue FF = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
6557 | Operand: DAG.getTargetConstant(Val: 0x1eff, DL: dl, VT: MVT::i32)); |
6558 | Bits = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: LSB, N2: FF); |
6559 | } else { |
6560 | SDValue One = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
6561 | Operand: DAG.getTargetConstant(Val: 1, DL: dl, VT: ElemTy)); |
6562 | Bits = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LSB, N2: One); |
6563 | } |
6564 | return DAG.getNode(Opcode: ISD::CTPOP, DL: dl, VT, Operand: Bits); |
6565 | } |
6566 | |
6567 | if (!ST->hasV6T2Ops()) |
6568 | return SDValue(); |
6569 | |
6570 | SDValue rbit = DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT, Operand: N->getOperand(Num: 0)); |
6571 | return DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: rbit); |
6572 | } |
6573 | |
6574 | static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, |
6575 | const ARMSubtarget *ST) { |
6576 | EVT VT = N->getValueType(ResNo: 0); |
6577 | SDLoc DL(N); |
6578 | |
6579 | assert(ST->hasNEON() && "Custom ctpop lowering requires NEON." ); |
6580 | assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 || |
6581 | VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) && |
6582 | "Unexpected type for custom ctpop lowering" ); |
6583 | |
6584 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
6585 | EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; |
6586 | SDValue Res = DAG.getBitcast(VT: VT8Bit, V: N->getOperand(Num: 0)); |
6587 | Res = DAG.getNode(Opcode: ISD::CTPOP, DL, VT: VT8Bit, Operand: Res); |
6588 | |
6589 | // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds. |
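// For example, a v4i32 ctpop starts as a v16i8 CTPOP and is then widened
// v16i8 -> v8i16 -> v4i32 by two unsigned pairwise-add-long (vpaddlu) steps.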
6590 | unsigned EltSize = 8; |
6591 | unsigned NumElts = VT.is64BitVector() ? 8 : 16; |
6592 | while (EltSize != VT.getScalarSizeInBits()) { |
6593 | SmallVector<SDValue, 8> Ops; |
6594 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpaddlu, DL, |
6595 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
6596 | Ops.push_back(Elt: Res); |
6597 | |
6598 | EltSize *= 2; |
6599 | NumElts /= 2; |
6600 | MVT WidenVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: EltSize), NumElements: NumElts); |
6601 | Res = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: WidenVT, Ops); |
6602 | } |
6603 | |
6604 | return Res; |
6605 | } |
6606 | |
/// getVShiftImm - Check if this is a valid build_vector for the immediate
6608 | /// operand of a vector shift operation, where all the elements of the |
6609 | /// build_vector must have the same constant integer value. |
6610 | static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { |
6611 | // Ignore bit_converts. |
6612 | while (Op.getOpcode() == ISD::BITCAST) |
6613 | Op = Op.getOperand(i: 0); |
6614 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: Op.getNode()); |
6615 | APInt SplatBits, SplatUndef; |
6616 | unsigned SplatBitSize; |
6617 | bool HasAnyUndefs; |
6618 | if (!BVN || |
6619 | !BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, |
6620 | MinSplatBits: ElementBits) || |
6621 | SplatBitSize > ElementBits) |
6622 | return false; |
6623 | Cnt = SplatBits.getSExtValue(); |
6624 | return true; |
6625 | } |
6626 | |
6627 | /// isVShiftLImm - Check if this is a valid build_vector for the immediate |
6628 | /// operand of a vector shift left operation. That value must be in the range: |
6629 | /// 0 <= Value < ElementBits for a left shift; or |
6630 | /// 0 <= Value <= ElementBits for a long left shift. |
6631 | static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { |
6632 | assert(VT.isVector() && "vector shift count is not a vector type" ); |
6633 | int64_t ElementBits = VT.getScalarSizeInBits(); |
6634 | if (!getVShiftImm(Op, ElementBits, Cnt)) |
6635 | return false; |
6636 | return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits); |
6637 | } |
6638 | |
6639 | /// isVShiftRImm - Check if this is a valid build_vector for the immediate |
6640 | /// operand of a vector shift right operation. For a shift opcode, the value |
6641 | /// is positive, but for an intrinsic the value count must be negative. The |
6642 | /// absolute value must be in the range: |
6643 | /// 1 <= |Value| <= ElementBits for a right shift; or |
6644 | /// 1 <= |Value| <= ElementBits/2 for a narrow right shift. |
6645 | static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, |
6646 | int64_t &Cnt) { |
6647 | assert(VT.isVector() && "vector shift count is not a vector type" ); |
6648 | int64_t ElementBits = VT.getScalarSizeInBits(); |
6649 | if (!getVShiftImm(Op, ElementBits, Cnt)) |
6650 | return false; |
6651 | if (!isIntrinsic) |
6652 | return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits)); |
6653 | if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) { |
6654 | Cnt = -Cnt; |
6655 | return true; |
6656 | } |
6657 | return false; |
6658 | } |
6659 | |
6660 | static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, |
6661 | const ARMSubtarget *ST) { |
6662 | EVT VT = N->getValueType(ResNo: 0); |
6663 | SDLoc dl(N); |
6664 | int64_t Cnt; |
6665 | |
6666 | if (!VT.isVector()) |
6667 | return SDValue(); |
6668 | |
6669 | // We essentially have two forms here. Shift by an immediate and shift by a |
6670 | // vector register (there are also shift by a gpr, but that is just handled |
6671 | // with a tablegen pattern). We cannot easily match shift by an immediate in |
6672 | // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM. |
6673 | // For shifting by a vector, we don't have VSHR, only VSHL (which can be |
6674 | // signed or unsigned, and a negative shift indicates a shift right). |
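// For example, (srl <4 x i32> %x, %y) with a non-immediate amount becomes
// (ARMISD::VSHLu %x, (sub 0, %y)), i.e. an unsigned left shift by the
// negated amount.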
6675 | if (N->getOpcode() == ISD::SHL) { |
6676 | if (isVShiftLImm(Op: N->getOperand(Num: 1), VT, isLong: false, Cnt)) |
6677 | return DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT, N1: N->getOperand(Num: 0), |
6678 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
6679 | return DAG.getNode(Opcode: ARMISD::VSHLu, DL: dl, VT, N1: N->getOperand(Num: 0), |
6680 | N2: N->getOperand(Num: 1)); |
6681 | } |
6682 | |
6683 | assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) && |
6684 | "unexpected vector shift opcode" ); |
6685 | |
6686 | if (isVShiftRImm(Op: N->getOperand(Num: 1), VT, isNarrow: false, isIntrinsic: false, Cnt)) { |
6687 | unsigned VShiftOpc = |
6688 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); |
6689 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), |
6690 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
6691 | } |
6692 | |
6693 | // Other right shifts we don't have operations for (we use a shift left by a |
6694 | // negative number). |
6695 | EVT ShiftVT = N->getOperand(Num: 1).getValueType(); |
6696 | SDValue NegatedCount = DAG.getNode( |
6697 | Opcode: ISD::SUB, DL: dl, VT: ShiftVT, N1: getZeroVector(VT: ShiftVT, DAG, dl), N2: N->getOperand(Num: 1)); |
6698 | unsigned VShiftOpc = |
6699 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu); |
6700 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), N2: NegatedCount); |
6701 | } |
6702 | |
6703 | static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, |
6704 | const ARMSubtarget *ST) { |
6705 | EVT VT = N->getValueType(ResNo: 0); |
6706 | SDLoc dl(N); |
6707 | |
6708 | // We can get here for a node like i32 = ISD::SHL i32, i64 |
6709 | if (VT != MVT::i64) |
6710 | return SDValue(); |
6711 | |
6712 | assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA || |
6713 | N->getOpcode() == ISD::SHL) && |
6714 | "Unknown shift to lower!" ); |
6715 | |
6716 | unsigned ShOpc = N->getOpcode(); |
6717 | if (ST->hasMVEIntegerOps()) { |
6718 | SDValue ShAmt = N->getOperand(Num: 1); |
6719 | unsigned ShPartsOpc = ARMISD::LSLL; |
6720 | ConstantSDNode *Con = dyn_cast<ConstantSDNode>(Val&: ShAmt); |
6721 | |
// If the shift amount is zero, is 32 or more, or (when not a constant) has a
// bitwidth greater than 64, fall back to the default expansion.
6724 | if ((!Con && ShAmt->getValueType(ResNo: 0).getSizeInBits() > 64) || |
6725 | (Con && (Con->getAPIntValue() == 0 || Con->getAPIntValue().uge(RHS: 32)))) |
6726 | return SDValue(); |
6727 | |
6728 | // Extract the lower 32 bits of the shift amount if it's not an i32 |
6729 | if (ShAmt->getValueType(ResNo: 0) != MVT::i32) |
6730 | ShAmt = DAG.getZExtOrTrunc(Op: ShAmt, DL: dl, VT: MVT::i32); |
6731 | |
6732 | if (ShOpc == ISD::SRL) { |
6733 | if (!Con) |
6734 | // There is no t2LSRLr instruction so negate and perform an lsll if the |
6735 | // shift amount is in a register, emulating a right shift. |
6736 | ShAmt = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
6737 | N1: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), N2: ShAmt); |
6738 | else |
6739 | // Else generate an lsrl on the immediate shift amount |
6740 | ShPartsOpc = ARMISD::LSRL; |
6741 | } else if (ShOpc == ISD::SRA) |
6742 | ShPartsOpc = ARMISD::ASRL; |
6743 | |
6744 | // Split Lower/Upper 32 bits of the destination/source |
6745 | SDValue Lo, Hi; |
6746 | std::tie(args&: Lo, args&: Hi) = |
6747 | DAG.SplitScalar(N: N->getOperand(Num: 0), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
6748 | // Generate the shift operation as computed above |
6749 | Lo = DAG.getNode(Opcode: ShPartsOpc, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N1: Lo, N2: Hi, |
6750 | N3: ShAmt); |
6751 | // The upper 32 bits come from the second return value of lsll |
6752 | Hi = SDValue(Lo.getNode(), 1); |
6753 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
6754 | } |
6755 | |
// We only lower SRA and SRL by a constant 1 here; all others use the generic
// lowering.
6757 | if (!isOneConstant(V: N->getOperand(Num: 1)) || N->getOpcode() == ISD::SHL) |
6758 | return SDValue(); |
6759 | |
6760 | // If we are in thumb mode, we don't have RRX. |
6761 | if (ST->isThumb1Only()) |
6762 | return SDValue(); |
6763 | |
6764 | // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. |
6765 | SDValue Lo, Hi; |
6766 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: N->getOperand(Num: 0), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
6767 | |
6768 | // First, build a SRA_GLUE/SRL_GLUE op, which shifts the top part by one and |
6769 | // captures the result into a carry flag. |
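// For example, (i64 srl %x, 1) becomes:
//   Hi' = SRL_GLUE Hi        ; Hi >> 1, shifted-out bit captured in the carry
//   Lo' = RRX Lo             ; Lo >> 1 with the carry rotated into bit 31
// and the pair (Lo', Hi') is repacked with BUILD_PAIR.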
6770 | unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_GLUE:ARMISD::SRA_GLUE; |
6771 | Hi = DAG.getNode(Opcode: Opc, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Glue), N: Hi); |
6772 | |
6773 | // The low part is an ARMISD::RRX operand, which shifts the carry in. |
6774 | Lo = DAG.getNode(Opcode: ARMISD::RRX, DL: dl, VT: MVT::i32, N1: Lo, N2: Hi.getValue(R: 1)); |
6775 | |
6776 | // Merge the pieces into a single i64 value. |
6777 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
6778 | } |
6779 | |
6780 | static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, |
6781 | const ARMSubtarget *ST) { |
6782 | bool Invert = false; |
6783 | bool Swap = false; |
6784 | unsigned Opc = ARMCC::AL; |
6785 | |
6786 | SDValue Op0 = Op.getOperand(i: 0); |
6787 | SDValue Op1 = Op.getOperand(i: 1); |
6788 | SDValue CC = Op.getOperand(i: 2); |
6789 | EVT VT = Op.getValueType(); |
6790 | ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(Val&: CC)->get(); |
6791 | SDLoc dl(Op); |
6792 | |
6793 | EVT CmpVT; |
6794 | if (ST->hasNEON()) |
6795 | CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); |
6796 | else { |
6797 | assert(ST->hasMVEIntegerOps() && |
6798 | "No hardware support for integer vector comparison!" ); |
6799 | |
6800 | if (Op.getValueType().getVectorElementType() != MVT::i1) |
6801 | return SDValue(); |
6802 | |
6803 | // Make sure we expand floating point setcc to scalar if we do not have |
6804 | // mve.fp, so that we can handle them from there. |
6805 | if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps()) |
6806 | return SDValue(); |
6807 | |
6808 | CmpVT = VT; |
6809 | } |
6810 | |
6811 | if (Op0.getValueType().getVectorElementType() == MVT::i64 && |
6812 | (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) { |
6813 | // Special-case integer 64-bit equality comparisons. They aren't legal, |
6814 | // but they can be lowered with a few vector instructions. |
6815 | unsigned CmpElements = CmpVT.getVectorNumElements() * 2; |
6816 | EVT SplitVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i32, NumElements: CmpElements); |
6817 | SDValue CastOp0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: SplitVT, Operand: Op0); |
6818 | SDValue CastOp1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: SplitVT, Operand: Op1); |
6819 | SDValue Cmp = DAG.getNode(Opcode: ISD::SETCC, DL: dl, VT: SplitVT, N1: CastOp0, N2: CastOp1, |
6820 | N3: DAG.getCondCode(Cond: ISD::SETEQ)); |
6821 | SDValue Reversed = DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT: SplitVT, Operand: Cmp); |
6822 | SDValue Merged = DAG.getNode(Opcode: ISD::AND, DL: dl, VT: SplitVT, N1: Cmp, N2: Reversed); |
6823 | Merged = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: Merged); |
6824 | if (SetCCOpcode == ISD::SETNE) |
6825 | Merged = DAG.getNOT(DL: dl, Val: Merged, VT: CmpVT); |
6826 | Merged = DAG.getSExtOrTrunc(Op: Merged, DL: dl, VT); |
6827 | return Merged; |
6828 | } |
6829 | |
6830 | if (CmpVT.getVectorElementType() == MVT::i64) |
6831 | // 64-bit comparisons are not legal in general. |
6832 | return SDValue(); |
6833 | |
6834 | if (Op1.getValueType().isFloatingPoint()) { |
6835 | switch (SetCCOpcode) { |
6836 | default: llvm_unreachable("Illegal FP comparison" ); |
6837 | case ISD::SETUNE: |
6838 | case ISD::SETNE: |
6839 | if (ST->hasMVEFloatOps()) { |
6840 | Opc = ARMCC::NE; break; |
6841 | } else { |
6842 | Invert = true; [[fallthrough]]; |
6843 | } |
6844 | case ISD::SETOEQ: |
6845 | case ISD::SETEQ: Opc = ARMCC::EQ; break; |
6846 | case ISD::SETOLT: |
6847 | case ISD::SETLT: Swap = true; [[fallthrough]]; |
6848 | case ISD::SETOGT: |
6849 | case ISD::SETGT: Opc = ARMCC::GT; break; |
6850 | case ISD::SETOLE: |
6851 | case ISD::SETLE: Swap = true; [[fallthrough]]; |
6852 | case ISD::SETOGE: |
6853 | case ISD::SETGE: Opc = ARMCC::GE; break; |
6854 | case ISD::SETUGE: Swap = true; [[fallthrough]]; |
6855 | case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break; |
6856 | case ISD::SETUGT: Swap = true; [[fallthrough]]; |
6857 | case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break; |
6858 | case ISD::SETUEQ: Invert = true; [[fallthrough]]; |
6859 | case ISD::SETONE: { |
6860 | // Expand this to (OLT | OGT). |
6861 | SDValue TmpOp0 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op1, N2: Op0, |
6862 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
6863 | SDValue TmpOp1 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
6864 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
6865 | SDValue Result = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: CmpVT, N1: TmpOp0, N2: TmpOp1); |
6866 | if (Invert) |
6867 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
6868 | return Result; |
6869 | } |
6870 | case ISD::SETUO: Invert = true; [[fallthrough]]; |
6871 | case ISD::SETO: { |
6872 | // Expand this to (OLT | OGE). |
6873 | SDValue TmpOp0 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op1, N2: Op0, |
6874 | N3: DAG.getConstant(Val: ARMCC::GT, DL: dl, VT: MVT::i32)); |
6875 | SDValue TmpOp1 = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
6876 | N3: DAG.getConstant(Val: ARMCC::GE, DL: dl, VT: MVT::i32)); |
6877 | SDValue Result = DAG.getNode(Opcode: ISD::OR, DL: dl, VT: CmpVT, N1: TmpOp0, N2: TmpOp1); |
6878 | if (Invert) |
6879 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
6880 | return Result; |
6881 | } |
6882 | } |
6883 | } else { |
6884 | // Integer comparisons. |
6885 | switch (SetCCOpcode) { |
6886 | default: llvm_unreachable("Illegal integer comparison" ); |
6887 | case ISD::SETNE: |
6888 | if (ST->hasMVEIntegerOps()) { |
6889 | Opc = ARMCC::NE; break; |
6890 | } else { |
6891 | Invert = true; [[fallthrough]]; |
6892 | } |
6893 | case ISD::SETEQ: Opc = ARMCC::EQ; break; |
6894 | case ISD::SETLT: Swap = true; [[fallthrough]]; |
6895 | case ISD::SETGT: Opc = ARMCC::GT; break; |
6896 | case ISD::SETLE: Swap = true; [[fallthrough]]; |
6897 | case ISD::SETGE: Opc = ARMCC::GE; break; |
6898 | case ISD::SETULT: Swap = true; [[fallthrough]]; |
6899 | case ISD::SETUGT: Opc = ARMCC::HI; break; |
6900 | case ISD::SETULE: Swap = true; [[fallthrough]]; |
6901 | case ISD::SETUGE: Opc = ARMCC::HS; break; |
6902 | } |
6903 | |
6904 | // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). |
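    // e.g. (icmp eq (and x, y), 0) becomes VMVN(VTST(x, y)), while the
    // not-equal form (which reaches here with Invert set) becomes a bare VTST.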
6905 | if (ST->hasNEON() && Opc == ARMCC::EQ) { |
6906 | SDValue AndOp; |
6907 | if (ISD::isBuildVectorAllZeros(N: Op1.getNode())) |
6908 | AndOp = Op0; |
6909 | else if (ISD::isBuildVectorAllZeros(N: Op0.getNode())) |
6910 | AndOp = Op1; |
6911 | |
6912 | // Ignore bitconvert. |
6913 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) |
6914 | AndOp = AndOp.getOperand(i: 0); |
6915 | |
6916 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { |
6917 | Op0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: AndOp.getOperand(i: 0)); |
6918 | Op1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: CmpVT, Operand: AndOp.getOperand(i: 1)); |
6919 | SDValue Result = DAG.getNode(Opcode: ARMISD::VTST, DL: dl, VT: CmpVT, N1: Op0, N2: Op1); |
6920 | if (!Invert) |
6921 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
6922 | return Result; |
6923 | } |
6924 | } |
6925 | } |
6926 | |
6927 | if (Swap) |
6928 | std::swap(a&: Op0, b&: Op1); |
6929 | |
6930 | // If one of the operands is a constant vector zero, attempt to fold the |
6931 | // comparison to a specialized compare-against-zero form. |
6932 | if (ISD::isBuildVectorAllZeros(N: Op0.getNode()) && |
6933 | (Opc == ARMCC::GE || Opc == ARMCC::GT || Opc == ARMCC::EQ || |
6934 | Opc == ARMCC::NE)) { |
6935 | if (Opc == ARMCC::GE) |
6936 | Opc = ARMCC::LE; |
6937 | else if (Opc == ARMCC::GT) |
6938 | Opc = ARMCC::LT; |
6939 | std::swap(a&: Op0, b&: Op1); |
6940 | } |
6941 | |
6942 | SDValue Result; |
6943 | if (ISD::isBuildVectorAllZeros(N: Op1.getNode()) && |
6944 | (Opc == ARMCC::GE || Opc == ARMCC::GT || Opc == ARMCC::LE || |
6945 | Opc == ARMCC::LT || Opc == ARMCC::NE || Opc == ARMCC::EQ)) |
6946 | Result = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: CmpVT, N1: Op0, |
6947 | N2: DAG.getConstant(Val: Opc, DL: dl, VT: MVT::i32)); |
6948 | else |
6949 | Result = DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT: CmpVT, N1: Op0, N2: Op1, |
6950 | N3: DAG.getConstant(Val: Opc, DL: dl, VT: MVT::i32)); |
6951 | |
6952 | Result = DAG.getSExtOrTrunc(Op: Result, DL: dl, VT); |
6953 | |
6954 | if (Invert) |
6955 | Result = DAG.getNOT(DL: dl, Val: Result, VT); |
6956 | |
6957 | return Result; |
6958 | } |
6959 | |
6960 | static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) { |
6961 | SDValue LHS = Op.getOperand(i: 0); |
6962 | SDValue RHS = Op.getOperand(i: 1); |
6963 | SDValue Carry = Op.getOperand(i: 2); |
6964 | SDValue Cond = Op.getOperand(i: 3); |
6965 | SDLoc DL(Op); |
6966 | |
  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
6968 | |
  // ARMISD::SUBE expects a carry, whereas ISD::USUBO_CARRY provides a borrow,
  // so we have to invert the carry first.
6971 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
6972 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
6973 | // This converts the boolean value carry into the carry flag. |
6974 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
6975 | |
6976 | SDVTList VTs = DAG.getVTList(VT1: LHS.getValueType(), VT2: MVT::i32); |
6977 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::SUBE, DL, VTList: VTs, N1: LHS, N2: RHS, N3: Carry); |
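  // The SUBE subtracts RHS and the incoming borrow from LHS and sets the
  // flags; copy them into CPSR and use a CMOV to select 1 or 0 according to
  // the requested condition.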
6978 | |
6979 | SDValue FVal = DAG.getConstant(Val: 0, DL, VT: MVT::i32); |
6980 | SDValue TVal = DAG.getConstant(Val: 1, DL, VT: MVT::i32); |
6981 | SDValue ARMcc = DAG.getConstant( |
6982 | Val: IntCCToARMCC(CC: cast<CondCodeSDNode>(Val&: Cond)->get()), DL, VT: MVT::i32); |
6983 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
6984 | SDValue Chain = DAG.getCopyToReg(Chain: DAG.getEntryNode(), dl: DL, Reg: ARM::CPSR, |
6985 | N: Cmp.getValue(R: 1), Glue: SDValue()); |
6986 | return DAG.getNode(Opcode: ARMISD::CMOV, DL, VT: Op.getValueType(), N1: FVal, N2: TVal, N3: ARMcc, |
6987 | N4: CCR, N5: Chain.getValue(R: 1)); |
6988 | } |
6989 | |
6990 | /// isVMOVModifiedImm - Check if the specified splat value corresponds to a |
6991 | /// valid vector constant for a NEON or MVE instruction with a "modified |
6992 | /// immediate" operand (e.g., VMOV). If so, return the encoded value. |
6993 | static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, |
6994 | unsigned SplatBitSize, SelectionDAG &DAG, |
6995 | const SDLoc &dl, EVT &VT, EVT VectorVT, |
6996 | VMOVModImmType type) { |
6997 | unsigned OpCmode, Imm; |
6998 | bool is128Bits = VectorVT.is128BitVector(); |
6999 | |
7000 | // SplatBitSize is set to the smallest size that splats the vector, so a |
7001 | // zero vector will always have SplatBitSize == 8. However, NEON modified |
  // immediate instructions other than VMOV do not support the 8-bit encoding
7003 | // of a zero vector, and the default encoding of zero is supposed to be the |
7004 | // 32-bit version. |
7005 | if (SplatBits == 0) |
7006 | SplatBitSize = 32; |
7007 | |
7008 | switch (SplatBitSize) { |
7009 | case 8: |
7010 | if (type != VMOVModImm) |
7011 | return SDValue(); |
7012 | // Any 1-byte value is OK. Op=0, Cmode=1110. |
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
7014 | OpCmode = 0xe; |
7015 | Imm = SplatBits; |
7016 | VT = is128Bits ? MVT::v16i8 : MVT::v8i8; |
7017 | break; |
7018 | |
7019 | case 16: |
7020 | // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. |
7021 | VT = is128Bits ? MVT::v8i16 : MVT::v4i16; |
7022 | if ((SplatBits & ~0xff) == 0) { |
7023 | // Value = 0x00nn: Op=x, Cmode=100x. |
7024 | OpCmode = 0x8; |
7025 | Imm = SplatBits; |
7026 | break; |
7027 | } |
7028 | if ((SplatBits & ~0xff00) == 0) { |
7029 | // Value = 0xnn00: Op=x, Cmode=101x. |
7030 | OpCmode = 0xa; |
7031 | Imm = SplatBits >> 8; |
7032 | break; |
7033 | } |
7034 | return SDValue(); |
7035 | |
7036 | case 32: |
7037 | // NEON's 32-bit VMOV supports splat values where: |
7038 | // * only one byte is nonzero, or |
7039 | // * the least significant byte is 0xff and the second byte is nonzero, or |
7040 | // * the least significant 2 bytes are 0xff and the third is nonzero. |
7041 | VT = is128Bits ? MVT::v4i32 : MVT::v2i32; |
7042 | if ((SplatBits & ~0xff) == 0) { |
7043 | // Value = 0x000000nn: Op=x, Cmode=000x. |
7044 | OpCmode = 0; |
7045 | Imm = SplatBits; |
7046 | break; |
7047 | } |
7048 | if ((SplatBits & ~0xff00) == 0) { |
7049 | // Value = 0x0000nn00: Op=x, Cmode=001x. |
7050 | OpCmode = 0x2; |
7051 | Imm = SplatBits >> 8; |
7052 | break; |
7053 | } |
7054 | if ((SplatBits & ~0xff0000) == 0) { |
7055 | // Value = 0x00nn0000: Op=x, Cmode=010x. |
7056 | OpCmode = 0x4; |
7057 | Imm = SplatBits >> 16; |
7058 | break; |
7059 | } |
7060 | if ((SplatBits & ~0xff000000) == 0) { |
7061 | // Value = 0xnn000000: Op=x, Cmode=011x. |
7062 | OpCmode = 0x6; |
7063 | Imm = SplatBits >> 24; |
7064 | break; |
7065 | } |
7066 | |
7067 | // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC |
7068 | if (type == OtherModImm) return SDValue(); |
7069 | |
7070 | if ((SplatBits & ~0xffff) == 0 && |
7071 | ((SplatBits | SplatUndef) & 0xff) == 0xff) { |
7072 | // Value = 0x0000nnff: Op=x, Cmode=1100. |
7073 | OpCmode = 0xc; |
7074 | Imm = SplatBits >> 8; |
7075 | break; |
7076 | } |
7077 | |
7078 | // cmode == 0b1101 is not supported for MVE VMVN |
7079 | if (type == MVEVMVNModImm) |
7080 | return SDValue(); |
7081 | |
7082 | if ((SplatBits & ~0xffffff) == 0 && |
7083 | ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { |
7084 | // Value = 0x00nnffff: Op=x, Cmode=1101. |
7085 | OpCmode = 0xd; |
7086 | Imm = SplatBits >> 16; |
7087 | break; |
7088 | } |
7089 | |
7090 | // Note: there are a few 32-bit splat values (specifically: 00ffff00, |
7091 | // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not |
7092 | // VMOV.I32. A (very) minor optimization would be to replicate the value |
7093 | // and fall through here to test for a valid 64-bit splat. But, then the |
7094 | // caller would also need to check and handle the change in size. |
7095 | return SDValue(); |
7096 | |
7097 | case 64: { |
7098 | if (type != VMOVModImm) |
7099 | return SDValue(); |
7100 | // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. |
7101 | uint64_t BitMask = 0xff; |
7102 | unsigned ImmMask = 1; |
7103 | Imm = 0; |
7104 | for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { |
7105 | if (((SplatBits | SplatUndef) & BitMask) == BitMask) { |
7106 | Imm |= ImmMask; |
7107 | } else if ((SplatBits & BitMask) != 0) { |
7108 | return SDValue(); |
7109 | } |
7110 | BitMask <<= 8; |
7111 | ImmMask <<= 1; |
7112 | } |
7113 | |
7114 | if (DAG.getDataLayout().isBigEndian()) { |
7115 | // Reverse the order of elements within the vector. |
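      // e.g. with 32-bit elements the two 4-bit halves of Imm are swapped,
      // and with 8-bit elements the 8 bits of Imm are fully reversed.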
7116 | unsigned BytesPerElem = VectorVT.getScalarSizeInBits() / 8; |
7117 | unsigned Mask = (1 << BytesPerElem) - 1; |
7118 | unsigned NumElems = 8 / BytesPerElem; |
7119 | unsigned NewImm = 0; |
7120 | for (unsigned ElemNum = 0; ElemNum < NumElems; ++ElemNum) { |
7121 | unsigned Elem = ((Imm >> ElemNum * BytesPerElem) & Mask); |
7122 | NewImm |= Elem << (NumElems - ElemNum - 1) * BytesPerElem; |
7123 | } |
7124 | Imm = NewImm; |
7125 | } |
7126 | |
7127 | // Op=1, Cmode=1110. |
7128 | OpCmode = 0x1e; |
7129 | VT = is128Bits ? MVT::v2i64 : MVT::v1i64; |
7130 | break; |
7131 | } |
7132 | |
7133 | default: |
7134 | llvm_unreachable("unexpected size for isVMOVModifiedImm" ); |
7135 | } |
7136 | |
7137 | unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Val: Imm); |
7138 | return DAG.getTargetConstant(Val: EncodedVal, DL: dl, VT: MVT::i32); |
7139 | } |
7140 | |
7141 | SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, |
7142 | const ARMSubtarget *ST) const { |
7143 | EVT VT = Op.getValueType(); |
7144 | bool IsDouble = (VT == MVT::f64); |
7145 | ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Val&: Op); |
7146 | const APFloat &FPVal = CFP->getValueAPF(); |
7147 | |
7148 | // Prevent floating-point constants from using literal loads |
7149 | // when execute-only is enabled. |
7150 | if (ST->genExecuteOnly()) { |
7151 | // We shouldn't trigger this for v6m execute-only |
7152 | assert((!ST->isThumb1Only() || ST->hasV8MBaselineOps()) && |
7153 | "Unexpected architecture" ); |
7154 | |
7155 | // If we can represent the constant as an immediate, don't lower it |
7156 | if (isFPImmLegal(Imm: FPVal, VT)) |
7157 | return Op; |
7158 | // Otherwise, construct as integer, and move to float register |
7159 | APInt INTVal = FPVal.bitcastToAPInt(); |
7160 | SDLoc DL(CFP); |
7161 | switch (VT.getSimpleVT().SimpleTy) { |
7162 | default: |
7163 | llvm_unreachable("Unknown floating point type!" ); |
7164 | break; |
7165 | case MVT::f64: { |
7166 | SDValue Lo = DAG.getConstant(Val: INTVal.trunc(width: 32), DL, VT: MVT::i32); |
7167 | SDValue Hi = DAG.getConstant(Val: INTVal.lshr(shiftAmt: 32).trunc(width: 32), DL, VT: MVT::i32); |
7168 | return DAG.getNode(Opcode: ARMISD::VMOVDRR, DL, VT: MVT::f64, N1: Lo, N2: Hi); |
7169 | } |
7170 | case MVT::f32: |
7171 | return DAG.getNode(Opcode: ARMISD::VMOVSR, DL, VT, |
7172 | Operand: DAG.getConstant(Val: INTVal, DL, VT: MVT::i32)); |
7173 | } |
7174 | } |
7175 | |
7176 | if (!ST->hasVFP3Base()) |
7177 | return SDValue(); |
7178 | |
7179 | // Use the default (constant pool) lowering for double constants when we have |
7180 | // an SP-only FPU |
7181 | if (IsDouble && !Subtarget->hasFP64()) |
7182 | return SDValue(); |
7183 | |
7184 | // Try splatting with a VMOV.f32... |
7185 | int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPImm: FPVal) : ARM_AM::getFP32Imm(FPImm: FPVal); |
7186 | |
7187 | if (ImmVal != -1) { |
7188 | if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { |
7189 | // We have code in place to select a valid ConstantFP already, no need to |
7190 | // do any mangling. |
7191 | return Op; |
7192 | } |
7193 | |
7194 | // It's a float and we are trying to use NEON operations where |
7195 | // possible. Lower it to a splat followed by an extract. |
7196 | SDLoc DL(Op); |
7197 | SDValue NewVal = DAG.getTargetConstant(Val: ImmVal, DL, VT: MVT::i32); |
7198 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMOVFPIMM, DL, VT: MVT::v2f32, |
7199 | Operand: NewVal); |
7200 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecConstant, |
7201 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
7202 | } |
7203 | |
  // The rest of our options are NEON only; make sure that's allowed before
  // proceeding.
7206 | if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) |
7207 | return SDValue(); |
7208 | |
7209 | EVT VMovVT; |
7210 | uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); |
7211 | |
7212 | // It wouldn't really be worth bothering for doubles except for one very |
  // important value, which does happen to match: 0.0. So bail out unless the
  // two 32-bit halves of the double are identical.
7215 | if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) |
7216 | return SDValue(); |
7217 | |
7218 | // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). |
7219 | SDValue NewVal = isVMOVModifiedImm(SplatBits: iVal & 0xffffffffU, SplatUndef: 0, SplatBitSize: 32, DAG, dl: SDLoc(Op), |
7220 | VT&: VMovVT, VectorVT: VT, type: VMOVModImm); |
7221 | if (NewVal != SDValue()) { |
7222 | SDLoc DL(Op); |
7223 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL, VT: VMovVT, |
7224 | Operand: NewVal); |
7225 | if (IsDouble) |
7226 | return DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::f64, Operand: VecConstant); |
7227 | |
7228 | // It's a float: cast and extract a vector element. |
7229 | SDValue VecFConstant = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::v2f32, |
7230 | Operand: VecConstant); |
7231 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecFConstant, |
7232 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
7233 | } |
7234 | |
7235 | // Finally, try a VMVN.i32 |
7236 | NewVal = isVMOVModifiedImm(SplatBits: ~iVal & 0xffffffffU, SplatUndef: 0, SplatBitSize: 32, DAG, dl: SDLoc(Op), VT&: VMovVT, |
7237 | VectorVT: VT, type: VMVNModImm); |
7238 | if (NewVal != SDValue()) { |
7239 | SDLoc DL(Op); |
7240 | SDValue VecConstant = DAG.getNode(Opcode: ARMISD::VMVNIMM, DL, VT: VMovVT, Operand: NewVal); |
7241 | |
7242 | if (IsDouble) |
7243 | return DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::f64, Operand: VecConstant); |
7244 | |
7245 | // It's a float: cast and extract a vector element. |
7246 | SDValue VecFConstant = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: MVT::v2f32, |
7247 | Operand: VecConstant); |
7248 | return DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::f32, N1: VecFConstant, |
7249 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
7250 | } |
7251 | |
7252 | return SDValue(); |
7253 | } |
7254 | |
// Check whether a VEXT instruction can handle the shuffle mask when the
// vector sources of the shuffle are the same.
7257 | static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { |
7258 | unsigned NumElts = VT.getVectorNumElements(); |
7259 | |
7260 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
7261 | if (M[0] < 0) |
7262 | return false; |
7263 | |
7264 | Imm = M[0]; |
7265 | |
7266 | // If this is a VEXT shuffle, the immediate value is the index of the first |
7267 | // element. The other shuffle indices must be the successive elements after |
7268 | // the first one. |
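  // e.g. for a single-source v8i8 VEXT, <3, 4, 5, 6, 7, 0, 1, 2> matches with
  // Imm = 3.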
7269 | unsigned ExpectedElt = Imm; |
7270 | for (unsigned i = 1; i < NumElts; ++i) { |
7271 | // Increment the expected index. If it wraps around, just follow it |
7272 | // back to index zero and keep going. |
7273 | ++ExpectedElt; |
7274 | if (ExpectedElt == NumElts) |
7275 | ExpectedElt = 0; |
7276 | |
7277 | if (M[i] < 0) continue; // ignore UNDEF indices |
7278 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
7279 | return false; |
7280 | } |
7281 | |
7282 | return true; |
7283 | } |
7284 | |
7285 | static bool isVEXTMask(ArrayRef<int> M, EVT VT, |
7286 | bool &ReverseVEXT, unsigned &Imm) { |
7287 | unsigned NumElts = VT.getVectorNumElements(); |
7288 | ReverseVEXT = false; |
7289 | |
7290 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
7291 | if (M[0] < 0) |
7292 | return false; |
7293 | |
7294 | Imm = M[0]; |
7295 | |
7296 | // If this is a VEXT shuffle, the immediate value is the index of the first |
7297 | // element. The other shuffle indices must be the successive elements after |
7298 | // the first one. |
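  // e.g. for two v8i8 sources, <7, 8, 9, 10, 11, 12, 13, 14> matches with
  // Imm = 7, while <13, 14, 15, 0, 1, 2, 3, 4> wraps around, so ReverseVEXT is
  // set and Imm is rebased to 5 (13 - NumElts) against the swapped sources.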
7299 | unsigned ExpectedElt = Imm; |
7300 | for (unsigned i = 1; i < NumElts; ++i) { |
7301 | // Increment the expected index. If it wraps around, it may still be |
7302 | // a VEXT but the source vectors must be swapped. |
7303 | ExpectedElt += 1; |
7304 | if (ExpectedElt == NumElts * 2) { |
7305 | ExpectedElt = 0; |
7306 | ReverseVEXT = true; |
7307 | } |
7308 | |
7309 | if (M[i] < 0) continue; // ignore UNDEF indices |
7310 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
7311 | return false; |
7312 | } |
7313 | |
7314 | // Adjust the index value if the source operands will be swapped. |
7315 | if (ReverseVEXT) |
7316 | Imm -= NumElts; |
7317 | |
7318 | return true; |
7319 | } |
7320 | |
7321 | static bool isVTBLMask(ArrayRef<int> M, EVT VT) { |
7322 | // We can handle <8 x i8> vector shuffles. If the index in the mask is out of |
7323 | // range, then 0 is placed into the resulting vector. So pretty much any mask |
7324 | // of 8 elements can work here. |
7325 | return VT == MVT::v8i8 && M.size() == 8; |
7326 | } |
7327 | |
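// Returns which of the two results (0 or 1) the mask elements starting at
// Index describe: for a mask twice as long as the input vectors this is simply
// the half that Index falls in; otherwise it is inferred from the mask element
// at Index itself.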
7328 | static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask, |
7329 | unsigned Index) { |
7330 | if (Mask.size() == Elements * 2) |
7331 | return Index / Elements; |
7332 | return Mask[Index] == 0 ? 0 : 1; |
7333 | } |
7334 | |
7335 | // Checks whether the shuffle mask represents a vector transpose (VTRN) by |
7336 | // checking that pairs of elements in the shuffle mask represent the same index |
7337 | // in each vector, incrementing the expected index by 2 at each step. |
7338 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] |
7339 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} |
7340 | // v2={e,f,g,h} |
7341 | // WhichResult gives the offset for each element in the mask based on which |
7342 | // of the two results it belongs to. |
7343 | // |
7344 | // The transpose can be represented either as: |
7345 | // result1 = shufflevector v1, v2, result1_shuffle_mask |
7346 | // result2 = shufflevector v1, v2, result2_shuffle_mask |
7347 | // where v1/v2 and the shuffle masks have the same number of elements |
7348 | // (here WhichResult (see below) indicates which result is being checked) |
7349 | // |
7350 | // or as: |
7351 | // results = shufflevector v1, v2, shuffle_mask |
7352 | // where both results are returned in one vector and the shuffle mask has twice |
// as many elements as v1/v2 (in this form WhichResult will always be 0 if the
// mask matches); here we check the low half and the high half of the shuffle
// mask as if each were a mask of the first form.
7356 | static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
7357 | unsigned EltSz = VT.getScalarSizeInBits(); |
7358 | if (EltSz == 64) |
7359 | return false; |
7360 | |
7361 | unsigned NumElts = VT.getVectorNumElements(); |
7362 | if (M.size() != NumElts && M.size() != NumElts*2) |
7363 | return false; |
7364 | |
7365 | // If the mask is twice as long as the input vector then we need to check the |
7366 | // upper and lower parts of the mask with a matching value for WhichResult |
7367 | // FIXME: A mask with only even values will be rejected in case the first |
7368 | // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only |
7369 | // M[0] is used to determine WhichResult |
7370 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
7371 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
7372 | for (unsigned j = 0; j < NumElts; j += 2) { |
7373 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
7374 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) |
7375 | return false; |
7376 | } |
7377 | } |
7378 | |
7379 | if (M.size() == NumElts*2) |
7380 | WhichResult = 0; |
7381 | |
7382 | return true; |
7383 | } |
7384 | |
7385 | /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of |
7386 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
7387 | /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. |
7388 | static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
7389 | unsigned EltSz = VT.getScalarSizeInBits(); |
7390 | if (EltSz == 64) |
7391 | return false; |
7392 | |
7393 | unsigned NumElts = VT.getVectorNumElements(); |
7394 | if (M.size() != NumElts && M.size() != NumElts*2) |
7395 | return false; |
7396 | |
7397 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
7398 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
7399 | for (unsigned j = 0; j < NumElts; j += 2) { |
7400 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
7401 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) |
7402 | return false; |
7403 | } |
7404 | } |
7405 | |
7406 | if (M.size() == NumElts*2) |
7407 | WhichResult = 0; |
7408 | |
7409 | return true; |
7410 | } |
7411 | |
7412 | // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking |
7413 | // that the mask elements are either all even and in steps of size 2 or all odd |
7414 | // and in steps of size 2. |
7415 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] |
7416 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} |
7417 | // v2={e,f,g,h} |
// Requires similar checks to those of isVTRNMask with
// respect to how results are returned.
7420 | static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
7421 | unsigned EltSz = VT.getScalarSizeInBits(); |
7422 | if (EltSz == 64) |
7423 | return false; |
7424 | |
7425 | unsigned NumElts = VT.getVectorNumElements(); |
7426 | if (M.size() != NumElts && M.size() != NumElts*2) |
7427 | return false; |
7428 | |
7429 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
7430 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
7431 | for (unsigned j = 0; j < NumElts; ++j) { |
7432 | if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) |
7433 | return false; |
7434 | } |
7435 | } |
7436 | |
7437 | if (M.size() == NumElts*2) |
7438 | WhichResult = 0; |
7439 | |
7440 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
7441 | if (VT.is64BitVector() && EltSz == 32) |
7442 | return false; |
7443 | |
7444 | return true; |
7445 | } |
7446 | |
7447 | /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of |
7448 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
7450 | static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
7451 | unsigned EltSz = VT.getScalarSizeInBits(); |
7452 | if (EltSz == 64) |
7453 | return false; |
7454 | |
7455 | unsigned NumElts = VT.getVectorNumElements(); |
7456 | if (M.size() != NumElts && M.size() != NumElts*2) |
7457 | return false; |
7458 | |
7459 | unsigned Half = NumElts / 2; |
7460 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
7461 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
7462 | for (unsigned j = 0; j < NumElts; j += Half) { |
7463 | unsigned Idx = WhichResult; |
7464 | for (unsigned k = 0; k < Half; ++k) { |
7465 | int MIdx = M[i + j + k]; |
7466 | if (MIdx >= 0 && (unsigned) MIdx != Idx) |
7467 | return false; |
7468 | Idx += 2; |
7469 | } |
7470 | } |
7471 | } |
7472 | |
7473 | if (M.size() == NumElts*2) |
7474 | WhichResult = 0; |
7475 | |
7476 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
7477 | if (VT.is64BitVector() && EltSz == 32) |
7478 | return false; |
7479 | |
7480 | return true; |
7481 | } |
7482 | |
7483 | // Checks whether the shuffle mask represents a vector zip (VZIP) by checking |
7484 | // that pairs of elements of the shufflemask represent the same index in each |
7485 | // vector incrementing sequentially through the vectors. |
7486 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] |
7487 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} |
7488 | // v2={e,f,g,h} |
// Requires similar checks to those of isVTRNMask with respect to how results
// are returned.
7491 | static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
7492 | unsigned EltSz = VT.getScalarSizeInBits(); |
7493 | if (EltSz == 64) |
7494 | return false; |
7495 | |
7496 | unsigned NumElts = VT.getVectorNumElements(); |
7497 | if (M.size() != NumElts && M.size() != NumElts*2) |
7498 | return false; |
7499 | |
7500 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
7501 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
7502 | unsigned Idx = WhichResult * NumElts / 2; |
7503 | for (unsigned j = 0; j < NumElts; j += 2) { |
7504 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
7505 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) |
7506 | return false; |
7507 | Idx += 1; |
7508 | } |
7509 | } |
7510 | |
7511 | if (M.size() == NumElts*2) |
7512 | WhichResult = 0; |
7513 | |
7514 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
7515 | if (VT.is64BitVector() && EltSz == 32) |
7516 | return false; |
7517 | |
7518 | return true; |
7519 | } |
7520 | |
7521 | /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of |
7522 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
7523 | /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. |
7524 | static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
7525 | unsigned EltSz = VT.getScalarSizeInBits(); |
7526 | if (EltSz == 64) |
7527 | return false; |
7528 | |
7529 | unsigned NumElts = VT.getVectorNumElements(); |
7530 | if (M.size() != NumElts && M.size() != NumElts*2) |
7531 | return false; |
7532 | |
7533 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
7534 | WhichResult = SelectPairHalf(Elements: NumElts, Mask: M, Index: i); |
7535 | unsigned Idx = WhichResult * NumElts / 2; |
7536 | for (unsigned j = 0; j < NumElts; j += 2) { |
7537 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
7538 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) |
7539 | return false; |
7540 | Idx += 1; |
7541 | } |
7542 | } |
7543 | |
7544 | if (M.size() == NumElts*2) |
7545 | WhichResult = 0; |
7546 | |
7547 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
7548 | if (VT.is64BitVector() && EltSz == 32) |
7549 | return false; |
7550 | |
7551 | return true; |
7552 | } |
7553 | |
7554 | /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), |
7555 | /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. |
7556 | static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, |
7557 | unsigned &WhichResult, |
7558 | bool &isV_UNDEF) { |
7559 | isV_UNDEF = false; |
7560 | if (isVTRNMask(M: ShuffleMask, VT, WhichResult)) |
7561 | return ARMISD::VTRN; |
7562 | if (isVUZPMask(M: ShuffleMask, VT, WhichResult)) |
7563 | return ARMISD::VUZP; |
7564 | if (isVZIPMask(M: ShuffleMask, VT, WhichResult)) |
7565 | return ARMISD::VZIP; |
7566 | |
7567 | isV_UNDEF = true; |
7568 | if (isVTRN_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
7569 | return ARMISD::VTRN; |
7570 | if (isVUZP_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
7571 | return ARMISD::VUZP; |
7572 | if (isVZIP_v_undef_Mask(M: ShuffleMask, VT, WhichResult)) |
7573 | return ARMISD::VZIP; |
7574 | |
7575 | return 0; |
7576 | } |
7577 | |
/// \return true if this is a reverse operation on a vector.
7579 | static bool isReverseMask(ArrayRef<int> M, EVT VT) { |
7580 | unsigned NumElts = VT.getVectorNumElements(); |
7581 | // Make sure the mask has the right size. |
7582 | if (NumElts != M.size()) |
7583 | return false; |
7584 | |
7585 | // Look for <15, ..., 3, -1, 1, 0>. |
7586 | for (unsigned i = 0; i != NumElts; ++i) |
7587 | if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) |
7588 | return false; |
7589 | |
7590 | return true; |
7591 | } |
7592 | |
7593 | static bool isTruncMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { |
7594 | unsigned NumElts = VT.getVectorNumElements(); |
7595 | // Make sure the mask has the right size. |
7596 | if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8)) |
7597 | return false; |
7598 | |
7599 | // Half-width truncation patterns (e.g. v4i32 -> v8i16): |
7600 | // !Top && SingleSource: <0, 2, 4, 6, 0, 2, 4, 6> |
7601 | // !Top && !SingleSource: <0, 2, 4, 6, 8, 10, 12, 14> |
7602 | // Top && SingleSource: <1, 3, 5, 7, 1, 3, 5, 7> |
7603 | // Top && !SingleSource: <1, 3, 5, 7, 9, 11, 13, 15> |
7604 | int Ofs = Top ? 1 : 0; |
7605 | int Upper = SingleSource ? 0 : NumElts; |
7606 | for (int i = 0, e = NumElts / 2; i != e; ++i) { |
7607 | if (M[i] >= 0 && M[i] != (i * 2) + Ofs) |
7608 | return false; |
7609 | if (M[i + e] >= 0 && M[i + e] != (i * 2) + Ofs + Upper) |
7610 | return false; |
7611 | } |
7612 | return true; |
7613 | } |
7614 | |
7615 | static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { |
7616 | unsigned NumElts = VT.getVectorNumElements(); |
7617 | // Make sure the mask has the right size. |
7618 | if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8)) |
7619 | return false; |
7620 | |
7621 | // If Top |
7622 | // Look for <0, N, 2, N+2, 4, N+4, ..>. |
7623 | // This inserts Input2 into Input1 |
7624 | // else if not Top |
7625 | // Look for <0, N+1, 2, N+3, 4, N+5, ..> |
7626 | // This inserts Input1 into Input2 |
7627 | unsigned Offset = Top ? 0 : 1; |
7628 | unsigned N = SingleSource ? 0 : NumElts; |
7629 | for (unsigned i = 0; i < NumElts; i += 2) { |
7630 | if (M[i] >= 0 && M[i] != (int)i) |
7631 | return false; |
7632 | if (M[i + 1] >= 0 && M[i + 1] != (int)(N + i + Offset)) |
7633 | return false; |
7634 | } |
7635 | |
7636 | return true; |
7637 | } |
7638 | |
7639 | static bool isVMOVNTruncMask(ArrayRef<int> M, EVT ToVT, bool rev) { |
7640 | unsigned NumElts = ToVT.getVectorNumElements(); |
7641 | if (NumElts != M.size()) |
7642 | return false; |
7643 | |
  // Test if the Trunc can be converted to a VMOVN with this shuffle. We are
7645 | // looking for patterns of: |
7646 | // !rev: 0 N/2 1 N/2+1 2 N/2+2 ... |
7647 | // rev: N/2 0 N/2+1 1 N/2+2 2 ... |
7648 | |
7649 | unsigned Off0 = rev ? NumElts / 2 : 0; |
7650 | unsigned Off1 = rev ? 0 : NumElts / 2; |
7651 | for (unsigned i = 0; i < NumElts; i += 2) { |
7652 | if (M[i] >= 0 && M[i] != (int)(Off0 + i / 2)) |
7653 | return false; |
7654 | if (M[i + 1] >= 0 && M[i + 1] != (int)(Off1 + i / 2)) |
7655 | return false; |
7656 | } |
7657 | |
7658 | return true; |
7659 | } |
7660 | |
7661 | // Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted |
7662 | // from a pair of inputs. For example: |
7663 | // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0), |
7664 | // FP_ROUND(EXTRACT_ELT(Y, 0), |
7665 | // FP_ROUND(EXTRACT_ELT(X, 1), |
7666 | // FP_ROUND(EXTRACT_ELT(Y, 1), ...) |
7667 | static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG, |
7668 | const ARMSubtarget *ST) { |
  assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
7670 | if (!ST->hasMVEFloatOps()) |
7671 | return SDValue(); |
7672 | |
7673 | SDLoc dl(BV); |
7674 | EVT VT = BV.getValueType(); |
7675 | if (VT != MVT::v8f16) |
7676 | return SDValue(); |
7677 | |
  // We are looking for a buildvector of fptrunc elements, where the elements
  // are extracted alternately from two sources. Check that the first two items
  // look plausible and extract some info from them (they are checked properly
  // in the loop below).
7682 | if (BV.getOperand(i: 0).getOpcode() != ISD::FP_ROUND || |
7683 | BV.getOperand(i: 0).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
7684 | BV.getOperand(i: 0).getOperand(i: 0).getConstantOperandVal(i: 1) != 0) |
7685 | return SDValue(); |
7686 | if (BV.getOperand(i: 1).getOpcode() != ISD::FP_ROUND || |
7687 | BV.getOperand(i: 1).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
7688 | BV.getOperand(i: 1).getOperand(i: 0).getConstantOperandVal(i: 1) != 0) |
7689 | return SDValue(); |
7690 | SDValue Op0 = BV.getOperand(i: 0).getOperand(i: 0).getOperand(i: 0); |
7691 | SDValue Op1 = BV.getOperand(i: 1).getOperand(i: 0).getOperand(i: 0); |
7692 | if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32) |
7693 | return SDValue(); |
7694 | |
7695 | // Check all the values in the BuildVector line up with our expectations. |
7696 | for (unsigned i = 1; i < 4; i++) { |
7697 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { |
7698 | return Trunc.getOpcode() == ISD::FP_ROUND && |
7699 | Trunc.getOperand(i: 0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
7700 | Trunc.getOperand(i: 0).getOperand(i: 0) == Op && |
7701 | Trunc.getOperand(i: 0).getConstantOperandVal(i: 1) == Idx; |
7702 | }; |
7703 | if (!Check(BV.getOperand(i: i * 2 + 0), Op0, i)) |
7704 | return SDValue(); |
7705 | if (!Check(BV.getOperand(i: i * 2 + 1), Op1, i)) |
7706 | return SDValue(); |
7707 | } |
7708 | |
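  // Convert Op0 into the even (bottom) f16 lanes first, then convert Op1 into
  // the odd (top) f16 lanes of the same register.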
7709 | SDValue N1 = DAG.getNode(Opcode: ARMISD::VCVTN, DL: dl, VT, N1: DAG.getUNDEF(VT), N2: Op0, |
7710 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
7711 | return DAG.getNode(Opcode: ARMISD::VCVTN, DL: dl, VT, N1, N2: Op1, |
7712 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
7713 | } |
7714 | |
7715 | // Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted |
7716 | // from a single input on alternating lanes. For example: |
7717 | // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0), |
7718 | // FP_ROUND(EXTRACT_ELT(X, 2), |
7719 | // FP_ROUND(EXTRACT_ELT(X, 4), ...) |
7720 | static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG, |
7721 | const ARMSubtarget *ST) { |
  assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
7723 | if (!ST->hasMVEFloatOps()) |
7724 | return SDValue(); |
7725 | |
7726 | SDLoc dl(BV); |
7727 | EVT VT = BV.getValueType(); |
7728 | if (VT != MVT::v4f32) |
7729 | return SDValue(); |
7730 | |
  // We are looking for a buildvector of fpext elements, where all the
7732 | // elements are alternating lanes from a single source. For example <0,2,4,6> |
7733 | // or <1,3,5,7>. Check the first two items are valid enough and extract some |
7734 | // info from them (they are checked properly in the loop below). |
7735 | if (BV.getOperand(i: 0).getOpcode() != ISD::FP_EXTEND || |
7736 | BV.getOperand(i: 0).getOperand(i: 0).getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
7737 | return SDValue(); |
7738 | SDValue Op0 = BV.getOperand(i: 0).getOperand(i: 0).getOperand(i: 0); |
7739 | int Offset = BV.getOperand(i: 0).getOperand(i: 0).getConstantOperandVal(i: 1); |
7740 | if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1)) |
7741 | return SDValue(); |
7742 | |
7743 | // Check all the values in the BuildVector line up with our expectations. |
7744 | for (unsigned i = 1; i < 4; i++) { |
7745 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { |
7746 | return Trunc.getOpcode() == ISD::FP_EXTEND && |
7747 | Trunc.getOperand(i: 0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
7748 | Trunc.getOperand(i: 0).getOperand(i: 0) == Op && |
7749 | Trunc.getOperand(i: 0).getConstantOperandVal(i: 1) == Idx; |
7750 | }; |
7751 | if (!Check(BV.getOperand(i), Op0, 2 * i + Offset)) |
7752 | return SDValue(); |
7753 | } |
7754 | |
7755 | return DAG.getNode(Opcode: ARMISD::VCVTL, DL: dl, VT, N1: Op0, |
7756 | N2: DAG.getConstant(Val: Offset, DL: dl, VT: MVT::i32)); |
7757 | } |
7758 | |
7759 | // If N is an integer constant that can be moved into a register in one |
7760 | // instruction, return an SDValue of such a constant (will become a MOV |
7761 | // instruction). Otherwise return null. |
7762 | static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, |
7763 | const ARMSubtarget *ST, const SDLoc &dl) { |
7764 | uint64_t Val; |
7765 | if (!isa<ConstantSDNode>(Val: N)) |
7766 | return SDValue(); |
7767 | Val = N->getAsZExtVal(); |
7768 | |
7769 | if (ST->isThumb1Only()) { |
7770 | if (Val <= 255 || ~Val <= 255) |
7771 | return DAG.getConstant(Val, DL: dl, VT: MVT::i32); |
7772 | } else { |
7773 | if (ARM_AM::getSOImmVal(Arg: Val) != -1 || ARM_AM::getSOImmVal(Arg: ~Val) != -1) |
7774 | return DAG.getConstant(Val, DL: dl, VT: MVT::i32); |
7775 | } |
7776 | return SDValue(); |
7777 | } |
7778 | |
7779 | static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG, |
7780 | const ARMSubtarget *ST) { |
7781 | SDLoc dl(Op); |
7782 | EVT VT = Op.getValueType(); |
7783 | |
  assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!");
7785 | |
7786 | unsigned NumElts = VT.getVectorNumElements(); |
7787 | unsigned BoolMask; |
7788 | unsigned BitsPerBool; |
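  // An MVE predicate register is 16 bits wide, so each boolean lane is
  // represented by 16 / NumElts identical bits (e.g. 4 bits per lane for v4i1).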
7789 | if (NumElts == 2) { |
7790 | BitsPerBool = 8; |
7791 | BoolMask = 0xff; |
7792 | } else if (NumElts == 4) { |
7793 | BitsPerBool = 4; |
7794 | BoolMask = 0xf; |
7795 | } else if (NumElts == 8) { |
7796 | BitsPerBool = 2; |
7797 | BoolMask = 0x3; |
7798 | } else if (NumElts == 16) { |
7799 | BitsPerBool = 1; |
7800 | BoolMask = 0x1; |
7801 | } else |
7802 | return SDValue(); |
7803 | |
7804 | // If this is a single value copied into all lanes (a splat), we can just sign |
7805 | // extend that single value |
7806 | SDValue FirstOp = Op.getOperand(i: 0); |
7807 | if (!isa<ConstantSDNode>(Val: FirstOp) && |
7808 | llvm::all_of(Range: llvm::drop_begin(RangeOrContainer: Op->ops()), P: [&FirstOp](const SDUse &U) { |
7809 | return U.get().isUndef() || U.get() == FirstOp; |
7810 | })) { |
7811 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL: dl, VT: MVT::i32, N1: FirstOp, |
7812 | N2: DAG.getValueType(MVT::i1)); |
7813 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: Op.getValueType(), Operand: Ext); |
7814 | } |
7815 | |
7816 | // First create base with bits set where known |
7817 | unsigned Bits32 = 0; |
7818 | for (unsigned i = 0; i < NumElts; ++i) { |
7819 | SDValue V = Op.getOperand(i); |
7820 | if (!isa<ConstantSDNode>(Val: V) && !V.isUndef()) |
7821 | continue; |
7822 | bool BitSet = V.isUndef() ? false : V->getAsZExtVal(); |
7823 | if (BitSet) |
7824 | Bits32 |= BoolMask << (i * BitsPerBool); |
7825 | } |
7826 | |
7827 | // Add in unknown nodes |
7828 | SDValue Base = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, |
7829 | Operand: DAG.getConstant(Val: Bits32, DL: dl, VT: MVT::i32)); |
7830 | for (unsigned i = 0; i < NumElts; ++i) { |
7831 | SDValue V = Op.getOperand(i); |
7832 | if (isa<ConstantSDNode>(Val: V) || V.isUndef()) |
7833 | continue; |
7834 | Base = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: Base, N2: V, |
7835 | N3: DAG.getConstant(Val: i, DL: dl, VT: MVT::i32)); |
7836 | } |
7837 | |
7838 | return Base; |
7839 | } |
7840 | |
7841 | static SDValue LowerBUILD_VECTORToVIDUP(SDValue Op, SelectionDAG &DAG, |
7842 | const ARMSubtarget *ST) { |
7843 | if (!ST->hasMVEIntegerOps()) |
7844 | return SDValue(); |
7845 | |
7846 | // We are looking for a buildvector where each element is Op[0] + i*N |
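  // e.g. <X, X+2, X+4, X+6> becomes VIDUP(X, 2); only steps of 1, 2, 4 or 8
  // are supported.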
7847 | EVT VT = Op.getValueType(); |
7848 | SDValue Op0 = Op.getOperand(i: 0); |
7849 | unsigned NumElts = VT.getVectorNumElements(); |
7850 | |
7851 | // Get the increment value from operand 1 |
7852 | SDValue Op1 = Op.getOperand(i: 1); |
7853 | if (Op1.getOpcode() != ISD::ADD || Op1.getOperand(i: 0) != Op0 || |
7854 | !isa<ConstantSDNode>(Val: Op1.getOperand(i: 1))) |
7855 | return SDValue(); |
7856 | unsigned N = Op1.getConstantOperandVal(i: 1); |
7857 | if (N != 1 && N != 2 && N != 4 && N != 8) |
7858 | return SDValue(); |
7859 | |
7860 | // Check that each other operand matches |
7861 | for (unsigned I = 2; I < NumElts; I++) { |
7862 | SDValue OpI = Op.getOperand(i: I); |
7863 | if (OpI.getOpcode() != ISD::ADD || OpI.getOperand(i: 0) != Op0 || |
7864 | !isa<ConstantSDNode>(Val: OpI.getOperand(i: 1)) || |
7865 | OpI.getConstantOperandVal(i: 1) != I * N) |
7866 | return SDValue(); |
7867 | } |
7868 | |
7869 | SDLoc DL(Op); |
7870 | return DAG.getNode(Opcode: ARMISD::VIDUP, DL, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: Op0, |
7871 | N2: DAG.getConstant(Val: N, DL, VT: MVT::i32)); |
7872 | } |
7873 | |
// Returns true if the operation N can be treated as a qr instruction variant
// at operand Op.
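// Commutative operations accept the scalar in either position, but for the
// subtraction-like operations only the second operand may be the scalar, hence
// the per-opcode operand checks below.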
7876 | static bool IsQRMVEInstruction(const SDNode *N, const SDNode *Op) { |
7877 | switch (N->getOpcode()) { |
7878 | case ISD::ADD: |
7879 | case ISD::MUL: |
7880 | case ISD::SADDSAT: |
7881 | case ISD::UADDSAT: |
7882 | return true; |
7883 | case ISD::SUB: |
7884 | case ISD::SSUBSAT: |
7885 | case ISD::USUBSAT: |
7886 | return N->getOperand(Num: 1).getNode() == Op; |
7887 | case ISD::INTRINSIC_WO_CHAIN: |
7888 | switch (N->getConstantOperandVal(Num: 0)) { |
7889 | case Intrinsic::arm_mve_add_predicated: |
7890 | case Intrinsic::arm_mve_mul_predicated: |
7891 | case Intrinsic::arm_mve_qadd_predicated: |
7892 | case Intrinsic::arm_mve_vhadd: |
7893 | case Intrinsic::arm_mve_hadd_predicated: |
7894 | case Intrinsic::arm_mve_vqdmulh: |
7895 | case Intrinsic::arm_mve_qdmulh_predicated: |
7896 | case Intrinsic::arm_mve_vqrdmulh: |
7897 | case Intrinsic::arm_mve_qrdmulh_predicated: |
7898 | case Intrinsic::arm_mve_vqdmull: |
7899 | case Intrinsic::arm_mve_vqdmull_predicated: |
7900 | return true; |
7901 | case Intrinsic::arm_mve_sub_predicated: |
7902 | case Intrinsic::arm_mve_qsub_predicated: |
7903 | case Intrinsic::arm_mve_vhsub: |
7904 | case Intrinsic::arm_mve_hsub_predicated: |
7905 | return N->getOperand(Num: 2).getNode() == Op; |
7906 | default: |
7907 | return false; |
7908 | } |
7909 | default: |
7910 | return false; |
7911 | } |
7912 | } |
7913 | |
7914 | // If this is a case we can't handle, return null and let the default |
7915 | // expansion code take care of it. |
7916 | SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, |
7917 | const ARMSubtarget *ST) const { |
7918 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Val: Op.getNode()); |
7919 | SDLoc dl(Op); |
7920 | EVT VT = Op.getValueType(); |
7921 | |
7922 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
7923 | return LowerBUILD_VECTOR_i1(Op, DAG, ST); |
7924 | |
7925 | if (SDValue R = LowerBUILD_VECTORToVIDUP(Op, DAG, ST)) |
7926 | return R; |
7927 | |
7928 | APInt SplatBits, SplatUndef; |
7929 | unsigned SplatBitSize; |
7930 | bool HasAnyUndefs; |
7931 | if (BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
7932 | if (SplatUndef.isAllOnes()) |
7933 | return DAG.getUNDEF(VT); |
7934 | |
7935 | // If all the users of this constant splat are qr instruction variants, |
7936 | // generate a vdup of the constant. |
7937 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == SplatBitSize && |
7938 | (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32) && |
7939 | all_of(Range: BVN->uses(), |
7940 | P: [BVN](const SDNode *U) { return IsQRMVEInstruction(N: U, Op: BVN); })) { |
7941 | EVT DupVT = SplatBitSize == 32 ? MVT::v4i32 |
7942 | : SplatBitSize == 16 ? MVT::v8i16 |
7943 | : MVT::v16i8; |
7944 | SDValue Const = DAG.getConstant(Val: SplatBits.getZExtValue(), DL: dl, VT: MVT::i32); |
7945 | SDValue VDup = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: DupVT, Operand: Const); |
7946 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: VDup); |
7947 | } |
7948 | |
7949 | if ((ST->hasNEON() && SplatBitSize <= 64) || |
7950 | (ST->hasMVEIntegerOps() && SplatBitSize <= 64)) { |
7951 | // Check if an immediate VMOV works. |
7952 | EVT VmovVT; |
7953 | SDValue Val = |
7954 | isVMOVModifiedImm(SplatBits: SplatBits.getZExtValue(), SplatUndef: SplatUndef.getZExtValue(), |
7955 | SplatBitSize, DAG, dl, VT&: VmovVT, VectorVT: VT, type: VMOVModImm); |
7956 | |
7957 | if (Val.getNode()) { |
7958 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: VmovVT, Operand: Val); |
7959 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vmov); |
7960 | } |
7961 | |
7962 | // Try an immediate VMVN. |
7963 | uint64_t NegatedImm = (~SplatBits).getZExtValue(); |
7964 | Val = isVMOVModifiedImm( |
7965 | SplatBits: NegatedImm, SplatUndef: SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VT&: VmovVT, |
7966 | VectorVT: VT, type: ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm); |
7967 | if (Val.getNode()) { |
7968 | SDValue Vmov = DAG.getNode(Opcode: ARMISD::VMVNIMM, DL: dl, VT: VmovVT, Operand: Val); |
7969 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vmov); |
7970 | } |
7971 | |
7972 | // Use vmov.f32 to materialize other v2f32 and v4f32 splats. |
7973 | if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { |
7974 | int ImmVal = ARM_AM::getFP32Imm(Imm: SplatBits); |
7975 | if (ImmVal != -1) { |
7976 | SDValue Val = DAG.getTargetConstant(Val: ImmVal, DL: dl, VT: MVT::i32); |
7977 | return DAG.getNode(Opcode: ARMISD::VMOVFPIMM, DL: dl, VT, Operand: Val); |
7978 | } |
7979 | } |
7980 | |
7981 | // If we are under MVE, generate a VDUP(constant), bitcast to the original |
7982 | // type. |
7983 | if (ST->hasMVEIntegerOps() && |
7984 | (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32)) { |
7985 | EVT DupVT = SplatBitSize == 32 ? MVT::v4i32 |
7986 | : SplatBitSize == 16 ? MVT::v8i16 |
7987 | : MVT::v16i8; |
7988 | SDValue Const = DAG.getConstant(Val: SplatBits.getZExtValue(), DL: dl, VT: MVT::i32); |
7989 | SDValue VDup = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: DupVT, Operand: Const); |
7990 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: VDup); |
7991 | } |
7992 | } |
7993 | } |
7994 | |
7995 | // Scan through the operands to see if only one value is used. |
7996 | // |
7997 | // As an optimisation, even if more than one value is used it may be more |
  // profitable to splat with one value and then change some lanes.
7999 | // |
8000 | // Heuristically we decide to do this if the vector has a "dominant" value, |
8001 | // defined as splatted to more than half of the lanes. |
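  // e.g. a non-constant <X, X, X, Y> is lowered as a VDUP of X followed by an
  // INSERT_VECTOR_ELT of Y into the remaining lane.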
8002 | unsigned NumElts = VT.getVectorNumElements(); |
8003 | bool isOnlyLowElement = true; |
8004 | bool usesOnlyOneValue = true; |
8005 | bool hasDominantValue = false; |
8006 | bool isConstant = true; |
8007 | |
8008 | // Map of the number of times a particular SDValue appears in the |
8009 | // element list. |
8010 | DenseMap<SDValue, unsigned> ValueCounts; |
8011 | SDValue Value; |
8012 | for (unsigned i = 0; i < NumElts; ++i) { |
8013 | SDValue V = Op.getOperand(i); |
8014 | if (V.isUndef()) |
8015 | continue; |
8016 | if (i > 0) |
8017 | isOnlyLowElement = false; |
8018 | if (!isa<ConstantFPSDNode>(Val: V) && !isa<ConstantSDNode>(Val: V)) |
8019 | isConstant = false; |
8020 | |
8021 | ValueCounts.insert(KV: std::make_pair(x&: V, y: 0)); |
8022 | unsigned &Count = ValueCounts[V]; |
8023 | |
8024 | // Is this value dominant? (takes up more than half of the lanes) |
8025 | if (++Count > (NumElts / 2)) { |
8026 | hasDominantValue = true; |
8027 | Value = V; |
8028 | } |
8029 | } |
8030 | if (ValueCounts.size() != 1) |
8031 | usesOnlyOneValue = false; |
8032 | if (!Value.getNode() && !ValueCounts.empty()) |
8033 | Value = ValueCounts.begin()->first; |
8034 | |
8035 | if (ValueCounts.empty()) |
8036 | return DAG.getUNDEF(VT); |
8037 | |
8038 | // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. |
8039 | // Keep going if we are hitting this case. |
8040 | if (isOnlyLowElement && !ISD::isNormalLoad(N: Value.getNode())) |
8041 | return DAG.getNode(Opcode: ISD::SCALAR_TO_VECTOR, DL: dl, VT, Operand: Value); |
8042 | |
8043 | unsigned EltSize = VT.getScalarSizeInBits(); |
8044 | |
8045 | // Use VDUP for non-constant splats. For f32 constant splats, reduce to |
8046 | // i32 and try again. |
8047 | if (hasDominantValue && EltSize <= 32) { |
8048 | if (!isConstant) { |
8049 | SDValue N; |
8050 | |
8051 | // If we are VDUPing a value that comes directly from a vector, that will |
8052 | // cause an unnecessary move to and from a GPR, where instead we could |
8053 | // just use VDUPLANE. We can only do this if the lane being extracted |
8054 | // is at a constant index, as the VDUP from lane instructions only have |
8055 | // constant-index forms. |
8056 | ConstantSDNode *constIndex; |
8057 | if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
8058 | (constIndex = dyn_cast<ConstantSDNode>(Val: Value->getOperand(Num: 1)))) { |
8059 | // We need to create a new undef vector to use for the VDUPLANE if the |
8060 | // size of the vector from which we get the value is different than the |
8061 | // size of the vector that we need to create. We will insert the element |
8062 | // such that the register coalescer will remove unnecessary copies. |
8063 | if (VT != Value->getOperand(Num: 0).getValueType()) { |
8064 | unsigned index = constIndex->getAPIntValue().getLimitedValue() % |
8065 | VT.getVectorNumElements(); |
8066 | N = DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
8067 | N1: DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: DAG.getUNDEF(VT), |
8068 | N2: Value, N3: DAG.getConstant(Val: index, DL: dl, VT: MVT::i32)), |
8069 | N2: DAG.getConstant(Val: index, DL: dl, VT: MVT::i32)); |
8070 | } else |
8071 | N = DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
8072 | N1: Value->getOperand(Num: 0), N2: Value->getOperand(Num: 1)); |
8073 | } else |
8074 | N = DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: Value); |
8075 | |
8076 | if (!usesOnlyOneValue) { |
8077 | // The dominant value was splatted as 'N', but we now have to insert |
8078 | // all differing elements. |
8079 | for (unsigned I = 0; I < NumElts; ++I) { |
8080 | if (Op.getOperand(i: I) == Value) |
8081 | continue; |
8082 | SmallVector<SDValue, 3> Ops; |
8083 | Ops.push_back(Elt: N); |
8084 | Ops.push_back(Elt: Op.getOperand(i: I)); |
8085 | Ops.push_back(Elt: DAG.getConstant(Val: I, DL: dl, VT: MVT::i32)); |
8086 | N = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, Ops); |
8087 | } |
8088 | } |
8089 | return N; |
8090 | } |
8091 | if (VT.getVectorElementType().isFloatingPoint()) { |
8092 | SmallVector<SDValue, 8> Ops; |
8093 | MVT FVT = VT.getVectorElementType().getSimpleVT(); |
8094 | assert(FVT == MVT::f32 || FVT == MVT::f16); |
8095 | MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16; |
8096 | for (unsigned i = 0; i < NumElts; ++i) |
8097 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IVT, |
8098 | Operand: Op.getOperand(i))); |
8099 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: IVT, NumElements: NumElts); |
8100 | SDValue Val = DAG.getBuildVector(VT: VecVT, DL: dl, Ops); |
8101 | Val = LowerBUILD_VECTOR(Op: Val, DAG, ST); |
8102 | if (Val.getNode()) |
8103 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
8104 | } |
8105 | if (usesOnlyOneValue) { |
8106 | SDValue Val = IsSingleInstrConstant(N: Value, DAG, ST, dl); |
8107 | if (isConstant && Val.getNode()) |
8108 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: Val); |
8109 | } |
8110 | } |
8111 | |
8112 | // If all elements are constants and the case above didn't get hit, fall back |
8113 | // to the default expansion, which will generate a load from the constant |
8114 | // pool. |
8115 | if (isConstant) |
8116 | return SDValue(); |
8117 | |
8118 | // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and |
8119 | // vmovn). Empirical tests suggest this is rarely worth it for vectors of |
8120 | // length <= 2. |
8121 | if (NumElts >= 4) |
8122 | if (SDValue shuffle = ReconstructShuffle(Op, DAG)) |
8123 | return shuffle; |
8124 | |
8125 | // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into |
8126 | // VCVT's |
8127 | if (SDValue VCVT = LowerBuildVectorOfFPTrunc(BV: Op, DAG, ST: Subtarget)) |
8128 | return VCVT; |
8129 | if (SDValue VCVT = LowerBuildVectorOfFPExt(BV: Op, DAG, ST: Subtarget)) |
8130 | return VCVT; |
8131 | |
  if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 &&
      VT != MVT::v4f32) {
8133 | // If we haven't found an efficient lowering, try splitting a 128-bit vector |
8134 | // into two 64-bit vectors; we might discover a better way to lower it. |
8135 | SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts); |
8136 | EVT ExtVT = VT.getVectorElementType(); |
8137 | EVT HVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: ExtVT, NumElements: NumElts / 2); |
8138 | SDValue Lower = DAG.getBuildVector(VT: HVT, DL: dl, Ops: ArrayRef(&Ops[0], NumElts / 2)); |
8139 | if (Lower.getOpcode() == ISD::BUILD_VECTOR) |
8140 | Lower = LowerBUILD_VECTOR(Op: Lower, DAG, ST); |
8141 | SDValue Upper = |
8142 | DAG.getBuildVector(VT: HVT, DL: dl, Ops: ArrayRef(&Ops[NumElts / 2], NumElts / 2)); |
8143 | if (Upper.getOpcode() == ISD::BUILD_VECTOR) |
8144 | Upper = LowerBUILD_VECTOR(Op: Upper, DAG, ST); |
8145 | if (Lower && Upper) |
8146 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT, N1: Lower, N2: Upper); |
8147 | } |
8148 | |
8149 | // Vectors with 32- or 64-bit elements can be built by directly assigning |
8150 | // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands |
8151 | // will be legalized. |
8152 | if (EltSize >= 32) { |
8153 | // Do the expansion with floating-point types, since that is what the VFP |
8154 | // registers are defined to use, and since i64 is not legal. |
8155 | EVT EltVT = EVT::getFloatingPointVT(BitWidth: EltSize); |
8156 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumElts); |
8157 | SmallVector<SDValue, 8> Ops; |
8158 | for (unsigned i = 0; i < NumElts; ++i) |
8159 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: EltVT, Operand: Op.getOperand(i))); |
8160 | SDValue Val = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: VecVT, Ops); |
8161 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
8162 | } |
8163 | |
8164 | // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we |
8165 | // know the default expansion would otherwise fall back on something even |
8166 | // worse. For a vector with one or two non-undef values, that's |
8167 | // scalar_to_vector for the elements followed by a shuffle (provided the |
8168 | // shuffle is valid for the target) and materialization element by element |
8169 | // on the stack followed by a load for everything else. |
8170 | if (!isConstant && !usesOnlyOneValue) { |
8171 | SDValue Vec = DAG.getUNDEF(VT); |
8172 | for (unsigned i = 0 ; i < NumElts; ++i) { |
8173 | SDValue V = Op.getOperand(i); |
8174 | if (V.isUndef()) |
8175 | continue; |
8176 | SDValue LaneIdx = DAG.getConstant(Val: i, DL: dl, VT: MVT::i32); |
8177 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: Vec, N2: V, N3: LaneIdx); |
8178 | } |
8179 | return Vec; |
8180 | } |
8181 | |
8182 | return SDValue(); |
8183 | } |
8184 | |
8185 | // Gather data to see if the operation can be modelled as a |
8186 | // shuffle in combination with VEXTs. |
8187 | SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, |
8188 | SelectionDAG &DAG) const { |
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
8190 | SDLoc dl(Op); |
8191 | EVT VT = Op.getValueType(); |
8192 | unsigned NumElts = VT.getVectorNumElements(); |
8193 | |
8194 | struct ShuffleSourceInfo { |
8195 | SDValue Vec; |
8196 | unsigned MinElt = std::numeric_limits<unsigned>::max(); |
8197 | unsigned MaxElt = 0; |
8198 | |
8199 | // We may insert some combination of BITCASTs and VEXT nodes to force Vec to |
8200 | // be compatible with the shuffle we intend to construct. As a result |
8201 | // ShuffleVec will be some sliding window into the original Vec. |
8202 | SDValue ShuffleVec; |
8203 | |
    // Code should guarantee that element i in Vec starts at element
    // "WindowBase + i * WindowScale" in ShuffleVec.
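    // For example (illustrative): if Vec has i32 elements but the shuffle is
    // built with i16 lanes, WindowScale is 2 and element i of Vec starts at
    // lane WindowBase + 2 * i of ShuffleVec.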
8206 | int WindowBase = 0; |
8207 | int WindowScale = 1; |
8208 | |
8209 | ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {} |
8210 | |
8211 | bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } |
8212 | }; |
8213 | |
8214 | // First gather all vectors used as an immediate source for this BUILD_VECTOR |
8215 | // node. |
8216 | SmallVector<ShuffleSourceInfo, 2> Sources; |
8217 | for (unsigned i = 0; i < NumElts; ++i) { |
8218 | SDValue V = Op.getOperand(i); |
8219 | if (V.isUndef()) |
8220 | continue; |
8221 | else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { |
8222 | // A shuffle can only come from building a vector from various |
8223 | // elements of other vectors. |
8224 | return SDValue(); |
8225 | } else if (!isa<ConstantSDNode>(Val: V.getOperand(i: 1))) { |
8226 | // Furthermore, shuffles require a constant mask, whereas extractelts |
8227 | // accept variable indices. |
8228 | return SDValue(); |
8229 | } |
8230 | |
8231 | // Add this element source to the list if it's not already there. |
8232 | SDValue SourceVec = V.getOperand(i: 0); |
8233 | auto Source = llvm::find(Range&: Sources, Val: SourceVec); |
8234 | if (Source == Sources.end()) |
8235 | Source = Sources.insert(I: Sources.end(), Elt: ShuffleSourceInfo(SourceVec)); |
8236 | |
8237 | // Update the minimum and maximum lane number seen. |
8238 | unsigned EltNo = V.getConstantOperandVal(i: 1); |
8239 | Source->MinElt = std::min(a: Source->MinElt, b: EltNo); |
8240 | Source->MaxElt = std::max(a: Source->MaxElt, b: EltNo); |
8241 | } |
8242 | |
8243 | // Currently only do something sane when at most two source vectors |
8244 | // are involved. |
8245 | if (Sources.size() > 2) |
8246 | return SDValue(); |
8247 | |
8248 | // Find out the smallest element size among result and two sources, and use |
8249 | // it as element size to build the shuffle_vector. |
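  // For example (illustrative): a v4i32 BUILD_VECTOR fed by extracts from
  // v8i16 sources ends up with SmallestEltTy == i16, ResMultiplier == 2 and
  // ShuffleVT == v8i16.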
8250 | EVT SmallestEltTy = VT.getVectorElementType(); |
8251 | for (auto &Source : Sources) { |
8252 | EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); |
8253 | if (SrcEltTy.bitsLT(VT: SmallestEltTy)) |
8254 | SmallestEltTy = SrcEltTy; |
8255 | } |
8256 | unsigned ResMultiplier = |
8257 | VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits(); |
8258 | NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
8259 | EVT ShuffleVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: SmallestEltTy, NumElements: NumElts); |
8260 | |
8261 | // If the source vector is too wide or too narrow, we may nevertheless be able |
8262 | // to construct a compatible shuffle either by concatenating it with UNDEF or |
8263 | // extracting a suitable range of elements. |
8264 | for (auto &Src : Sources) { |
8265 | EVT SrcVT = Src.ShuffleVec.getValueType(); |
8266 | |
8267 | uint64_t SrcVTSize = SrcVT.getFixedSizeInBits(); |
8268 | uint64_t VTSize = VT.getFixedSizeInBits(); |
8269 | if (SrcVTSize == VTSize) |
8270 | continue; |
8271 | |
8272 | // This stage of the search produces a source with the same element type as |
8273 | // the original, but with a total width matching the BUILD_VECTOR output. |
8274 | EVT EltVT = SrcVT.getVectorElementType(); |
8275 | unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits(); |
8276 | EVT DestVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumSrcElts); |
8277 | |
8278 | if (SrcVTSize < VTSize) { |
8279 | if (2 * SrcVTSize != VTSize) |
8280 | return SDValue(); |
8281 | // We can pad out the smaller vector for free, so if it's part of a |
8282 | // shuffle... |
8283 | Src.ShuffleVec = |
8284 | DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
8285 | N2: DAG.getUNDEF(VT: Src.ShuffleVec.getValueType())); |
8286 | continue; |
8287 | } |
8288 | |
8289 | if (SrcVTSize != 2 * VTSize) |
8290 | return SDValue(); |
8291 | |
8292 | if (Src.MaxElt - Src.MinElt >= NumSrcElts) { |
8293 | // Span too large for a VEXT to cope |
8294 | return SDValue(); |
8295 | } |
8296 | |
8297 | if (Src.MinElt >= NumSrcElts) { |
8298 | // The extraction can just take the second half |
8299 | Src.ShuffleVec = |
8300 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
8301 | N2: DAG.getConstant(Val: NumSrcElts, DL: dl, VT: MVT::i32)); |
8302 | Src.WindowBase = -NumSrcElts; |
8303 | } else if (Src.MaxElt < NumSrcElts) { |
8304 | // The extraction can just take the first half |
8305 | Src.ShuffleVec = |
8306 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
8307 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
8308 | } else { |
8309 | // An actual VEXT is needed |
8310 | SDValue VEXTSrc1 = |
8311 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
8312 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
8313 | SDValue VEXTSrc2 = |
8314 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: DestVT, N1: Src.ShuffleVec, |
8315 | N2: DAG.getConstant(Val: NumSrcElts, DL: dl, VT: MVT::i32)); |
8316 | |
8317 | Src.ShuffleVec = DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT: DestVT, N1: VEXTSrc1, |
8318 | N2: VEXTSrc2, |
8319 | N3: DAG.getConstant(Val: Src.MinElt, DL: dl, VT: MVT::i32)); |
8320 | Src.WindowBase = -Src.MinElt; |
8321 | } |
8322 | } |
8323 | |
8324 | // Another possible incompatibility occurs from the vector element types. We |
8325 | // can fix this by bitcasting the source vectors to the same type we intend |
8326 | // for the shuffle. |
8327 | for (auto &Src : Sources) { |
8328 | EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); |
8329 | if (SrcEltTy == SmallestEltTy) |
8330 | continue; |
8331 | assert(ShuffleVT.getVectorElementType() == SmallestEltTy); |
8332 | Src.ShuffleVec = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: ShuffleVT, Operand: Src.ShuffleVec); |
8333 | Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
8334 | Src.WindowBase *= Src.WindowScale; |
8335 | } |
8336 | |
8337 | // Final check before we try to actually produce a shuffle. |
  LLVM_DEBUG(for (auto Src : Sources)
                 assert(Src.ShuffleVec.getValueType() == ShuffleVT););
8341 | |
8342 | // The stars all align, our next step is to produce the mask for the shuffle. |
8343 | SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); |
8344 | int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); |
8345 | for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { |
8346 | SDValue Entry = Op.getOperand(i); |
8347 | if (Entry.isUndef()) |
8348 | continue; |
8349 | |
8350 | auto Src = llvm::find(Range&: Sources, Val: Entry.getOperand(i: 0)); |
8351 | int EltNo = cast<ConstantSDNode>(Val: Entry.getOperand(i: 1))->getSExtValue(); |
8352 | |
8353 | // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit |
8354 | // trunc. So only std::min(SrcBits, DestBits) actually get defined in this |
8355 | // segment. |
8356 | EVT OrigEltTy = Entry.getOperand(i: 0).getValueType().getVectorElementType(); |
8357 | int BitsDefined = std::min(a: OrigEltTy.getScalarSizeInBits(), |
8358 | b: VT.getScalarSizeInBits()); |
8359 | int LanesDefined = BitsDefined / BitsPerShuffleLane; |
8360 | |
8361 | // This source is expected to fill ResMultiplier lanes of the final shuffle, |
8362 | // starting at the appropriate offset. |
8363 | int *LaneMask = &Mask[i * ResMultiplier]; |
8364 | |
    int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
8366 | ExtractBase += NumElts * (Src - Sources.begin()); |
8367 | for (int j = 0; j < LanesDefined; ++j) |
8368 | LaneMask[j] = ExtractBase + j; |
8369 | } |
8370 | |
8371 | |
8372 | // We can't handle more than two sources. This should have already |
8373 | // been checked before this point. |
8374 | assert(Sources.size() <= 2 && "Too many sources!" ); |
8375 | |
8376 | SDValue ShuffleOps[] = { DAG.getUNDEF(VT: ShuffleVT), DAG.getUNDEF(VT: ShuffleVT) }; |
8377 | for (unsigned i = 0; i < Sources.size(); ++i) |
8378 | ShuffleOps[i] = Sources[i].ShuffleVec; |
8379 | |
8380 | SDValue Shuffle = buildLegalVectorShuffle(VT: ShuffleVT, DL: dl, N0: ShuffleOps[0], |
8381 | N1: ShuffleOps[1], Mask, DAG); |
8382 | if (!Shuffle) |
8383 | return SDValue(); |
8384 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Shuffle); |
8385 | } |
8386 | |
8387 | enum ShuffleOpCodes { |
8388 | OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> |
8389 | OP_VREV, |
8390 | OP_VDUP0, |
8391 | OP_VDUP1, |
8392 | OP_VDUP2, |
8393 | OP_VDUP3, |
8394 | OP_VEXT1, |
8395 | OP_VEXT2, |
8396 | OP_VEXT3, |
8397 | OP_VUZPL, // VUZP, left result |
8398 | OP_VUZPR, // VUZP, right result |
8399 | OP_VZIPL, // VZIP, left result |
8400 | OP_VZIPR, // VZIP, right result |
8401 | OP_VTRNL, // VTRN, left result |
8402 | OP_VTRNR // VTRN, right result |
8403 | }; |
8404 | |
8405 | static bool isLegalMVEShuffleOp(unsigned PFEntry) { |
8406 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
8407 | switch (OpNum) { |
8408 | case OP_COPY: |
8409 | case OP_VREV: |
8410 | case OP_VDUP0: |
8411 | case OP_VDUP1: |
8412 | case OP_VDUP2: |
8413 | case OP_VDUP3: |
8414 | return true; |
8415 | } |
8416 | return false; |
8417 | } |
8418 | |
8419 | /// isShuffleMaskLegal - Targets can use this to indicate that they only |
8420 | /// support *some* VECTOR_SHUFFLE operations, those with specific masks. |
8421 | /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values |
8422 | /// are assumed to be legal. |
8423 | bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { |
8424 | if (VT.getVectorNumElements() == 4 && |
8425 | (VT.is128BitVector() || VT.is64BitVector())) { |
8426 | unsigned PFIndexes[4]; |
8427 | for (unsigned i = 0; i != 4; ++i) { |
8428 | if (M[i] < 0) |
8429 | PFIndexes[i] = 8; |
8430 | else |
8431 | PFIndexes[i] = M[i]; |
8432 | } |
8433 | |
8434 | // Compute the index in the perfect shuffle table. |
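    // Each mask index is a base-9 digit (8 encodes an undef lane), so the four
    // digits select one of the 9^4 possible 4-element masks; the top two bits
    // of the table entry hold the cost of the best expansion.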
8435 | unsigned PFTableIndex = |
8436 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
8437 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
8438 | unsigned Cost = (PFEntry >> 30); |
8439 | |
8440 | if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry))) |
8441 | return true; |
8442 | } |
8443 | |
8444 | bool ReverseVEXT, isV_UNDEF; |
8445 | unsigned Imm, WhichResult; |
8446 | |
8447 | unsigned EltSize = VT.getScalarSizeInBits(); |
8448 | if (EltSize >= 32 || |
8449 | ShuffleVectorSDNode::isSplatMask(Mask: &M[0], VT) || |
8450 | ShuffleVectorInst::isIdentityMask(Mask: M, NumSrcElts: M.size()) || |
8451 | isVREVMask(M, VT, BlockSize: 64) || |
8452 | isVREVMask(M, VT, BlockSize: 32) || |
8453 | isVREVMask(M, VT, BlockSize: 16)) |
8454 | return true; |
8455 | else if (Subtarget->hasNEON() && |
8456 | (isVEXTMask(M, VT, ReverseVEXT, Imm) || |
8457 | isVTBLMask(M, VT) || |
8458 | isNEONTwoResultShuffleMask(ShuffleMask: M, VT, WhichResult, isV_UNDEF))) |
8459 | return true; |
8460 | else if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
8461 | isReverseMask(M, VT)) |
8462 | return true; |
8463 | else if (Subtarget->hasMVEIntegerOps() && |
8464 | (isVMOVNMask(M, VT, Top: true, SingleSource: false) || |
8465 | isVMOVNMask(M, VT, Top: false, SingleSource: false) || isVMOVNMask(M, VT, Top: true, SingleSource: true))) |
8466 | return true; |
8467 | else if (Subtarget->hasMVEIntegerOps() && |
8468 | (isTruncMask(M, VT, Top: false, SingleSource: false) || |
8469 | isTruncMask(M, VT, Top: false, SingleSource: true) || |
8470 | isTruncMask(M, VT, Top: true, SingleSource: false) || isTruncMask(M, VT, Top: true, SingleSource: true))) |
8471 | return true; |
8472 | else |
8473 | return false; |
8474 | } |
8475 | |
8476 | /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit |
8477 | /// the specified operations to build the shuffle. |
8478 | static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, |
8479 | SDValue RHS, SelectionDAG &DAG, |
8480 | const SDLoc &dl) { |
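  // A perfect-shuffle table entry packs the expansion cost in bits 31:30, the
  // opcode in bits 29:26 and the two 13-bit operand IDs in bits 25:13 and
  // 12:0.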
8481 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
8482 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
8483 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
8484 | |
8485 | if (OpNum == OP_COPY) { |
8486 | if (LHSID == (1*9+2)*9+3) return LHS; |
8487 | assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!" ); |
8488 | return RHS; |
8489 | } |
8490 | |
8491 | SDValue OpLHS, OpRHS; |
8492 | OpLHS = GeneratePerfectShuffle(PFEntry: PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); |
8493 | OpRHS = GeneratePerfectShuffle(PFEntry: PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); |
8494 | EVT VT = OpLHS.getValueType(); |
8495 | |
8496 | switch (OpNum) { |
8497 | default: llvm_unreachable("Unknown shuffle opcode!" ); |
8498 | case OP_VREV: |
8499 | // VREV divides the vector in half and swaps within the half. |
8500 | if (VT.getScalarSizeInBits() == 32) |
8501 | return DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT, Operand: OpLHS); |
8502 | // vrev <4 x i16> -> VREV32 |
8503 | if (VT.getScalarSizeInBits() == 16) |
8504 | return DAG.getNode(Opcode: ARMISD::VREV32, DL: dl, VT, Operand: OpLHS); |
8505 | // vrev <4 x i8> -> VREV16 |
8506 | assert(VT.getScalarSizeInBits() == 8); |
8507 | return DAG.getNode(Opcode: ARMISD::VREV16, DL: dl, VT, Operand: OpLHS); |
8508 | case OP_VDUP0: |
8509 | case OP_VDUP1: |
8510 | case OP_VDUP2: |
8511 | case OP_VDUP3: |
8512 | return DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, |
8513 | N1: OpLHS, N2: DAG.getConstant(Val: OpNum-OP_VDUP0, DL: dl, VT: MVT::i32)); |
8514 | case OP_VEXT1: |
8515 | case OP_VEXT2: |
8516 | case OP_VEXT3: |
8517 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, |
8518 | N1: OpLHS, N2: OpRHS, |
8519 | N3: DAG.getConstant(Val: OpNum - OP_VEXT1 + 1, DL: dl, VT: MVT::i32)); |
8520 | case OP_VUZPL: |
8521 | case OP_VUZPR: |
8522 | return DAG.getNode(Opcode: ARMISD::VUZP, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
8523 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VUZPL); |
8524 | case OP_VZIPL: |
8525 | case OP_VZIPR: |
8526 | return DAG.getNode(Opcode: ARMISD::VZIP, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
8527 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VZIPL); |
8528 | case OP_VTRNL: |
8529 | case OP_VTRNR: |
8530 | return DAG.getNode(Opcode: ARMISD::VTRN, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), |
8531 | N1: OpLHS, N2: OpRHS).getValue(R: OpNum-OP_VTRNL); |
8532 | } |
8533 | } |
8534 | |
8535 | static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, |
8536 | ArrayRef<int> ShuffleMask, |
8537 | SelectionDAG &DAG) { |
8538 | // Check to see if we can use the VTBL instruction. |
8539 | SDValue V1 = Op.getOperand(i: 0); |
8540 | SDValue V2 = Op.getOperand(i: 1); |
8541 | SDLoc DL(Op); |
8542 | |
8543 | SmallVector<SDValue, 8> VTBLMask; |
8544 | for (int I : ShuffleMask) |
8545 | VTBLMask.push_back(Elt: DAG.getConstant(Val: I, DL, VT: MVT::i32)); |
8546 | |
8547 | if (V2.getNode()->isUndef()) |
8548 | return DAG.getNode(Opcode: ARMISD::VTBL1, DL, VT: MVT::v8i8, N1: V1, |
8549 | N2: DAG.getBuildVector(VT: MVT::v8i8, DL, Ops: VTBLMask)); |
8550 | |
8551 | return DAG.getNode(Opcode: ARMISD::VTBL2, DL, VT: MVT::v8i8, N1: V1, N2: V2, |
8552 | N3: DAG.getBuildVector(VT: MVT::v8i8, DL, Ops: VTBLMask)); |
8553 | } |
8554 | |
8555 | static SDValue LowerReverse_VECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { |
8556 | SDLoc DL(Op); |
8557 | EVT VT = Op.getValueType(); |
8558 | |
  assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
         "Expect a v8i16/v8f16/v16i8 type");
8561 | SDValue OpLHS = DAG.getNode(Opcode: ARMISD::VREV64, DL, VT, Operand: Op.getOperand(i: 0)); |
8562 | // For a v16i8 type: After the VREV, we have got <7, ..., 0, 15, ..., 8>. Now, |
8563 | // extract the first 8 bytes into the top double word and the last 8 bytes |
8564 | // into the bottom double word, through a new vector shuffle that will be |
8565 | // turned into a VEXT on Neon, or a couple of VMOVDs on MVE. |
8566 | std::vector<int> NewMask; |
8567 | for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) |
8568 | NewMask.push_back(x: VT.getVectorNumElements() / 2 + i); |
8569 | for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) |
8570 | NewMask.push_back(x: i); |
8571 | return DAG.getVectorShuffle(VT, dl: DL, N1: OpLHS, N2: OpLHS, Mask: NewMask); |
8572 | } |
8573 | |
8574 | static EVT getVectorTyFromPredicateVector(EVT VT) { |
8575 | switch (VT.getSimpleVT().SimpleTy) { |
8576 | case MVT::v2i1: |
8577 | return MVT::v2f64; |
8578 | case MVT::v4i1: |
8579 | return MVT::v4i32; |
8580 | case MVT::v8i1: |
8581 | return MVT::v8i16; |
8582 | case MVT::v16i1: |
8583 | return MVT::v16i8; |
8584 | default: |
8585 | llvm_unreachable("Unexpected vector predicate type" ); |
8586 | } |
8587 | } |
8588 | |
8589 | static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT, |
8590 | SelectionDAG &DAG) { |
8591 | // Converting from boolean predicates to integers involves creating a vector |
8592 | // of all ones or all zeroes and selecting the lanes based upon the real |
8593 | // predicate. |
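  // For example (illustrative): a v4i1 predicate becomes a v4i32 whose lanes
  // are all-ones where the corresponding predicate bit is set and all-zeroes
  // otherwise.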
8594 | SDValue AllOnes = |
8595 | DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0xff), DL: dl, VT: MVT::i32); |
8596 | AllOnes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v16i8, Operand: AllOnes); |
8597 | |
8598 | SDValue AllZeroes = |
8599 | DAG.getTargetConstant(Val: ARM_AM::createVMOVModImm(OpCmode: 0xe, Val: 0x0), DL: dl, VT: MVT::i32); |
8600 | AllZeroes = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT: MVT::v16i8, Operand: AllZeroes); |
8601 | |
8602 | // Get full vector type from predicate type |
8603 | EVT NewVT = getVectorTyFromPredicateVector(VT); |
8604 | |
8605 | SDValue RecastV1; |
  // If the real predicate is a v8i1 or v4i1 (not v16i1) then we need to recast
  // this to a v16i1. This cannot be done with an ordinary bitcast because the
  // sizes are not the same. We have to use an MVE-specific PREDICATE_CAST node,
  // since we know in hardware the sizes are really the same.
8610 | if (VT != MVT::v16i1) |
8611 | RecastV1 = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v16i1, Operand: Pred); |
8612 | else |
8613 | RecastV1 = Pred; |
8614 | |
8615 | // Select either all ones or zeroes depending upon the real predicate bits. |
8616 | SDValue PredAsVector = |
8617 | DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT: MVT::v16i8, N1: RecastV1, N2: AllOnes, N3: AllZeroes); |
8618 | |
8619 | // Recast our new predicate-as-integer v16i8 vector into something |
8620 | // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate. |
8621 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: NewVT, Operand: PredAsVector); |
8622 | } |
8623 | |
8624 | static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG, |
8625 | const ARMSubtarget *ST) { |
8626 | EVT VT = Op.getValueType(); |
8627 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: Op.getNode()); |
8628 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
8629 | |
8630 | assert(ST->hasMVEIntegerOps() && |
8631 | "No support for vector shuffle of boolean predicates" ); |
8632 | |
8633 | SDValue V1 = Op.getOperand(i: 0); |
8634 | SDValue V2 = Op.getOperand(i: 1); |
8635 | SDLoc dl(Op); |
8636 | if (isReverseMask(M: ShuffleMask, VT)) { |
8637 | SDValue cast = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: V1); |
8638 | SDValue rbit = DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: cast); |
8639 | SDValue srl = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: rbit, |
8640 | N2: DAG.getConstant(Val: 16, DL: dl, VT: MVT::i32)); |
8641 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: srl); |
8642 | } |
8643 | |
8644 | // Until we can come up with optimised cases for every single vector |
8645 | // shuffle in existence we have chosen the least painful strategy. This is |
  // to essentially promote the boolean predicate to an 8-bit integer, where
8647 | // each predicate represents a byte. Then we fall back on a normal integer |
8648 | // vector shuffle and convert the result back into a predicate vector. In |
8649 | // many cases the generated code might be even better than scalar code |
8650 | // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit |
8651 | // fields in a register into 8 other arbitrary 2-bit fields! |
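  // For a v4i1 shuffle, for instance, each operand is promoted to a v4i32 of
  // all-ones/all-zero lanes, an ordinary v4i32 shuffle is emitted, and a VCMPZ
  // against zero turns the result back into a v4i1.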
8652 | SDValue PredAsVector1 = PromoteMVEPredVector(dl, Pred: V1, VT, DAG); |
8653 | EVT NewVT = PredAsVector1.getValueType(); |
8654 | SDValue PredAsVector2 = V2.isUndef() ? DAG.getUNDEF(VT: NewVT) |
8655 | : PromoteMVEPredVector(dl, Pred: V2, VT, DAG); |
8656 | assert(PredAsVector2.getValueType() == NewVT && |
8657 | "Expected identical vector type in expanded i1 shuffle!" ); |
8658 | |
8659 | // Do the shuffle! |
8660 | SDValue Shuffled = DAG.getVectorShuffle(VT: NewVT, dl, N1: PredAsVector1, |
8661 | N2: PredAsVector2, Mask: ShuffleMask); |
8662 | |
8663 | // Now return the result of comparing the shuffled vector with zero, |
8664 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. For a v2i1 |
8665 | // we convert to a v4i1 compare to fill in the two halves of the i64 as i32s. |
8666 | if (VT == MVT::v2i1) { |
8667 | SDValue BC = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Shuffled); |
8668 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: MVT::v4i1, N1: BC, |
8669 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
8670 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v2i1, Operand: Cmp); |
8671 | } |
8672 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Shuffled, |
8673 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
8674 | } |
8675 | |
8676 | static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op, |
8677 | ArrayRef<int> ShuffleMask, |
8678 | SelectionDAG &DAG) { |
8679 | // Attempt to lower the vector shuffle using as many whole register movs as |
  // possible. This is useful for types smaller than 32 bits, which would
  // often otherwise become a series of GPR movs.
8682 | SDLoc dl(Op); |
8683 | EVT VT = Op.getValueType(); |
8684 | if (VT.getScalarSizeInBits() >= 32) |
8685 | return SDValue(); |
8686 | |
8687 | assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
8688 | "Unexpected vector type" ); |
8689 | int NumElts = VT.getVectorNumElements(); |
8690 | int QuarterSize = NumElts / 4; |
8691 | // The four final parts of the vector, as i32's |
8692 | SDValue Parts[4]; |
8693 | |
  // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc. (but not
8695 | // <u,u,u,u>), returning the vmov lane index |
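  // For example (illustrative): in a v8i16 shuffle the quarter mask <4,5> maps
  // to i32 lane 2 of the first input once it is viewed as a v4f32, so that
  // whole quarter can be copied with a single lane mov.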
8696 | auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) { |
8697 | // Detect which mov lane this would be from the first non-undef element. |
8698 | int MovIdx = -1; |
8699 | for (int i = 0; i < Length; i++) { |
8700 | if (ShuffleMask[Start + i] >= 0) { |
8701 | if (ShuffleMask[Start + i] % Length != i) |
8702 | return -1; |
8703 | MovIdx = ShuffleMask[Start + i] / Length; |
8704 | break; |
8705 | } |
8706 | } |
8707 | // If all items are undef, leave this for other combines |
8708 | if (MovIdx == -1) |
8709 | return -1; |
8710 | // Check the remaining values are the correct part of the same mov |
8711 | for (int i = 1; i < Length; i++) { |
8712 | if (ShuffleMask[Start + i] >= 0 && |
8713 | (ShuffleMask[Start + i] / Length != MovIdx || |
8714 | ShuffleMask[Start + i] % Length != i)) |
8715 | return -1; |
8716 | } |
8717 | return MovIdx; |
8718 | }; |
8719 | |
8720 | for (int Part = 0; Part < 4; ++Part) { |
8721 | // Does this part look like a mov |
8722 | int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize); |
8723 | if (Elt != -1) { |
8724 | SDValue Input = Op->getOperand(Num: 0); |
8725 | if (Elt >= 4) { |
8726 | Input = Op->getOperand(Num: 1); |
8727 | Elt -= 4; |
8728 | } |
8729 | SDValue BitCast = DAG.getBitcast(VT: MVT::v4f32, V: Input); |
8730 | Parts[Part] = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, N1: BitCast, |
8731 | N2: DAG.getConstant(Val: Elt, DL: dl, VT: MVT::i32)); |
8732 | } |
8733 | } |
8734 | |
8735 | // Nothing interesting found, just return |
8736 | if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3]) |
8737 | return SDValue(); |
8738 | |
8739 | // The other parts need to be built with the old shuffle vector, cast to a |
8740 | // v4i32 and extract_vector_elts |
8741 | if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) { |
8742 | SmallVector<int, 16> NewShuffleMask; |
8743 | for (int Part = 0; Part < 4; ++Part) |
8744 | for (int i = 0; i < QuarterSize; i++) |
8745 | NewShuffleMask.push_back( |
8746 | Elt: Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]); |
8747 | SDValue NewShuffle = DAG.getVectorShuffle( |
8748 | VT, dl, N1: Op->getOperand(Num: 0), N2: Op->getOperand(Num: 1), Mask: NewShuffleMask); |
8749 | SDValue BitCast = DAG.getBitcast(VT: MVT::v4f32, V: NewShuffle); |
8750 | |
8751 | for (int Part = 0; Part < 4; ++Part) |
8752 | if (!Parts[Part]) |
8753 | Parts[Part] = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f32, |
8754 | N1: BitCast, N2: DAG.getConstant(Val: Part, DL: dl, VT: MVT::i32)); |
8755 | } |
8756 | // Build a vector out of the various parts and bitcast it back to the original |
8757 | // type. |
8758 | SDValue NewVec = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: MVT::v4f32, Ops: Parts); |
8759 | return DAG.getBitcast(VT, V: NewVec); |
8760 | } |
8761 | |
8762 | static SDValue LowerVECTOR_SHUFFLEUsingOneOff(SDValue Op, |
8763 | ArrayRef<int> ShuffleMask, |
8764 | SelectionDAG &DAG) { |
8765 | SDValue V1 = Op.getOperand(i: 0); |
8766 | SDValue V2 = Op.getOperand(i: 1); |
8767 | EVT VT = Op.getValueType(); |
8768 | unsigned NumElts = VT.getVectorNumElements(); |
8769 | |
  // A One-Off Identity mask is one that is mostly an identity mask from a
  // single source but contains a single element out-of-place, either from a
  // different vector or from another position in the same vector. As opposed to
  // lowering this via an ARMISD::BUILD_VECTOR we can generate an extract/insert
8774 | // pair directly. |
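  // For example (illustrative): the v4i32 mask <0,1,6,3> is an identity of V1
  // except for lane 2, which instead takes lane 2 of V2, so it becomes
  // insertelt(V1, extractelt(V2, 2), 2).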
8775 | auto isOneOffIdentityMask = [](ArrayRef<int> Mask, EVT VT, int BaseOffset, |
8776 | int &OffElement) { |
8777 | OffElement = -1; |
8778 | int NonUndef = 0; |
8779 | for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { |
8780 | if (Mask[i] == -1) |
8781 | continue; |
8782 | NonUndef++; |
8783 | if (Mask[i] != i + BaseOffset) { |
8784 | if (OffElement == -1) |
8785 | OffElement = i; |
8786 | else |
8787 | return false; |
8788 | } |
8789 | } |
8790 | return NonUndef > 2 && OffElement != -1; |
8791 | }; |
8792 | int OffElement; |
8793 | SDValue VInput; |
8794 | if (isOneOffIdentityMask(ShuffleMask, VT, 0, OffElement)) |
8795 | VInput = V1; |
8796 | else if (isOneOffIdentityMask(ShuffleMask, VT, NumElts, OffElement)) |
8797 | VInput = V2; |
8798 | else |
8799 | return SDValue(); |
8800 | |
8801 | SDLoc dl(Op); |
8802 | EVT SVT = VT.getScalarType() == MVT::i8 || VT.getScalarType() == MVT::i16 |
8803 | ? MVT::i32 |
8804 | : VT.getScalarType(); |
8805 | SDValue Elt = DAG.getNode( |
8806 | Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: SVT, |
8807 | N1: ShuffleMask[OffElement] < (int)NumElts ? V1 : V2, |
8808 | N2: DAG.getVectorIdxConstant(Val: ShuffleMask[OffElement] % NumElts, DL: dl)); |
8809 | return DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT, N1: VInput, N2: Elt, |
8810 | N3: DAG.getVectorIdxConstant(Val: OffElement % NumElts, DL: dl)); |
8811 | } |
8812 | |
8813 | static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, |
8814 | const ARMSubtarget *ST) { |
8815 | SDValue V1 = Op.getOperand(i: 0); |
8816 | SDValue V2 = Op.getOperand(i: 1); |
8817 | SDLoc dl(Op); |
8818 | EVT VT = Op.getValueType(); |
8819 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: Op.getNode()); |
8820 | unsigned EltSize = VT.getScalarSizeInBits(); |
8821 | |
8822 | if (ST->hasMVEIntegerOps() && EltSize == 1) |
8823 | return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST); |
8824 | |
8825 | // Convert shuffles that are directly supported on NEON to target-specific |
8826 | // DAG nodes, instead of keeping them as shuffles and matching them again |
8827 | // during code selection. This is more efficient and avoids the possibility |
8828 | // of inconsistencies between legalization and selection. |
8829 | // FIXME: floating-point vectors should be canonicalized to integer vectors |
  // of the same type so that they get CSEd properly.
8831 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
8832 | |
8833 | if (EltSize <= 32) { |
8834 | if (SVN->isSplat()) { |
8835 | int Lane = SVN->getSplatIndex(); |
      // If this is an undef splat, generate it via "just" vdup, if possible.
8837 | if (Lane == -1) Lane = 0; |
8838 | |
8839 | // Test if V1 is a SCALAR_TO_VECTOR. |
8840 | if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { |
8841 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: V1.getOperand(i: 0)); |
8842 | } |
8843 | // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR |
8844 | // (and probably will turn into a SCALAR_TO_VECTOR once legalization |
8845 | // reaches it). |
8846 | if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && |
8847 | !isa<ConstantSDNode>(Val: V1.getOperand(i: 0))) { |
8848 | bool IsScalarToVector = true; |
8849 | for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) |
8850 | if (!V1.getOperand(i).isUndef()) { |
8851 | IsScalarToVector = false; |
8852 | break; |
8853 | } |
8854 | if (IsScalarToVector) |
8855 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT, Operand: V1.getOperand(i: 0)); |
8856 | } |
8857 | return DAG.getNode(Opcode: ARMISD::VDUPLANE, DL: dl, VT, N1: V1, |
8858 | N2: DAG.getConstant(Val: Lane, DL: dl, VT: MVT::i32)); |
8859 | } |
8860 | |
8861 | bool ReverseVEXT = false; |
8862 | unsigned Imm = 0; |
8863 | if (ST->hasNEON() && isVEXTMask(M: ShuffleMask, VT, ReverseVEXT, Imm)) { |
8864 | if (ReverseVEXT) |
8865 | std::swap(a&: V1, b&: V2); |
8866 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, N1: V1, N2: V2, |
8867 | N3: DAG.getConstant(Val: Imm, DL: dl, VT: MVT::i32)); |
8868 | } |
8869 | |
8870 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 64)) |
8871 | return DAG.getNode(Opcode: ARMISD::VREV64, DL: dl, VT, Operand: V1); |
8872 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 32)) |
8873 | return DAG.getNode(Opcode: ARMISD::VREV32, DL: dl, VT, Operand: V1); |
8874 | if (isVREVMask(M: ShuffleMask, VT, BlockSize: 16)) |
8875 | return DAG.getNode(Opcode: ARMISD::VREV16, DL: dl, VT, Operand: V1); |
8876 | |
8877 | if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(M: ShuffleMask, VT, Imm)) { |
8878 | return DAG.getNode(Opcode: ARMISD::VEXT, DL: dl, VT, N1: V1, N2: V1, |
8879 | N3: DAG.getConstant(Val: Imm, DL: dl, VT: MVT::i32)); |
8880 | } |
8881 | |
8882 | // Check for Neon shuffles that modify both input vectors in place. |
8883 | // If both results are used, i.e., if there are two shuffles with the same |
8884 | // source operands and with masks corresponding to both results of one of |
8885 | // these operations, DAG memoization will ensure that a single node is |
8886 | // used for both shuffles. |
8887 | unsigned WhichResult = 0; |
8888 | bool isV_UNDEF = false; |
8889 | if (ST->hasNEON()) { |
8890 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
8891 | ShuffleMask, VT, WhichResult, isV_UNDEF)) { |
8892 | if (isV_UNDEF) |
8893 | V2 = V1; |
8894 | return DAG.getNode(Opcode: ShuffleOpc, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), N1: V1, N2: V2) |
8895 | .getValue(R: WhichResult); |
8896 | } |
8897 | } |
8898 | if (ST->hasMVEIntegerOps()) { |
8899 | if (isVMOVNMask(M: ShuffleMask, VT, Top: false, SingleSource: false)) |
8900 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V2, N2: V1, |
8901 | N3: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
8902 | if (isVMOVNMask(M: ShuffleMask, VT, Top: true, SingleSource: false)) |
8903 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V1, N2: V2, |
8904 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
8905 | if (isVMOVNMask(M: ShuffleMask, VT, Top: true, SingleSource: true)) |
8906 | return DAG.getNode(Opcode: ARMISD::VMOVN, DL: dl, VT, N1: V1, N2: V1, |
8907 | N3: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
8908 | } |
8909 | |
8910 | // Also check for these shuffles through CONCAT_VECTORS: we canonicalize |
8911 | // shuffles that produce a result larger than their operands with: |
8912 | // shuffle(concat(v1, undef), concat(v2, undef)) |
8913 | // -> |
8914 | // shuffle(concat(v1, v2), undef) |
8915 | // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). |
8916 | // |
8917 | // This is useful in the general case, but there are special cases where |
8918 | // native shuffles produce larger results: the two-result ops. |
8919 | // |
8920 | // Look through the concat when lowering them: |
8921 | // shuffle(concat(v1, v2), undef) |
8922 | // -> |
8923 | // concat(VZIP(v1, v2):0, :1) |
8924 | // |
8925 | if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { |
8926 | SDValue SubV1 = V1->getOperand(Num: 0); |
8927 | SDValue SubV2 = V1->getOperand(Num: 1); |
8928 | EVT SubVT = SubV1.getValueType(); |
8929 | |
8930 | // We expect these to have been canonicalized to -1. |
8931 | assert(llvm::all_of(ShuffleMask, [&](int i) { |
8932 | return i < (int)VT.getVectorNumElements(); |
8933 | }) && "Unexpected shuffle index into UNDEF operand!" ); |
8934 | |
8935 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
8936 | ShuffleMask, VT: SubVT, WhichResult, isV_UNDEF)) { |
8937 | if (isV_UNDEF) |
8938 | SubV2 = SubV1; |
8939 | assert((WhichResult == 0) && |
8940 | "In-place shuffle of concat can only have one result!" ); |
8941 | SDValue Res = DAG.getNode(Opcode: ShuffleOpc, DL: dl, VTList: DAG.getVTList(VT1: SubVT, VT2: SubVT), |
8942 | N1: SubV1, N2: SubV2); |
8943 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT, N1: Res.getValue(R: 0), |
8944 | N2: Res.getValue(R: 1)); |
8945 | } |
8946 | } |
8947 | } |
8948 | |
8949 | if (ST->hasMVEIntegerOps() && EltSize <= 32) { |
8950 | if (SDValue V = LowerVECTOR_SHUFFLEUsingOneOff(Op, ShuffleMask, DAG)) |
8951 | return V; |
8952 | |
8953 | for (bool Top : {false, true}) { |
8954 | for (bool SingleSource : {false, true}) { |
8955 | if (isTruncMask(M: ShuffleMask, VT, Top, SingleSource)) { |
8956 | MVT FromSVT = MVT::getIntegerVT(BitWidth: EltSize * 2); |
8957 | MVT FromVT = MVT::getVectorVT(VT: FromSVT, NumElements: ShuffleMask.size() / 2); |
8958 | SDValue Lo = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: FromVT, Operand: V1); |
8959 | SDValue Hi = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: FromVT, |
8960 | Operand: SingleSource ? V1 : V2); |
8961 | if (Top) { |
8962 | SDValue Amt = DAG.getConstant(Val: EltSize, DL: dl, VT: FromVT); |
8963 | Lo = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: FromVT, N1: Lo, N2: Amt); |
8964 | Hi = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: FromVT, N1: Hi, N2: Amt); |
8965 | } |
8966 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL: dl, VT, N1: Lo, N2: Hi); |
8967 | } |
8968 | } |
8969 | } |
8970 | } |
8971 | |
8972 | // If the shuffle is not directly supported and it has 4 elements, use |
8973 | // the PerfectShuffle-generated table to synthesize it from other shuffles. |
8974 | unsigned NumElts = VT.getVectorNumElements(); |
8975 | if (NumElts == 4) { |
8976 | unsigned PFIndexes[4]; |
8977 | for (unsigned i = 0; i != 4; ++i) { |
8978 | if (ShuffleMask[i] < 0) |
8979 | PFIndexes[i] = 8; |
8980 | else |
8981 | PFIndexes[i] = ShuffleMask[i]; |
8982 | } |
8983 | |
8984 | // Compute the index in the perfect shuffle table. |
8985 | unsigned PFTableIndex = |
8986 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
8987 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
8988 | unsigned Cost = (PFEntry >> 30); |
8989 | |
8990 | if (Cost <= 4) { |
8991 | if (ST->hasNEON()) |
8992 | return GeneratePerfectShuffle(PFEntry, LHS: V1, RHS: V2, DAG, dl); |
8993 | else if (isLegalMVEShuffleOp(PFEntry)) { |
8994 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
8995 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
8996 | unsigned PFEntryLHS = PerfectShuffleTable[LHSID]; |
8997 | unsigned PFEntryRHS = PerfectShuffleTable[RHSID]; |
8998 | if (isLegalMVEShuffleOp(PFEntry: PFEntryLHS) && isLegalMVEShuffleOp(PFEntry: PFEntryRHS)) |
8999 | return GeneratePerfectShuffle(PFEntry, LHS: V1, RHS: V2, DAG, dl); |
9000 | } |
9001 | } |
9002 | } |
9003 | |
9004 | // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. |
9005 | if (EltSize >= 32) { |
9006 | // Do the expansion with floating-point types, since that is what the VFP |
9007 | // registers are defined to use, and since i64 is not legal. |
9008 | EVT EltVT = EVT::getFloatingPointVT(BitWidth: EltSize); |
9009 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: EltVT, NumElements: NumElts); |
9010 | V1 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: V1); |
9011 | V2 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: V2); |
9012 | SmallVector<SDValue, 8> Ops; |
9013 | for (unsigned i = 0; i < NumElts; ++i) { |
9014 | if (ShuffleMask[i] < 0) |
9015 | Ops.push_back(Elt: DAG.getUNDEF(VT: EltVT)); |
9016 | else |
9017 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, |
9018 | N1: ShuffleMask[i] < (int)NumElts ? V1 : V2, |
9019 | N2: DAG.getConstant(Val: ShuffleMask[i] & (NumElts-1), |
9020 | DL: dl, VT: MVT::i32))); |
9021 | } |
9022 | SDValue Val = DAG.getNode(Opcode: ARMISD::BUILD_VECTOR, DL: dl, VT: VecVT, Ops); |
9023 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Val); |
9024 | } |
9025 | |
9026 | if ((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
9027 | isReverseMask(M: ShuffleMask, VT)) |
9028 | return LowerReverse_VECTOR_SHUFFLE(Op, DAG); |
9029 | |
9030 | if (ST->hasNEON() && VT == MVT::v8i8) |
9031 | if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) |
9032 | return NewOp; |
9033 | |
9034 | if (ST->hasMVEIntegerOps()) |
9035 | if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG)) |
9036 | return NewOp; |
9037 | |
9038 | return SDValue(); |
9039 | } |
9040 | |
9041 | static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, |
9042 | const ARMSubtarget *ST) { |
9043 | EVT VecVT = Op.getOperand(i: 0).getValueType(); |
9044 | SDLoc dl(Op); |
9045 | |
9046 | assert(ST->hasMVEIntegerOps() && |
9047 | "LowerINSERT_VECTOR_ELT_i1 called without MVE!" ); |
9048 | |
9049 | SDValue Conv = |
9050 | DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Op->getOperand(Num: 0)); |
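  // Sign-extend the incoming i1 element and BFI it into the LaneWidth
  // predicate bits that belong to the requested lane, then cast the i32 value
  // back to the predicate type.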
9051 | unsigned Lane = Op.getConstantOperandVal(i: 2); |
9052 | unsigned LaneWidth = |
9053 | getVectorTyFromPredicateVector(VT: VecVT).getScalarSizeInBits() / 8; |
9054 | unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth; |
9055 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL: dl, VT: MVT::i32, |
9056 | N1: Op.getOperand(i: 1), N2: DAG.getValueType(MVT::i1)); |
9057 | SDValue BFI = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT: MVT::i32, N1: Conv, N2: Ext, |
9058 | N3: DAG.getConstant(Val: ~Mask, DL: dl, VT: MVT::i32)); |
9059 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: Op.getValueType(), Operand: BFI); |
9060 | } |
9061 | |
9062 | SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, |
9063 | SelectionDAG &DAG) const { |
9064 | // INSERT_VECTOR_ELT is legal only for immediate indexes. |
9065 | SDValue Lane = Op.getOperand(i: 2); |
9066 | if (!isa<ConstantSDNode>(Val: Lane)) |
9067 | return SDValue(); |
9068 | |
9069 | SDValue Elt = Op.getOperand(i: 1); |
9070 | EVT EltVT = Elt.getValueType(); |
9071 | |
9072 | if (Subtarget->hasMVEIntegerOps() && |
9073 | Op.getValueType().getScalarSizeInBits() == 1) |
9074 | return LowerINSERT_VECTOR_ELT_i1(Op, DAG, ST: Subtarget); |
9075 | |
9076 | if (getTypeAction(Context&: *DAG.getContext(), VT: EltVT) == |
9077 | TargetLowering::TypeSoftPromoteHalf) { |
9078 | // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32, |
9079 | // but the type system will try to do that if we don't intervene. |
9080 | // Reinterpret any such vector-element insertion as one with the |
9081 | // corresponding integer types. |
9082 | |
9083 | SDLoc dl(Op); |
9084 | |
9085 | EVT IEltVT = MVT::getIntegerVT(BitWidth: EltVT.getScalarSizeInBits()); |
9086 | assert(getTypeAction(*DAG.getContext(), IEltVT) != |
9087 | TargetLowering::TypeSoftPromoteHalf); |
9088 | |
9089 | SDValue VecIn = Op.getOperand(i: 0); |
9090 | EVT VecVT = VecIn.getValueType(); |
9091 | EVT IVecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: IEltVT, |
9092 | NumElements: VecVT.getVectorNumElements()); |
9093 | |
9094 | SDValue IElt = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IEltVT, Operand: Elt); |
9095 | SDValue IVecIn = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: IVecVT, Operand: VecIn); |
9096 | SDValue IVecOut = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: IVecVT, |
9097 | N1: IVecIn, N2: IElt, N3: Lane); |
9098 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecVT, Operand: IVecOut); |
9099 | } |
9100 | |
9101 | return Op; |
9102 | } |
9103 | |
static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
9105 | const ARMSubtarget *ST) { |
9106 | EVT VecVT = Op.getOperand(i: 0).getValueType(); |
9107 | SDLoc dl(Op); |
9108 | |
  assert(ST->hasMVEIntegerOps() &&
         "LowerEXTRACT_VECTOR_ELT_i1 called without MVE!");
9111 | |
9112 | SDValue Conv = |
9113 | DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Op->getOperand(Num: 0)); |
9114 | unsigned Lane = Op.getConstantOperandVal(i: 1); |
9115 | unsigned LaneWidth = |
9116 | getVectorTyFromPredicateVector(VT: VecVT).getScalarSizeInBits() / 8; |
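  // Shift the requested lane's LaneWidth predicate bits down to bit 0 of the
  // predicate-as-i32 value.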
9117 | SDValue Shift = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, N1: Conv, |
9118 | N2: DAG.getConstant(Val: Lane * LaneWidth, DL: dl, VT: MVT::i32)); |
9119 | return Shift; |
9120 | } |
9121 | |
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
9123 | const ARMSubtarget *ST) { |
9124 | // EXTRACT_VECTOR_ELT is legal only for immediate indexes. |
9125 | SDValue Lane = Op.getOperand(i: 1); |
9126 | if (!isa<ConstantSDNode>(Val: Lane)) |
9127 | return SDValue(); |
9128 | |
9129 | SDValue Vec = Op.getOperand(i: 0); |
9130 | EVT VT = Vec.getValueType(); |
9131 | |
9132 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
9133 | return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST); |
9134 | |
9135 | if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) { |
9136 | SDLoc dl(Op); |
9137 | return DAG.getNode(Opcode: ARMISD::VGETLANEu, DL: dl, VT: MVT::i32, N1: Vec, N2: Lane); |
9138 | } |
9139 | |
9140 | return Op; |
9141 | } |
9142 | |
9143 | static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG, |
9144 | const ARMSubtarget *ST) { |
9145 | SDLoc dl(Op); |
9146 | assert(Op.getValueType().getScalarSizeInBits() == 1 && |
9147 | "Unexpected custom CONCAT_VECTORS lowering" ); |
9148 | assert(isPowerOf2_32(Op.getNumOperands()) && |
9149 | "Unexpected custom CONCAT_VECTORS lowering" ); |
9150 | assert(ST->hasMVEIntegerOps() && |
9151 | "CONCAT_VECTORS lowering only supported for MVE" ); |
9152 | |
9153 | auto ConcatPair = [&](SDValue V1, SDValue V2) { |
9154 | EVT Op1VT = V1.getValueType(); |
9155 | EVT Op2VT = V2.getValueType(); |
9156 | assert(Op1VT == Op2VT && "Operand types don't match!" ); |
9157 | assert((Op1VT == MVT::v2i1 || Op1VT == MVT::v4i1 || Op1VT == MVT::v8i1) && |
9158 | "Unexpected i1 concat operations!" ); |
9159 | EVT VT = Op1VT.getDoubleNumVectorElementsVT(Context&: *DAG.getContext()); |
9160 | |
9161 | SDValue NewV1 = PromoteMVEPredVector(dl, Pred: V1, VT: Op1VT, DAG); |
9162 | SDValue NewV2 = PromoteMVEPredVector(dl, Pred: V2, VT: Op2VT, DAG); |
9163 | |
9164 | // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets |
9165 | // promoted to v8i16, etc. |
9166 | MVT ElType = |
9167 | getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); |
9168 | unsigned NumElts = 2 * Op1VT.getVectorNumElements(); |
9169 | |
9170 | EVT ConcatVT = MVT::getVectorVT(VT: ElType, NumElements: NumElts); |
9171 | if (Op1VT == MVT::v4i1 || Op1VT == MVT::v8i1) { |
9172 | // Use MVETRUNC to truncate the combined NewV1::NewV2 into the smaller |
9173 | // ConcatVT. |
9174 | SDValue ConVec = |
9175 | DAG.getNode(Opcode: ARMISD::MVETRUNC, DL: dl, VT: ConcatVT, N1: NewV1, N2: NewV2); |
9176 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: ConVec, |
9177 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
9178 | } |
9179 | |
9180 | // Extract the vector elements from Op1 and Op2 one by one and truncate them |
9181 | // to be the right size for the destination. For example, if Op1 is v4i1 |
9182 | // then the promoted vector is v4i32. The result of concatenation gives a |
9183 | // v8i1, which when promoted is v8i16. That means each i32 element from Op1 |
9184 | // needs truncating to i16 and inserting in the result. |
    auto ExtractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) {
9186 | EVT NewVT = NewV.getValueType(); |
9187 | EVT ConcatVT = ConVec.getValueType(); |
9188 | unsigned ExtScale = 1; |
9189 | if (NewVT == MVT::v2f64) { |
9190 | NewV = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: NewV); |
9191 | ExtScale = 2; |
9192 | } |
9193 | for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) { |
9194 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV, |
9195 | N2: DAG.getIntPtrConstant(Val: i * ExtScale, DL: dl)); |
9196 | ConVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: ConcatVT, N1: ConVec, N2: Elt, |
9197 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
9198 | } |
9199 | return ConVec; |
9200 | }; |
9201 | unsigned j = 0; |
9202 | SDValue ConVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: ConcatVT); |
9203 | ConVec = ExtractInto(NewV1, ConVec, j); |
9204 | ConVec = ExtractInto(NewV2, ConVec, j); |
9205 | |
9206 | // Now return the result of comparing the subvector with zero, which will |
9207 | // generate a real predicate, i.e. v4i1, v8i1 or v16i1. |
9208 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: ConVec, |
9209 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
9210 | }; |
9211 | |
9212 | // Concat each pair of subvectors and pack into the lower half of the array. |
9213 | SmallVector<SDValue> ConcatOps(Op->op_begin(), Op->op_end()); |
9214 | while (ConcatOps.size() > 1) { |
9215 | for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) { |
9216 | SDValue V1 = ConcatOps[I]; |
9217 | SDValue V2 = ConcatOps[I + 1]; |
9218 | ConcatOps[I / 2] = ConcatPair(V1, V2); |
9219 | } |
9220 | ConcatOps.resize(N: ConcatOps.size() / 2); |
9221 | } |
9222 | return ConcatOps[0]; |
9223 | } |
9224 | |
9225 | static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, |
9226 | const ARMSubtarget *ST) { |
9227 | EVT VT = Op->getValueType(ResNo: 0); |
9228 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
9229 | return LowerCONCAT_VECTORS_i1(Op, DAG, ST); |
9230 | |
9231 | // The only time a CONCAT_VECTORS operation can have legal types is when |
9232 | // two 64-bit vectors are concatenated to a 128-bit vector. |
9233 | assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && |
9234 | "unexpected CONCAT_VECTORS" ); |
9235 | SDLoc dl(Op); |
9236 | SDValue Val = DAG.getUNDEF(VT: MVT::v2f64); |
9237 | SDValue Op0 = Op.getOperand(i: 0); |
9238 | SDValue Op1 = Op.getOperand(i: 1); |
9239 | if (!Op0.isUndef()) |
9240 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Val, |
9241 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Op0), |
9242 | N3: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
9243 | if (!Op1.isUndef()) |
9244 | Val = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: MVT::v2f64, N1: Val, |
9245 | N2: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: Op1), |
9246 | N3: DAG.getIntPtrConstant(Val: 1, DL: dl)); |
9247 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: Op.getValueType(), Operand: Val); |
9248 | } |
9249 | |
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
9251 | const ARMSubtarget *ST) { |
9252 | SDValue V1 = Op.getOperand(i: 0); |
9253 | SDValue V2 = Op.getOperand(i: 1); |
9254 | SDLoc dl(Op); |
9255 | EVT VT = Op.getValueType(); |
9256 | EVT Op1VT = V1.getValueType(); |
9257 | unsigned NumElts = VT.getVectorNumElements(); |
9258 | unsigned Index = V2->getAsZExtVal(); |
9259 | |
9260 | assert(VT.getScalarSizeInBits() == 1 && |
9261 | "Unexpected custom EXTRACT_SUBVECTOR lowering" ); |
9262 | assert(ST->hasMVEIntegerOps() && |
9263 | "EXTRACT_SUBVECTOR lowering only supported for MVE" ); |
9264 | |
9265 | SDValue NewV1 = PromoteMVEPredVector(dl, Pred: V1, VT: Op1VT, DAG); |
9266 | |
9267 | // We now have Op1 promoted to a vector of integers, where v8i1 gets |
9268 | // promoted to v8i16, etc. |
9269 | |
9270 | MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); |
9271 | |
9272 | if (NumElts == 2) { |
9273 | EVT SubVT = MVT::v4i32; |
9274 | SDValue SubVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: SubVT); |
9275 | for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j += 2) { |
9276 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV1, |
9277 | N2: DAG.getIntPtrConstant(Val: i, DL: dl)); |
9278 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
9279 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
9280 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
9281 | N3: DAG.getConstant(Val: j + 1, DL: dl, VT: MVT::i32)); |
9282 | } |
9283 | SDValue Cmp = DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT: MVT::v4i1, N1: SubVec, |
9284 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
9285 | return DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v2i1, Operand: Cmp); |
9286 | } |
9287 | |
9288 | EVT SubVT = MVT::getVectorVT(VT: ElType, NumElements: NumElts); |
9289 | SDValue SubVec = DAG.getNode(Opcode: ISD::UNDEF, DL: dl, VT: SubVT); |
9290 | for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) { |
9291 | SDValue Elt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: NewV1, |
9292 | N2: DAG.getIntPtrConstant(Val: i, DL: dl)); |
9293 | SubVec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: SubVT, N1: SubVec, N2: Elt, |
9294 | N3: DAG.getConstant(Val: j, DL: dl, VT: MVT::i32)); |
9295 | } |
9296 | |
9297 | // Now return the result of comparing the subvector with zero, |
9298 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. |
9299 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: SubVec, |
9300 | N2: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32)); |
9301 | } |
9302 | |
// Turn a truncate into a predicate (an i1 vector) into icmp ne(and(x, 1), 0).
9304 | static SDValue LowerTruncatei1(SDNode *N, SelectionDAG &DAG, |
9305 | const ARMSubtarget *ST) { |
9306 | assert(ST->hasMVEIntegerOps() && "Expected MVE!" ); |
9307 | EVT VT = N->getValueType(ResNo: 0); |
9308 | assert((VT == MVT::v16i1 || VT == MVT::v8i1 || VT == MVT::v4i1) && |
9309 | "Expected a vector i1 type!" ); |
9310 | SDValue Op = N->getOperand(Num: 0); |
9311 | EVT FromVT = Op.getValueType(); |
9312 | SDLoc DL(N); |
9313 | |
9314 | SDValue And = |
9315 | DAG.getNode(Opcode: ISD::AND, DL, VT: FromVT, N1: Op, N2: DAG.getConstant(Val: 1, DL, VT: FromVT)); |
9316 | return DAG.getNode(Opcode: ISD::SETCC, DL, VT, N1: And, N2: DAG.getConstant(Val: 0, DL, VT: FromVT), |
9317 | N3: DAG.getCondCode(Cond: ISD::SETNE)); |
9318 | } |
9319 | |
9320 | static SDValue LowerTruncate(SDNode *N, SelectionDAG &DAG, |
9321 | const ARMSubtarget *Subtarget) { |
9322 | if (!Subtarget->hasMVEIntegerOps()) |
9323 | return SDValue(); |
9324 | |
9325 | EVT ToVT = N->getValueType(ResNo: 0); |
9326 | if (ToVT.getScalarType() == MVT::i1) |
9327 | return LowerTruncatei1(N, DAG, ST: Subtarget); |
9328 | |
9329 | // MVE does not have a single instruction to perform the truncation of a v4i32 |
9330 | // into the lower half of a v8i16, in the same way that a NEON vmovn would. |
9331 | // Most of the instructions in MVE follow the 'Beats' system, where moving |
9332 | // values from different lanes is usually something that the instructions |
9333 | // avoid. |
9334 | // |
9335 | // Instead it has top/bottom instructions such as VMOVLT/B and VMOVNT/B, |
  // which take the top/bottom half of a larger lane and extend it (or do the
9337 | // opposite, truncating into the top/bottom lane from a larger lane). Note |
9338 | // that because of the way we widen lanes, a v4i16 is really a v4i32 using the |
  // bottom 16 bits from each vector lane. This works really well with T/B
  // instructions, but that doesn't extend to v8i32->v8i16 where the lanes need
  // to be reordered.
9342 | // |
9343 | // But truncates and sext/zext are always going to be fairly common from llvm. |
9344 | // We have several options for how to deal with them: |
9345 | // - Wherever possible combine them into an instruction that makes them |
9346 | // "free". This includes loads/stores, which can perform the trunc as part |
9347 | // of the memory operation. Or certain shuffles that can be turned into |
9348 | // VMOVN/VMOVL. |
9349 | // - Lane Interleaving to transform blocks surrounded by ext/trunc. So |
9350 | // trunc(mul(sext(a), sext(b))) may become |
9351 | // VMOVNT(VMUL(VMOVLB(a), VMOVLB(b)), VMUL(VMOVLT(a), VMOVLT(b))). (Which in |
9352 | // this case can use VMULL). This is performed in the |
9353 | // MVELaneInterleavingPass. |
9354 | // - Otherwise we have an option. By default we would expand the |
9355 | // zext/sext/trunc into a series of lane extract/inserts going via GPR |
  //   registers, one for each vector lane. This can obviously be
9357 | // very expensive. |
9358 | // - The other option is to use the fact that loads/store can extend/truncate |
9359 | // to turn a trunc into two truncating stack stores and a stack reload. This |
9360 | // becomes 3 back-to-back memory operations, but at least that is less than |
9361 | // all the insert/extracts. |
9362 | // |
9363 | // In order to do the last, we convert certain trunc's into MVETRUNC, which |
9364 | // are either optimized where they can be, or eventually lowered into stack |
9365 | // stores/loads. This prevents us from splitting a v8i16 trunc into two stores |
  // too early, where other instructions would be better, and stops us from
9367 | // having to reconstruct multiple buildvector shuffles into loads/stores. |
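  // For example (illustrative): a v8i32 -> v8i16 trunc below becomes
  // MVETRUNC(lo(v4i32), hi(v4i32)), which is either combined away where
  // possible or eventually expanded into the stack store/load sequence
  // described above.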
9368 | if (ToVT != MVT::v8i16 && ToVT != MVT::v16i8) |
9369 | return SDValue(); |
9370 | EVT FromVT = N->getOperand(Num: 0).getValueType(); |
9371 | if (FromVT != MVT::v8i32 && FromVT != MVT::v16i16) |
9372 | return SDValue(); |
9373 | |
9374 | SDValue Lo, Hi; |
9375 | std::tie(args&: Lo, args&: Hi) = DAG.SplitVectorOperand(N, OpNo: 0); |
9376 | SDLoc DL(N); |
9377 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL, VT: ToVT, N1: Lo, N2: Hi); |
9378 | } |
9379 | |
9380 | static SDValue LowerVectorExtend(SDNode *N, SelectionDAG &DAG, |
9381 | const ARMSubtarget *Subtarget) { |
9382 | if (!Subtarget->hasMVEIntegerOps()) |
9383 | return SDValue(); |
9384 | |
9385 | // See LowerTruncate above for an explanation of MVEEXT/MVETRUNC. |
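  // For the v16i8 -> v16i32 case (illustrative) the extension is done in two
  // steps: MVESEXT/MVEZEXT first widens to two v8i16 halves, each of which is
  // then sign/zero-extended again to v8i32 before the halves are concatenated.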
9386 | |
9387 | EVT ToVT = N->getValueType(ResNo: 0); |
9388 | if (ToVT != MVT::v16i32 && ToVT != MVT::v8i32 && ToVT != MVT::v16i16) |
9389 | return SDValue(); |
9390 | SDValue Op = N->getOperand(Num: 0); |
9391 | EVT FromVT = Op.getValueType(); |
9392 | if (FromVT != MVT::v8i16 && FromVT != MVT::v16i8) |
9393 | return SDValue(); |
9394 | |
9395 | SDLoc DL(N); |
9396 | EVT ExtVT = ToVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
9397 | if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8) |
9398 | ExtVT = MVT::v8i16; |
9399 | |
9400 | unsigned Opcode = |
9401 | N->getOpcode() == ISD::SIGN_EXTEND ? ARMISD::MVESEXT : ARMISD::MVEZEXT; |
9402 | SDValue Ext = DAG.getNode(Opcode, DL, VTList: DAG.getVTList(VT1: ExtVT, VT2: ExtVT), N: Op); |
9403 | SDValue Ext1 = Ext.getValue(R: 1); |
9404 | |
9405 | if (ToVT.getScalarType() == MVT::i32 && FromVT.getScalarType() == MVT::i8) { |
9406 | Ext = DAG.getNode(Opcode: N->getOpcode(), DL, VT: MVT::v8i32, Operand: Ext); |
9407 | Ext1 = DAG.getNode(Opcode: N->getOpcode(), DL, VT: MVT::v8i32, Operand: Ext1); |
9408 | } |
9409 | |
9410 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ToVT, N1: Ext, N2: Ext1); |
9411 | } |
9412 | |
9413 | /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each |
9414 | /// element has been zero/sign-extended, depending on the isSigned parameter, |
9415 | /// from an integer type half its size. |
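/// For example, a v4i32 BUILD_VECTOR of <0, 100, 65535, 7> counts as
/// zero-extended from v4i16, since every constant fits in 16 bits.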
9416 | static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, |
9417 | bool isSigned) { |
9418 | // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. |
9419 | EVT VT = N->getValueType(ResNo: 0); |
9420 | if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { |
9421 | SDNode *BVN = N->getOperand(Num: 0).getNode(); |
9422 | if (BVN->getValueType(ResNo: 0) != MVT::v4i32 || |
9423 | BVN->getOpcode() != ISD::BUILD_VECTOR) |
9424 | return false; |
9425 | unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
9426 | unsigned HiElt = 1 - LoElt; |
9427 | ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: LoElt)); |
9428 | ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: HiElt)); |
9429 | ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: LoElt+2)); |
9430 | ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(Val: BVN->getOperand(Num: HiElt+2)); |
9431 | if (!Lo0 || !Hi0 || !Lo1 || !Hi1) |
9432 | return false; |
9433 | if (isSigned) { |
9434 | if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && |
9435 | Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) |
9436 | return true; |
9437 | } else { |
9438 | if (Hi0->isZero() && Hi1->isZero()) |
9439 | return true; |
9440 | } |
9441 | return false; |
9442 | } |
9443 | |
9444 | if (N->getOpcode() != ISD::BUILD_VECTOR) |
9445 | return false; |
9446 | |
9447 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
9448 | SDNode *Elt = N->getOperand(Num: i).getNode(); |
9449 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: Elt)) { |
9450 | unsigned EltSize = VT.getScalarSizeInBits(); |
9451 | unsigned HalfSize = EltSize / 2; |
9452 | if (isSigned) { |
9453 | if (!isIntN(N: HalfSize, x: C->getSExtValue())) |
9454 | return false; |
9455 | } else { |
9456 | if (!isUIntN(N: HalfSize, x: C->getZExtValue())) |
9457 | return false; |
9458 | } |
9459 | continue; |
9460 | } |
9461 | return false; |
9462 | } |
9463 | |
9464 | return true; |
9465 | } |
9466 | |
9467 | /// isSignExtended - Check if a node is a vector value that is sign-extended |
9468 | /// or a constant BUILD_VECTOR with sign-extended elements. |
9469 | static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { |
9470 | if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) |
9471 | return true; |
9472 | if (isExtendedBUILD_VECTOR(N, DAG, isSigned: true)) |
9473 | return true; |
9474 | return false; |
9475 | } |
9476 | |
9477 | /// isZeroExtended - Check if a node is a vector value that is zero-extended (or |
9478 | /// any-extended) or a constant BUILD_VECTOR with zero-extended elements. |
9479 | static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { |
9480 | if (N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND || |
9481 | ISD::isZEXTLoad(N)) |
9482 | return true; |
9483 | if (isExtendedBUILD_VECTOR(N, DAG, isSigned: false)) |
9484 | return true; |
9485 | return false; |
9486 | } |
9487 | |
9488 | static EVT getExtensionTo64Bits(const EVT &OrigVT) { |
9489 | if (OrigVT.getSizeInBits() >= 64) |
9490 | return OrigVT; |
9491 | |
9492 | assert(OrigVT.isSimple() && "Expecting a simple value type" ); |
9493 | |
9494 | MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; |
9495 | switch (OrigSimpleTy) { |
9496 | default: llvm_unreachable("Unexpected Vector Type" ); |
9497 | case MVT::v2i8: |
9498 | case MVT::v2i16: |
9499 | return MVT::v2i32; |
9500 | case MVT::v4i8: |
9501 | return MVT::v4i16; |
9502 | } |
9503 | } |
9504 | |
9505 | /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total |
9506 | /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. |
9507 | /// We insert the required extension here to get the vector to fill a D register. |
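/// For example, a v2i16 operand that was originally extended to v2i64 is
/// re-extended here only to v2i32, so that it fills a 64-bit D register.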
9508 | static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, |
9509 | const EVT &OrigTy, |
9510 | const EVT &ExtTy, |
9511 | unsigned ExtOpcode) { |
9512 | // The vector originally had a size of OrigTy. It was then extended to ExtTy. |
9513 | // We expect the ExtTy to be 128-bits total. If the OrigTy is less than |
9514 | // 64-bits we need to insert a new extension so that it will be 64-bits. |
9515 | assert(ExtTy.is128BitVector() && "Unexpected extension size" ); |
9516 | if (OrigTy.getSizeInBits() >= 64) |
9517 | return N; |
9518 | |
9519 | // Must extend size to at least 64 bits to be used as an operand for VMULL. |
9520 | EVT NewVT = getExtensionTo64Bits(OrigVT: OrigTy); |
9521 | |
9522 | return DAG.getNode(Opcode: ExtOpcode, DL: SDLoc(N), VT: NewVT, Operand: N); |
9523 | } |
9524 | |
9525 | /// SkipLoadExtensionForVMULL - return a load of the original vector size that |
9526 | /// does not do any sign/zero extension. If the original vector is less |
9527 | /// than 64 bits, an appropriate extension will be added after the load to |
9528 | /// reach a total size of 64 bits. We have to add the extension separately |
9529 | /// because ARM does not have a sign/zero extending load for vectors. |
9530 | static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { |
9531 | EVT ExtendedTy = getExtensionTo64Bits(OrigVT: LD->getMemoryVT()); |
9532 | |
9533 | // The load already has the right type. |
9534 | if (ExtendedTy == LD->getMemoryVT()) |
9535 | return DAG.getLoad(VT: LD->getMemoryVT(), dl: SDLoc(LD), Chain: LD->getChain(), |
9536 | Ptr: LD->getBasePtr(), PtrInfo: LD->getPointerInfo(), Alignment: LD->getAlign(), |
9537 | MMOFlags: LD->getMemOperand()->getFlags()); |
9538 | |
9539 | // We need to create a zextload/sextload. We cannot just create a load |
  // followed by a zext/sext node because LowerMUL is also run during normal
9541 | // operation legalization where we can't create illegal types. |
9542 | return DAG.getExtLoad(ExtType: LD->getExtensionType(), dl: SDLoc(LD), VT: ExtendedTy, |
9543 | Chain: LD->getChain(), Ptr: LD->getBasePtr(), PtrInfo: LD->getPointerInfo(), |
9544 | MemVT: LD->getMemoryVT(), Alignment: LD->getAlign(), |
9545 | MMOFlags: LD->getMemOperand()->getFlags()); |
9546 | } |
9547 | |
9548 | /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, |
9549 | /// ANY_EXTEND, extending load, or BUILD_VECTOR with extended elements, return |
9550 | /// the unextended value. The unextended vector should be 64 bits so that it can |
9551 | /// be used as an operand to a VMULL instruction. If the original vector size |
/// before extension is less than 64 bits, we add an extension to resize
9553 | /// the vector to 64 bits. |
9554 | static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { |
9555 | if (N->getOpcode() == ISD::SIGN_EXTEND || |
9556 | N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND) |
9557 | return AddRequiredExtensionForVMULL(N: N->getOperand(Num: 0), DAG, |
9558 | OrigTy: N->getOperand(Num: 0)->getValueType(ResNo: 0), |
9559 | ExtTy: N->getValueType(ResNo: 0), |
9560 | ExtOpcode: N->getOpcode()); |
9561 | |
9562 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
9563 | assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) && |
9564 | "Expected extending load" ); |
9565 | |
9566 | SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG); |
9567 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: newLoad.getValue(R: 1)); |
9568 | unsigned Opcode = ISD::isSEXTLoad(N: LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
9569 | SDValue extLoad = |
9570 | DAG.getNode(Opcode, DL: SDLoc(newLoad), VT: LD->getValueType(ResNo: 0), Operand: newLoad); |
9571 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 0), To: extLoad); |
9572 | |
9573 | return newLoad; |
9574 | } |
9575 | |
9576 | // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will |
9577 | // have been legalized as a BITCAST from v4i32. |
9578 | if (N->getOpcode() == ISD::BITCAST) { |
9579 | SDNode *BVN = N->getOperand(Num: 0).getNode(); |
9580 | assert(BVN->getOpcode() == ISD::BUILD_VECTOR && |
9581 | BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR" ); |
9582 | unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
9583 | return DAG.getBuildVector( |
9584 | VT: MVT::v2i32, DL: SDLoc(N), |
9585 | Ops: {BVN->getOperand(Num: LowElt), BVN->getOperand(Num: LowElt + 2)}); |
9586 | } |
9587 | // Construct a new BUILD_VECTOR with elements truncated to half the size. |
9588 | assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR" ); |
9589 | EVT VT = N->getValueType(ResNo: 0); |
9590 | unsigned EltSize = VT.getScalarSizeInBits() / 2; |
9591 | unsigned NumElts = VT.getVectorNumElements(); |
9592 | MVT TruncVT = MVT::getIntegerVT(BitWidth: EltSize); |
9593 | SmallVector<SDValue, 8> Ops; |
9594 | SDLoc dl(N); |
9595 | for (unsigned i = 0; i != NumElts; ++i) { |
9596 | const APInt &CInt = N->getConstantOperandAPInt(Num: i); |
9597 | // Element types smaller than 32 bits are not legal, so use i32 elements. |
9598 | // The values are implicitly truncated so sext vs. zext doesn't matter. |
9599 | Ops.push_back(Elt: DAG.getConstant(Val: CInt.zextOrTrunc(width: 32), DL: dl, VT: MVT::i32)); |
9600 | } |
9601 | return DAG.getBuildVector(VT: MVT::getVectorVT(VT: TruncVT, NumElements: NumElts), DL: dl, Ops); |
9602 | } |
9603 | |
9604 | static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { |
9605 | unsigned Opcode = N->getOpcode(); |
9606 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
9607 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
9608 | SDNode *N1 = N->getOperand(Num: 1).getNode(); |
9609 | return N0->hasOneUse() && N1->hasOneUse() && |
9610 | isSignExtended(N: N0, DAG) && isSignExtended(N: N1, DAG); |
9611 | } |
9612 | return false; |
9613 | } |
9614 | |
9615 | static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { |
9616 | unsigned Opcode = N->getOpcode(); |
9617 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
9618 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
9619 | SDNode *N1 = N->getOperand(Num: 1).getNode(); |
9620 | return N0->hasOneUse() && N1->hasOneUse() && |
9621 | isZeroExtended(N: N0, DAG) && isZeroExtended(N: N1, DAG); |
9622 | } |
9623 | return false; |
9624 | } |
9625 | |
9626 | static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { |
9627 | // Multiplications are only custom-lowered for 128-bit vectors so that |
9628 | // VMULL can be detected. Otherwise v2i64 multiplications are not legal. |
9629 | EVT VT = Op.getValueType(); |
9630 | assert(VT.is128BitVector() && VT.isInteger() && |
9631 | "unexpected type for custom-lowering ISD::MUL" ); |
9632 | SDNode *N0 = Op.getOperand(i: 0).getNode(); |
9633 | SDNode *N1 = Op.getOperand(i: 1).getNode(); |
9634 | unsigned NewOpc = 0; |
9635 | bool isMLA = false; |
9636 | bool isN0SExt = isSignExtended(N: N0, DAG); |
9637 | bool isN1SExt = isSignExtended(N: N1, DAG); |
9638 | if (isN0SExt && isN1SExt) |
9639 | NewOpc = ARMISD::VMULLs; |
9640 | else { |
9641 | bool isN0ZExt = isZeroExtended(N: N0, DAG); |
9642 | bool isN1ZExt = isZeroExtended(N: N1, DAG); |
9643 | if (isN0ZExt && isN1ZExt) |
9644 | NewOpc = ARMISD::VMULLu; |
9645 | else if (isN1SExt || isN1ZExt) { |
9646 | // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these |
9647 | // into (s/zext A * s/zext C) + (s/zext B * s/zext C) |
9648 | if (isN1SExt && isAddSubSExt(N: N0, DAG)) { |
9649 | NewOpc = ARMISD::VMULLs; |
9650 | isMLA = true; |
9651 | } else if (isN1ZExt && isAddSubZExt(N: N0, DAG)) { |
9652 | NewOpc = ARMISD::VMULLu; |
9653 | isMLA = true; |
9654 | } else if (isN0ZExt && isAddSubZExt(N: N1, DAG)) { |
9655 | std::swap(a&: N0, b&: N1); |
9656 | NewOpc = ARMISD::VMULLu; |
9657 | isMLA = true; |
9658 | } |
9659 | } |
9660 | |
9661 | if (!NewOpc) { |
9662 | if (VT == MVT::v2i64) |
9663 | // Fall through to expand this. It is not legal. |
9664 | return SDValue(); |
9665 | else |
9666 | // Other vector multiplications are legal. |
9667 | return Op; |
9668 | } |
9669 | } |
9670 | |
9671 | // Legalize to a VMULL instruction. |
9672 | SDLoc DL(Op); |
9673 | SDValue Op0; |
9674 | SDValue Op1 = SkipExtensionForVMULL(N: N1, DAG); |
9675 | if (!isMLA) { |
9676 | Op0 = SkipExtensionForVMULL(N: N0, DAG); |
9677 | assert(Op0.getValueType().is64BitVector() && |
9678 | Op1.getValueType().is64BitVector() && |
9679 | "unexpected types for extended operands to VMULL" ); |
9680 | return DAG.getNode(Opcode: NewOpc, DL, VT, N1: Op0, N2: Op1); |
9681 | } |
9682 | |
9683 | // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during |
9684 | // isel lowering to take advantage of no-stall back to back vmul + vmla. |
9685 | // vmull q0, d4, d6 |
9686 | // vmlal q0, d5, d6 |
9687 | // is faster than |
9688 | // vaddl q0, d4, d5 |
9689 | // vmovl q1, d6 |
9690 | // vmul q0, q0, q1 |
9691 | SDValue N00 = SkipExtensionForVMULL(N: N0->getOperand(Num: 0).getNode(), DAG); |
9692 | SDValue N01 = SkipExtensionForVMULL(N: N0->getOperand(Num: 1).getNode(), DAG); |
9693 | EVT Op1VT = Op1.getValueType(); |
9694 | return DAG.getNode(Opcode: N0->getOpcode(), DL, VT, |
9695 | N1: DAG.getNode(Opcode: NewOpc, DL, VT, |
9696 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL, VT: Op1VT, Operand: N00), N2: Op1), |
9697 | N2: DAG.getNode(Opcode: NewOpc, DL, VT, |
9698 | N1: DAG.getNode(Opcode: ISD::BITCAST, DL, VT: Op1VT, Operand: N01), N2: Op1)); |
9699 | } |
9700 | |
9701 | static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, |
9702 | SelectionDAG &DAG) { |
9703 | // TODO: Should this propagate fast-math-flags? |
9704 | |
9705 | // Convert to float |
9706 | // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); |
9707 | // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); |
9708 | X = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: X); |
9709 | Y = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: Y); |
9710 | X = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: X); |
9711 | Y = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: Y); |
9712 | // Get reciprocal estimate. |
9713 | // float4 recip = vrecpeq_f32(yf); |
9714 | Y = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
9715 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
9716 | N2: Y); |
  // Because char has a smaller range than uchar, we can actually get away
  // without any Newton steps. This requires that we use a weird bias
  // of 0xb000, however (again, this has been exhaustively tested).
9720 | // float4 result = as_float4(as_int4(xf*recip) + 0xb000); |
9721 | X = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: X, N2: Y); |
9722 | X = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: X); |
9723 | Y = DAG.getConstant(Val: 0xb000, DL: dl, VT: MVT::v4i32); |
9724 | X = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: X, N2: Y); |
9725 | X = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: X); |
9726 | // Convert back to short. |
9727 | X = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: X); |
9728 | X = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: X); |
9729 | return X; |
9730 | } |
9731 | |
9732 | static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, |
9733 | SelectionDAG &DAG) { |
9734 | // TODO: Should this propagate fast-math-flags? |
9735 | |
9736 | SDValue N2; |
9737 | // Convert to float. |
9738 | // float4 yf = vcvt_f32_s32(vmovl_s16(y)); |
9739 | // float4 xf = vcvt_f32_s32(vmovl_s16(x)); |
9740 | N0 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N0); |
9741 | N1 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N1); |
9742 | N0 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N0); |
9743 | N1 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N1); |
9744 | |
9745 | // Use reciprocal estimate and one refinement step. |
9746 | // float4 recip = vrecpeq_f32(yf); |
9747 | // recip *= vrecpsq_f32(yf, recip); |
9748 | N2 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
9749 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
9750 | N2: N1); |
9751 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
9752 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
9753 | N2: N1, N3: N2); |
9754 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
  // Because short has a smaller range than ushort, we can actually get away
  // with only a single Newton step. This requires that we use a weird bias
  // of 0x89, however (again, this has been exhaustively tested).
9758 | // float4 result = as_float4(as_int4(xf*recip) + 0x89); |
9759 | N0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: N0, N2); |
9760 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: N0); |
9761 | N1 = DAG.getConstant(Val: 0x89, DL: dl, VT: MVT::v4i32); |
9762 | N0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: N0, N2: N1); |
9763 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: N0); |
9764 | // Convert back to integer and return. |
9765 | // return vmovn_s32(vcvt_s32_f32(result)); |
9766 | N0 = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: N0); |
9767 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: N0); |
9768 | return N0; |
9769 | } |
9770 | |
9771 | static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG, |
9772 | const ARMSubtarget *ST) { |
9773 | EVT VT = Op.getValueType(); |
9774 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
9775 | "unexpected type for custom-lowering ISD::SDIV" ); |
9776 | |
9777 | SDLoc dl(Op); |
9778 | SDValue N0 = Op.getOperand(i: 0); |
9779 | SDValue N1 = Op.getOperand(i: 1); |
9780 | SDValue N2, N3; |
9781 | |
9782 | if (VT == MVT::v8i8) { |
9783 | N0 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N0); |
9784 | N1 = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N1); |
9785 | |
9786 | N2 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
9787 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
9788 | N3 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
9789 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
9790 | N0 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
9791 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
9792 | N1 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
9793 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
9794 | |
9795 | N0 = LowerSDIV_v4i8(X: N0, Y: N1, dl, DAG); // v4i16 |
9796 | N2 = LowerSDIV_v4i8(X: N2, Y: N3, dl, DAG); // v4i16 |
9797 | |
9798 | N0 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: MVT::v8i16, N1: N0, N2); |
9799 | N0 = LowerCONCAT_VECTORS(Op: N0, DAG, ST); |
9800 | |
9801 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v8i8, Operand: N0); |
9802 | return N0; |
9803 | } |
9804 | return LowerSDIV_v4i16(N0, N1, dl, DAG); |
9805 | } |
9806 | |
9807 | static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG, |
9808 | const ARMSubtarget *ST) { |
9809 | // TODO: Should this propagate fast-math-flags? |
9810 | EVT VT = Op.getValueType(); |
9811 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
9812 | "unexpected type for custom-lowering ISD::UDIV" ); |
9813 | |
9814 | SDLoc dl(Op); |
9815 | SDValue N0 = Op.getOperand(i: 0); |
9816 | SDValue N1 = Op.getOperand(i: 1); |
9817 | SDValue N2, N3; |
9818 | |
9819 | if (VT == MVT::v8i8) { |
9820 | N0 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N0); |
9821 | N1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v8i16, Operand: N1); |
9822 | |
9823 | N2 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
9824 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
9825 | N3 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
9826 | N2: DAG.getIntPtrConstant(Val: 4, DL: dl)); |
9827 | N0 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1: N0, |
9828 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
9829 | N1 = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MVT::v4i16, N1, |
9830 | N2: DAG.getIntPtrConstant(Val: 0, DL: dl)); |
9831 | |
9832 | N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 |
9833 | N2 = LowerSDIV_v4i16(N0: N2, N1: N3, dl, DAG); // v4i16 |
9834 | |
9835 | N0 = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: dl, VT: MVT::v8i16, N1: N0, N2); |
9836 | N0 = LowerCONCAT_VECTORS(Op: N0, DAG, ST); |
9837 | |
9838 | N0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v8i8, |
9839 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vqmovnsu, DL: dl, |
9840 | VT: MVT::i32), |
9841 | N2: N0); |
9842 | return N0; |
9843 | } |
9844 | |
  // v4i16 udiv ... Convert to float.
9846 | // float4 yf = vcvt_f32_s32(vmovl_u16(y)); |
9847 | // float4 xf = vcvt_f32_s32(vmovl_u16(x)); |
9848 | N0 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N0); |
9849 | N1 = DAG.getNode(Opcode: ISD::ZERO_EXTEND, DL: dl, VT: MVT::v4i32, Operand: N1); |
9850 | N0 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N0); |
9851 | SDValue BN1 = DAG.getNode(Opcode: ISD::SINT_TO_FP, DL: dl, VT: MVT::v4f32, Operand: N1); |
9852 | |
9853 | // Use reciprocal estimate and two refinement steps. |
9854 | // float4 recip = vrecpeq_f32(yf); |
9855 | // recip *= vrecpsq_f32(yf, recip); |
9856 | // recip *= vrecpsq_f32(yf, recip); |
9857 | N2 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
9858 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecpe, DL: dl, VT: MVT::i32), |
9859 | N2: BN1); |
9860 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
9861 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
9862 | N2: BN1, N3: N2); |
9863 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
9864 | N1 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: MVT::v4f32, |
9865 | N1: DAG.getConstant(Val: Intrinsic::arm_neon_vrecps, DL: dl, VT: MVT::i32), |
9866 | N2: BN1, N3: N2); |
9867 | N2 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1, N2); |
9868 | // Simply multiplying by the reciprocal estimate can leave us a few ulps |
9869 | // too low, so we add 2 ulps (exhaustive testing shows that this is enough, |
9870 | // and that it will never cause us to return an answer too large). |
9871 | // float4 result = as_float4(as_int4(xf*recip) + 2); |
9872 | N0 = DAG.getNode(Opcode: ISD::FMUL, DL: dl, VT: MVT::v4f32, N1: N0, N2); |
9873 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4i32, Operand: N0); |
9874 | N1 = DAG.getConstant(Val: 2, DL: dl, VT: MVT::v4i32); |
9875 | N0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::v4i32, N1: N0, N2: N1); |
9876 | N0 = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::v4f32, Operand: N0); |
9877 | // Convert back to integer and return. |
9878 | // return vmovn_u32(vcvt_s32_f32(result)); |
9879 | N0 = DAG.getNode(Opcode: ISD::FP_TO_SINT, DL: dl, VT: MVT::v4i32, Operand: N0); |
9880 | N0 = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::v4i16, Operand: N0); |
9881 | return N0; |
9882 | } |
9883 | |
9884 | static SDValue LowerUADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) { |
9885 | SDNode *N = Op.getNode(); |
9886 | EVT VT = N->getValueType(ResNo: 0); |
9887 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
9888 | |
9889 | SDValue Carry = Op.getOperand(i: 2); |
9890 | |
9891 | SDLoc DL(Op); |
9892 | |
9893 | SDValue Result; |
9894 | if (Op.getOpcode() == ISD::UADDO_CARRY) { |
9895 | // This converts the boolean value carry into the carry flag. |
9896 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
9897 | |
9898 | // Do the addition proper using the carry flag we wanted. |
9899 | Result = DAG.getNode(Opcode: ARMISD::ADDE, DL, VTList: VTs, N1: Op.getOperand(i: 0), |
9900 | N2: Op.getOperand(i: 1), N3: Carry); |
9901 | |
9902 | // Now convert the carry flag into a boolean value. |
9903 | Carry = ConvertCarryFlagToBooleanCarry(Flags: Result.getValue(R: 1), VT, DAG); |
9904 | } else { |
    // ARMISD::SUBE expects a carry, not a borrow like ISD::USUBO_CARRY, so
    // we have to invert the carry first.
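    // (A borrow of 1 becomes a carry of 0 and vice versa, so the SBC then
    // subtracts exactly the extra 1 when a borrow was requested.)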
9907 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
9908 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
9909 | // This converts the boolean value carry into the carry flag. |
9910 | Carry = ConvertBooleanCarryToCarryFlag(BoolCarry: Carry, DAG); |
9911 | |
9912 | // Do the subtraction proper using the carry flag we wanted. |
9913 | Result = DAG.getNode(Opcode: ARMISD::SUBE, DL, VTList: VTs, N1: Op.getOperand(i: 0), |
9914 | N2: Op.getOperand(i: 1), N3: Carry); |
9915 | |
9916 | // Now convert the carry flag into a boolean value. |
9917 | Carry = ConvertCarryFlagToBooleanCarry(Flags: Result.getValue(R: 1), VT, DAG); |
9918 | // But the carry returned by ARMISD::SUBE is not a borrow as expected |
9919 | // by ISD::USUBO_CARRY, so compute 1 - C. |
9920 | Carry = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, |
9921 | N1: DAG.getConstant(Val: 1, DL, VT: MVT::i32), N2: Carry); |
9922 | } |
9923 | |
9924 | // Return both values. |
9925 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL, VTList: N->getVTList(), N1: Result, N2: Carry); |
9926 | } |
9927 | |
9928 | SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { |
9929 | assert(Subtarget->isTargetDarwin()); |
9930 | |
  // For iOS, we want to call an alternative entry point: __sincos_stret.
  // With the APCS ABI the return values are passed via sret; otherwise they
  // are returned directly.
9933 | SDLoc dl(Op); |
9934 | SDValue Arg = Op.getOperand(i: 0); |
9935 | EVT ArgVT = Arg.getValueType(); |
9936 | Type *ArgTy = ArgVT.getTypeForEVT(Context&: *DAG.getContext()); |
9937 | auto PtrVT = getPointerTy(DL: DAG.getDataLayout()); |
9938 | |
9939 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
9940 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
9941 | |
9942 | // Pair of floats / doubles used to pass the result. |
9943 | Type *RetTy = StructType::get(elt1: ArgTy, elts: ArgTy); |
9944 | auto &DL = DAG.getDataLayout(); |
9945 | |
9946 | ArgListTy Args; |
9947 | bool ShouldUseSRet = Subtarget->isAPCS_ABI(); |
9948 | SDValue SRet; |
9949 | if (ShouldUseSRet) { |
9950 | // Create stack object for sret. |
9951 | const uint64_t ByteSize = DL.getTypeAllocSize(Ty: RetTy); |
9952 | const Align StackAlign = DL.getPrefTypeAlign(Ty: RetTy); |
9953 | int FrameIdx = MFI.CreateStackObject(Size: ByteSize, Alignment: StackAlign, isSpillSlot: false); |
9954 | SRet = DAG.getFrameIndex(FI: FrameIdx, VT: TLI.getPointerTy(DL)); |
9955 | |
9956 | ArgListEntry Entry; |
9957 | Entry.Node = SRet; |
9958 | Entry.Ty = PointerType::getUnqual(C&: RetTy->getContext()); |
9959 | Entry.IsSExt = false; |
9960 | Entry.IsZExt = false; |
9961 | Entry.IsSRet = true; |
9962 | Args.push_back(x: Entry); |
9963 | RetTy = Type::getVoidTy(C&: *DAG.getContext()); |
9964 | } |
9965 | |
9966 | ArgListEntry Entry; |
9967 | Entry.Node = Arg; |
9968 | Entry.Ty = ArgTy; |
9969 | Entry.IsSExt = false; |
9970 | Entry.IsZExt = false; |
9971 | Args.push_back(x: Entry); |
9972 | |
9973 | RTLIB::Libcall LC = |
9974 | (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32; |
9975 | const char *LibcallName = getLibcallName(Call: LC); |
9976 | CallingConv::ID CC = getLibcallCallingConv(Call: LC); |
9977 | SDValue Callee = DAG.getExternalSymbol(Sym: LibcallName, VT: getPointerTy(DL)); |
9978 | |
9979 | TargetLowering::CallLoweringInfo CLI(DAG); |
9980 | CLI.setDebugLoc(dl) |
9981 | .setChain(DAG.getEntryNode()) |
9982 | .setCallee(CC, ResultType: RetTy, Target: Callee, ArgsList: std::move(Args)) |
9983 | .setDiscardResult(ShouldUseSRet); |
9984 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
9985 | |
9986 | if (!ShouldUseSRet) |
9987 | return CallResult.first; |
9988 | |
9989 | SDValue LoadSin = |
9990 | DAG.getLoad(VT: ArgVT, dl, Chain: CallResult.second, Ptr: SRet, PtrInfo: MachinePointerInfo()); |
9991 | |
9992 | // Address of cos field. |
9993 | SDValue Add = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: PtrVT, N1: SRet, |
9994 | N2: DAG.getIntPtrConstant(Val: ArgVT.getStoreSize(), DL: dl)); |
9995 | SDValue LoadCos = |
9996 | DAG.getLoad(VT: ArgVT, dl, Chain: LoadSin.getValue(R: 1), Ptr: Add, PtrInfo: MachinePointerInfo()); |
9997 | |
9998 | SDVTList Tys = DAG.getVTList(VT1: ArgVT, VT2: ArgVT); |
9999 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: Tys, |
10000 | N1: LoadSin.getValue(R: 0), N2: LoadCos.getValue(R: 0)); |
10001 | } |
10002 | |
10003 | SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, |
10004 | bool Signed, |
10005 | SDValue &Chain) const { |
10006 | EVT VT = Op.getValueType(); |
10007 | assert((VT == MVT::i32 || VT == MVT::i64) && |
10008 | "unexpected type for custom lowering DIV" ); |
10009 | SDLoc dl(Op); |
10010 | |
10011 | const auto &DL = DAG.getDataLayout(); |
10012 | const auto &TLI = DAG.getTargetLoweringInfo(); |
10013 | |
10014 | const char *Name = nullptr; |
10015 | if (Signed) |
10016 | Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64" ; |
10017 | else |
10018 | Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64" ; |
10019 | |
10020 | SDValue ES = DAG.getExternalSymbol(Sym: Name, VT: TLI.getPointerTy(DL)); |
10021 | |
10022 | ARMTargetLowering::ArgListTy Args; |
10023 | |
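  // The operands are pushed in reverse order (divisor first, then dividend),
  // matching the argument order the __rt_*div helpers expect.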
10024 | for (auto AI : {1, 0}) { |
10025 | ArgListEntry Arg; |
10026 | Arg.Node = Op.getOperand(i: AI); |
10027 | Arg.Ty = Arg.Node.getValueType().getTypeForEVT(Context&: *DAG.getContext()); |
10028 | Args.push_back(x: Arg); |
10029 | } |
10030 | |
10031 | CallLoweringInfo CLI(DAG); |
10032 | CLI.setDebugLoc(dl) |
10033 | .setChain(Chain) |
10034 | .setCallee(CC: CallingConv::ARM_AAPCS_VFP, ResultType: VT.getTypeForEVT(Context&: *DAG.getContext()), |
10035 | Target: ES, ArgsList: std::move(Args)); |
10036 | |
10037 | return LowerCallTo(CLI).first; |
10038 | } |
10039 | |
// This is a code size optimisation: return the original SDIV node to
// DAGCombiner when we don't want to expand SDIV into a sequence of
// instructions, and an empty SDValue otherwise, which will cause the
// SDIV to be expanded in DAGCombine.
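// For example, with minsize and hardware divide available, an sdiv by 8 is
// left as a single divide instruction rather than being expanded into a
// shift/add sequence (in Thumb mode only while the immediate still fits in a
// 2-byte MOVS, i.e. is at most 128).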
10044 | SDValue |
10045 | ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, |
10046 | SelectionDAG &DAG, |
10047 | SmallVectorImpl<SDNode *> &Created) const { |
10048 | // TODO: Support SREM |
10049 | if (N->getOpcode() != ISD::SDIV) |
10050 | return SDValue(); |
10051 | |
10052 | const auto &ST = DAG.getSubtarget<ARMSubtarget>(); |
10053 | const bool MinSize = ST.hasMinSize(); |
10054 | const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode() |
10055 | : ST.hasDivideInARMMode(); |
10056 | |
10057 | // Don't touch vector types; rewriting this may lead to scalarizing |
10058 | // the int divs. |
10059 | if (N->getOperand(Num: 0).getValueType().isVector()) |
10060 | return SDValue(); |
10061 | |
  // Bail unless MinSize is set; for both ARM and Thumb mode we also need
  // hwdiv support for this to be really profitable.
10064 | if (!(MinSize && HasDivide)) |
10065 | return SDValue(); |
10066 | |
10067 | // ARM mode is a bit simpler than Thumb: we can handle large power |
10068 | // of 2 immediates with 1 mov instruction; no further checks required, |
10069 | // just return the sdiv node. |
10070 | if (!ST.isThumb()) |
10071 | return SDValue(N, 0); |
10072 | |
  // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV,
  // and thus lose the code size benefit of a MOVS that requires only 2 bytes.
  // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here,
  // but as this check is doing exactly that, it's not worth the trouble of
  // getting TTI.
10077 | if (Divisor.sgt(RHS: 128)) |
10078 | return SDValue(); |
10079 | |
10080 | return SDValue(N, 0); |
10081 | } |
10082 | |
10083 | SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, |
10084 | bool Signed) const { |
10085 | assert(Op.getValueType() == MVT::i32 && |
10086 | "unexpected type for custom lowering DIV" ); |
10087 | SDLoc dl(Op); |
10088 | |
10089 | SDValue DBZCHK = DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL: dl, VT: MVT::Other, |
10090 | N1: DAG.getEntryNode(), N2: Op.getOperand(i: 1)); |
10091 | |
10092 | return LowerWindowsDIVLibCall(Op, DAG, Signed, Chain&: DBZCHK); |
10093 | } |
10094 | |
10095 | static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) { |
10096 | SDLoc DL(N); |
10097 | SDValue Op = N->getOperand(Num: 1); |
10098 | if (N->getValueType(ResNo: 0) == MVT::i32) |
10099 | return DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL, VT: MVT::Other, N1: InChain, N2: Op); |
10100 | SDValue Lo, Hi; |
10101 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: Op, DL, LoVT: MVT::i32, HiVT: MVT::i32); |
10102 | return DAG.getNode(Opcode: ARMISD::WIN__DBZCHK, DL, VT: MVT::Other, N1: InChain, |
10103 | N2: DAG.getNode(Opcode: ISD::OR, DL, VT: MVT::i32, N1: Lo, N2: Hi)); |
10104 | } |
10105 | |
10106 | void ARMTargetLowering::ExpandDIV_Windows( |
10107 | SDValue Op, SelectionDAG &DAG, bool Signed, |
10108 | SmallVectorImpl<SDValue> &Results) const { |
10109 | const auto &DL = DAG.getDataLayout(); |
10110 | const auto &TLI = DAG.getTargetLoweringInfo(); |
10111 | |
10112 | assert(Op.getValueType() == MVT::i64 && |
10113 | "unexpected type for custom lowering DIV" ); |
10114 | SDLoc dl(Op); |
10115 | |
10116 | SDValue DBZCHK = WinDBZCheckDenominator(DAG, N: Op.getNode(), InChain: DAG.getEntryNode()); |
10117 | |
10118 | SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, Chain&: DBZCHK); |
10119 | |
10120 | SDValue Lower = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::i32, Operand: Result); |
10121 | SDValue Upper = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i64, N1: Result, |
10122 | N2: DAG.getConstant(Val: 32, DL: dl, VT: TLI.getPointerTy(DL))); |
10123 | Upper = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: MVT::i32, Operand: Upper); |
10124 | |
10125 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lower, N2: Upper)); |
10126 | } |
10127 | |
10128 | static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) { |
10129 | LoadSDNode *LD = cast<LoadSDNode>(Val: Op.getNode()); |
10130 | EVT MemVT = LD->getMemoryVT(); |
10131 | assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
10132 | MemVT == MVT::v16i1) && |
10133 | "Expected a predicate type!" ); |
10134 | assert(MemVT == Op.getValueType()); |
10135 | assert(LD->getExtensionType() == ISD::NON_EXTLOAD && |
10136 | "Expected a non-extending load" ); |
  assert(LD->isUnindexed() && "Expected an unindexed load");
10138 | |
  // The basic MVE VLDR on a v2i1/v4i1/v8i1 actually loads the entire 16-bit
  // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
  // need to make sure that the 8/4/2 bits are actually loaded into the
  // correct place, which means loading the value and then shuffling the
  // values into the bottom bits of the predicate.
  // Equally, a VLDR for a v16i1 will actually load 32 bits (so will be
  // incorrect for BE).
  // Speaking of BE, the rest of llvm apparently assumes the reverse order to
  // a natural VMSR(load), so the value needs to be reversed.
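  //
  // So, for example, a v4i1 load here becomes an i32 extending load of the 4
  // stored bits, an ARMISD::PREDICATE_CAST of that value to v16i1 and an
  // EXTRACT_SUBVECTOR of the bottom v4i1 lanes (with an extra
  // BITREVERSE/shift beforehand for big-endian).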
10148 | |
10149 | SDLoc dl(Op); |
10150 | SDValue Load = DAG.getExtLoad( |
10151 | ExtType: ISD::EXTLOAD, dl, VT: MVT::i32, Chain: LD->getChain(), Ptr: LD->getBasePtr(), |
10152 | MemVT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: MemVT.getSizeInBits()), |
10153 | MMO: LD->getMemOperand()); |
10154 | SDValue Val = Load; |
10155 | if (DAG.getDataLayout().isBigEndian()) |
10156 | Val = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, |
10157 | N1: DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: Load), |
10158 | N2: DAG.getConstant(Val: 32 - MemVT.getSizeInBits(), DL: dl, VT: MVT::i32)); |
10159 | SDValue Pred = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::v16i1, Operand: Val); |
10160 | if (MemVT != MVT::v16i1) |
10161 | Pred = DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL: dl, VT: MemVT, N1: Pred, |
10162 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
10163 | return DAG.getMergeValues(Ops: {Pred, Load.getValue(R: 1)}, dl); |
10164 | } |
10165 | |
10166 | void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results, |
10167 | SelectionDAG &DAG) const { |
10168 | LoadSDNode *LD = cast<LoadSDNode>(Val: N); |
10169 | EVT MemVT = LD->getMemoryVT(); |
10170 | assert(LD->isUnindexed() && "Loads should be unindexed at this point." ); |
10171 | |
10172 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && |
10173 | !Subtarget->isThumb1Only() && LD->isVolatile() && |
10174 | LD->getAlign() >= Subtarget->getDualLoadStoreAlignment()) { |
10175 | SDLoc dl(N); |
10176 | SDValue Result = DAG.getMemIntrinsicNode( |
10177 | Opcode: ARMISD::LDRD, dl, VTList: DAG.getVTList(VTs: {MVT::i32, MVT::i32, MVT::Other}), |
10178 | Ops: {LD->getChain(), LD->getBasePtr()}, MemVT, MMO: LD->getMemOperand()); |
10179 | SDValue Lo = Result.getValue(R: DAG.getDataLayout().isLittleEndian() ? 0 : 1); |
10180 | SDValue Hi = Result.getValue(R: DAG.getDataLayout().isLittleEndian() ? 1 : 0); |
10181 | SDValue Pair = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Lo, N2: Hi); |
10182 | Results.append(IL: {Pair, Result.getValue(R: 2)}); |
10183 | } |
10184 | } |
10185 | |
10186 | static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) { |
10187 | StoreSDNode *ST = cast<StoreSDNode>(Val: Op.getNode()); |
10188 | EVT MemVT = ST->getMemoryVT(); |
10189 | assert((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
10190 | MemVT == MVT::v16i1) && |
10191 | "Expected a predicate type!" ); |
10192 | assert(MemVT == ST->getValue().getValueType()); |
  assert(!ST->isTruncatingStore() && "Expected a non-truncating store");
  assert(ST->isUnindexed() && "Expected an unindexed store");
10195 | |
  // Only store the v2i1/v4i1/v8i1 worth of bits, via a buildvector with the
  // top bits unset and a scalar store.
10198 | SDLoc dl(Op); |
10199 | SDValue Build = ST->getValue(); |
10200 | if (MemVT != MVT::v16i1) { |
10201 | SmallVector<SDValue, 16> Ops; |
10202 | for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++) { |
10203 | unsigned Elt = DAG.getDataLayout().isBigEndian() |
10204 | ? MemVT.getVectorNumElements() - I - 1 |
10205 | : I; |
10206 | Ops.push_back(Elt: DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::i32, N1: Build, |
10207 | N2: DAG.getConstant(Val: Elt, DL: dl, VT: MVT::i32))); |
10208 | } |
10209 | for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++) |
10210 | Ops.push_back(Elt: DAG.getUNDEF(VT: MVT::i32)); |
10211 | Build = DAG.getNode(Opcode: ISD::BUILD_VECTOR, DL: dl, VT: MVT::v16i1, Ops); |
10212 | } |
10213 | SDValue GRP = DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT: MVT::i32, Operand: Build); |
10214 | if (MemVT == MVT::v16i1 && DAG.getDataLayout().isBigEndian()) |
10215 | GRP = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT: MVT::i32, |
10216 | N1: DAG.getNode(Opcode: ISD::BITREVERSE, DL: dl, VT: MVT::i32, Operand: GRP), |
10217 | N2: DAG.getConstant(Val: 16, DL: dl, VT: MVT::i32)); |
10218 | return DAG.getTruncStore( |
10219 | Chain: ST->getChain(), dl, Val: GRP, Ptr: ST->getBasePtr(), |
10220 | SVT: EVT::getIntegerVT(Context&: *DAG.getContext(), BitWidth: MemVT.getSizeInBits()), |
10221 | MMO: ST->getMemOperand()); |
10222 | } |
10223 | |
10224 | static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, |
10225 | const ARMSubtarget *Subtarget) { |
10226 | StoreSDNode *ST = cast<StoreSDNode>(Val: Op.getNode()); |
10227 | EVT MemVT = ST->getMemoryVT(); |
10228 | assert(ST->isUnindexed() && "Stores should be unindexed at this point." ); |
10229 | |
10230 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && |
10231 | !Subtarget->isThumb1Only() && ST->isVolatile() && |
10232 | ST->getAlign() >= Subtarget->getDualLoadStoreAlignment()) { |
10233 | SDNode *N = Op.getNode(); |
10234 | SDLoc dl(N); |
10235 | |
10236 | SDValue Lo = DAG.getNode( |
10237 | Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32, N1: ST->getValue(), |
10238 | N2: DAG.getTargetConstant(Val: DAG.getDataLayout().isLittleEndian() ? 0 : 1, DL: dl, |
10239 | VT: MVT::i32)); |
10240 | SDValue Hi = DAG.getNode( |
10241 | Opcode: ISD::EXTRACT_ELEMENT, DL: dl, VT: MVT::i32, N1: ST->getValue(), |
10242 | N2: DAG.getTargetConstant(Val: DAG.getDataLayout().isLittleEndian() ? 1 : 0, DL: dl, |
10243 | VT: MVT::i32)); |
10244 | |
10245 | return DAG.getMemIntrinsicNode(Opcode: ARMISD::STRD, dl, VTList: DAG.getVTList(VT: MVT::Other), |
10246 | Ops: {ST->getChain(), Lo, Hi, ST->getBasePtr()}, |
10247 | MemVT, MMO: ST->getMemOperand()); |
10248 | } else if (Subtarget->hasMVEIntegerOps() && |
10249 | ((MemVT == MVT::v2i1 || MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
10250 | MemVT == MVT::v16i1))) { |
10251 | return LowerPredicateStore(Op, DAG); |
10252 | } |
10253 | |
10254 | return SDValue(); |
10255 | } |
10256 | |
10257 | static bool isZeroVector(SDValue N) { |
10258 | return (ISD::isBuildVectorAllZeros(N: N.getNode()) || |
10259 | (N->getOpcode() == ARMISD::VMOVIMM && |
10260 | isNullConstant(V: N->getOperand(Num: 0)))); |
10261 | } |
10262 | |
10263 | static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) { |
10264 | MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Val: Op.getNode()); |
10265 | MVT VT = Op.getSimpleValueType(); |
10266 | SDValue Mask = N->getMask(); |
10267 | SDValue PassThru = N->getPassThru(); |
10268 | SDLoc dl(Op); |
10269 | |
10270 | if (isZeroVector(N: PassThru)) |
10271 | return Op; |
10272 | |
10273 | // MVE Masked loads use zero as the passthru value. Here we convert undef to |
10274 | // zero too, and other values are lowered to a select. |
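  // In other words, a masked load with a non-zero, non-undef passthru is
  // lowered roughly as:
  //   vselect(mask, masked.load(ptr, mask, zeroes), passthru)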
10275 | SDValue ZeroVec = DAG.getNode(Opcode: ARMISD::VMOVIMM, DL: dl, VT, |
10276 | Operand: DAG.getTargetConstant(Val: 0, DL: dl, VT: MVT::i32)); |
10277 | SDValue NewLoad = DAG.getMaskedLoad( |
10278 | VT, dl, Chain: N->getChain(), Base: N->getBasePtr(), Offset: N->getOffset(), Mask, Src0: ZeroVec, |
10279 | MemVT: N->getMemoryVT(), MMO: N->getMemOperand(), AM: N->getAddressingMode(), |
10280 | N->getExtensionType(), IsExpanding: N->isExpandingLoad()); |
10281 | SDValue Combo = NewLoad; |
10282 | bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST || |
10283 | PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) && |
10284 | isZeroVector(N: PassThru->getOperand(Num: 0)); |
10285 | if (!PassThru.isUndef() && !PassThruIsCastZero) |
10286 | Combo = DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT, N1: Mask, N2: NewLoad, N3: PassThru); |
10287 | return DAG.getMergeValues(Ops: {Combo, NewLoad.getValue(R: 1)}, dl); |
10288 | } |
10289 | |
10290 | static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG, |
10291 | const ARMSubtarget *ST) { |
10292 | if (!ST->hasMVEIntegerOps()) |
10293 | return SDValue(); |
10294 | |
10295 | SDLoc dl(Op); |
10296 | unsigned BaseOpcode = 0; |
10297 | switch (Op->getOpcode()) { |
10298 | default: llvm_unreachable("Expected VECREDUCE opcode" ); |
10299 | case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break; |
10300 | case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break; |
10301 | case ISD::VECREDUCE_MUL: BaseOpcode = ISD::MUL; break; |
10302 | case ISD::VECREDUCE_AND: BaseOpcode = ISD::AND; break; |
10303 | case ISD::VECREDUCE_OR: BaseOpcode = ISD::OR; break; |
10304 | case ISD::VECREDUCE_XOR: BaseOpcode = ISD::XOR; break; |
10305 | case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break; |
10306 | case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break; |
10307 | } |
10308 | |
10309 | SDValue Op0 = Op->getOperand(Num: 0); |
10310 | EVT VT = Op0.getValueType(); |
10311 | EVT EltVT = VT.getVectorElementType(); |
10312 | unsigned NumElts = VT.getVectorNumElements(); |
10313 | unsigned NumActiveLanes = NumElts; |
10314 | |
  assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 ||
          NumActiveLanes == 2) &&
         "Only expected a power of 2 vector size");
10318 | |
10319 | // Use Mul(X, Rev(X)) until 4 items remain. Going down to 4 vector elements |
10320 | // allows us to easily extract vector elements from the lanes. |
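  // For example, for a v8i16 VECREDUCE_MUL the first step multiplies the
  // vector by its VREV32-swapped self, combining adjacent pairs and leaving
  // 4 useful lanes (0, 2, 4 and 6), which are then extracted and combined as
  // scalars below.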
10321 | while (NumActiveLanes > 4) { |
10322 | unsigned RevOpcode = NumActiveLanes == 16 ? ARMISD::VREV16 : ARMISD::VREV32; |
10323 | SDValue Rev = DAG.getNode(Opcode: RevOpcode, DL: dl, VT, Operand: Op0); |
10324 | Op0 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT, N1: Op0, N2: Rev); |
10325 | NumActiveLanes /= 2; |
10326 | } |
10327 | |
10328 | SDValue Res; |
10329 | if (NumActiveLanes == 4) { |
    // The remaining 4 elements are reduced sequentially.
10331 | SDValue Ext0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
10332 | N2: DAG.getConstant(Val: 0 * NumElts / 4, DL: dl, VT: MVT::i32)); |
10333 | SDValue Ext1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
10334 | N2: DAG.getConstant(Val: 1 * NumElts / 4, DL: dl, VT: MVT::i32)); |
10335 | SDValue Ext2 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
10336 | N2: DAG.getConstant(Val: 2 * NumElts / 4, DL: dl, VT: MVT::i32)); |
10337 | SDValue Ext3 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
10338 | N2: DAG.getConstant(Val: 3 * NumElts / 4, DL: dl, VT: MVT::i32)); |
10339 | SDValue Res0 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext0, N2: Ext1, Flags: Op->getFlags()); |
10340 | SDValue Res1 = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext2, N2: Ext3, Flags: Op->getFlags()); |
10341 | Res = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Res0, N2: Res1, Flags: Op->getFlags()); |
10342 | } else { |
10343 | SDValue Ext0 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
10344 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
10345 | SDValue Ext1 = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
10346 | N2: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32)); |
10347 | Res = DAG.getNode(Opcode: BaseOpcode, DL: dl, VT: EltVT, N1: Ext0, N2: Ext1, Flags: Op->getFlags()); |
10348 | } |
10349 | |
10350 | // Result type may be wider than element type. |
10351 | if (EltVT != Op->getValueType(ResNo: 0)) |
10352 | Res = DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: Op->getValueType(ResNo: 0), Operand: Res); |
10353 | return Res; |
10354 | } |
10355 | |
10356 | static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG, |
10357 | const ARMSubtarget *ST) { |
10358 | if (!ST->hasMVEFloatOps()) |
10359 | return SDValue(); |
10360 | return LowerVecReduce(Op, DAG, ST); |
10361 | } |
10362 | |
10363 | static SDValue LowerVecReduceMinMax(SDValue Op, SelectionDAG &DAG, |
10364 | const ARMSubtarget *ST) { |
10365 | if (!ST->hasNEON()) |
10366 | return SDValue(); |
10367 | |
10368 | SDLoc dl(Op); |
10369 | SDValue Op0 = Op->getOperand(Num: 0); |
10370 | EVT VT = Op0.getValueType(); |
10371 | EVT EltVT = VT.getVectorElementType(); |
10372 | |
10373 | unsigned PairwiseIntrinsic = 0; |
10374 | switch (Op->getOpcode()) { |
10375 | default: |
10376 | llvm_unreachable("Expected VECREDUCE opcode" ); |
10377 | case ISD::VECREDUCE_UMIN: |
10378 | PairwiseIntrinsic = Intrinsic::arm_neon_vpminu; |
10379 | break; |
10380 | case ISD::VECREDUCE_UMAX: |
10381 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmaxu; |
10382 | break; |
10383 | case ISD::VECREDUCE_SMIN: |
10384 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmins; |
10385 | break; |
10386 | case ISD::VECREDUCE_SMAX: |
10387 | PairwiseIntrinsic = Intrinsic::arm_neon_vpmaxs; |
10388 | break; |
10389 | } |
10390 | SDValue PairwiseOp = DAG.getConstant(Val: PairwiseIntrinsic, DL: dl, VT: MVT::i32); |
10391 | |
10392 | unsigned NumElts = VT.getVectorNumElements(); |
10393 | unsigned NumActiveLanes = NumElts; |
10394 | |
  assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 ||
          NumActiveLanes == 2) &&
         "Only expected a power of 2 vector size");
10398 | |
10399 | // Split 128-bit vectors, since vpmin/max takes 2 64-bit vectors. |
10400 | if (VT.is128BitVector()) { |
10401 | SDValue Lo, Hi; |
10402 | std::tie(args&: Lo, args&: Hi) = DAG.SplitVector(N: Op0, DL: dl); |
10403 | VT = Lo.getValueType(); |
10404 | Op0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops: {PairwiseOp, Lo, Hi}); |
10405 | NumActiveLanes /= 2; |
10406 | } |
10407 | |
10408 | // Use pairwise reductions until one lane remains |
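  // For a v4i32 VECREDUCE_UMAX, for example, the split above leaves a single
  // v2i32 value; one further vpmax.u32 of that value with itself then leaves
  // the overall maximum in lane 0, ready to be extracted below.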
10409 | while (NumActiveLanes > 1) { |
10410 | Op0 = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops: {PairwiseOp, Op0, Op0}); |
10411 | NumActiveLanes /= 2; |
10412 | } |
10413 | |
10414 | SDValue Res = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: EltVT, N1: Op0, |
10415 | N2: DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32)); |
10416 | |
10417 | // Result type may be wider than element type. |
10418 | if (EltVT != Op.getValueType()) { |
10419 | unsigned Extend = 0; |
10420 | switch (Op->getOpcode()) { |
10421 | default: |
10422 | llvm_unreachable("Expected VECREDUCE opcode" ); |
10423 | case ISD::VECREDUCE_UMIN: |
10424 | case ISD::VECREDUCE_UMAX: |
10425 | Extend = ISD::ZERO_EXTEND; |
10426 | break; |
10427 | case ISD::VECREDUCE_SMIN: |
10428 | case ISD::VECREDUCE_SMAX: |
10429 | Extend = ISD::SIGN_EXTEND; |
10430 | break; |
10431 | } |
10432 | Res = DAG.getNode(Opcode: Extend, DL: dl, VT: Op.getValueType(), Operand: Res); |
10433 | } |
10434 | return Res; |
10435 | } |
10436 | |
10437 | static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { |
10438 | if (isStrongerThanMonotonic(AO: cast<AtomicSDNode>(Val&: Op)->getSuccessOrdering())) |
10439 | // Acquire/Release load/store is not legal for targets without a dmb or |
10440 | // equivalent available. |
10441 | return SDValue(); |
10442 | |
10443 | // Monotonic load/store is legal for all targets. |
10444 | return Op; |
10445 | } |
10446 | |
10447 | static void ReplaceREADCYCLECOUNTER(SDNode *N, |
10448 | SmallVectorImpl<SDValue> &Results, |
10449 | SelectionDAG &DAG, |
10450 | const ARMSubtarget *Subtarget) { |
10451 | SDLoc DL(N); |
10452 | // Under Power Management extensions, the cycle-count is: |
10453 | // mrc p15, #0, <Rt>, c9, c13, #0 |
10454 | SDValue Ops[] = { N->getOperand(Num: 0), // Chain |
10455 | DAG.getTargetConstant(Val: Intrinsic::arm_mrc, DL, VT: MVT::i32), |
10456 | DAG.getTargetConstant(Val: 15, DL, VT: MVT::i32), |
10457 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32), |
10458 | DAG.getTargetConstant(Val: 9, DL, VT: MVT::i32), |
10459 | DAG.getTargetConstant(Val: 13, DL, VT: MVT::i32), |
10460 | DAG.getTargetConstant(Val: 0, DL, VT: MVT::i32) |
10461 | }; |
10462 | |
10463 | SDValue Cycles32 = DAG.getNode(Opcode: ISD::INTRINSIC_W_CHAIN, DL, |
10464 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops); |
10465 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL, VT: MVT::i64, N1: Cycles32, |
10466 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32))); |
10467 | Results.push_back(Elt: Cycles32.getValue(R: 1)); |
10468 | } |
10469 | |
10470 | static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { |
10471 | SDLoc dl(V.getNode()); |
10472 | auto [VLo, VHi] = DAG.SplitScalar(N: V, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
10473 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
10474 | if (isBigEndian) |
10475 | std::swap (a&: VLo, b&: VHi); |
10476 | SDValue RegClass = |
10477 | DAG.getTargetConstant(Val: ARM::GPRPairRegClassID, DL: dl, VT: MVT::i32); |
10478 | SDValue SubReg0 = DAG.getTargetConstant(Val: ARM::gsub_0, DL: dl, VT: MVT::i32); |
10479 | SDValue SubReg1 = DAG.getTargetConstant(Val: ARM::gsub_1, DL: dl, VT: MVT::i32); |
10480 | const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; |
10481 | return SDValue( |
10482 | DAG.getMachineNode(Opcode: TargetOpcode::REG_SEQUENCE, dl, VT: MVT::Untyped, Ops), 0); |
10483 | } |
10484 | |
10485 | static void ReplaceCMP_SWAP_64Results(SDNode *N, |
10486 | SmallVectorImpl<SDValue> & Results, |
10487 | SelectionDAG &DAG) { |
10488 | assert(N->getValueType(0) == MVT::i64 && |
10489 | "AtomicCmpSwap on types less than 64 should be legal" ); |
10490 | SDValue Ops[] = {N->getOperand(Num: 1), |
10491 | createGPRPairNode(DAG, V: N->getOperand(Num: 2)), |
10492 | createGPRPairNode(DAG, V: N->getOperand(Num: 3)), |
10493 | N->getOperand(Num: 0)}; |
10494 | SDNode *CmpSwap = DAG.getMachineNode( |
10495 | Opcode: ARM::CMP_SWAP_64, dl: SDLoc(N), |
10496 | VTs: DAG.getVTList(VT1: MVT::Untyped, VT2: MVT::i32, VT3: MVT::Other), Ops); |
10497 | |
10498 | MachineMemOperand *MemOp = cast<MemSDNode>(Val: N)->getMemOperand(); |
10499 | DAG.setNodeMemRefs(N: cast<MachineSDNode>(Val: CmpSwap), NewMemRefs: {MemOp}); |
10500 | |
10501 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
10502 | |
10503 | SDValue Lo = |
10504 | DAG.getTargetExtractSubreg(SRIdx: isBigEndian ? ARM::gsub_1 : ARM::gsub_0, |
10505 | DL: SDLoc(N), VT: MVT::i32, Operand: SDValue(CmpSwap, 0)); |
10506 | SDValue Hi = |
10507 | DAG.getTargetExtractSubreg(SRIdx: isBigEndian ? ARM::gsub_0 : ARM::gsub_1, |
10508 | DL: SDLoc(N), VT: MVT::i32, Operand: SDValue(CmpSwap, 0)); |
10509 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: SDLoc(N), VT: MVT::i64, N1: Lo, N2: Hi)); |
10510 | Results.push_back(Elt: SDValue(CmpSwap, 2)); |
10511 | } |
10512 | |
10513 | SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const { |
10514 | SDLoc dl(Op); |
10515 | EVT VT = Op.getValueType(); |
10516 | SDValue Chain = Op.getOperand(i: 0); |
10517 | SDValue LHS = Op.getOperand(i: 1); |
10518 | SDValue RHS = Op.getOperand(i: 2); |
10519 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: Op.getOperand(i: 3))->get(); |
10520 | bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS; |
10521 | |
10522 | // If we don't have instructions of this float type then soften to a libcall |
10523 | // and use SETCC instead. |
10524 | if (isUnsupportedFloatingType(VT: LHS.getValueType())) { |
10525 | DAG.getTargetLoweringInfo().softenSetCCOperands( |
10526 | DAG, VT: LHS.getValueType(), NewLHS&: LHS, NewRHS&: RHS, CCCode&: CC, DL: dl, OldLHS: LHS, OldRHS: RHS, Chain, IsSignaling); |
10527 | if (!RHS.getNode()) { |
10528 | RHS = DAG.getConstant(Val: 0, DL: dl, VT: LHS.getValueType()); |
10529 | CC = ISD::SETNE; |
10530 | } |
10531 | SDValue Result = DAG.getNode(Opcode: ISD::SETCC, DL: dl, VT, N1: LHS, N2: RHS, |
10532 | N3: DAG.getCondCode(Cond: CC)); |
10533 | return DAG.getMergeValues(Ops: {Result, Chain}, dl); |
10534 | } |
10535 | |
10536 | ARMCC::CondCodes CondCode, CondCode2; |
10537 | FPCCToARMCC(CC, CondCode, CondCode2); |
10538 | |
10539 | // FIXME: Chain is not handled correctly here. Currently the FPSCR is implicit |
10540 | // in CMPFP and CMPFPE, but instead it should be made explicit by these |
10541 | // instructions using a chain instead of glue. This would also fix the problem |
10542 | // here (and also in LowerSELECT_CC) where we generate two comparisons when |
10543 | // CondCode2 != AL. |
10544 | SDValue True = DAG.getConstant(Val: 1, DL: dl, VT); |
10545 | SDValue False = DAG.getConstant(Val: 0, DL: dl, VT); |
10546 | SDValue ARMcc = DAG.getConstant(Val: CondCode, DL: dl, VT: MVT::i32); |
10547 | SDValue CCR = DAG.getRegister(Reg: ARM::CPSR, VT: MVT::i32); |
10548 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, Signaling: IsSignaling); |
10549 | SDValue Result = getCMOV(dl, VT, FalseVal: False, TrueVal: True, ARMcc, CCR, Cmp, DAG); |
10550 | if (CondCode2 != ARMCC::AL) { |
10551 | ARMcc = DAG.getConstant(Val: CondCode2, DL: dl, VT: MVT::i32); |
10552 | Cmp = getVFPCmp(LHS, RHS, DAG, dl, Signaling: IsSignaling); |
10553 | Result = getCMOV(dl, VT, FalseVal: Result, TrueVal: True, ARMcc, CCR, Cmp, DAG); |
10554 | } |
10555 | return DAG.getMergeValues(Ops: {Result, Chain}, dl); |
10556 | } |
10557 | |
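/// LowerSPONENTRY - Return the stack pointer value at function entry,
/// materialized as a fixed frame index at offset 0.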
10558 | SDValue ARMTargetLowering::LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const { |
10559 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
10560 | |
10561 | EVT VT = getPointerTy(DL: DAG.getDataLayout()); |
10562 | SDLoc DL(Op); |
10563 | int FI = MFI.CreateFixedObject(Size: 4, SPOffset: 0, IsImmutable: false); |
10564 | return DAG.getFrameIndex(FI, VT); |
10565 | } |
10566 | |
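/// LowerOperation - Provide custom lowering hooks for operations marked as
/// Custom, dispatching on the node's opcode.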
10567 | SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
10568 | LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump()); |
10569 | switch (Op.getOpcode()) { |
10570 | default: llvm_unreachable("Don't know how to custom lower this!"); |
10571 | case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG); |
10572 | case ISD::ConstantPool: return LowerConstantPool(Op, DAG); |
10573 | case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); |
10574 | case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); |
10575 | case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); |
10576 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
10577 | case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); |
10578 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
10579 | case ISD::BR_CC: return LowerBR_CC(Op, DAG); |
10580 | case ISD::BR_JT: return LowerBR_JT(Op, DAG); |
10581 | case ISD::VASTART: return LowerVASTART(Op, DAG); |
10582 | case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); |
10583 | case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); |
10584 | case ISD::SINT_TO_FP: |
10585 | case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); |
10586 | case ISD::STRICT_FP_TO_SINT: |
10587 | case ISD::STRICT_FP_TO_UINT: |
10588 | case ISD::FP_TO_SINT: |
10589 | case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); |
10590 | case ISD::FP_TO_SINT_SAT: |
10591 | case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG, Subtarget); |
10592 | case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); |
10593 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
10594 | case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); |
10595 | case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); |
10596 | case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); |
10597 | case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); |
10598 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget); |
10599 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, |
10600 | Subtarget); |
10601 | case ISD::BITCAST: return ExpandBITCAST(N: Op.getNode(), DAG, Subtarget); |
10602 | case ISD::SHL: |
10603 | case ISD::SRL: |
10604 | case ISD::SRA: return LowerShift(N: Op.getNode(), DAG, ST: Subtarget); |
10605 | case ISD::SREM: return LowerREM(N: Op.getNode(), DAG); |
10606 | case ISD::UREM: return LowerREM(N: Op.getNode(), DAG); |
10607 | case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); |
10608 | case ISD::SRL_PARTS: |
10609 | case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); |
10610 | case ISD::CTTZ: |
10611 | case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(N: Op.getNode(), DAG, ST: Subtarget); |
10612 | case ISD::CTPOP: return LowerCTPOP(N: Op.getNode(), DAG, ST: Subtarget); |
10613 | case ISD::SETCC: return LowerVSETCC(Op, DAG, ST: Subtarget); |
10614 | case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG); |
10615 | case ISD::ConstantFP: return LowerConstantFP(Op, DAG, ST: Subtarget); |
10616 | case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, ST: Subtarget); |
10617 | case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, ST: Subtarget); |
10618 | case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, ST: Subtarget); |
10619 | case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); |
10620 | case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, ST: Subtarget); |
10621 | case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, ST: Subtarget); |
10622 | case ISD::TRUNCATE: return LowerTruncate(N: Op.getNode(), DAG, Subtarget); |
10623 | case ISD::SIGN_EXTEND: |
10624 | case ISD::ZERO_EXTEND: return LowerVectorExtend(N: Op.getNode(), DAG, Subtarget); |
10625 | case ISD::GET_ROUNDING: return LowerGET_ROUNDING(Op, DAG); |
10626 | case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG); |
10627 | case ISD::SET_FPMODE: |
10628 | return LowerSET_FPMODE(Op, DAG); |
10629 | case ISD::RESET_FPMODE: |
10630 | return LowerRESET_FPMODE(Op, DAG); |
10631 | case ISD::MUL: return LowerMUL(Op, DAG); |
10632 | case ISD::SDIV: |
10633 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
10634 | return LowerDIV_Windows(Op, DAG, /* Signed */ true); |
10635 | return LowerSDIV(Op, DAG, ST: Subtarget); |
10636 | case ISD::UDIV: |
10637 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
10638 | return LowerDIV_Windows(Op, DAG, /* Signed */ false); |
10639 | return LowerUDIV(Op, DAG, ST: Subtarget); |
10640 | case ISD::UADDO_CARRY: |
10641 | case ISD::USUBO_CARRY: |
10642 | return LowerUADDSUBO_CARRY(Op, DAG); |
10643 | case ISD::SADDO: |
10644 | case ISD::SSUBO: |
10645 | return LowerSignedALUO(Op, DAG); |
10646 | case ISD::UADDO: |
10647 | case ISD::USUBO: |
10648 | return LowerUnsignedALUO(Op, DAG); |
10649 | case ISD::SADDSAT: |
10650 | case ISD::SSUBSAT: |
10651 | case ISD::UADDSAT: |
10652 | case ISD::USUBSAT: |
10653 | return LowerADDSUBSAT(Op, DAG, Subtarget); |
10654 | case ISD::LOAD: |
10655 | return LowerPredicateLoad(Op, DAG); |
10656 | case ISD::STORE: |
10657 | return LowerSTORE(Op, DAG, Subtarget); |
10658 | case ISD::MLOAD: |
10659 | return LowerMLOAD(Op, DAG); |
10660 | case ISD::VECREDUCE_MUL: |
10661 | case ISD::VECREDUCE_AND: |
10662 | case ISD::VECREDUCE_OR: |
10663 | case ISD::VECREDUCE_XOR: |
10664 | return LowerVecReduce(Op, DAG, ST: Subtarget); |
10665 | case ISD::VECREDUCE_FADD: |
10666 | case ISD::VECREDUCE_FMUL: |
10667 | case ISD::VECREDUCE_FMIN: |
10668 | case ISD::VECREDUCE_FMAX: |
10669 | return LowerVecReduceF(Op, DAG, ST: Subtarget); |
10670 | case ISD::VECREDUCE_UMIN: |
10671 | case ISD::VECREDUCE_UMAX: |
10672 | case ISD::VECREDUCE_SMIN: |
10673 | case ISD::VECREDUCE_SMAX: |
10674 | return LowerVecReduceMinMax(Op, DAG, ST: Subtarget); |
10675 | case ISD::ATOMIC_LOAD: |
10676 | case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); |
10677 | case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); |
10678 | case ISD::SDIVREM: |
10679 | case ISD::UDIVREM: return LowerDivRem(Op, DAG); |
10680 | case ISD::DYNAMIC_STACKALLOC: |
10681 | if (Subtarget->isTargetWindows()) |
10682 | return LowerDYNAMIC_STACKALLOC(Op, DAG); |
10683 | llvm_unreachable("Don't know how to custom lower this!"); |
10684 | case ISD::STRICT_FP_ROUND: |
10685 | case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); |
10686 | case ISD::STRICT_FP_EXTEND: |
10687 | case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); |
10688 | case ISD::STRICT_FSETCC: |
10689 | case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG); |
10690 | case ISD::SPONENTRY: |
10691 | return LowerSPONENTRY(Op, DAG); |
10692 | case ARMISD::WIN__DBZCHK: return SDValue(); |
10693 | } |
10694 | } |
10695 | |
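/// Replace an i64-producing arm.smlald/smlaldx/smlsld/smlsldx intrinsic with
/// the corresponding ARMISD node, splitting the 64-bit accumulator into two
/// i32 halves and rebuilding the i64 result with BUILD_PAIR.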
10696 | static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results, |
10697 | SelectionDAG &DAG) { |
10698 | unsigned IntNo = N->getConstantOperandVal(Num: 0); |
10699 | unsigned Opc = 0; |
10700 | if (IntNo == Intrinsic::arm_smlald) |
10701 | Opc = ARMISD::SMLALD; |
10702 | else if (IntNo == Intrinsic::arm_smlaldx) |
10703 | Opc = ARMISD::SMLALDX; |
10704 | else if (IntNo == Intrinsic::arm_smlsld) |
10705 | Opc = ARMISD::SMLSLD; |
10706 | else if (IntNo == Intrinsic::arm_smlsldx) |
10707 | Opc = ARMISD::SMLSLDX; |
10708 | else |
10709 | return; |
10710 | |
10711 | SDLoc dl(N); |
10712 | SDValue Lo, Hi; |
10713 | std::tie(args&: Lo, args&: Hi) = DAG.SplitScalar(N: N->getOperand(Num: 3), DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
10714 | |
10715 | SDValue LongMul = DAG.getNode(Opcode: Opc, DL: dl, |
10716 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
10717 | N1: N->getOperand(Num: 1), N2: N->getOperand(Num: 2), |
10718 | N3: Lo, N4: Hi); |
10719 | Results.push_back(Elt: DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, |
10720 | N1: LongMul.getValue(R: 0), N2: LongMul.getValue(R: 1))); |
10721 | } |
10722 | |
10723 | /// ReplaceNodeResults - Replace the results of node with an illegal result |
10724 | /// type with new values built out of custom code. |
10725 | void ARMTargetLowering::ReplaceNodeResults(SDNode *N, |
10726 | SmallVectorImpl<SDValue> &Results, |
10727 | SelectionDAG &DAG) const { |
10728 | SDValue Res; |
10729 | switch (N->getOpcode()) { |
10730 | default: |
10731 | llvm_unreachable("Don't know how to custom expand this!"); |
10732 | case ISD::READ_REGISTER: |
10733 | ExpandREAD_REGISTER(N, Results, DAG); |
10734 | break; |
10735 | case ISD::BITCAST: |
10736 | Res = ExpandBITCAST(N, DAG, Subtarget); |
10737 | break; |
10738 | case ISD::SRL: |
10739 | case ISD::SRA: |
10740 | case ISD::SHL: |
10741 | Res = Expand64BitShift(N, DAG, ST: Subtarget); |
10742 | break; |
10743 | case ISD::SREM: |
10744 | case ISD::UREM: |
10745 | Res = LowerREM(N, DAG); |
10746 | break; |
10747 | case ISD::SDIVREM: |
10748 | case ISD::UDIVREM: |
10749 | Res = LowerDivRem(Op: SDValue(N, 0), DAG); |
10750 | assert(Res.getNumOperands() == 2 && "DivRem needs two values"); |
10751 | Results.push_back(Elt: Res.getValue(R: 0)); |
10752 | Results.push_back(Elt: Res.getValue(R: 1)); |
10753 | return; |
10754 | case ISD::SADDSAT: |
10755 | case ISD::SSUBSAT: |
10756 | case ISD::UADDSAT: |
10757 | case ISD::USUBSAT: |
10758 | Res = LowerADDSUBSAT(Op: SDValue(N, 0), DAG, Subtarget); |
10759 | break; |
10760 | case ISD::READCYCLECOUNTER: |
10761 | ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); |
10762 | return; |
10763 | case ISD::UDIV: |
10764 | case ISD::SDIV: |
10765 | assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows"); |
10766 | return ExpandDIV_Windows(Op: SDValue(N, 0), DAG, Signed: N->getOpcode() == ISD::SDIV, |
10767 | Results); |
10768 | case ISD::ATOMIC_CMP_SWAP: |
10769 | ReplaceCMP_SWAP_64Results(N, Results, DAG); |
10770 | return; |
10771 | case ISD::INTRINSIC_WO_CHAIN: |
10772 | return ReplaceLongIntrinsic(N, Results, DAG); |
10773 | case ISD::LOAD: |
10774 | LowerLOAD(N, Results, DAG); |
10775 | break; |
10776 | case ISD::TRUNCATE: |
10777 | Res = LowerTruncate(N, DAG, Subtarget); |
10778 | break; |
10779 | case ISD::SIGN_EXTEND: |
10780 | case ISD::ZERO_EXTEND: |
10781 | Res = LowerVectorExtend(N, DAG, Subtarget); |
10782 | break; |
10783 | case ISD::FP_TO_SINT_SAT: |
10784 | case ISD::FP_TO_UINT_SAT: |
10785 | Res = LowerFP_TO_INT_SAT(Op: SDValue(N, 0), DAG, Subtarget); |
10786 | break; |
10787 | } |
10788 | if (Res.getNode()) |
10789 | Results.push_back(Elt: Res); |
10790 | } |
10791 | |
10792 | //===----------------------------------------------------------------------===// |
10793 | // ARM Scheduler Hooks |
10794 | //===----------------------------------------------------------------------===// |
10795 | |
10796 | /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and |
10797 | /// registers the function context. |
10798 | void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, |
10799 | MachineBasicBlock *MBB, |
10800 | MachineBasicBlock *DispatchBB, |
10801 | int FI) const { |
10802 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
10803 | "ROPI/RWPI not currently supported with SjLj" ); |
10804 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
10805 | DebugLoc dl = MI.getDebugLoc(); |
10806 | MachineFunction *MF = MBB->getParent(); |
10807 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
10808 | MachineConstantPool *MCP = MF->getConstantPool(); |
10809 | ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); |
10810 | const Function &F = MF->getFunction(); |
10811 | |
10812 | bool isThumb = Subtarget->isThumb(); |
10813 | bool isThumb2 = Subtarget->isThumb2(); |
10814 | |
10815 | unsigned PCLabelId = AFI->createPICLabelUId(); |
10816 | unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; |
10817 | ARMConstantPoolValue *CPV = |
10818 | ARMConstantPoolMBB::Create(C&: F.getContext(), mbb: DispatchBB, ID: PCLabelId, PCAdj); |
10819 | unsigned CPI = MCP->getConstantPoolIndex(V: CPV, Alignment: Align(4)); |
10820 | |
10821 | const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass |
10822 | : &ARM::GPRRegClass; |
10823 | |
10824 | // Grab constant pool and fixed stack memory operands. |
10825 | MachineMemOperand *CPMMO = |
10826 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getConstantPool(MF&: *MF), |
10827 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
10828 | |
10829 | MachineMemOperand *FIMMOSt = |
10830 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), |
10831 | F: MachineMemOperand::MOStore, Size: 4, BaseAlignment: Align(4)); |
10832 | |
10833 | // Load the address of the dispatch MBB into the jump buffer. |
10834 | if (isThumb2) { |
10835 | // Incoming value: jbuf |
10836 | // ldr.n r5, LCPI1_1 |
10837 | // orr r5, r5, #1 |
10838 | // add r5, pc |
10839 | // str r5, [$jbuf, #+4] ; &jbuf[1] |
10840 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
10841 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LDRpci), DestReg: NewVReg1) |
10842 | .addConstantPoolIndex(Idx: CPI) |
10843 | .addMemOperand(MMO: CPMMO) |
10844 | .add(MOs: predOps(Pred: ARMCC::AL)); |
10845 | // Set the low bit because of thumb mode. |
10846 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
10847 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2ORRri), DestReg: NewVReg2) |
10848 | .addReg(RegNo: NewVReg1, flags: RegState::Kill) |
10849 | .addImm(Val: 0x01) |
10850 | .add(MOs: predOps(Pred: ARMCC::AL)) |
10851 | .add(MO: condCodeOp()); |
10852 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
10853 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tPICADD), DestReg: NewVReg3) |
10854 | .addReg(RegNo: NewVReg2, flags: RegState::Kill) |
10855 | .addImm(Val: PCLabelId); |
10856 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::t2STRi12)) |
10857 | .addReg(RegNo: NewVReg3, flags: RegState::Kill) |
10858 | .addFrameIndex(Idx: FI) |
10859 | .addImm(Val: 36) // &jbuf[1] :: pc |
10860 | .addMemOperand(MMO: FIMMOSt) |
10861 | .add(MOs: predOps(Pred: ARMCC::AL)); |
10862 | } else if (isThumb) { |
10863 | // Incoming value: jbuf |
10864 | // ldr.n r1, LCPI1_4 |
10865 | // add r1, pc |
10866 | // mov r2, #1 |
10867 | // orrs r1, r2 |
10868 | // add r2, $jbuf, #+4 ; &jbuf[1] |
10869 | // str r1, [r2] |
10870 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
10871 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci), DestReg: NewVReg1) |
10872 | .addConstantPoolIndex(Idx: CPI) |
10873 | .addMemOperand(MMO: CPMMO) |
10874 | .add(MOs: predOps(Pred: ARMCC::AL)); |
10875 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
10876 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tPICADD), DestReg: NewVReg2) |
10877 | .addReg(RegNo: NewVReg1, flags: RegState::Kill) |
10878 | .addImm(Val: PCLabelId); |
10879 | // Set the low bit because of thumb mode. |
10880 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
10881 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tMOVi8), DestReg: NewVReg3) |
10882 | .addReg(RegNo: ARM::CPSR, flags: RegState::Define) |
10883 | .addImm(Val: 1) |
10884 | .add(MOs: predOps(Pred: ARMCC::AL)); |
10885 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
10886 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tORR), DestReg: NewVReg4) |
10887 | .addReg(RegNo: ARM::CPSR, flags: RegState::Define) |
10888 | .addReg(RegNo: NewVReg2, flags: RegState::Kill) |
10889 | .addReg(RegNo: NewVReg3, flags: RegState::Kill) |
10890 | .add(MOs: predOps(Pred: ARMCC::AL)); |
10891 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
10892 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDframe), DestReg: NewVReg5) |
10893 | .addFrameIndex(Idx: FI) |
10894 | .addImm(Val: 36); // &jbuf[1] :: pc |
10895 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tSTRi)) |
10896 | .addReg(RegNo: NewVReg4, flags: RegState::Kill) |
10897 | .addReg(RegNo: NewVReg5, flags: RegState::Kill) |
10898 | .addImm(Val: 0) |
10899 | .addMemOperand(MMO: FIMMOSt) |
10900 | .add(MOs: predOps(Pred: ARMCC::AL)); |
10901 | } else { |
10902 | // Incoming value: jbuf |
10903 | // ldr r1, LCPI1_1 |
10904 | // add r1, pc, r1 |
10905 | // str r1, [$jbuf, #+4] ; &jbuf[1] |
10906 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
10907 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRi12), DestReg: NewVReg1) |
10908 | .addConstantPoolIndex(Idx: CPI) |
10909 | .addImm(Val: 0) |
10910 | .addMemOperand(MMO: CPMMO) |
10911 | .add(MOs: predOps(Pred: ARMCC::AL)); |
10912 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
10913 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::PICADD), DestReg: NewVReg2) |
10914 | .addReg(RegNo: NewVReg1, flags: RegState::Kill) |
10915 | .addImm(Val: PCLabelId) |
10916 | .add(MOs: predOps(Pred: ARMCC::AL)); |
10917 | BuildMI(BB&: *MBB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::STRi12)) |
10918 | .addReg(RegNo: NewVReg2, flags: RegState::Kill) |
10919 | .addFrameIndex(Idx: FI) |
10920 | .addImm(Val: 36) // &jbuf[1] :: pc |
10921 | .addMemOperand(MMO: FIMMOSt) |
10922 | .add(MOs: predOps(Pred: ARMCC::AL)); |
10923 | } |
10924 | } |
10925 | |
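/// EmitSjLjDispatchBlock - Emit the SjLj exception dispatch block: set up the
/// function context in the entry block, then build a dispatch block that loads
/// the call site index, bounds-checks it against the number of landing pads
/// (trapping when out of range), and jumps through an inline jump table to the
/// selected landing pad.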
10926 | void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, |
10927 | MachineBasicBlock *MBB) const { |
10928 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
10929 | DebugLoc dl = MI.getDebugLoc(); |
10930 | MachineFunction *MF = MBB->getParent(); |
10931 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
10932 | MachineFrameInfo &MFI = MF->getFrameInfo(); |
10933 | int FI = MFI.getFunctionContextIndex(); |
10934 | |
10935 | const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass |
10936 | : &ARM::GPRnopcRegClass; |
10937 | |
10938 | // Get a mapping of the call site numbers to all of the landing pads they're |
10939 | // associated with. |
10940 | DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad; |
10941 | unsigned MaxCSNum = 0; |
10942 | for (MachineBasicBlock &BB : *MF) { |
10943 | if (!BB.isEHPad()) |
10944 | continue; |
10945 | |
10946 | // FIXME: We should assert that the EH_LABEL is the first MI in the landing |
10947 | // pad. |
10948 | for (MachineInstr &II : BB) { |
10949 | if (!II.isEHLabel()) |
10950 | continue; |
10951 | |
10952 | MCSymbol *Sym = II.getOperand(i: 0).getMCSymbol(); |
10953 | if (!MF->hasCallSiteLandingPad(Sym)) continue; |
10954 | |
10955 | SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym); |
10956 | for (unsigned Idx : CallSiteIdxs) { |
10957 | CallSiteNumToLPad[Idx].push_back(Elt: &BB); |
10958 | MaxCSNum = std::max(a: MaxCSNum, b: Idx); |
10959 | } |
10960 | break; |
10961 | } |
10962 | } |
10963 | |
10964 | // Get an ordered list of the machine basic blocks for the jump table. |
10965 | std::vector<MachineBasicBlock*> LPadList; |
10966 | SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs; |
10967 | LPadList.reserve(n: CallSiteNumToLPad.size()); |
10968 | for (unsigned I = 1; I <= MaxCSNum; ++I) { |
10969 | SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; |
10970 | for (MachineBasicBlock *MBB : MBBList) { |
10971 | LPadList.push_back(x: MBB); |
10972 | InvokeBBs.insert(I: MBB->pred_begin(), E: MBB->pred_end()); |
10973 | } |
10974 | } |
10975 | |
10976 | assert(!LPadList.empty() && |
10977 | "No landing pad destinations for the dispatch jump table!" ); |
10978 | |
10979 | // Create the jump table and associated information. |
10980 | MachineJumpTableInfo *JTI = |
10981 | MF->getOrCreateJumpTableInfo(JTEntryKind: MachineJumpTableInfo::EK_Inline); |
10982 | unsigned MJTI = JTI->createJumpTableIndex(DestBBs: LPadList); |
10983 | |
10984 | // Create the MBBs for the dispatch code. |
10985 | |
10986 | // Shove the dispatch's address into the return slot in the function context. |
10987 | MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); |
10988 | DispatchBB->setIsEHPad(); |
10989 | |
10990 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
10991 | unsigned trap_opcode; |
10992 | if (Subtarget->isThumb()) |
10993 | trap_opcode = ARM::tTRAP; |
10994 | else |
10995 | trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; |
10996 | |
10997 | BuildMI(BB: TrapBB, MIMD: dl, MCID: TII->get(Opcode: trap_opcode)); |
10998 | DispatchBB->addSuccessor(Succ: TrapBB); |
10999 | |
11000 | MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); |
11001 | DispatchBB->addSuccessor(Succ: DispContBB); |
11002 | |
11003 | // Insert the new MBBs into the function. |
11004 | MF->insert(MBBI: MF->end(), MBB: DispatchBB); |
11005 | MF->insert(MBBI: MF->end(), MBB: DispContBB); |
11006 | MF->insert(MBBI: MF->end(), MBB: TrapBB); |
11007 | |
11008 | // Insert code into the entry block that creates and registers the function |
11009 | // context. |
11010 | SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); |
11011 | |
11012 | MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( |
11013 | PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), |
11014 | F: MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, Size: 4, BaseAlignment: Align(4)); |
11015 | |
11016 | MachineInstrBuilder MIB; |
11017 | MIB = BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::Int_eh_sjlj_dispatchsetup)); |
11018 | |
11019 | const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); |
11020 | const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); |
11021 | |
11022 | // Add a register mask with no preserved registers. This results in all |
11023 | // registers being marked as clobbered. This can't work if the dispatch block |
11024 | // is in a Thumb1 function and is linked with ARM code which uses the FP |
11025 | // registers, as there is no way to preserve the FP registers in Thumb1 mode. |
11026 | MIB.addRegMask(Mask: RI.getSjLjDispatchPreservedMask(MF: *MF)); |
11027 | |
11028 | bool IsPositionIndependent = isPositionIndependent(); |
11029 | unsigned NumLPads = LPadList.size(); |
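// Load the call site index from the function context, trap if it exceeds the
// number of landing pads, and otherwise index into the jump table to reach
// the matching landing pad.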
11030 | if (Subtarget->isThumb2()) { |
11031 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
11032 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LDRi12), DestReg: NewVReg1) |
11033 | .addFrameIndex(Idx: FI) |
11034 | .addImm(Val: 4) |
11035 | .addMemOperand(MMO: FIMMOLd) |
11036 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11037 | |
11038 | if (NumLPads < 256) { |
11039 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2CMPri)) |
11040 | .addReg(RegNo: NewVReg1) |
11041 | .addImm(Val: LPadList.size()) |
11042 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11043 | } else { |
11044 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
11045 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2MOVi16), DestReg: VReg1) |
11046 | .addImm(Val: NumLPads & 0xFFFF) |
11047 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11048 | |
11049 | unsigned VReg2 = VReg1; |
11050 | if ((NumLPads & 0xFFFF0000) != 0) { |
11051 | VReg2 = MRI->createVirtualRegister(RegClass: TRC); |
11052 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2MOVTi16), DestReg: VReg2) |
11053 | .addReg(RegNo: VReg1) |
11054 | .addImm(Val: NumLPads >> 16) |
11055 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11056 | } |
11057 | |
11058 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2CMPrr)) |
11059 | .addReg(RegNo: NewVReg1) |
11060 | .addReg(RegNo: VReg2) |
11061 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11062 | } |
11063 | |
11064 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2Bcc)) |
11065 | .addMBB(MBB: TrapBB) |
11066 | .addImm(Val: ARMCC::HI) |
11067 | .addReg(RegNo: ARM::CPSR); |
11068 | |
11069 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
11070 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2LEApcrelJT), DestReg: NewVReg3) |
11071 | .addJumpTableIndex(Idx: MJTI) |
11072 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11073 | |
11074 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
11075 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2ADDrs), DestReg: NewVReg4) |
11076 | .addReg(RegNo: NewVReg3, flags: RegState::Kill) |
11077 | .addReg(RegNo: NewVReg1) |
11078 | .addImm(Val: ARM_AM::getSORegOpc(ShOp: ARM_AM::lsl, Imm: 2)) |
11079 | .add(MOs: predOps(Pred: ARMCC::AL)) |
11080 | .add(MO: condCodeOp()); |
11081 | |
11082 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2BR_JT)) |
11083 | .addReg(RegNo: NewVReg4, flags: RegState::Kill) |
11084 | .addReg(RegNo: NewVReg1) |
11085 | .addJumpTableIndex(Idx: MJTI); |
11086 | } else if (Subtarget->isThumb()) { |
11087 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
11088 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRspi), DestReg: NewVReg1) |
11089 | .addFrameIndex(Idx: FI) |
11090 | .addImm(Val: 1) |
11091 | .addMemOperand(MMO: FIMMOLd) |
11092 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11093 | |
11094 | if (NumLPads < 256) { |
11095 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tCMPi8)) |
11096 | .addReg(RegNo: NewVReg1) |
11097 | .addImm(Val: NumLPads) |
11098 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11099 | } else { |
11100 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
11101 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
11102 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: NumLPads); |
11103 | |
11104 | // MachineConstantPool wants an explicit alignment. |
11105 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
11106 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
11107 | |
11108 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
11109 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci)) |
11110 | .addReg(RegNo: VReg1, flags: RegState::Define) |
11111 | .addConstantPoolIndex(Idx) |
11112 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11113 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tCMPr)) |
11114 | .addReg(RegNo: NewVReg1) |
11115 | .addReg(RegNo: VReg1) |
11116 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11117 | } |
11118 | |
11119 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBcc)) |
11120 | .addMBB(MBB: TrapBB) |
11121 | .addImm(Val: ARMCC::HI) |
11122 | .addReg(RegNo: ARM::CPSR); |
11123 | |
11124 | Register NewVReg2 = MRI->createVirtualRegister(RegClass: TRC); |
11125 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLSLri), DestReg: NewVReg2) |
11126 | .addReg(RegNo: ARM::CPSR, flags: RegState::Define) |
11127 | .addReg(RegNo: NewVReg1) |
11128 | .addImm(Val: 2) |
11129 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11130 | |
11131 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
11132 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLEApcrelJT), DestReg: NewVReg3) |
11133 | .addJumpTableIndex(Idx: MJTI) |
11134 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11135 | |
11136 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
11137 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDrr), DestReg: NewVReg4) |
11138 | .addReg(RegNo: ARM::CPSR, flags: RegState::Define) |
11139 | .addReg(RegNo: NewVReg2, flags: RegState::Kill) |
11140 | .addReg(RegNo: NewVReg3) |
11141 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11142 | |
11143 | MachineMemOperand *JTMMOLd = |
11144 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getJumpTable(MF&: *MF), |
11145 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
11146 | |
11147 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
11148 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRi), DestReg: NewVReg5) |
11149 | .addReg(RegNo: NewVReg4, flags: RegState::Kill) |
11150 | .addImm(Val: 0) |
11151 | .addMemOperand(MMO: JTMMOLd) |
11152 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11153 | |
11154 | unsigned NewVReg6 = NewVReg5; |
11155 | if (IsPositionIndependent) { |
11156 | NewVReg6 = MRI->createVirtualRegister(RegClass: TRC); |
11157 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDrr), DestReg: NewVReg6) |
11158 | .addReg(RegNo: ARM::CPSR, flags: RegState::Define) |
11159 | .addReg(RegNo: NewVReg5, flags: RegState::Kill) |
11160 | .addReg(RegNo: NewVReg3) |
11161 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11162 | } |
11163 | |
11164 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBR_JTr)) |
11165 | .addReg(RegNo: NewVReg6, flags: RegState::Kill) |
11166 | .addJumpTableIndex(Idx: MJTI); |
11167 | } else { |
11168 | Register NewVReg1 = MRI->createVirtualRegister(RegClass: TRC); |
11169 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRi12), DestReg: NewVReg1) |
11170 | .addFrameIndex(Idx: FI) |
11171 | .addImm(Val: 4) |
11172 | .addMemOperand(MMO: FIMMOLd) |
11173 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11174 | |
11175 | if (NumLPads < 256) { |
11176 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPri)) |
11177 | .addReg(RegNo: NewVReg1) |
11178 | .addImm(Val: NumLPads) |
11179 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11180 | } else if (Subtarget->hasV6T2Ops() && isUInt<16>(x: NumLPads)) { |
11181 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
11182 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVi16), DestReg: VReg1) |
11183 | .addImm(Val: NumLPads & 0xFFFF) |
11184 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11185 | |
11186 | unsigned VReg2 = VReg1; |
11187 | if ((NumLPads & 0xFFFF0000) != 0) { |
11188 | VReg2 = MRI->createVirtualRegister(RegClass: TRC); |
11189 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVTi16), DestReg: VReg2) |
11190 | .addReg(RegNo: VReg1) |
11191 | .addImm(Val: NumLPads >> 16) |
11192 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11193 | } |
11194 | |
11195 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPrr)) |
11196 | .addReg(RegNo: NewVReg1) |
11197 | .addReg(RegNo: VReg2) |
11198 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11199 | } else { |
11200 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
11201 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
11202 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: NumLPads); |
11203 | |
11204 | // MachineConstantPool wants an explicit alignment. |
11205 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
11206 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
11207 | |
11208 | Register VReg1 = MRI->createVirtualRegister(RegClass: TRC); |
11209 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRcp)) |
11210 | .addReg(RegNo: VReg1, flags: RegState::Define) |
11211 | .addConstantPoolIndex(Idx) |
11212 | .addImm(Val: 0) |
11213 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11214 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::CMPrr)) |
11215 | .addReg(RegNo: NewVReg1) |
11216 | .addReg(RegNo: VReg1, flags: RegState::Kill) |
11217 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11218 | } |
11219 | |
11220 | BuildMI(BB: DispatchBB, MIMD: dl, MCID: TII->get(Opcode: ARM::Bcc)) |
11221 | .addMBB(MBB: TrapBB) |
11222 | .addImm(Val: ARMCC::HI) |
11223 | .addReg(RegNo: ARM::CPSR); |
11224 | |
11225 | Register NewVReg3 = MRI->createVirtualRegister(RegClass: TRC); |
11226 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::MOVsi), DestReg: NewVReg3) |
11227 | .addReg(RegNo: NewVReg1) |
11228 | .addImm(Val: ARM_AM::getSORegOpc(ShOp: ARM_AM::lsl, Imm: 2)) |
11229 | .add(MOs: predOps(Pred: ARMCC::AL)) |
11230 | .add(MO: condCodeOp()); |
11231 | Register NewVReg4 = MRI->createVirtualRegister(RegClass: TRC); |
11232 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LEApcrelJT), DestReg: NewVReg4) |
11233 | .addJumpTableIndex(Idx: MJTI) |
11234 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11235 | |
11236 | MachineMemOperand *JTMMOLd = |
11237 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getJumpTable(MF&: *MF), |
11238 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
11239 | Register NewVReg5 = MRI->createVirtualRegister(RegClass: TRC); |
11240 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRrs), DestReg: NewVReg5) |
11241 | .addReg(RegNo: NewVReg3, flags: RegState::Kill) |
11242 | .addReg(RegNo: NewVReg4) |
11243 | .addImm(Val: 0) |
11244 | .addMemOperand(MMO: JTMMOLd) |
11245 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11246 | |
11247 | if (IsPositionIndependent) { |
11248 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::BR_JTadd)) |
11249 | .addReg(RegNo: NewVReg5, flags: RegState::Kill) |
11250 | .addReg(RegNo: NewVReg4) |
11251 | .addJumpTableIndex(Idx: MJTI); |
11252 | } else { |
11253 | BuildMI(BB: DispContBB, MIMD: dl, MCID: TII->get(Opcode: ARM::BR_JTr)) |
11254 | .addReg(RegNo: NewVReg5, flags: RegState::Kill) |
11255 | .addJumpTableIndex(Idx: MJTI); |
11256 | } |
11257 | } |
11258 | |
11259 | // Add the jump table entries as successors to the MBB. |
11260 | SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; |
11261 | for (MachineBasicBlock *CurMBB : LPadList) { |
11262 | if (SeenMBBs.insert(Ptr: CurMBB).second) |
11263 | DispContBB->addSuccessor(Succ: CurMBB); |
11264 | } |
11265 | |
11266 | // N.B. the order the invoke BBs are processed in doesn't matter here. |
11267 | const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); |
11268 | SmallVector<MachineBasicBlock*, 64> MBBLPads; |
11269 | for (MachineBasicBlock *BB : InvokeBBs) { |
11270 | |
11271 | // Remove the landing pad successor from the invoke block and replace it |
11272 | // with the new dispatch block. |
11273 | SmallVector<MachineBasicBlock*, 4> Successors(BB->successors()); |
11274 | while (!Successors.empty()) { |
11275 | MachineBasicBlock *SMBB = Successors.pop_back_val(); |
11276 | if (SMBB->isEHPad()) { |
11277 | BB->removeSuccessor(Succ: SMBB); |
11278 | MBBLPads.push_back(Elt: SMBB); |
11279 | } |
11280 | } |
11281 | |
11282 | BB->addSuccessor(Succ: DispatchBB, Prob: BranchProbability::getZero()); |
11283 | BB->normalizeSuccProbs(); |
11284 | |
11285 | // Find the invoke call and mark all of the callee-saved registers as |
11286 | // 'implicit defined' so that they're spilled. This prevents code from |
11287 | // moving instructions to before the EH block, where they will never be |
11288 | // executed. |
11289 | for (MachineBasicBlock::reverse_iterator |
11290 | II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { |
11291 | if (!II->isCall()) continue; |
11292 | |
11293 | DenseMap<unsigned, bool> DefRegs; |
11294 | for (MachineInstr::mop_iterator |
11295 | OI = II->operands_begin(), OE = II->operands_end(); |
11296 | OI != OE; ++OI) { |
11297 | if (!OI->isReg()) continue; |
11298 | DefRegs[OI->getReg()] = true; |
11299 | } |
11300 | |
11301 | MachineInstrBuilder MIB(*MF, &*II); |
11302 | |
11303 | for (unsigned i = 0; SavedRegs[i] != 0; ++i) { |
11304 | unsigned Reg = SavedRegs[i]; |
11305 | if (Subtarget->isThumb2() && |
11306 | !ARM::tGPRRegClass.contains(Reg) && |
11307 | !ARM::hGPRRegClass.contains(Reg)) |
11308 | continue; |
11309 | if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) |
11310 | continue; |
11311 | if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) |
11312 | continue; |
11313 | if (!DefRegs[Reg]) |
11314 | MIB.addReg(RegNo: Reg, flags: RegState::ImplicitDefine | RegState::Dead); |
11315 | } |
11316 | |
11317 | break; |
11318 | } |
11319 | } |
11320 | |
11321 | // Mark all former landing pads as non-landing pads. The dispatch is the only |
11322 | // landing pad now. |
11323 | for (MachineBasicBlock *MBBLPad : MBBLPads) |
11324 | MBBLPad->setIsEHPad(false); |
11325 | |
11326 | // The instruction is gone now. |
11327 | MI.eraseFromParent(); |
11328 | } |
11329 | |
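/// Return the successor of MBB other than Succ; MBB is expected to have
/// exactly two successors.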
11330 | static |
11331 | MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { |
11332 | for (MachineBasicBlock *S : MBB->successors()) |
11333 | if (S != Succ) |
11334 | return S; |
11335 | llvm_unreachable("Expecting a BB with two successors!" ); |
11336 | } |
11337 | |
11338 | /// Return the load opcode for a given load size. If the load size is >= 8, a |
11339 | /// NEON opcode will be returned. |
11340 | static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { |
11341 | if (LdSize >= 8) |
11342 | return LdSize == 16 ? ARM::VLD1q32wb_fixed |
11343 | : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; |
11344 | if (IsThumb1) |
11345 | return LdSize == 4 ? ARM::tLDRi |
11346 | : LdSize == 2 ? ARM::tLDRHi |
11347 | : LdSize == 1 ? ARM::tLDRBi : 0; |
11348 | if (IsThumb2) |
11349 | return LdSize == 4 ? ARM::t2LDR_POST |
11350 | : LdSize == 2 ? ARM::t2LDRH_POST |
11351 | : LdSize == 1 ? ARM::t2LDRB_POST : 0; |
11352 | return LdSize == 4 ? ARM::LDR_POST_IMM |
11353 | : LdSize == 2 ? ARM::LDRH_POST |
11354 | : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; |
11355 | } |
11356 | |
11357 | /// Return the store opcode for a given store size. If the store size is >= 8, |
11358 | /// a NEON opcode will be returned. |
11359 | static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { |
11360 | if (StSize >= 8) |
11361 | return StSize == 16 ? ARM::VST1q32wb_fixed |
11362 | : StSize == 8 ? ARM::VST1d32wb_fixed : 0; |
11363 | if (IsThumb1) |
11364 | return StSize == 4 ? ARM::tSTRi |
11365 | : StSize == 2 ? ARM::tSTRHi |
11366 | : StSize == 1 ? ARM::tSTRBi : 0; |
11367 | if (IsThumb2) |
11368 | return StSize == 4 ? ARM::t2STR_POST |
11369 | : StSize == 2 ? ARM::t2STRH_POST |
11370 | : StSize == 1 ? ARM::t2STRB_POST : 0; |
11371 | return StSize == 4 ? ARM::STR_POST_IMM |
11372 | : StSize == 2 ? ARM::STRH_POST |
11373 | : StSize == 1 ? ARM::STRB_POST_IMM : 0; |
11374 | } |
11375 | |
11376 | /// Emit a post-increment load operation with given size. The instructions |
11377 | /// will be added to BB at Pos. |
11378 | static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
11379 | const TargetInstrInfo *TII, const DebugLoc &dl, |
11380 | unsigned LdSize, unsigned Data, unsigned AddrIn, |
11381 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
11382 | unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2); |
11383 | assert(LdOpc != 0 && "Should have a load opcode"); |
11384 | if (LdSize >= 8) { |
11385 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
11386 | .addReg(RegNo: AddrOut, flags: RegState::Define) |
11387 | .addReg(RegNo: AddrIn) |
11388 | .addImm(Val: 0) |
11389 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11390 | } else if (IsThumb1) { |
11391 | // load + update AddrIn |
11392 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
11393 | .addReg(RegNo: AddrIn) |
11394 | .addImm(Val: 0) |
11395 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11396 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDi8), DestReg: AddrOut) |
11397 | .add(MO: t1CondCodeOp()) |
11398 | .addReg(RegNo: AddrIn) |
11399 | .addImm(Val: LdSize) |
11400 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11401 | } else if (IsThumb2) { |
11402 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
11403 | .addReg(RegNo: AddrOut, flags: RegState::Define) |
11404 | .addReg(RegNo: AddrIn) |
11405 | .addImm(Val: LdSize) |
11406 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11407 | } else { // arm |
11408 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: LdOpc), DestReg: Data) |
11409 | .addReg(RegNo: AddrOut, flags: RegState::Define) |
11410 | .addReg(RegNo: AddrIn) |
11411 | .addReg(RegNo: 0) |
11412 | .addImm(Val: LdSize) |
11413 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11414 | } |
11415 | } |
11416 | |
11417 | /// Emit a post-increment store operation with given size. The instructions |
11418 | /// will be added to BB at Pos. |
11419 | static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
11420 | const TargetInstrInfo *TII, const DebugLoc &dl, |
11421 | unsigned StSize, unsigned Data, unsigned AddrIn, |
11422 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
11423 | unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2); |
11424 | assert(StOpc != 0 && "Should have a store opcode"); |
11425 | if (StSize >= 8) { |
11426 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
11427 | .addReg(RegNo: AddrIn) |
11428 | .addImm(Val: 0) |
11429 | .addReg(RegNo: Data) |
11430 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11431 | } else if (IsThumb1) { |
11432 | // store + update AddrIn |
11433 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc)) |
11434 | .addReg(RegNo: Data) |
11435 | .addReg(RegNo: AddrIn) |
11436 | .addImm(Val: 0) |
11437 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11438 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: ARM::tADDi8), DestReg: AddrOut) |
11439 | .add(MO: t1CondCodeOp()) |
11440 | .addReg(RegNo: AddrIn) |
11441 | .addImm(Val: StSize) |
11442 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11443 | } else if (IsThumb2) { |
11444 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
11445 | .addReg(RegNo: Data) |
11446 | .addReg(RegNo: AddrIn) |
11447 | .addImm(Val: StSize) |
11448 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11449 | } else { // arm |
11450 | BuildMI(BB&: *BB, I: Pos, MIMD: dl, MCID: TII->get(Opcode: StOpc), DestReg: AddrOut) |
11451 | .addReg(RegNo: Data) |
11452 | .addReg(RegNo: AddrIn) |
11453 | .addReg(RegNo: 0) |
11454 | .addImm(Val: StSize) |
11455 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11456 | } |
11457 | } |
11458 | |
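/// EmitStructByval - Expand the struct-byval copy pseudo either into unrolled
/// post-incrementing load/store pairs or, for large copies, into a copy loop
/// with a byte-wise epilogue for any leftover bytes.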
11459 | MachineBasicBlock * |
11460 | ARMTargetLowering::EmitStructByval(MachineInstr &MI, |
11461 | MachineBasicBlock *BB) const { |
11462 | // This pseudo instruction has 4 operands: dst, src, size, alignment. |
11463 | // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). |
11464 | // Otherwise, we will generate unrolled scalar copies. |
11465 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
11466 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
11467 | MachineFunction::iterator It = ++BB->getIterator(); |
11468 | |
11469 | Register dest = MI.getOperand(i: 0).getReg(); |
11470 | Register src = MI.getOperand(i: 1).getReg(); |
11471 | unsigned SizeVal = MI.getOperand(i: 2).getImm(); |
11472 | unsigned Alignment = MI.getOperand(i: 3).getImm(); |
11473 | DebugLoc dl = MI.getDebugLoc(); |
11474 | |
11475 | MachineFunction *MF = BB->getParent(); |
11476 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
11477 | unsigned UnitSize = 0; |
11478 | const TargetRegisterClass *TRC = nullptr; |
11479 | const TargetRegisterClass *VecTRC = nullptr; |
11480 | |
11481 | bool IsThumb1 = Subtarget->isThumb1Only(); |
11482 | bool IsThumb2 = Subtarget->isThumb2(); |
11483 | bool IsThumb = Subtarget->isThumb(); |
11484 | |
11485 | if (Alignment & 1) { |
11486 | UnitSize = 1; |
11487 | } else if (Alignment & 2) { |
11488 | UnitSize = 2; |
11489 | } else { |
11490 | // Check whether we can use NEON instructions. |
11491 | if (!MF->getFunction().hasFnAttribute(Kind: Attribute::NoImplicitFloat) && |
11492 | Subtarget->hasNEON()) { |
11493 | if ((Alignment % 16 == 0) && SizeVal >= 16) |
11494 | UnitSize = 16; |
11495 | else if ((Alignment % 8 == 0) && SizeVal >= 8) |
11496 | UnitSize = 8; |
11497 | } |
11498 | // Can't use NEON instructions. |
11499 | if (UnitSize == 0) |
11500 | UnitSize = 4; |
11501 | } |
11502 | |
11503 | // Select the correct opcode and register class for unit size load/store |
11504 | bool IsNeon = UnitSize >= 8; |
11505 | TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
11506 | if (IsNeon) |
11507 | VecTRC = UnitSize == 16 ? &ARM::DPairRegClass |
11508 | : UnitSize == 8 ? &ARM::DPRRegClass |
11509 | : nullptr; |
11510 | |
11511 | unsigned BytesLeft = SizeVal % UnitSize; |
11512 | unsigned LoopSize = SizeVal - BytesLeft; |
11513 | |
11514 | if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { |
11515 | // Use LDR and STR to copy. |
11516 | // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) |
11517 | // [destOut] = STR_POST(scratch, destIn, UnitSize) |
11518 | unsigned srcIn = src; |
11519 | unsigned destIn = dest; |
11520 | for (unsigned i = 0; i < LoopSize; i+=UnitSize) { |
11521 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
11522 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
11523 | Register scratch = MRI.createVirtualRegister(RegClass: IsNeon ? VecTRC : TRC); |
11524 | emitPostLd(BB, Pos: MI, TII, dl, LdSize: UnitSize, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
11525 | IsThumb1, IsThumb2); |
11526 | emitPostSt(BB, Pos: MI, TII, dl, StSize: UnitSize, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
11527 | IsThumb1, IsThumb2); |
11528 | srcIn = srcOut; |
11529 | destIn = destOut; |
11530 | } |
11531 | |
11532 | // Handle the leftover bytes with LDRB and STRB. |
11533 | // [scratch, srcOut] = LDRB_POST(srcIn, 1) |
11534 | // [destOut] = STRB_POST(scratch, destIn, 1) |
11535 | for (unsigned i = 0; i < BytesLeft; i++) { |
11536 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
11537 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
11538 | Register scratch = MRI.createVirtualRegister(RegClass: TRC); |
11539 | emitPostLd(BB, Pos: MI, TII, dl, LdSize: 1, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
11540 | IsThumb1, IsThumb2); |
11541 | emitPostSt(BB, Pos: MI, TII, dl, StSize: 1, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
11542 | IsThumb1, IsThumb2); |
11543 | srcIn = srcOut; |
11544 | destIn = destOut; |
11545 | } |
11546 | MI.eraseFromParent(); // The instruction is gone now. |
11547 | return BB; |
11548 | } |
11549 | |
11550 | // Expand the pseudo op to a loop. |
11551 | // thisMBB: |
11552 | // ... |
11553 | // movw varEnd, # --> with thumb2 |
11554 | // movt varEnd, # |
11555 | // ldrcp varEnd, idx --> without thumb2 |
11556 | // fallthrough --> loopMBB |
11557 | // loopMBB: |
11558 | // PHI varPhi, varEnd, varLoop |
11559 | // PHI srcPhi, src, srcLoop |
11560 | // PHI destPhi, dst, destLoop |
11561 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
11562 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize) |
11563 | // subs varLoop, varPhi, #UnitSize |
11564 | // bne loopMBB |
11565 | // fallthrough --> exitMBB |
11566 | // exitMBB: |
11567 | // epilogue to handle left-over bytes |
11568 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
11569 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
11570 | MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(BB: LLVM_BB); |
11571 | MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(BB: LLVM_BB); |
11572 | MF->insert(MBBI: It, MBB: loopMBB); |
11573 | MF->insert(MBBI: It, MBB: exitMBB); |
11574 | |
11575 | // Set the call frame size on entry to the new basic blocks. |
11576 | unsigned CallFrameSize = TII->getCallFrameSizeAt(MI); |
11577 | loopMBB->setCallFrameSize(CallFrameSize); |
11578 | exitMBB->setCallFrameSize(CallFrameSize); |
11579 | |
11580 | // Transfer the remainder of BB and its successor edges to exitMBB. |
11581 | exitMBB->splice(Where: exitMBB->begin(), Other: BB, |
11582 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end()); |
11583 | exitMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB); |
11584 | |
11585 | // Load an immediate to varEnd. |
11586 | Register varEnd = MRI.createVirtualRegister(RegClass: TRC); |
11587 | if (Subtarget->useMovt()) { |
11588 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: IsThumb ? ARM::t2MOVi32imm : ARM::MOVi32imm), |
11589 | DestReg: varEnd) |
11590 | .addImm(Val: LoopSize); |
11591 | } else if (Subtarget->genExecuteOnly()) { |
11592 | assert(IsThumb && "Non-thumb expected to have used movt" ); |
11593 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::tMOVi32imm), DestReg: varEnd).addImm(Val: LoopSize); |
11594 | } else { |
11595 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
11596 | Type *Int32Ty = Type::getInt32Ty(C&: MF->getFunction().getContext()); |
11597 | const Constant *C = ConstantInt::get(Ty: Int32Ty, V: LoopSize); |
11598 | |
11599 | // MachineConstantPool wants an explicit alignment. |
11600 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Ty: Int32Ty); |
11601 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
11602 | MachineMemOperand *CPMMO = |
11603 | MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getConstantPool(MF&: *MF), |
11604 | F: MachineMemOperand::MOLoad, Size: 4, BaseAlignment: Align(4)); |
11605 | |
11606 | if (IsThumb) |
11607 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDRpci)) |
11608 | .addReg(RegNo: varEnd, flags: RegState::Define) |
11609 | .addConstantPoolIndex(Idx) |
11610 | .add(MOs: predOps(Pred: ARMCC::AL)) |
11611 | .addMemOperand(MMO: CPMMO); |
11612 | else |
11613 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::LDRcp)) |
11614 | .addReg(RegNo: varEnd, flags: RegState::Define) |
11615 | .addConstantPoolIndex(Idx) |
11616 | .addImm(Val: 0) |
11617 | .add(MOs: predOps(Pred: ARMCC::AL)) |
11618 | .addMemOperand(MMO: CPMMO); |
11619 | } |
11620 | BB->addSuccessor(Succ: loopMBB); |
11621 | |
11622 | // Generate the loop body: |
11623 | // varPhi = PHI(varLoop, varEnd) |
11624 | // srcPhi = PHI(srcLoop, src) |
11625 | // destPhi = PHI(destLoop, dst) |
11626 | MachineBasicBlock *entryBB = BB; |
11627 | BB = loopMBB; |
11628 | Register varLoop = MRI.createVirtualRegister(RegClass: TRC); |
11629 | Register varPhi = MRI.createVirtualRegister(RegClass: TRC); |
11630 | Register srcLoop = MRI.createVirtualRegister(RegClass: TRC); |
11631 | Register srcPhi = MRI.createVirtualRegister(RegClass: TRC); |
11632 | Register destLoop = MRI.createVirtualRegister(RegClass: TRC); |
11633 | Register destPhi = MRI.createVirtualRegister(RegClass: TRC); |
11634 | |
11635 | BuildMI(BB&: *BB, I: BB->begin(), MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: varPhi) |
11636 | .addReg(RegNo: varLoop).addMBB(MBB: loopMBB) |
11637 | .addReg(RegNo: varEnd).addMBB(MBB: entryBB); |
11638 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: srcPhi) |
11639 | .addReg(RegNo: srcLoop).addMBB(MBB: loopMBB) |
11640 | .addReg(RegNo: src).addMBB(MBB: entryBB); |
11641 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: destPhi) |
11642 | .addReg(RegNo: destLoop).addMBB(MBB: loopMBB) |
11643 | .addReg(RegNo: dest).addMBB(MBB: entryBB); |
11644 | |
11645 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
11646 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize) |
11647 | Register scratch = MRI.createVirtualRegister(RegClass: IsNeon ? VecTRC : TRC); |
11648 | emitPostLd(BB, Pos: BB->end(), TII, dl, LdSize: UnitSize, Data: scratch, AddrIn: srcPhi, AddrOut: srcLoop, |
11649 | IsThumb1, IsThumb2); |
11650 | emitPostSt(BB, Pos: BB->end(), TII, dl, StSize: UnitSize, Data: scratch, AddrIn: destPhi, AddrOut: destLoop, |
11651 | IsThumb1, IsThumb2); |
11652 | |
11653 | // Decrement loop variable by UnitSize. |
11654 | if (IsThumb1) { |
11655 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, MCID: TII->get(Opcode: ARM::tSUBi8), DestReg: varLoop) |
11656 | .add(MO: t1CondCodeOp()) |
11657 | .addReg(RegNo: varPhi) |
11658 | .addImm(Val: UnitSize) |
11659 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11660 | } else { |
11661 | MachineInstrBuilder MIB = |
11662 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, |
11663 | MCID: TII->get(Opcode: IsThumb2 ? ARM::t2SUBri : ARM::SUBri), DestReg: varLoop); |
11664 | MIB.addReg(RegNo: varPhi) |
11665 | .addImm(Val: UnitSize) |
11666 | .add(MOs: predOps(Pred: ARMCC::AL)) |
11667 | .add(MO: condCodeOp()); |
11668 | MIB->getOperand(i: 5).setReg(ARM::CPSR); |
11669 | MIB->getOperand(i: 5).setIsDef(true); |
11670 | } |
11671 | BuildMI(BB&: *BB, I: BB->end(), MIMD: dl, |
11672 | MCID: TII->get(Opcode: IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
11673 | .addMBB(MBB: loopMBB).addImm(Val: ARMCC::NE).addReg(RegNo: ARM::CPSR); |
11674 | |
11675 | // loopMBB can loop back to loopMBB or fall through to exitMBB. |
11676 | BB->addSuccessor(Succ: loopMBB); |
11677 | BB->addSuccessor(Succ: exitMBB); |
11678 | |
11679 | // Add epilogue to handle BytesLeft. |
11680 | BB = exitMBB; |
11681 | auto StartOfExit = exitMBB->begin(); |
11682 | |
11683 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
11684 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
11685 | unsigned srcIn = srcLoop; |
11686 | unsigned destIn = destLoop; |
11687 | for (unsigned i = 0; i < BytesLeft; i++) { |
11688 | Register srcOut = MRI.createVirtualRegister(RegClass: TRC); |
11689 | Register destOut = MRI.createVirtualRegister(RegClass: TRC); |
11690 | Register scratch = MRI.createVirtualRegister(RegClass: TRC); |
11691 | emitPostLd(BB, Pos: StartOfExit, TII, dl, LdSize: 1, Data: scratch, AddrIn: srcIn, AddrOut: srcOut, |
11692 | IsThumb1, IsThumb2); |
11693 | emitPostSt(BB, Pos: StartOfExit, TII, dl, StSize: 1, Data: scratch, AddrIn: destIn, AddrOut: destOut, |
11694 | IsThumb1, IsThumb2); |
11695 | srcIn = srcOut; |
11696 | destIn = destOut; |
11697 | } |
11698 | |
11699 | MI.eraseFromParent(); // The instruction is gone now. |
11700 | return BB; |
11701 | } |
11702 | |
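/// EmitLowered__chkstk - Lower the Windows stack-probe pseudo into a call to
/// __chkstk (which takes the allocation size in words in R4 and returns the
/// adjustment in bytes in R4) followed by the actual SP adjustment.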
11703 | MachineBasicBlock * |
11704 | ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI, |
11705 | MachineBasicBlock *MBB) const { |
11706 | const TargetMachine &TM = getTargetMachine(); |
11707 | const TargetInstrInfo &TII = *Subtarget->getInstrInfo(); |
11708 | DebugLoc DL = MI.getDebugLoc(); |
11709 | |
11710 | assert(Subtarget->isTargetWindows() && |
11711 | "__chkstk is only supported on Windows" ); |
11712 | assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode" ); |
11713 | |
11714 | // __chkstk takes the number of words to allocate on the stack in R4, and |
11715 | // returns the stack adjustment in number of bytes in R4. This will not |
11716 | // clobber any other registers (other than the obvious lr). |
11717 | // |
11718 | // Although, technically, IP should be considered a register which may be |
11719 | // clobbered, the call itself will not touch it. Windows on ARM is a pure |
11720 | // thumb-2 environment, so there is no interworking required. As a result, we |
11721 | // do not expect a veneer to be emitted by the linker, clobbering IP. |
11722 | // |
11723 | // Each module receives its own copy of __chkstk, so no import thunk is |
11724 | // required, again, ensuring that IP is not clobbered. |
11725 | // |
11726 | // Finally, although some linkers may theoretically provide a trampoline for |
11727 | // out of range calls (which is quite common due to a 32M range limitation of |
11728 | // branches for Thumb), we can generate the long-call version via |
11729 | // -mcmodel=large, alleviating the need for the trampoline which may clobber |
11730 | // IP. |
11731 | |
11732 | switch (TM.getCodeModel()) { |
11733 | case CodeModel::Tiny: |
11734 | llvm_unreachable("Tiny code model not available on ARM." ); |
11735 | case CodeModel::Small: |
11736 | case CodeModel::Medium: |
11737 | case CodeModel::Kernel: |
11738 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::tBL)) |
11739 | .add(MOs: predOps(Pred: ARMCC::AL)) |
11740 | .addExternalSymbol(FnName: "__chkstk" ) |
11741 | .addReg(RegNo: ARM::R4, flags: RegState::Implicit | RegState::Kill) |
11742 | .addReg(RegNo: ARM::R4, flags: RegState::Implicit | RegState::Define) |
11743 | .addReg(RegNo: ARM::R12, |
11744 | flags: RegState::Implicit | RegState::Define | RegState::Dead) |
11745 | .addReg(RegNo: ARM::CPSR, |
11746 | flags: RegState::Implicit | RegState::Define | RegState::Dead); |
11747 | break; |
11748 | case CodeModel::Large: { |
11749 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); |
11750 | Register Reg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
11751 | |
11752 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::t2MOVi32imm), DestReg: Reg) |
11753 | .addExternalSymbol(FnName: "__chkstk" ); |
11754 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: gettBLXrOpcode(MF: *MBB->getParent()))) |
11755 | .add(MOs: predOps(Pred: ARMCC::AL)) |
11756 | .addReg(RegNo: Reg, flags: RegState::Kill) |
11757 | .addReg(RegNo: ARM::R4, flags: RegState::Implicit | RegState::Kill) |
11758 | .addReg(RegNo: ARM::R4, flags: RegState::Implicit | RegState::Define) |
11759 | .addReg(RegNo: ARM::R12, |
11760 | flags: RegState::Implicit | RegState::Define | RegState::Dead) |
11761 | .addReg(RegNo: ARM::CPSR, |
11762 | flags: RegState::Implicit | RegState::Define | RegState::Dead); |
11763 | break; |
11764 | } |
11765 | } |
11766 | |
11767 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII.get(Opcode: ARM::t2SUBrr), DestReg: ARM::SP) |
11768 | .addReg(RegNo: ARM::SP, flags: RegState::Kill) |
11769 | .addReg(RegNo: ARM::R4, flags: RegState::Kill) |
11770 | .setMIFlags(MachineInstr::FrameSetup) |
11771 | .add(MOs: predOps(Pred: ARMCC::AL)) |
11772 | .add(MO: condCodeOp()); |
11773 | |
11774 | MI.eraseFromParent(); |
11775 | return MBB; |
11776 | } |
11777 | |
11778 | MachineBasicBlock * |
11779 | ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, |
11780 | MachineBasicBlock *MBB) const { |
11781 | DebugLoc DL = MI.getDebugLoc(); |
11782 | MachineFunction *MF = MBB->getParent(); |
11783 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
11784 | |
11785 | MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); |
11786 | MF->insert(MBBI: ++MBB->getIterator(), MBB: ContBB); |
11787 | ContBB->splice(Where: ContBB->begin(), Other: MBB, |
11788 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: MBB->end()); |
11789 | ContBB->transferSuccessorsAndUpdatePHIs(FromMBB: MBB); |
11790 | MBB->addSuccessor(Succ: ContBB); |
11791 | |
11792 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
11793 | BuildMI(BB: TrapBB, MIMD: DL, MCID: TII->get(Opcode: ARM::t__brkdiv0)); |
11794 | MF->push_back(MBB: TrapBB); |
11795 | MBB->addSuccessor(Succ: TrapBB); |
11796 | |
11797 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII->get(Opcode: ARM::tCMPi8)) |
11798 | .addReg(RegNo: MI.getOperand(i: 0).getReg()) |
11799 | .addImm(Val: 0) |
11800 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11801 | BuildMI(BB&: *MBB, I&: MI, MIMD: DL, MCID: TII->get(Opcode: ARM::t2Bcc)) |
11802 | .addMBB(MBB: TrapBB) |
11803 | .addImm(Val: ARMCC::EQ) |
11804 | .addReg(RegNo: ARM::CPSR); |
11805 | |
11806 | MI.eraseFromParent(); |
11807 | return ContBB; |
11808 | } |
11809 | |
11810 | // The CPSR operand of SelectItr might be missing a kill marker |
11811 | // because there were multiple uses of CPSR, and ISel didn't know |
11812 | // which to mark. Figure out whether SelectItr should have had a |
11813 | // kill marker, and set it if it should. Returns the correct kill |
11814 | // marker value. |
11815 | static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, |
11816 | MachineBasicBlock* BB, |
11817 | const TargetRegisterInfo* TRI) { |
11818 | // Scan forward through BB for a use/def of CPSR. |
11819 | MachineBasicBlock::iterator miI(std::next(x: SelectItr)); |
11820 | for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { |
11821 | const MachineInstr& mi = *miI; |
11822 | if (mi.readsRegister(Reg: ARM::CPSR, /*TRI=*/nullptr)) |
11823 | return false; |
11824 | if (mi.definesRegister(Reg: ARM::CPSR, /*TRI=*/nullptr)) |
11825 | break; // Should have kill-flag - update below. |
11826 | } |
11827 | |
11828 | // If we hit the end of the block, check whether CPSR is live into a |
11829 | // successor. |
11830 | if (miI == BB->end()) { |
11831 | for (MachineBasicBlock *Succ : BB->successors()) |
11832 | if (Succ->isLiveIn(Reg: ARM::CPSR)) |
11833 | return false; |
11834 | } |
11835 | |
11836 | // We found a def, or hit the end of the basic block and CPSR wasn't live |
11837 | // out. SelectMI should have a kill flag on CPSR. |
11838 | SelectItr->addRegisterKilled(IncomingReg: ARM::CPSR, RegInfo: TRI); |
11839 | return true; |
11840 | } |
11841 | |
/// Adds logic in the loop entry MBB to calculate the loop iteration count and
/// adds t2WhileLoopSetup and t2WhileLoopStart to generate a WLS loop.
11844 | static Register genTPEntry(MachineBasicBlock *TpEntry, |
11845 | MachineBasicBlock *TpLoopBody, |
11846 | MachineBasicBlock *TpExit, Register OpSizeReg, |
11847 | const TargetInstrInfo *TII, DebugLoc Dl, |
11848 | MachineRegisterInfo &MRI) { |
11849 | // Calculates loop iteration count = ceil(n/16) = (n + 15) >> 4. |
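// For example, a size of n = 100 bytes gives (100 + 15) >> 4 = 7 iterations;
// the last iteration is tail-predicated to cover the remaining 4 bytes.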
11850 | Register AddDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
11851 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2ADDri), DestReg: AddDestReg) |
11852 | .addUse(RegNo: OpSizeReg) |
11853 | .addImm(Val: 15) |
11854 | .add(MOs: predOps(Pred: ARMCC::AL)) |
11855 | .addReg(RegNo: 0); |
11856 | |
11857 | Register LsrDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
11858 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LSRri), DestReg: LsrDestReg) |
11859 | .addUse(RegNo: AddDestReg, Flags: RegState::Kill) |
11860 | .addImm(Val: 4) |
11861 | .add(MOs: predOps(Pred: ARMCC::AL)) |
11862 | .addReg(RegNo: 0); |
11863 | |
11864 | Register TotalIterationsReg = MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
11865 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2WhileLoopSetup), DestReg: TotalIterationsReg) |
11866 | .addUse(RegNo: LsrDestReg, Flags: RegState::Kill); |
11867 | |
11868 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2WhileLoopStart)) |
11869 | .addUse(RegNo: TotalIterationsReg) |
11870 | .addMBB(MBB: TpExit); |
11871 | |
11872 | BuildMI(BB: TpEntry, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2B)) |
11873 | .addMBB(MBB: TpLoopBody) |
11874 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11875 | |
11876 | return TotalIterationsReg; |
11877 | } |
11878 | |
/// Adds logic in the loopBody MBB to generate MVE_VCTP, t2LoopDec and
/// t2LoopEnd. These are used by later passes to generate tail predicated
/// loops.
11882 | static void genTPLoopBody(MachineBasicBlock *TpLoopBody, |
11883 | MachineBasicBlock *TpEntry, MachineBasicBlock *TpExit, |
11884 | const TargetInstrInfo *TII, DebugLoc Dl, |
11885 | MachineRegisterInfo &MRI, Register OpSrcReg, |
11886 | Register OpDestReg, Register ElementCountReg, |
11887 | Register TotalIterationsReg, bool IsMemcpy) { |
11888 | // First insert 4 PHI nodes for: Current pointer to Src (if memcpy), Dest |
11889 | // array, loop iteration counter, predication counter. |
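// As a rough, MIR-like sketch (names illustrative, not the exact output),
// the body built below has this shape:
//   %src       = PHI [ OpSrcReg, entry ], [ %src.next, body ]   (memcpy only)
//   %dst       = PHI [ OpDestReg, entry ], [ %dst.next, body ]
//   %iter      = PHI [ TotalIterations, entry ], [ %iter.next, body ]
//   %elts      = PHI [ ElementCount, entry ], [ %elts.next, body ]
//   %vpr       = MVE_VCTP8 %elts
//   %elts.next = t2SUBri %elts, 16
//   %src.next, %val = MVE_VLDRBU8_post %src, 16, %vpr           (memcpy only)
//   %dst.next  = MVE_VSTRBU8_post %val, %dst, 16, %vpr
//   %iter.next = t2LoopDec %iter, 1
//   t2LoopEnd %iter.next, body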
11890 | |
11891 | Register SrcPhiReg, CurrSrcReg; |
11892 | if (IsMemcpy) { |
11893 | // Current position in the src array |
11894 | SrcPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
11895 | CurrSrcReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
11896 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: SrcPhiReg) |
11897 | .addUse(RegNo: OpSrcReg) |
11898 | .addMBB(MBB: TpEntry) |
11899 | .addUse(RegNo: CurrSrcReg) |
11900 | .addMBB(MBB: TpLoopBody); |
11901 | } |
11902 | |
11903 | // Current position in the dest array |
11904 | Register DestPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
11905 | Register CurrDestReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
11906 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: DestPhiReg) |
11907 | .addUse(RegNo: OpDestReg) |
11908 | .addMBB(MBB: TpEntry) |
11909 | .addUse(RegNo: CurrDestReg) |
11910 | .addMBB(MBB: TpLoopBody); |
11911 | |
11912 | // Current loop counter |
11913 | Register LoopCounterPhiReg = MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
11914 | Register RemainingLoopIterationsReg = |
11915 | MRI.createVirtualRegister(RegClass: &ARM::GPRlrRegClass); |
11916 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: LoopCounterPhiReg) |
11917 | .addUse(RegNo: TotalIterationsReg) |
11918 | .addMBB(MBB: TpEntry) |
11919 | .addUse(RegNo: RemainingLoopIterationsReg) |
11920 | .addMBB(MBB: TpLoopBody); |
11921 | |
11922 | // Predication counter |
11923 | Register PredCounterPhiReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
11924 | Register RemainingElementsReg = MRI.createVirtualRegister(RegClass: &ARM::rGPRRegClass); |
11925 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: PredCounterPhiReg) |
11926 | .addUse(RegNo: ElementCountReg) |
11927 | .addMBB(MBB: TpEntry) |
11928 | .addUse(RegNo: RemainingElementsReg) |
11929 | .addMBB(MBB: TpLoopBody); |
11930 | |
11931 | // Pass predication counter to VCTP |
11932 | Register VccrReg = MRI.createVirtualRegister(RegClass: &ARM::VCCRRegClass); |
11933 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VCTP8), DestReg: VccrReg) |
11934 | .addUse(RegNo: PredCounterPhiReg) |
11935 | .addImm(Val: ARMVCC::None) |
11936 | .addReg(RegNo: 0) |
11937 | .addReg(RegNo: 0); |
11938 | |
11939 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2SUBri), DestReg: RemainingElementsReg) |
11940 | .addUse(RegNo: PredCounterPhiReg) |
11941 | .addImm(Val: 16) |
11942 | .add(MOs: predOps(Pred: ARMCC::AL)) |
11943 | .addReg(RegNo: 0); |
11944 | |
11945 | // VLDRB (only if memcpy) and VSTRB instructions, predicated using VPR |
11946 | Register SrcValueReg; |
11947 | if (IsMemcpy) { |
11948 | SrcValueReg = MRI.createVirtualRegister(RegClass: &ARM::MQPRRegClass); |
11949 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VLDRBU8_post)) |
11950 | .addDef(RegNo: CurrSrcReg) |
11951 | .addDef(RegNo: SrcValueReg) |
11952 | .addReg(RegNo: SrcPhiReg) |
11953 | .addImm(Val: 16) |
11954 | .addImm(Val: ARMVCC::Then) |
11955 | .addUse(RegNo: VccrReg) |
11956 | .addReg(RegNo: 0); |
11957 | } else |
11958 | SrcValueReg = OpSrcReg; |
11959 | |
11960 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::MVE_VSTRBU8_post)) |
11961 | .addDef(RegNo: CurrDestReg) |
11962 | .addUse(RegNo: SrcValueReg) |
11963 | .addReg(RegNo: DestPhiReg) |
11964 | .addImm(Val: 16) |
11965 | .addImm(Val: ARMVCC::Then) |
11966 | .addUse(RegNo: VccrReg) |
11967 | .addReg(RegNo: 0); |
11968 | |
// Add the pseudo instructions for decrementing the loop counter and marking
// the end: t2LoopDec and t2LoopEnd
11971 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LoopDec), DestReg: RemainingLoopIterationsReg) |
11972 | .addUse(RegNo: LoopCounterPhiReg) |
11973 | .addImm(Val: 1); |
11974 | |
11975 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2LoopEnd)) |
11976 | .addUse(RegNo: RemainingLoopIterationsReg) |
11977 | .addMBB(MBB: TpLoopBody); |
11978 | |
11979 | BuildMI(BB: TpLoopBody, MIMD: Dl, MCID: TII->get(Opcode: ARM::t2B)) |
11980 | .addMBB(MBB: TpExit) |
11981 | .add(MOs: predOps(Pred: ARMCC::AL)); |
11982 | } |
11983 | |
11984 | MachineBasicBlock * |
11985 | ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, |
11986 | MachineBasicBlock *BB) const { |
11987 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
11988 | DebugLoc dl = MI.getDebugLoc(); |
11989 | bool isThumb2 = Subtarget->isThumb2(); |
11990 | switch (MI.getOpcode()) { |
11991 | default: { |
11992 | MI.print(OS&: errs()); |
11993 | llvm_unreachable("Unexpected instr type to insert" ); |
11994 | } |
11995 | |
11996 | // Thumb1 post-indexed loads are really just single-register LDMs. |
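// For example, a post-indexed 'ldr r0, [r1], #4' is equivalent to
// 'ldmia r1!, {r0}', which is what the expansion below emits.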
11997 | case ARM::tLDR_postidx: { |
11998 | MachineOperand Def(MI.getOperand(i: 1)); |
11999 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: ARM::tLDMIA_UPD)) |
12000 | .add(MO: Def) // Rn_wb |
12001 | .add(MO: MI.getOperand(i: 2)) // Rn |
12002 | .add(MO: MI.getOperand(i: 3)) // PredImm |
12003 | .add(MO: MI.getOperand(i: 4)) // PredReg |
12004 | .add(MO: MI.getOperand(i: 0)) // Rt |
12005 | .cloneMemRefs(OtherMI: MI); |
12006 | MI.eraseFromParent(); |
12007 | return BB; |
12008 | } |
12009 | |
12010 | case ARM::MVE_MEMCPYLOOPINST: |
12011 | case ARM::MVE_MEMSETLOOPINST: { |
12012 | |
// The transformation below expands the MVE_MEMCPYLOOPINST/MVE_MEMSETLOOPINST
// pseudo into a Tail Predicated (TP) loop. It adds the instructions to
// calculate the iteration count = ceil(size_in_bytes / 16) in the TP entry
// block and adds the relevant instructions in the TP loop body for the
// generation of a WLSTP loop.
12018 | |
// Below is the relevant portion of the CFG after the transformation.
12020 | // The Machine Basic Blocks are shown along with branch conditions (in |
12021 | // brackets). Note that TP entry/exit MBBs depict the entry/exit of this |
12022 | // portion of the CFG and may not necessarily be the entry/exit of the |
12023 | // function. |
12024 | |
12025 | // (Relevant) CFG after transformation: |
//               TP entry MBB
//                    |
//          |---------|---------|
//       (n <= 0)           (n > 0)
//          |                   |
//          |          TP loop Body MBB <--|
//          |                   |          |
//           \                  |__________|
//            \                 /
//             \               /
//              TP exit MBB
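//
// As a rough sketch only (register and label names illustrative), later
// low-overhead-loop passes are expected to turn this structure into
// something like:
//
//   wlstp.8  lr, rN, .Lexit      ; rN = element count, skip loop if zero
// .Lbody:
//   vldrb.u8 q0, [rSrc], #16     ; predicated load (memcpy only)
//   vstrb.8  q0, [rDst], #16     ; predicated store
//   letp     lr, .Lbody
// .Lexit: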
12036 | |
12037 | MachineFunction *MF = BB->getParent(); |
12038 | MachineFunctionProperties &Properties = MF->getProperties(); |
12039 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
12040 | |
12041 | Register OpDestReg = MI.getOperand(i: 0).getReg(); |
12042 | Register OpSrcReg = MI.getOperand(i: 1).getReg(); |
12043 | Register OpSizeReg = MI.getOperand(i: 2).getReg(); |
12044 | |
12045 | // Allocate the required MBBs and add to parent function. |
12046 | MachineBasicBlock *TpEntry = BB; |
12047 | MachineBasicBlock *TpLoopBody = MF->CreateMachineBasicBlock(); |
12048 | MachineBasicBlock *TpExit; |
12049 | |
12050 | MF->push_back(MBB: TpLoopBody); |
12051 | |
12052 | // If any instructions are present in the current block after |
12053 | // MVE_MEMCPYLOOPINST or MVE_MEMSETLOOPINST, split the current block and |
12054 | // move the instructions into the newly created exit block. If there are no |
// instructions, add an explicit branch to the fallthrough block and then
12056 | // split. |
12057 | // |
12058 | // The split is required for two reasons: |
// 1) A terminator (t2WhileLoopStart) will be placed at that site.
// 2) Since a TPLoopBody will be added later, any PHIs in the successor blocks
12061 | // need to be updated. splitAt() already handles this. |
12062 | TpExit = BB->splitAt(SplitInst&: MI, UpdateLiveIns: false); |
12063 | if (TpExit == BB) { |
12064 | assert(BB->canFallThrough() && "Exit Block must be Fallthrough of the " |
12065 | "block containing memcpy/memset Pseudo" ); |
12066 | TpExit = BB->getFallThrough(); |
12067 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2B)) |
12068 | .addMBB(MBB: TpExit) |
12069 | .add(MOs: predOps(Pred: ARMCC::AL)); |
12070 | TpExit = BB->splitAt(SplitInst&: MI, UpdateLiveIns: false); |
12071 | } |
12072 | |
12073 | // Add logic for iteration count |
12074 | Register TotalIterationsReg = |
12075 | genTPEntry(TpEntry, TpLoopBody, TpExit, OpSizeReg, TII, Dl: dl, MRI); |
12076 | |
12077 | // Add the vectorized (and predicated) loads/store instructions |
12078 | bool IsMemcpy = MI.getOpcode() == ARM::MVE_MEMCPYLOOPINST; |
12079 | genTPLoopBody(TpLoopBody, TpEntry, TpExit, TII, Dl: dl, MRI, OpSrcReg, |
12080 | OpDestReg, ElementCountReg: OpSizeReg, TotalIterationsReg, IsMemcpy); |
12081 | |
12082 | // Required to avoid conflict with the MachineVerifier during testing. |
12083 | Properties.reset(P: MachineFunctionProperties::Property::NoPHIs); |
12084 | |
12085 | // Connect the blocks |
12086 | TpEntry->addSuccessor(Succ: TpLoopBody); |
12087 | TpLoopBody->addSuccessor(Succ: TpLoopBody); |
12088 | TpLoopBody->addSuccessor(Succ: TpExit); |
12089 | |
12090 | // Reorder for a more natural layout |
12091 | TpLoopBody->moveAfter(NewBefore: TpEntry); |
12092 | TpExit->moveAfter(NewBefore: TpLoopBody); |
12093 | |
12094 | // Finally, remove the memcpy Pseudo Instruction |
12095 | MI.eraseFromParent(); |
12096 | |
12097 | // Return the exit block as it may contain other instructions requiring a |
12098 | // custom inserter |
12099 | return TpExit; |
12100 | } |
12101 | |
12102 | // The Thumb2 pre-indexed stores have the same MI operands, they just |
12103 | // define them differently in the .td files from the isel patterns, so |
12104 | // they need pseudos. |
12105 | case ARM::t2STR_preidx: |
12106 | MI.setDesc(TII->get(Opcode: ARM::t2STR_PRE)); |
12107 | return BB; |
12108 | case ARM::t2STRB_preidx: |
12109 | MI.setDesc(TII->get(Opcode: ARM::t2STRB_PRE)); |
12110 | return BB; |
12111 | case ARM::t2STRH_preidx: |
12112 | MI.setDesc(TII->get(Opcode: ARM::t2STRH_PRE)); |
12113 | return BB; |
12114 | |
12115 | case ARM::STRi_preidx: |
12116 | case ARM::STRBi_preidx: { |
12117 | unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM |
12118 | : ARM::STRB_PRE_IMM; |
12119 | // Decode the offset. |
12120 | unsigned Offset = MI.getOperand(i: 4).getImm(); |
12121 | bool isSub = ARM_AM::getAM2Op(AM2Opc: Offset) == ARM_AM::sub; |
12122 | Offset = ARM_AM::getAM2Offset(AM2Opc: Offset); |
12123 | if (isSub) |
12124 | Offset = -Offset; |
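// For example, an AM2 value encoding {sub, #4} decodes to isSub = true and
// a final Offset of -4, which the *_PRE_IMM form takes as a signed immediate.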
12125 | |
12126 | MachineMemOperand *MMO = *MI.memoperands_begin(); |
12127 | BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: NewOpc)) |
12128 | .add(MO: MI.getOperand(i: 0)) // Rn_wb |
12129 | .add(MO: MI.getOperand(i: 1)) // Rt |
12130 | .add(MO: MI.getOperand(i: 2)) // Rn |
12131 | .addImm(Val: Offset) // offset (skip GPR==zero_reg) |
12132 | .add(MO: MI.getOperand(i: 5)) // pred |
12133 | .add(MO: MI.getOperand(i: 6)) |
12134 | .addMemOperand(MMO); |
12135 | MI.eraseFromParent(); |
12136 | return BB; |
12137 | } |
12138 | case ARM::STRr_preidx: |
12139 | case ARM::STRBr_preidx: |
12140 | case ARM::STRH_preidx: { |
12141 | unsigned NewOpc; |
12142 | switch (MI.getOpcode()) { |
12143 | default: llvm_unreachable("unexpected opcode!" ); |
12144 | case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; |
12145 | case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; |
12146 | case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; |
12147 | } |
12148 | MachineInstrBuilder MIB = BuildMI(BB&: *BB, I&: MI, MIMD: dl, MCID: TII->get(Opcode: NewOpc)); |
12149 | for (const MachineOperand &MO : MI.operands()) |
12150 | MIB.add(MO); |
12151 | MI.eraseFromParent(); |
12152 | return BB; |
12153 | } |
12154 | |
12155 | case ARM::tMOVCCr_pseudo: { |
12156 | // To "insert" a SELECT_CC instruction, we actually have to insert the |
12157 | // diamond control-flow pattern. The incoming instruction knows the |
12158 | // destination vreg to set, the condition code register to branch on, the |
12159 | // true/false values to select between, and a branch opcode to use. |
12160 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
12161 | MachineFunction::iterator It = ++BB->getIterator(); |
12162 | |
12163 | // thisMBB: |
12164 | // ... |
12165 | // TrueVal = ... |
12166 | // cmpTY ccX, r1, r2 |
12167 | // bCC copy1MBB |
12168 | // fallthrough --> copy0MBB |
12169 | MachineBasicBlock *thisMBB = BB; |
12170 | MachineFunction *F = BB->getParent(); |
12171 | MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(BB: LLVM_BB); |
12172 | MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(BB: LLVM_BB); |
12173 | F->insert(MBBI: It, MBB: copy0MBB); |
12174 | F->insert(MBBI: It, MBB: sinkMBB); |
12175 | |
12176 | // Set the call frame size on entry to the new basic blocks. |
12177 | unsigned CallFrameSize = TII->getCallFrameSizeAt(MI); |
12178 | copy0MBB->setCallFrameSize(CallFrameSize); |
12179 | sinkMBB->setCallFrameSize(CallFrameSize); |
12180 | |
12181 | // Check whether CPSR is live past the tMOVCCr_pseudo. |
12182 | const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
12183 | if (!MI.killsRegister(Reg: ARM::CPSR, /*TRI=*/nullptr) && |
12184 | !checkAndUpdateCPSRKill(SelectItr: MI, BB: thisMBB, TRI)) { |
12185 | copy0MBB->addLiveIn(PhysReg: ARM::CPSR); |
12186 | sinkMBB->addLiveIn(PhysReg: ARM::CPSR); |
12187 | } |
12188 | |
12189 | // Transfer the remainder of BB and its successor edges to sinkMBB. |
12190 | sinkMBB->splice(Where: sinkMBB->begin(), Other: BB, |
12191 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end()); |
12192 | sinkMBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB); |
12193 | |
12194 | BB->addSuccessor(Succ: copy0MBB); |
12195 | BB->addSuccessor(Succ: sinkMBB); |
12196 | |
12197 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::tBcc)) |
12198 | .addMBB(MBB: sinkMBB) |
12199 | .addImm(Val: MI.getOperand(i: 3).getImm()) |
12200 | .addReg(RegNo: MI.getOperand(i: 4).getReg()); |
12201 | |
12202 | // copy0MBB: |
12203 | // %FalseValue = ... |
12204 | // # fallthrough to sinkMBB |
12205 | BB = copy0MBB; |
12206 | |
12207 | // Update machine-CFG edges |
12208 | BB->addSuccessor(Succ: sinkMBB); |
12209 | |
12210 | // sinkMBB: |
12211 | // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] |
12212 | // ... |
12213 | BB = sinkMBB; |
12214 | BuildMI(BB&: *BB, I: BB->begin(), MIMD: dl, MCID: TII->get(Opcode: ARM::PHI), DestReg: MI.getOperand(i: 0).getReg()) |
12215 | .addReg(RegNo: MI.getOperand(i: 1).getReg()) |
12216 | .addMBB(MBB: copy0MBB) |
12217 | .addReg(RegNo: MI.getOperand(i: 2).getReg()) |
12218 | .addMBB(MBB: thisMBB); |
12219 | |
12220 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
12221 | return BB; |
12222 | } |
12223 | |
12224 | case ARM::BCCi64: |
12225 | case ARM::BCCZi64: { |
12226 | // If there is an unconditional branch to the other successor, remove it. |
12227 | BB->erase(I: std::next(x: MachineBasicBlock::iterator(MI)), E: BB->end()); |
12228 | |
12229 | // Compare both parts that make up the double comparison separately for |
12230 | // equality. |
12231 | bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; |
12232 | |
12233 | Register LHS1 = MI.getOperand(i: 1).getReg(); |
12234 | Register LHS2 = MI.getOperand(i: 2).getReg(); |
12235 | if (RHSisZero) { |
12236 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
12237 | .addReg(RegNo: LHS1) |
12238 | .addImm(Val: 0) |
12239 | .add(MOs: predOps(Pred: ARMCC::AL)); |
12240 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
12241 | .addReg(RegNo: LHS2).addImm(Val: 0) |
12242 | .addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
12243 | } else { |
12244 | Register RHS1 = MI.getOperand(i: 3).getReg(); |
12245 | Register RHS2 = MI.getOperand(i: 4).getReg(); |
12246 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
12247 | .addReg(RegNo: LHS1) |
12248 | .addReg(RegNo: RHS1) |
12249 | .add(MOs: predOps(Pred: ARMCC::AL)); |
12250 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
12251 | .addReg(RegNo: LHS2).addReg(RegNo: RHS2) |
12252 | .addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
12253 | } |
12254 | |
12255 | MachineBasicBlock *destMBB = MI.getOperand(i: RHSisZero ? 3 : 5).getMBB(); |
12256 | MachineBasicBlock *exitMBB = OtherSucc(MBB: BB, Succ: destMBB); |
12257 | if (MI.getOperand(i: 0).getImm() == ARMCC::NE) |
12258 | std::swap(a&: destMBB, b&: exitMBB); |
12259 | |
12260 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
12261 | .addMBB(MBB: destMBB).addImm(Val: ARMCC::EQ).addReg(RegNo: ARM::CPSR); |
12262 | if (isThumb2) |
12263 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::t2B)) |
12264 | .addMBB(MBB: exitMBB) |
12265 | .add(MOs: predOps(Pred: ARMCC::AL)); |
12266 | else |
12267 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: ARM::B)) .addMBB(MBB: exitMBB); |
12268 | |
12269 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
12270 | return BB; |
12271 | } |
12272 | |
12273 | case ARM::Int_eh_sjlj_setjmp: |
12274 | case ARM::Int_eh_sjlj_setjmp_nofp: |
12275 | case ARM::tInt_eh_sjlj_setjmp: |
12276 | case ARM::t2Int_eh_sjlj_setjmp: |
12277 | case ARM::t2Int_eh_sjlj_setjmp_nofp: |
12278 | return BB; |
12279 | |
12280 | case ARM::Int_eh_sjlj_setup_dispatch: |
12281 | EmitSjLjDispatchBlock(MI, MBB: BB); |
12282 | return BB; |
12283 | |
12284 | case ARM::ABS: |
12285 | case ARM::t2ABS: { |
12286 | // To insert an ABS instruction, we have to insert the |
12287 | // diamond control-flow pattern. The incoming instruction knows the |
12288 | // source vreg to test against 0, the destination vreg to set, |
12289 | // the condition code register to branch on, the |
12290 | // true/false values to select between, and a branch opcode to use. |
12291 | // It transforms |
12292 | // V1 = ABS V0 |
12293 | // into |
12294 | // V2 = MOVS V0 |
12295 | // BCC (branch to SinkBB if V0 >= 0) |
12296 | // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) |
12297 | // SinkBB: V1 = PHI(V2, V3) |
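// For example, with V0 = -5: the CMP sets the N flag, the branch to SinkBB
// (taken only when V0 >= 0) is not taken, RSBri in RSBBB computes
// 0 - (-5) = 5, and the PHI in SinkBB selects 5.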
12298 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
12299 | MachineFunction::iterator BBI = ++BB->getIterator(); |
12300 | MachineFunction *Fn = BB->getParent(); |
12301 | MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(BB: LLVM_BB); |
12302 | MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(BB: LLVM_BB); |
12303 | Fn->insert(MBBI: BBI, MBB: RSBBB); |
12304 | Fn->insert(MBBI: BBI, MBB: SinkBB); |
12305 | |
12306 | Register ABSSrcReg = MI.getOperand(i: 1).getReg(); |
12307 | Register ABSDstReg = MI.getOperand(i: 0).getReg(); |
bool ABSSrcKill = MI.getOperand(i: 1).isKill();
12309 | bool isThumb2 = Subtarget->isThumb2(); |
12310 | MachineRegisterInfo &MRI = Fn->getRegInfo(); |
// In Thumb mode S must not be specified if the source register is the SP or
// PC and if the destination register is the SP, so restrict the register class.
12313 | Register NewRsbDstReg = MRI.createVirtualRegister( |
12314 | RegClass: isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); |
12315 | |
12316 | // Transfer the remainder of BB and its successor edges to sinkMBB. |
12317 | SinkBB->splice(Where: SinkBB->begin(), Other: BB, |
12318 | From: std::next(x: MachineBasicBlock::iterator(MI)), To: BB->end()); |
12319 | SinkBB->transferSuccessorsAndUpdatePHIs(FromMBB: BB); |
12320 | |
12321 | BB->addSuccessor(Succ: RSBBB); |
12322 | BB->addSuccessor(Succ: SinkBB); |
12323 | |
12324 | // fall through to SinkMBB |
12325 | RSBBB->addSuccessor(Succ: SinkBB); |
12326 | |
12327 | // insert a cmp at the end of BB |
12328 | BuildMI(BB, MIMD: dl, MCID: TII->get(Opcode: isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
12329 | .addReg(RegNo: ABSSrcReg) |
12330 | .addImm(Val: 0) |
12331 | .add(MOs: predOps(Pred: ARMCC::AL)); |
12332 | |
12333 | // insert a bcc with opposite CC to ARMCC::MI at the end of BB |
12334 | BuildMI(BB, MIMD: dl, |
12335 | MCID: TII->get(Opcode: isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(MBB: SinkBB) |
12336 | .addImm(Val: ARMCC::getOppositeCondition(CC: ARMCC::MI)).addReg(RegNo: ARM::CPSR); |
12337 | |
12338 | // insert rsbri in RSBBB |
12339 | // Note: BCC and rsbri will be converted into predicated rsbmi |
12340 | // by if-conversion pass |
12341 | BuildMI(BB&: *RSBBB, I: RSBBB->begin(), MIMD: dl, |
12342 | MCID: TII->get(Opcode: isThumb2 ? ARM::t2RSBri : ARM::RSBri), DestReg: NewRsbDstReg) |
.addReg(RegNo: ABSSrcReg, flags: ABSSrcKill ? RegState::Kill : 0)
12344 | .addImm(Val: 0) |
12345 | .add(MOs: predOps(Pred: ARMCC::AL)) |
12346 | .add(MO: condCodeOp()); |
12347 | |
12348 | // insert PHI in SinkBB, |
12349 | // reuse ABSDstReg to not change uses of ABS instruction |
12350 | BuildMI(BB&: *SinkBB, I: SinkBB->begin(), MIMD: dl, |
12351 | MCID: TII->get(Opcode: ARM::PHI), DestReg: ABSDstReg) |
12352 | .addReg(RegNo: NewRsbDstReg).addMBB(MBB: RSBBB) |
12353 | .addReg(RegNo: ABSSrcReg).addMBB(MBB: BB); |
12354 | |
12355 | // remove ABS instruction |
12356 | MI.eraseFromParent(); |
12357 | |
12358 | // return last added BB |
12359 | return SinkBB; |
12360 | } |
12361 | case ARM::COPY_STRUCT_BYVAL_I32: |
12362 | ++NumLoopByVals; |
12363 | return EmitStructByval(MI, BB); |
12364 | case ARM::WIN__CHKSTK: |
12365 | return EmitLowered__chkstk(MI, MBB: BB); |
12366 | case ARM::WIN__DBZCHK: |
12367 | return EmitLowered__dbzchk(MI, MBB: BB); |
12368 | } |
12369 | } |
12370 | |
12371 | /// Attaches vregs to MEMCPY that it will use as scratch registers |
12372 | /// when it is expanded into LDM/STM. This is done as a post-isel lowering |
12373 | /// instead of as a custom inserter because we need the use list from the SDNode. |
12374 | static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, |
12375 | MachineInstr &MI, const SDNode *Node) { |
12376 | bool isThumb1 = Subtarget->isThumb1Only(); |
12377 | |
12378 | DebugLoc DL = MI.getDebugLoc(); |
12379 | MachineFunction *MF = MI.getParent()->getParent(); |
12380 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
12381 | MachineInstrBuilder MIB(*MF, MI); |
12382 | |
// If the new dst/src is unused, mark it as dead.
12384 | if (!Node->hasAnyUseOfValue(Value: 0)) { |
12385 | MI.getOperand(i: 0).setIsDead(true); |
12386 | } |
12387 | if (!Node->hasAnyUseOfValue(Value: 1)) { |
12388 | MI.getOperand(i: 1).setIsDead(true); |
12389 | } |
12390 | |
12391 | // The MEMCPY both defines and kills the scratch registers. |
12392 | for (unsigned I = 0; I != MI.getOperand(i: 4).getImm(); ++I) { |
12393 | Register TmpReg = MRI.createVirtualRegister(RegClass: isThumb1 ? &ARM::tGPRRegClass |
12394 | : &ARM::GPRRegClass); |
12395 | MIB.addReg(RegNo: TmpReg, flags: RegState::Define|RegState::Dead); |
12396 | } |
12397 | } |
12398 | |
12399 | void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, |
12400 | SDNode *Node) const { |
12401 | if (MI.getOpcode() == ARM::MEMCPY) { |
12402 | attachMEMCPYScratchRegs(Subtarget, MI, Node); |
12403 | return; |
12404 | } |
12405 | |
12406 | const MCInstrDesc *MCID = &MI.getDesc(); |
12407 | // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, |
12408 | // RSC. Coming out of isel, they have an implicit CPSR def, but the optional |
12409 | // operand is still set to noreg. If needed, set the optional operand's |
12410 | // register to CPSR, and remove the redundant implicit def. |
12411 | // |
12412 | // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR). |
12413 | |
12414 | // Rename pseudo opcodes. |
12415 | unsigned NewOpc = convertAddSubFlagsOpcode(OldOpc: MI.getOpcode()); |
12416 | unsigned ccOutIdx; |
12417 | if (NewOpc) { |
12418 | const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo(); |
12419 | MCID = &TII->get(Opcode: NewOpc); |
12420 | |
12421 | assert(MCID->getNumOperands() == |
12422 | MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize() |
12423 | && "converted opcode should be the same except for cc_out" |
12424 | " (and, on Thumb1, pred)" ); |
12425 | |
12426 | MI.setDesc(*MCID); |
12427 | |
12428 | // Add the optional cc_out operand |
12429 | MI.addOperand(Op: MachineOperand::CreateReg(Reg: 0, /*isDef=*/true)); |
12430 | |
12431 | // On Thumb1, move all input operands to the end, then add the predicate |
12432 | if (Subtarget->isThumb1Only()) { |
12433 | for (unsigned c = MCID->getNumOperands() - 4; c--;) { |
12434 | MI.addOperand(Op: MI.getOperand(i: 1)); |
12435 | MI.removeOperand(OpNo: 1); |
12436 | } |
12437 | |
12438 | // Restore the ties |
12439 | for (unsigned i = MI.getNumOperands(); i--;) { |
12440 | const MachineOperand& op = MI.getOperand(i); |
12441 | if (op.isReg() && op.isUse()) { |
12442 | int DefIdx = MCID->getOperandConstraint(OpNum: i, Constraint: MCOI::TIED_TO); |
12443 | if (DefIdx != -1) |
12444 | MI.tieOperands(DefIdx, UseIdx: i); |
12445 | } |
12446 | } |
12447 | |
12448 | MI.addOperand(Op: MachineOperand::CreateImm(Val: ARMCC::AL)); |
12449 | MI.addOperand(Op: MachineOperand::CreateReg(Reg: 0, /*isDef=*/false)); |
12450 | ccOutIdx = 1; |
12451 | } else |
12452 | ccOutIdx = MCID->getNumOperands() - 1; |
12453 | } else |
12454 | ccOutIdx = MCID->getNumOperands() - 1; |
12455 | |
12456 | // Any ARM instruction that sets the 's' bit should specify an optional |
12457 | // "cc_out" operand in the last operand position. |
12458 | if (!MI.hasOptionalDef() || !MCID->operands()[ccOutIdx].isOptionalDef()) { |
12459 | assert(!NewOpc && "Optional cc_out operand required" ); |
12460 | return; |
12461 | } |
12462 | // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it |
12463 | // since we already have an optional CPSR def. |
12464 | bool definesCPSR = false; |
12465 | bool deadCPSR = false; |
12466 | for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e; |
12467 | ++i) { |
12468 | const MachineOperand &MO = MI.getOperand(i); |
12469 | if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { |
12470 | definesCPSR = true; |
12471 | if (MO.isDead()) |
12472 | deadCPSR = true; |
12473 | MI.removeOperand(OpNo: i); |
12474 | break; |
12475 | } |
12476 | } |
12477 | if (!definesCPSR) { |
12478 | assert(!NewOpc && "Optional cc_out operand required" ); |
12479 | return; |
12480 | } |
12481 | assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag" ); |
12482 | if (deadCPSR) { |
12483 | assert(!MI.getOperand(ccOutIdx).getReg() && |
12484 | "expect uninitialized optional cc_out operand" ); |
12485 | // Thumb1 instructions must have the S bit even if the CPSR is dead. |
12486 | if (!Subtarget->isThumb1Only()) |
12487 | return; |
12488 | } |
12489 | |
12490 | // If this instruction was defined with an optional CPSR def and its dag node |
12491 | // had a live implicit CPSR def, then activate the optional CPSR def. |
12492 | MachineOperand &MO = MI.getOperand(i: ccOutIdx); |
12493 | MO.setReg(ARM::CPSR); |
12494 | MO.setIsDef(true); |
12495 | } |
12496 | |
12497 | //===----------------------------------------------------------------------===// |
12498 | // ARM Optimization Hooks |
12499 | //===----------------------------------------------------------------------===// |
12500 | |
12501 | // Helper function that checks if N is a null or all ones constant. |
12502 | static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { |
12503 | return AllOnes ? isAllOnesConstant(V: N) : isNullConstant(V: N); |
12504 | } |
12505 | |
12506 | // Return true if N is conditionally 0 or all ones. |
12507 | // Detects these expressions where cc is an i1 value: |
12508 | // |
12509 | // (select cc 0, y) [AllOnes=0] |
12510 | // (select cc y, 0) [AllOnes=0] |
12511 | // (zext cc) [AllOnes=0] |
12512 | // (sext cc) [AllOnes=0/1] |
12513 | // (select cc -1, y) [AllOnes=1] |
12514 | // (select cc y, -1) [AllOnes=1] |
12515 | // |
12516 | // Invert is set when N is the null/all ones constant when CC is false. |
12517 | // OtherOp is set to the alternative value of N. |
12518 | static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, |
12519 | SDValue &CC, bool &Invert, |
12520 | SDValue &OtherOp, |
12521 | SelectionDAG &DAG) { |
12522 | switch (N->getOpcode()) { |
12523 | default: return false; |
12524 | case ISD::SELECT: { |
12525 | CC = N->getOperand(Num: 0); |
12526 | SDValue N1 = N->getOperand(Num: 1); |
12527 | SDValue N2 = N->getOperand(Num: 2); |
12528 | if (isZeroOrAllOnes(N: N1, AllOnes)) { |
12529 | Invert = false; |
12530 | OtherOp = N2; |
12531 | return true; |
12532 | } |
12533 | if (isZeroOrAllOnes(N: N2, AllOnes)) { |
12534 | Invert = true; |
12535 | OtherOp = N1; |
12536 | return true; |
12537 | } |
12538 | return false; |
12539 | } |
12540 | case ISD::ZERO_EXTEND: |
12541 | // (zext cc) can never be the all ones value. |
12542 | if (AllOnes) |
12543 | return false; |
12544 | [[fallthrough]]; |
12545 | case ISD::SIGN_EXTEND: { |
12546 | SDLoc dl(N); |
12547 | EVT VT = N->getValueType(ResNo: 0); |
12548 | CC = N->getOperand(Num: 0); |
12549 | if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC) |
12550 | return false; |
12551 | Invert = !AllOnes; |
12552 | if (AllOnes) |
12553 | // When looking for an AllOnes constant, N is an sext, and the 'other' |
12554 | // value is 0. |
12555 | OtherOp = DAG.getConstant(Val: 0, DL: dl, VT); |
12556 | else if (N->getOpcode() == ISD::ZERO_EXTEND) |
12557 | // When looking for a 0 constant, N can be zext or sext. |
12558 | OtherOp = DAG.getConstant(Val: 1, DL: dl, VT); |
12559 | else |
12560 | OtherOp = DAG.getAllOnesConstant(DL: dl, VT); |
12561 | return true; |
12562 | } |
12563 | } |
12564 | } |
12565 | |
12566 | // Combine a constant select operand into its use: |
12567 | // |
// (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
// (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
// (and (select cc, -1, c), x) -> (select cc, x, (and x, c)) [AllOnes=1]
// (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
// (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
12573 | // |
12574 | // The transform is rejected if the select doesn't have a constant operand that |
12575 | // is null, or all ones when AllOnes is set. |
12576 | // |
12577 | // Also recognize sext/zext from i1: |
12578 | // |
// (add (zext cc), x) -> (select cc, (add x, 1), x)
// (add (sext cc), x) -> (select cc, (add x, -1), x)
12581 | // |
12582 | // These transformations eventually create predicated instructions. |
12583 | // |
12584 | // @param N The node to transform. |
12585 | // @param Slct The N operand that is a select. |
12586 | // @param OtherOp The other N operand (x above). |
12587 | // @param DCI Context. |
12588 | // @param AllOnes Require the select constant to be all ones instead of null. |
12589 | // @returns The new node, or SDValue() on failure. |
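//
// For example, with c = 7:
//   (add (select cc, 0, 7), x) -> (select cc, x, (add x, 7))
// which can later be emitted as a conditionally executed ADD.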
12590 | static |
12591 | SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, |
12592 | TargetLowering::DAGCombinerInfo &DCI, |
12593 | bool AllOnes = false) { |
12594 | SelectionDAG &DAG = DCI.DAG; |
12595 | EVT VT = N->getValueType(ResNo: 0); |
12596 | SDValue NonConstantVal; |
12597 | SDValue CCOp; |
12598 | bool SwapSelectOps; |
12599 | if (!isConditionalZeroOrAllOnes(N: Slct.getNode(), AllOnes, CC&: CCOp, Invert&: SwapSelectOps, |
12600 | OtherOp&: NonConstantVal, DAG)) |
12601 | return SDValue(); |
12602 | |
// Slct is now known to be the desired identity constant when CC is true.
12604 | SDValue TrueVal = OtherOp; |
12605 | SDValue FalseVal = DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT, |
12606 | N1: OtherOp, N2: NonConstantVal); |
12607 | // Unless SwapSelectOps says CC should be false. |
12608 | if (SwapSelectOps) |
12609 | std::swap(a&: TrueVal, b&: FalseVal); |
12610 | |
12611 | return DAG.getNode(Opcode: ISD::SELECT, DL: SDLoc(N), VT, |
12612 | N1: CCOp, N2: TrueVal, N3: FalseVal); |
12613 | } |
12614 | |
12615 | // Attempt combineSelectAndUse on each operand of a commutative operator N. |
12616 | static |
12617 | SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, |
12618 | TargetLowering::DAGCombinerInfo &DCI) { |
12619 | SDValue N0 = N->getOperand(Num: 0); |
12620 | SDValue N1 = N->getOperand(Num: 1); |
12621 | if (N0.getNode()->hasOneUse()) |
12622 | if (SDValue Result = combineSelectAndUse(N, Slct: N0, OtherOp: N1, DCI, AllOnes)) |
12623 | return Result; |
12624 | if (N1.getNode()->hasOneUse()) |
12625 | if (SDValue Result = combineSelectAndUse(N, Slct: N1, OtherOp: N0, DCI, AllOnes)) |
12626 | return Result; |
12627 | return SDValue(); |
12628 | } |
12629 | |
12630 | static bool IsVUZPShuffleNode(SDNode *N) { |
12631 | // VUZP shuffle node. |
12632 | if (N->getOpcode() == ARMISD::VUZP) |
12633 | return true; |
12634 | |
12635 | // "VUZP" on i32 is an alias for VTRN. |
12636 | if (N->getOpcode() == ARMISD::VTRN && N->getValueType(ResNo: 0) == MVT::v2i32) |
12637 | return true; |
12638 | |
12639 | return false; |
12640 | } |
12641 | |
12642 | static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, |
12643 | TargetLowering::DAGCombinerInfo &DCI, |
12644 | const ARMSubtarget *Subtarget) { |
12645 | // Look for ADD(VUZP.0, VUZP.1). |
12646 | if (!IsVUZPShuffleNode(N: N0.getNode()) || N0.getNode() != N1.getNode() || |
12647 | N0 == N1) |
12648 | return SDValue(); |
12649 | |
12650 | // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD. |
12651 | if (!N->getValueType(ResNo: 0).is64BitVector()) |
12652 | return SDValue(); |
12653 | |
12654 | // Generate vpadd. |
12655 | SelectionDAG &DAG = DCI.DAG; |
12656 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
12657 | SDLoc dl(N); |
12658 | SDNode *Unzip = N0.getNode(); |
12659 | EVT VT = N->getValueType(ResNo: 0); |
12660 | |
12661 | SmallVector<SDValue, 8> Ops; |
12662 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpadd, DL: dl, |
12663 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
12664 | Ops.push_back(Elt: Unzip->getOperand(Num: 0)); |
12665 | Ops.push_back(Elt: Unzip->getOperand(Num: 1)); |
12666 | |
12667 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops); |
12668 | } |
12669 | |
12670 | static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
12671 | TargetLowering::DAGCombinerInfo &DCI, |
12672 | const ARMSubtarget *Subtarget) { |
12673 | // Check for two extended operands. |
12674 | if (!(N0.getOpcode() == ISD::SIGN_EXTEND && |
12675 | N1.getOpcode() == ISD::SIGN_EXTEND) && |
12676 | !(N0.getOpcode() == ISD::ZERO_EXTEND && |
12677 | N1.getOpcode() == ISD::ZERO_EXTEND)) |
12678 | return SDValue(); |
12679 | |
12680 | SDValue N00 = N0.getOperand(i: 0); |
12681 | SDValue N10 = N1.getOperand(i: 0); |
12682 | |
12683 | // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1)) |
12684 | if (!IsVUZPShuffleNode(N: N00.getNode()) || N00.getNode() != N10.getNode() || |
12685 | N00 == N10) |
12686 | return SDValue(); |
12687 | |
12688 | // We only recognize Q register paddl here; this can't be reached until |
12689 | // after type legalization. |
12690 | if (!N00.getValueType().is64BitVector() || |
12691 | !N0.getValueType().is128BitVector()) |
12692 | return SDValue(); |
12693 | |
12694 | // Generate vpaddl. |
12695 | SelectionDAG &DAG = DCI.DAG; |
12696 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
12697 | SDLoc dl(N); |
12698 | EVT VT = N->getValueType(ResNo: 0); |
12699 | |
12700 | SmallVector<SDValue, 8> Ops; |
12701 | // Form vpaddl.sN or vpaddl.uN depending on the kind of extension. |
12702 | unsigned Opcode; |
12703 | if (N0.getOpcode() == ISD::SIGN_EXTEND) |
12704 | Opcode = Intrinsic::arm_neon_vpaddls; |
12705 | else |
12706 | Opcode = Intrinsic::arm_neon_vpaddlu; |
12707 | Ops.push_back(Elt: DAG.getConstant(Val: Opcode, DL: dl, |
12708 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
12709 | EVT ElemTy = N00.getValueType().getVectorElementType(); |
12710 | unsigned NumElts = VT.getVectorNumElements(); |
12711 | EVT ConcatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: ElemTy, NumElements: NumElts * 2); |
12712 | SDValue Concat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: SDLoc(N), VT: ConcatVT, |
12713 | N1: N00.getOperand(i: 0), N2: N00.getOperand(i: 1)); |
12714 | Ops.push_back(Elt: Concat); |
12715 | |
12716 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT, Ops); |
12717 | } |
12718 | |
12719 | // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in |
12720 | // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is |
12721 | // much easier to match. |
12722 | static SDValue |
12723 | AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
12724 | TargetLowering::DAGCombinerInfo &DCI, |
12725 | const ARMSubtarget *Subtarget) { |
// Only perform the optimization after legalization, and only if NEON is
// available. We also expect both operands to be BUILD_VECTORs.
12728 | if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() |
12729 | || N0.getOpcode() != ISD::BUILD_VECTOR |
12730 | || N1.getOpcode() != ISD::BUILD_VECTOR) |
12731 | return SDValue(); |
12732 | |
12733 | // Check output type since VPADDL operand elements can only be 8, 16, or 32. |
12734 | EVT VT = N->getValueType(ResNo: 0); |
12735 | if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) |
12736 | return SDValue(); |
12737 | |
12738 | // Check that the vector operands are of the right form. |
12739 | // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR |
12740 | // operands, where N is the size of the formed vector. |
// Each EXTRACT_VECTOR should have the same input vector and an odd or even
// index such that we have a pairwise add pattern.
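//
// As an illustrative sketch (not exact DAG syntax), the pattern looks like:
//   N0 = BUILD_VECTOR (extract_elt V, 0), (extract_elt V, 2), ...
//   N1 = BUILD_VECTOR (extract_elt V, 1), (extract_elt V, 3), ...
//   add N0, N1   ==>   vpaddl V   (followed by an extend/truncate to VT)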
12743 | |
12744 | // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. |
12745 | if (N0->getOperand(Num: 0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
12746 | return SDValue(); |
12747 | SDValue Vec = N0->getOperand(Num: 0)->getOperand(Num: 0); |
12748 | SDNode *V = Vec.getNode(); |
12749 | unsigned nextIndex = 0; |
12750 | |
// For each operand of the ADD (both of which are BUILD_VECTORs), check
// whether each of their operands is an EXTRACT_VECTOR_ELT with the same
// input vector and the appropriate index.
12754 | for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { |
12755 | if (N0->getOperand(Num: i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT |
12756 | && N1->getOperand(Num: i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
12757 | |
12758 | SDValue ExtVec0 = N0->getOperand(Num: i); |
12759 | SDValue ExtVec1 = N1->getOperand(Num: i); |
12760 | |
// The first operand is the vector; verify it is the same.
12762 | if (V != ExtVec0->getOperand(Num: 0).getNode() || |
12763 | V != ExtVec1->getOperand(Num: 0).getNode()) |
12764 | return SDValue(); |
12765 | |
// The second operand is the constant index; verify it is correct.
12767 | ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(Val: ExtVec0->getOperand(Num: 1)); |
12768 | ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Val: ExtVec1->getOperand(Num: 1)); |
12769 | |
// For the constants, we want all the even indices in N0 and all the odd in N1.
12771 | if (!C0 || !C1 || C0->getZExtValue() != nextIndex |
12772 | || C1->getZExtValue() != nextIndex+1) |
12773 | return SDValue(); |
12774 | |
12775 | // Increment index. |
12776 | nextIndex+=2; |
12777 | } else |
12778 | return SDValue(); |
12779 | } |
12780 | |
12781 | // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure |
12782 | // we're using the entire input vector, otherwise there's a size/legality |
12783 | // mismatch somewhere. |
12784 | if (nextIndex != Vec.getValueType().getVectorNumElements() || |
12785 | Vec.getValueType().getVectorElementType() == VT.getVectorElementType()) |
12786 | return SDValue(); |
12787 | |
12788 | // Create VPADDL node. |
12789 | SelectionDAG &DAG = DCI.DAG; |
12790 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
12791 | |
12792 | SDLoc dl(N); |
12793 | |
12794 | // Build operand list. |
12795 | SmallVector<SDValue, 8> Ops; |
12796 | Ops.push_back(Elt: DAG.getConstant(Val: Intrinsic::arm_neon_vpaddls, DL: dl, |
12797 | VT: TLI.getPointerTy(DL: DAG.getDataLayout()))); |
12798 | |
12799 | // Input is the vector. |
12800 | Ops.push_back(Elt: Vec); |
12801 | |
12802 | // Get widened type and narrowed type. |
12803 | MVT widenType; |
12804 | unsigned numElem = VT.getVectorNumElements(); |
12805 | |
12806 | EVT inputLaneType = Vec.getValueType().getVectorElementType(); |
12807 | switch (inputLaneType.getSimpleVT().SimpleTy) { |
12808 | case MVT::i8: widenType = MVT::getVectorVT(VT: MVT::i16, NumElements: numElem); break; |
12809 | case MVT::i16: widenType = MVT::getVectorVT(VT: MVT::i32, NumElements: numElem); break; |
12810 | case MVT::i32: widenType = MVT::getVectorVT(VT: MVT::i64, NumElements: numElem); break; |
12811 | default: |
12812 | llvm_unreachable("Invalid vector element type for padd optimization." ); |
12813 | } |
12814 | |
12815 | SDValue tmp = DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: widenType, Ops); |
12816 | unsigned ExtOp = VT.bitsGT(VT: tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE; |
12817 | return DAG.getNode(Opcode: ExtOp, DL: dl, VT, Operand: tmp); |
12818 | } |
12819 | |
12820 | static SDValue findMUL_LOHI(SDValue V) { |
12821 | if (V->getOpcode() == ISD::UMUL_LOHI || |
12822 | V->getOpcode() == ISD::SMUL_LOHI) |
12823 | return V; |
12824 | return SDValue(); |
12825 | } |
12826 | |
12827 | static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, |
12828 | TargetLowering::DAGCombinerInfo &DCI, |
12829 | const ARMSubtarget *Subtarget) { |
12830 | if (!Subtarget->hasBaseDSP()) |
12831 | return SDValue(); |
12832 | |
12833 | // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and |
12834 | // accumulates the product into a 64-bit value. The 16-bit values will |
12835 | // be sign extended somehow or SRA'd into 32-bit values |
12836 | // (addc (adde (mul 16bit, 16bit), lo), hi) |
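// For example, if Mul.getOperand(0) is an SRA-by-16 (the "top" half) and
// Mul.getOperand(1) is known to be sign-extended from 16 bits (the "bottom"
// half), the code below selects SMLALTB and feeds it the original,
// un-shifted/un-extended inputs.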
12837 | SDValue Mul = AddcNode->getOperand(Num: 0); |
12838 | SDValue Lo = AddcNode->getOperand(Num: 1); |
12839 | if (Mul.getOpcode() != ISD::MUL) { |
12840 | Lo = AddcNode->getOperand(Num: 0); |
12841 | Mul = AddcNode->getOperand(Num: 1); |
12842 | if (Mul.getOpcode() != ISD::MUL) |
12843 | return SDValue(); |
12844 | } |
12845 | |
12846 | SDValue SRA = AddeNode->getOperand(Num: 0); |
12847 | SDValue Hi = AddeNode->getOperand(Num: 1); |
12848 | if (SRA.getOpcode() != ISD::SRA) { |
12849 | SRA = AddeNode->getOperand(Num: 1); |
12850 | Hi = AddeNode->getOperand(Num: 0); |
12851 | if (SRA.getOpcode() != ISD::SRA) |
12852 | return SDValue(); |
12853 | } |
12854 | if (auto Const = dyn_cast<ConstantSDNode>(Val: SRA.getOperand(i: 1))) { |
12855 | if (Const->getZExtValue() != 31) |
12856 | return SDValue(); |
12857 | } else |
12858 | return SDValue(); |
12859 | |
12860 | if (SRA.getOperand(i: 0) != Mul) |
12861 | return SDValue(); |
12862 | |
12863 | SelectionDAG &DAG = DCI.DAG; |
12864 | SDLoc dl(AddcNode); |
12865 | unsigned Opcode = 0; |
12866 | SDValue Op0; |
12867 | SDValue Op1; |
12868 | |
12869 | if (isS16(Op: Mul.getOperand(i: 0), DAG) && isS16(Op: Mul.getOperand(i: 1), DAG)) { |
12870 | Opcode = ARMISD::SMLALBB; |
12871 | Op0 = Mul.getOperand(i: 0); |
12872 | Op1 = Mul.getOperand(i: 1); |
12873 | } else if (isS16(Op: Mul.getOperand(i: 0), DAG) && isSRA16(Op: Mul.getOperand(i: 1))) { |
12874 | Opcode = ARMISD::SMLALBT; |
12875 | Op0 = Mul.getOperand(i: 0); |
12876 | Op1 = Mul.getOperand(i: 1).getOperand(i: 0); |
12877 | } else if (isSRA16(Op: Mul.getOperand(i: 0)) && isS16(Op: Mul.getOperand(i: 1), DAG)) { |
12878 | Opcode = ARMISD::SMLALTB; |
12879 | Op0 = Mul.getOperand(i: 0).getOperand(i: 0); |
12880 | Op1 = Mul.getOperand(i: 1); |
12881 | } else if (isSRA16(Op: Mul.getOperand(i: 0)) && isSRA16(Op: Mul.getOperand(i: 1))) { |
12882 | Opcode = ARMISD::SMLALTT; |
12883 | Op0 = Mul->getOperand(Num: 0).getOperand(i: 0); |
12884 | Op1 = Mul->getOperand(Num: 1).getOperand(i: 0); |
12885 | } |
12886 | |
12887 | if (!Op0 || !Op1) |
12888 | return SDValue(); |
12889 | |
12890 | SDValue SMLAL = DAG.getNode(Opcode, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
12891 | N1: Op0, N2: Op1, N3: Lo, N4: Hi); |
// Replace the uses of the ADD nodes with the SMLAL node's values.
12893 | SDValue HiMLALResult(SMLAL.getNode(), 1); |
12894 | SDValue LoMLALResult(SMLAL.getNode(), 0); |
12895 | |
12896 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcNode, 0), To: LoMLALResult); |
12897 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeNode, 0), To: HiMLALResult); |
12898 | |
12899 | // Return original node to notify the driver to stop replacing. |
12900 | SDValue resNode(AddcNode, 0); |
12901 | return resNode; |
12902 | } |
12903 | |
12904 | static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, |
12905 | TargetLowering::DAGCombinerInfo &DCI, |
12906 | const ARMSubtarget *Subtarget) { |
12907 | // Look for multiply add opportunities. |
// The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
// each add node consumes a value from ISD::UMUL_LOHI and there is
// a glue link from the first add to the second add.
// If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE
// with an S/UMLAL instruction.
//                  UMUL_LOHI
//                 / :lo    \ :hi
//                V          \          [no multiline comment]
//  loAdd ->  ADDC  |
//                 \ :carry /
//                  V      V
//                    ADDE <- hiAdd
12920 | // |
// In the special case where only the higher part of a signed result is used
// and the add to the low part of the result of ISD::UMUL_LOHI adds or
// subtracts a constant with the exact value of 0x80000000, we recognize we
// are dealing with a "rounded multiply and add" (or subtract) and transform
// it into either an ARMISD::SMMLAR or an ARMISD::SMMLSR, respectively.
12926 | |
12927 | assert((AddeSubeNode->getOpcode() == ARMISD::ADDE || |
12928 | AddeSubeNode->getOpcode() == ARMISD::SUBE) && |
12929 | "Expect an ADDE or SUBE" ); |
12930 | |
12931 | assert(AddeSubeNode->getNumOperands() == 3 && |
12932 | AddeSubeNode->getOperand(2).getValueType() == MVT::i32 && |
12933 | "ADDE node has the wrong inputs" ); |
12934 | |
12935 | // Check that we are chained to the right ADDC or SUBC node. |
12936 | SDNode *AddcSubcNode = AddeSubeNode->getOperand(Num: 2).getNode(); |
12937 | if ((AddeSubeNode->getOpcode() == ARMISD::ADDE && |
12938 | AddcSubcNode->getOpcode() != ARMISD::ADDC) || |
12939 | (AddeSubeNode->getOpcode() == ARMISD::SUBE && |
12940 | AddcSubcNode->getOpcode() != ARMISD::SUBC)) |
12941 | return SDValue(); |
12942 | |
12943 | SDValue AddcSubcOp0 = AddcSubcNode->getOperand(Num: 0); |
12944 | SDValue AddcSubcOp1 = AddcSubcNode->getOperand(Num: 1); |
12945 | |
12946 | // Check if the two operands are from the same mul_lohi node. |
12947 | if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode()) |
12948 | return SDValue(); |
12949 | |
12950 | assert(AddcSubcNode->getNumValues() == 2 && |
12951 | AddcSubcNode->getValueType(0) == MVT::i32 && |
12952 | "Expect ADDC with two result values. First: i32" ); |
12953 | |
// Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
// may be an SMLAL which multiplies two 16-bit values.
12956 | if (AddeSubeNode->getOpcode() == ARMISD::ADDE && |
12957 | AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI && |
12958 | AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI && |
12959 | AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI && |
12960 | AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI) |
12961 | return AddCombineTo64BitSMLAL16(AddcNode: AddcSubcNode, AddeNode: AddeSubeNode, DCI, Subtarget); |
12962 | |
12963 | // Check for the triangle shape. |
12964 | SDValue AddeSubeOp0 = AddeSubeNode->getOperand(Num: 0); |
12965 | SDValue AddeSubeOp1 = AddeSubeNode->getOperand(Num: 1); |
12966 | |
12967 | // Make sure that the ADDE/SUBE operands are not coming from the same node. |
12968 | if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode()) |
12969 | return SDValue(); |
12970 | |
12971 | // Find the MUL_LOHI node walking up ADDE/SUBE's operands. |
12972 | bool IsLeftOperandMUL = false; |
12973 | SDValue MULOp = findMUL_LOHI(V: AddeSubeOp0); |
12974 | if (MULOp == SDValue()) |
12975 | MULOp = findMUL_LOHI(V: AddeSubeOp1); |
12976 | else |
12977 | IsLeftOperandMUL = true; |
12978 | if (MULOp == SDValue()) |
12979 | return SDValue(); |
12980 | |
12981 | // Figure out the right opcode. |
12982 | unsigned Opc = MULOp->getOpcode(); |
12983 | unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; |
12984 | |
12985 | // Figure out the high and low input values to the MLAL node. |
12986 | SDValue *HiAddSub = nullptr; |
12987 | SDValue *LoMul = nullptr; |
12988 | SDValue *LowAddSub = nullptr; |
12989 | |
12990 | // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI. |
12991 | if ((AddeSubeOp0 != MULOp.getValue(R: 1)) && (AddeSubeOp1 != MULOp.getValue(R: 1))) |
12992 | return SDValue(); |
12993 | |
12994 | if (IsLeftOperandMUL) |
12995 | HiAddSub = &AddeSubeOp1; |
12996 | else |
12997 | HiAddSub = &AddeSubeOp0; |
12998 | |
// Ensure that LoMul and LowAddSub are taken from the correct ISD::SMUL_LOHI node
13000 | // whose low result is fed to the ADDC/SUBC we are checking. |
13001 | |
13002 | if (AddcSubcOp0 == MULOp.getValue(R: 0)) { |
13003 | LoMul = &AddcSubcOp0; |
13004 | LowAddSub = &AddcSubcOp1; |
13005 | } |
13006 | if (AddcSubcOp1 == MULOp.getValue(R: 0)) { |
13007 | LoMul = &AddcSubcOp1; |
13008 | LowAddSub = &AddcSubcOp0; |
13009 | } |
13010 | |
13011 | if (!LoMul) |
13012 | return SDValue(); |
13013 | |
13014 | // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC |
13015 | // the replacement below will create a cycle. |
13016 | if (AddcSubcNode == HiAddSub->getNode() || |
13017 | AddcSubcNode->isPredecessorOf(N: HiAddSub->getNode())) |
13018 | return SDValue(); |
13019 | |
13020 | // Create the merged node. |
13021 | SelectionDAG &DAG = DCI.DAG; |
13022 | |
13023 | // Start building operand list. |
13024 | SmallVector<SDValue, 8> Ops; |
13025 | Ops.push_back(Elt: LoMul->getOperand(i: 0)); |
13026 | Ops.push_back(Elt: LoMul->getOperand(i: 1)); |
13027 | |
13028 | // Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be
13029 | // the case, we must be doing signed multiplication and only use the higher
13030 | // part of the result of the MLAL; furthermore, the LowAddSub must be a constant
13031 | // addition or subtraction with the value 0x80000000 (the rounding constant).
13032 | if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() && |
13033 | FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(Value: 1) && |
13034 | LowAddSub->getNode()->getOpcode() == ISD::Constant && |
13035 | static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() == |
13036 | 0x80000000) { |
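// SMMLAR/SMMLSR round by implicitly adding 0x80000000 to the 64-bit product
// before taking the top 32 bits, so the explicit low-half addend is absorbed.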
13037 | Ops.push_back(Elt: *HiAddSub); |
13038 | if (AddcSubcNode->getOpcode() == ARMISD::SUBC) { |
13039 | FinalOpc = ARMISD::SMMLSR; |
13040 | } else { |
13041 | FinalOpc = ARMISD::SMMLAR; |
13042 | } |
13043 | SDValue NewNode = DAG.getNode(Opcode: FinalOpc, DL: SDLoc(AddcSubcNode), VT: MVT::i32, Ops); |
13044 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeSubeNode, 0), To: NewNode); |
13045 | |
13046 | return SDValue(AddeSubeNode, 0); |
13047 | } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC) |
13048 | // SMMLS is generated during instruction selection and the rest of this |
13049 | // function cannot handle the case where AddcSubcNode is a SUBC.
13050 | return SDValue(); |
13051 | |
13052 | // Finish building the operand list for {U/S}MLAL |
13053 | Ops.push_back(Elt: *LowAddSub); |
13054 | Ops.push_back(Elt: *HiAddSub); |
13055 | |
13056 | SDValue MLALNode = DAG.getNode(Opcode: FinalOpc, DL: SDLoc(AddcSubcNode), |
13057 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), Ops); |
13058 | |
13059 | // Replace the ADDE/SUBE and ADDC/SUBC nodes' uses with the MLAL node's values.
13060 | SDValue HiMLALResult(MLALNode.getNode(), 1); |
13061 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeSubeNode, 0), To: HiMLALResult); |
13062 | |
13063 | SDValue LoMLALResult(MLALNode.getNode(), 0); |
13064 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcSubcNode, 0), To: LoMLALResult); |
13065 | |
13066 | // Return original node to notify the driver to stop replacing. |
13067 | return SDValue(AddeSubeNode, 0); |
13068 | } |
13069 | |
13070 | static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, |
13071 | TargetLowering::DAGCombinerInfo &DCI, |
13072 | const ARMSubtarget *Subtarget) { |
13073 | // UMAAL is similar to UMLAL except that it adds two unsigned values. |
13074 | // While trying to combine for the other MLAL nodes, first search for the |
13075 | // chance to use UMAAL. Check if Addc uses a node which has already |
13076 | // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde |
13077 | // as the addend, and it's handled in PerformUMLALCombine. |
13078 | |
13079 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
13080 | return AddCombineTo64bitMLAL(AddeSubeNode: AddeNode, DCI, Subtarget); |
13081 | |
13082 | // Check that we have a glued ADDC node. |
13083 | SDNode* AddcNode = AddeNode->getOperand(Num: 2).getNode(); |
13084 | if (AddcNode->getOpcode() != ARMISD::ADDC) |
13085 | return SDValue(); |
13086 | |
13087 | // Find the UMLAL node that has already been combined, or quit if it doesn't exist.
13088 | SDNode *UmlalNode = nullptr; |
13089 | SDValue AddHi; |
13090 | if (AddcNode->getOperand(Num: 0).getOpcode() == ARMISD::UMLAL) { |
13091 | UmlalNode = AddcNode->getOperand(Num: 0).getNode(); |
13092 | AddHi = AddcNode->getOperand(Num: 1); |
13093 | } else if (AddcNode->getOperand(Num: 1).getOpcode() == ARMISD::UMLAL) { |
13094 | UmlalNode = AddcNode->getOperand(Num: 1).getNode(); |
13095 | AddHi = AddcNode->getOperand(Num: 0); |
13096 | } else { |
13097 | return AddCombineTo64bitMLAL(AddeSubeNode: AddeNode, DCI, Subtarget); |
13098 | } |
13099 | |
13100 | // The ADDC should be glued to an ADDE node, which uses the same UMLAL as |
13101 | // the ADDC as well as Zero. |
13102 | if (!isNullConstant(V: UmlalNode->getOperand(Num: 3))) |
13103 | return SDValue(); |
13104 | |
13105 | if ((isNullConstant(V: AddeNode->getOperand(Num: 0)) && |
13106 | AddeNode->getOperand(Num: 1).getNode() == UmlalNode) || |
13107 | (AddeNode->getOperand(Num: 0).getNode() == UmlalNode && |
13108 | isNullConstant(V: AddeNode->getOperand(Num: 1)))) { |
13109 | SelectionDAG &DAG = DCI.DAG; |
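// UMAAL computes Ops[0] * Ops[1] + Ops[2] + Ops[3], returning the 64-bit
// result as an {i32 lo, i32 hi} pair, so both 32-bit addends are folded in.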
13110 | SDValue Ops[] = { UmlalNode->getOperand(Num: 0), UmlalNode->getOperand(Num: 1), |
13111 | UmlalNode->getOperand(Num: 2), AddHi }; |
13112 | SDValue UMAAL = DAG.getNode(Opcode: ARMISD::UMAAL, DL: SDLoc(AddcNode), |
13113 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), Ops); |
13114 | |
13115 | // Replace the ADDE and ADDC nodes' uses with the UMAAL node's values.
13116 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddeNode, 0), To: SDValue(UMAAL.getNode(), 1)); |
13117 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(AddcNode, 0), To: SDValue(UMAAL.getNode(), 0)); |
13118 | |
13119 | // Return original node to notify the driver to stop replacing. |
13120 | return SDValue(AddeNode, 0); |
13121 | } |
13122 | return SDValue(); |
13123 | } |
13124 | |
13125 | static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, |
13126 | const ARMSubtarget *Subtarget) { |
13127 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
13128 | return SDValue(); |
13129 | |
13130 | // Check that we have a pair of ADDC and ADDE as operands. |
13131 | // Both addends of the ADDE must be zero. |
13132 | SDNode* AddcNode = N->getOperand(Num: 2).getNode(); |
13133 | SDNode* AddeNode = N->getOperand(Num: 3).getNode(); |
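// (UMLAL x, y, (ADDC a, b), (ADDE 0, 0, (ADDC a, b):1)) -> (UMAAL x, y, a, b)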
13134 | if ((AddcNode->getOpcode() == ARMISD::ADDC) && |
13135 | (AddeNode->getOpcode() == ARMISD::ADDE) && |
13136 | isNullConstant(V: AddeNode->getOperand(Num: 0)) && |
13137 | isNullConstant(V: AddeNode->getOperand(Num: 1)) && |
13138 | (AddeNode->getOperand(Num: 2).getNode() == AddcNode)) |
13139 | return DAG.getNode(Opcode: ARMISD::UMAAL, DL: SDLoc(N), |
13140 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
13141 | Ops: {N->getOperand(Num: 0), N->getOperand(Num: 1), |
13142 | AddcNode->getOperand(Num: 0), AddcNode->getOperand(Num: 1)}); |
13143 | else |
13144 | return SDValue(); |
13145 | } |
13146 | |
13147 | static SDValue PerformAddcSubcCombine(SDNode *N, |
13148 | TargetLowering::DAGCombinerInfo &DCI, |
13149 | const ARMSubtarget *Subtarget) { |
13150 | SelectionDAG &DAG(DCI.DAG); |
13151 | |
13152 | if (N->getOpcode() == ARMISD::SUBC && N->hasAnyUseOfValue(Value: 1)) { |
13153 | // (SUBC (ADDE 0, 0, C), 1) -> C |
13154 | SDValue LHS = N->getOperand(Num: 0); |
13155 | SDValue RHS = N->getOperand(Num: 1); |
13156 | if (LHS->getOpcode() == ARMISD::ADDE && |
13157 | isNullConstant(V: LHS->getOperand(Num: 0)) && |
13158 | isNullConstant(V: LHS->getOperand(Num: 1)) && isOneConstant(V: RHS)) { |
13159 | return DCI.CombineTo(N, Res0: SDValue(N, 0), Res1: LHS->getOperand(Num: 2)); |
13160 | } |
13161 | } |
13162 | |
13163 | if (Subtarget->isThumb1Only()) { |
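// Thumb1 only has small positive immediates for ADDS/SUBS, so rewrite an
// ADDC/SUBC with a negative constant as the opposite operation with the
// negated (positive) constant.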
13164 | SDValue RHS = N->getOperand(Num: 1); |
13165 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: RHS)) { |
13166 | int32_t imm = C->getSExtValue(); |
13167 | if (imm < 0 && imm > std::numeric_limits<int>::min()) { |
13168 | SDLoc DL(N); |
13169 | RHS = DAG.getConstant(Val: -imm, DL, VT: MVT::i32); |
13170 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC |
13171 | : ARMISD::ADDC; |
13172 | return DAG.getNode(Opcode, DL, VTList: N->getVTList(), N1: N->getOperand(Num: 0), N2: RHS); |
13173 | } |
13174 | } |
13175 | } |
13176 | |
13177 | return SDValue(); |
13178 | } |
13179 | |
13180 | static SDValue PerformAddeSubeCombine(SDNode *N, |
13181 | TargetLowering::DAGCombinerInfo &DCI, |
13182 | const ARMSubtarget *Subtarget) { |
13183 | if (Subtarget->isThumb1Only()) { |
13184 | SelectionDAG &DAG = DCI.DAG; |
13185 | SDValue RHS = N->getOperand(Num: 1); |
13186 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: RHS)) { |
13187 | int64_t imm = C->getSExtValue(); |
13188 | if (imm < 0) { |
13189 | SDLoc DL(N); |
13190 | |
13191 | // The with-carry-in form matches bitwise not instead of the negation. |
13192 | // Effectively, the inverse interpretation of the carry flag already |
13193 | // accounts for part of the negation. |
13194 | RHS = DAG.getConstant(Val: ~imm, DL, VT: MVT::i32); |
13195 | |
13196 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE |
13197 | : ARMISD::ADDE; |
13198 | return DAG.getNode(Opcode, DL, VTList: N->getVTList(), |
13199 | N1: N->getOperand(Num: 0), N2: RHS, N3: N->getOperand(Num: 2)); |
13200 | } |
13201 | } |
13202 | } else if (N->getOperand(Num: 1)->getOpcode() == ISD::SMUL_LOHI) { |
13203 | return AddCombineTo64bitMLAL(AddeSubeNode: N, DCI, Subtarget); |
13204 | } |
13205 | return SDValue(); |
13206 | } |
13207 | |
13208 | static SDValue PerformSELECTCombine(SDNode *N, |
13209 | TargetLowering::DAGCombinerInfo &DCI, |
13210 | const ARMSubtarget *Subtarget) { |
13211 | if (!Subtarget->hasMVEIntegerOps()) |
13212 | return SDValue(); |
13213 | |
13214 | SDLoc dl(N); |
13215 | SDValue SetCC; |
13216 | SDValue LHS; |
13217 | SDValue RHS; |
13218 | ISD::CondCode CC; |
13219 | SDValue TrueVal; |
13220 | SDValue FalseVal; |
13221 | |
13222 | if (N->getOpcode() == ISD::SELECT && |
13223 | N->getOperand(Num: 0)->getOpcode() == ISD::SETCC) { |
13224 | SetCC = N->getOperand(Num: 0); |
13225 | LHS = SetCC->getOperand(Num: 0); |
13226 | RHS = SetCC->getOperand(Num: 1); |
13227 | CC = cast<CondCodeSDNode>(Val: SetCC->getOperand(Num: 2))->get(); |
13228 | TrueVal = N->getOperand(Num: 1); |
13229 | FalseVal = N->getOperand(Num: 2); |
13230 | } else if (N->getOpcode() == ISD::SELECT_CC) { |
13231 | LHS = N->getOperand(Num: 0); |
13232 | RHS = N->getOperand(Num: 1); |
13233 | CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 4))->get(); |
13234 | TrueVal = N->getOperand(Num: 2); |
13235 | FalseVal = N->getOperand(Num: 3); |
13236 | } else { |
13237 | return SDValue(); |
13238 | } |
13239 | |
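// Match select(setcc(x, vecreduce_op(v)), x, vecreduce_op(v)) style patterns
// and turn them into the corresponding VMINV/VMAXV node, which reduces the
// scalar x together with the whole vector v.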
13240 | unsigned int Opcode = 0; |
13241 | if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMIN || |
13242 | FalseVal->getOpcode() == ISD::VECREDUCE_UMIN) && |
13243 | (CC == ISD::SETULT || CC == ISD::SETUGT)) { |
13244 | Opcode = ARMISD::VMINVu; |
13245 | if (CC == ISD::SETUGT) |
13246 | std::swap(a&: TrueVal, b&: FalseVal); |
13247 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMIN || |
13248 | FalseVal->getOpcode() == ISD::VECREDUCE_SMIN) && |
13249 | (CC == ISD::SETLT || CC == ISD::SETGT)) { |
13250 | Opcode = ARMISD::VMINVs; |
13251 | if (CC == ISD::SETGT) |
13252 | std::swap(a&: TrueVal, b&: FalseVal); |
13253 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMAX || |
13254 | FalseVal->getOpcode() == ISD::VECREDUCE_UMAX) && |
13255 | (CC == ISD::SETUGT || CC == ISD::SETULT)) { |
13256 | Opcode = ARMISD::VMAXVu; |
13257 | if (CC == ISD::SETULT) |
13258 | std::swap(a&: TrueVal, b&: FalseVal); |
13259 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMAX || |
13260 | FalseVal->getOpcode() == ISD::VECREDUCE_SMAX) && |
13261 | (CC == ISD::SETGT || CC == ISD::SETLT)) { |
13262 | Opcode = ARMISD::VMAXVs; |
13263 | if (CC == ISD::SETLT) |
13264 | std::swap(a&: TrueVal, b&: FalseVal); |
13265 | } else |
13266 | return SDValue(); |
13267 | |
13268 | // Normalise to the right hand side being the vector reduction |
13269 | switch (TrueVal->getOpcode()) { |
13270 | case ISD::VECREDUCE_UMIN: |
13271 | case ISD::VECREDUCE_SMIN: |
13272 | case ISD::VECREDUCE_UMAX: |
13273 | case ISD::VECREDUCE_SMAX: |
13274 | std::swap(a&: LHS, b&: RHS); |
13275 | std::swap(a&: TrueVal, b&: FalseVal); |
13276 | break; |
13277 | } |
13278 | |
13279 | EVT VectorType = FalseVal->getOperand(Num: 0).getValueType(); |
13280 | |
13281 | if (VectorType != MVT::v16i8 && VectorType != MVT::v8i16 && |
13282 | VectorType != MVT::v4i32) |
13283 | return SDValue(); |
13284 | |
13285 | EVT VectorScalarType = VectorType.getVectorElementType(); |
13286 | |
13287 | // The values being selected must also be the ones being compared |
13288 | if (TrueVal != LHS || FalseVal != RHS) |
13289 | return SDValue(); |
13290 | |
13291 | EVT LeftType = LHS->getValueType(ResNo: 0); |
13292 | EVT RightType = RHS->getValueType(ResNo: 0); |
13293 | |
13294 | // The types must match the reduced type too |
13295 | if (LeftType != VectorScalarType || RightType != VectorScalarType) |
13296 | return SDValue(); |
13297 | |
13298 | // Legalise the scalar to an i32 |
13299 | if (VectorScalarType != MVT::i32) |
13300 | LHS = DCI.DAG.getNode(Opcode: ISD::ANY_EXTEND, DL: dl, VT: MVT::i32, Operand: LHS); |
13301 | |
13302 | // Generate the reduction as an i32 for legalisation purposes |
13303 | auto Reduction = |
13304 | DCI.DAG.getNode(Opcode, DL: dl, VT: MVT::i32, N1: LHS, N2: RHS->getOperand(Num: 0)); |
13305 | |
13306 | // The result isn't actually an i32 so truncate it back to its original type |
13307 | if (VectorScalarType != MVT::i32) |
13308 | Reduction = DCI.DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: VectorScalarType, Operand: Reduction); |
13309 | |
13310 | return Reduction; |
13311 | } |
13312 | |
13313 | // A special combine for the vqdmulh family of instructions. This is one of the
13314 | // potential set of patterns that could match this instruction. The base pattern
13315 | // you would expect is min(max(ashr(mul(mul(sext(x), 2), sext(y)), 16))).
13316 | // This matches the different min(max(ashr(mul(mul(sext(x), sext(y)), 2), 16))),
13317 | // which LLVM will have optimized to min(ashr(mul(sext(x), sext(y)), 15)) as
13318 | // the max is unnecessary.
13319 | static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG) { |
13320 | EVT VT = N->getValueType(ResNo: 0); |
13321 | SDValue Shft; |
13322 | ConstantSDNode *Clamp; |
13323 | |
13324 | if (!VT.isVector() || VT.getScalarSizeInBits() > 64) |
13325 | return SDValue(); |
13326 | |
13327 | if (N->getOpcode() == ISD::SMIN) { |
13328 | Shft = N->getOperand(Num: 0); |
13329 | Clamp = isConstOrConstSplat(N: N->getOperand(Num: 1)); |
13330 | } else if (N->getOpcode() == ISD::VSELECT) { |
13331 | // Detect an SMIN, which for an i64 node will be a vselect/setcc, not a smin.
13332 | SDValue Cmp = N->getOperand(Num: 0); |
13333 | if (Cmp.getOpcode() != ISD::SETCC || |
13334 | cast<CondCodeSDNode>(Val: Cmp.getOperand(i: 2))->get() != ISD::SETLT || |
13335 | Cmp.getOperand(i: 0) != N->getOperand(Num: 1) || |
13336 | Cmp.getOperand(i: 1) != N->getOperand(Num: 2)) |
13337 | return SDValue(); |
13338 | Shft = N->getOperand(Num: 1); |
13339 | Clamp = isConstOrConstSplat(N: N->getOperand(Num: 2)); |
13340 | } else |
13341 | return SDValue(); |
13342 | |
13343 | if (!Clamp) |
13344 | return SDValue(); |
13345 | |
13346 | MVT ScalarType; |
13347 | int ShftAmt = 0; |
13348 | switch (Clamp->getSExtValue()) { |
13349 | case (1 << 7) - 1: |
13350 | ScalarType = MVT::i8; |
13351 | ShftAmt = 7; |
13352 | break; |
13353 | case (1 << 15) - 1: |
13354 | ScalarType = MVT::i16; |
13355 | ShftAmt = 15; |
13356 | break; |
13357 | case (1ULL << 31) - 1: |
13358 | ScalarType = MVT::i32; |
13359 | ShftAmt = 31; |
13360 | break; |
13361 | default: |
13362 | return SDValue(); |
13363 | } |
13364 | |
13365 | if (Shft.getOpcode() != ISD::SRA) |
13366 | return SDValue(); |
13367 | ConstantSDNode *N1 = isConstOrConstSplat(N: Shft.getOperand(i: 1)); |
13368 | if (!N1 || N1->getSExtValue() != ShftAmt) |
13369 | return SDValue(); |
13370 | |
13371 | SDValue Mul = Shft.getOperand(i: 0); |
13372 | if (Mul.getOpcode() != ISD::MUL) |
13373 | return SDValue(); |
13374 | |
13375 | SDValue Ext0 = Mul.getOperand(i: 0); |
13376 | SDValue Ext1 = Mul.getOperand(i: 1); |
13377 | if (Ext0.getOpcode() != ISD::SIGN_EXTEND || |
13378 | Ext1.getOpcode() != ISD::SIGN_EXTEND) |
13379 | return SDValue(); |
13380 | EVT VecVT = Ext0.getOperand(i: 0).getValueType(); |
13381 | if (!VecVT.isPow2VectorType() || VecVT.getVectorNumElements() == 1) |
13382 | return SDValue(); |
13383 | if (Ext1.getOperand(i: 0).getValueType() != VecVT || |
13384 | VecVT.getScalarType() != ScalarType || |
13385 | VT.getScalarSizeInBits() < ScalarType.getScalarSizeInBits() * 2) |
13386 | return SDValue(); |
13387 | |
13388 | SDLoc DL(Mul); |
13389 | unsigned LegalLanes = 128 / (ShftAmt + 1); |
13390 | EVT LegalVecVT = MVT::getVectorVT(VT: ScalarType, NumElements: LegalLanes); |
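// e.g. a 16-bit clamp gives ShftAmt == 15, so LegalLanes == 8 and
// LegalVecVT == v8i16.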
13391 | // For types smaller than legal vectors, extend them to be legal and only use
13392 | // the needed lanes.
13393 | if (VecVT.getSizeInBits() < 128) { |
13394 | EVT ExtVecVT = |
13395 | MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: 128 / VecVT.getVectorNumElements()), |
13396 | NumElements: VecVT.getVectorNumElements()); |
13397 | SDValue Inp0 = |
13398 | DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: ExtVecVT, Operand: Ext0.getOperand(i: 0)); |
13399 | SDValue Inp1 = |
13400 | DAG.getNode(Opcode: ISD::ANY_EXTEND, DL, VT: ExtVecVT, Operand: Ext1.getOperand(i: 0)); |
13401 | Inp0 = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: LegalVecVT, Operand: Inp0); |
13402 | Inp1 = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: LegalVecVT, Operand: Inp1); |
13403 | SDValue VQDMULH = DAG.getNode(Opcode: ARMISD::VQDMULH, DL, VT: LegalVecVT, N1: Inp0, N2: Inp1); |
13404 | SDValue Trunc = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: ExtVecVT, Operand: VQDMULH); |
13405 | Trunc = DAG.getNode(Opcode: ISD::TRUNCATE, DL, VT: VecVT, Operand: Trunc); |
13406 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT, Operand: Trunc); |
13407 | } |
13408 | |
13409 | // For larger types, split into legal sized chunks. |
13410 | assert(VecVT.getSizeInBits() % 128 == 0 && "Expected a power2 type" ); |
13411 | unsigned NumParts = VecVT.getSizeInBits() / 128; |
13412 | SmallVector<SDValue> Parts; |
13413 | for (unsigned I = 0; I < NumParts; ++I) { |
13414 | SDValue Inp0 = |
13415 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: LegalVecVT, N1: Ext0.getOperand(i: 0), |
13416 | N2: DAG.getVectorIdxConstant(Val: I * LegalLanes, DL)); |
13417 | SDValue Inp1 = |
13418 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: LegalVecVT, N1: Ext1.getOperand(i: 0), |
13419 | N2: DAG.getVectorIdxConstant(Val: I * LegalLanes, DL)); |
13420 | SDValue VQDMULH = DAG.getNode(Opcode: ARMISD::VQDMULH, DL, VT: LegalVecVT, N1: Inp0, N2: Inp1); |
13421 | Parts.push_back(Elt: VQDMULH); |
13422 | } |
13423 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL, VT, |
13424 | Operand: DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: VecVT, Ops: Parts)); |
13425 | } |
13426 | |
13427 | static SDValue PerformVSELECTCombine(SDNode *N, |
13428 | TargetLowering::DAGCombinerInfo &DCI, |
13429 | const ARMSubtarget *Subtarget) { |
13430 | if (!Subtarget->hasMVEIntegerOps()) |
13431 | return SDValue(); |
13432 | |
13433 | if (SDValue V = PerformVQDMULHCombine(N, DAG&: DCI.DAG)) |
13434 | return V; |
13435 | |
13436 | // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs). |
13437 | // |
13438 | // We need to re-implement this optimization here as the implementation in the |
13439 | // Target-Independent DAGCombiner does not handle the kind of constant we make |
13440 | // (it calls isConstOrConstSplat with AllowTruncation set to false - and for |
13441 | // good reason, allowing truncation there would break other targets). |
13442 | // |
13443 | // Currently, this is only done for MVE, as it's the only target that benefits |
13444 | // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL). |
13445 | if (N->getOperand(Num: 0).getOpcode() != ISD::XOR) |
13446 | return SDValue(); |
13447 | SDValue XOR = N->getOperand(Num: 0); |
13448 | |
13449 | // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s. |
13450 | // It is important to check with truncation allowed as the BUILD_VECTORs we |
13451 | // generate in those situations will truncate their operands. |
13452 | ConstantSDNode *Const = |
13453 | isConstOrConstSplat(N: XOR->getOperand(Num: 1), /*AllowUndefs*/ false, |
13454 | /*AllowTruncation*/ true); |
13455 | if (!Const || !Const->isOne()) |
13456 | return SDValue(); |
13457 | |
13458 | // Rewrite into vselect(cond, rhs, lhs). |
13459 | SDValue Cond = XOR->getOperand(Num: 0); |
13460 | SDValue LHS = N->getOperand(Num: 1); |
13461 | SDValue RHS = N->getOperand(Num: 2); |
13462 | EVT Type = N->getValueType(ResNo: 0); |
13463 | return DCI.DAG.getNode(Opcode: ISD::VSELECT, DL: SDLoc(N), VT: Type, N1: Cond, N2: RHS, N3: LHS); |
13464 | } |
13465 | |
13466 | // Convert vsetcc([0,1,2,..], splat(n), ult) -> vctp n |
13467 | static SDValue PerformVSetCCToVCTPCombine(SDNode *N, |
13468 | TargetLowering::DAGCombinerInfo &DCI, |
13469 | const ARMSubtarget *Subtarget) { |
13470 | SDValue Op0 = N->getOperand(Num: 0); |
13471 | SDValue Op1 = N->getOperand(Num: 1); |
13472 | ISD::CondCode CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 2))->get(); |
13473 | EVT VT = N->getValueType(ResNo: 0); |
13474 | |
13475 | if (!Subtarget->hasMVEIntegerOps() || |
13476 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
13477 | return SDValue(); |
13478 | |
13479 | if (CC == ISD::SETUGE) { |
13480 | std::swap(a&: Op0, b&: Op1); |
13481 | CC = ISD::SETULT; |
13482 | } |
13483 | |
13484 | if (CC != ISD::SETULT || VT.getScalarSizeInBits() != 1 || |
13485 | Op0.getOpcode() != ISD::BUILD_VECTOR) |
13486 | return SDValue(); |
13487 | |
13488 | // Check first operand is BuildVector of 0,1,2,... |
13489 | for (unsigned I = 0; I < VT.getVectorNumElements(); I++) { |
13490 | if (!Op0.getOperand(i: I).isUndef() && |
13491 | !(isa<ConstantSDNode>(Val: Op0.getOperand(i: I)) && |
13492 | Op0.getConstantOperandVal(i: I) == I)) |
13493 | return SDValue(); |
13494 | } |
13495 | |
13496 | // The second operand must be a splat; Op1S is the splatted scalar value.
13497 | SDValue Op1S = DCI.DAG.getSplatValue(V: Op1); |
13498 | if (!Op1S) |
13499 | return SDValue(); |
13500 | |
13501 | unsigned Opc; |
13502 | switch (VT.getVectorNumElements()) { |
13503 | case 2: |
13504 | Opc = Intrinsic::arm_mve_vctp64; |
13505 | break; |
13506 | case 4: |
13507 | Opc = Intrinsic::arm_mve_vctp32; |
13508 | break; |
13509 | case 8: |
13510 | Opc = Intrinsic::arm_mve_vctp16; |
13511 | break; |
13512 | case 16: |
13513 | Opc = Intrinsic::arm_mve_vctp8; |
13514 | break; |
13515 | default: |
13516 | return SDValue(); |
13517 | } |
13518 | |
13519 | SDLoc DL(N); |
13520 | return DCI.DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT, |
13521 | N1: DCI.DAG.getConstant(Val: Opc, DL, VT: MVT::i32), |
13522 | N2: DCI.DAG.getZExtOrTrunc(Op: Op1S, DL, VT: MVT::i32)); |
13523 | } |
13524 | |
13525 | /// PerformADDECombine - Target-specific dag combine transform from |
13526 | /// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or |
13527 | /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL |
13528 | static SDValue PerformADDECombine(SDNode *N, |
13529 | TargetLowering::DAGCombinerInfo &DCI, |
13530 | const ARMSubtarget *Subtarget) { |
13531 | // Only ARM and Thumb2 support UMLAL/SMLAL. |
13532 | if (Subtarget->isThumb1Only()) |
13533 | return PerformAddeSubeCombine(N, DCI, Subtarget); |
13534 | |
13535 | // Only perform the checks after legalize when the pattern is available. |
13536 | if (DCI.isBeforeLegalize()) return SDValue(); |
13537 | |
13538 | return AddCombineTo64bitUMAAL(AddeNode: N, DCI, Subtarget); |
13539 | } |
13540 | |
13541 | /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with |
13542 | /// operands N0 and N1. This is a helper for PerformADDCombine that is |
13543 | /// called with the default operands, and if that fails, with commuted |
13544 | /// operands. |
13545 | static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, |
13546 | TargetLowering::DAGCombinerInfo &DCI, |
13547 | const ARMSubtarget *Subtarget){ |
13548 | // Attempt to create vpadd for this add. |
13549 | if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget)) |
13550 | return Result; |
13551 | |
13552 | // Attempt to create vpaddl for this add. |
13553 | if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget)) |
13554 | return Result; |
13555 | if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI, |
13556 | Subtarget)) |
13557 | return Result; |
13558 | |
13559 | // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
13560 | if (N0.getNode()->hasOneUse()) |
13561 | if (SDValue Result = combineSelectAndUse(N, Slct: N0, OtherOp: N1, DCI)) |
13562 | return Result; |
13563 | return SDValue(); |
13564 | } |
13565 | |
13566 | static SDValue TryDistrubutionADDVecReduce(SDNode *N, SelectionDAG &DAG) { |
13567 | EVT VT = N->getValueType(ResNo: 0); |
13568 | SDValue N0 = N->getOperand(Num: 0); |
13569 | SDValue N1 = N->getOperand(Num: 1); |
13570 | SDLoc dl(N); |
13571 | |
13572 | auto IsVecReduce = [](SDValue Op) { |
13573 | switch (Op.getOpcode()) { |
13574 | case ISD::VECREDUCE_ADD: |
13575 | case ARMISD::VADDVs: |
13576 | case ARMISD::VADDVu: |
13577 | case ARMISD::VMLAVs: |
13578 | case ARMISD::VMLAVu: |
13579 | return true; |
13580 | } |
13581 | return false; |
13582 | }; |
13583 | |
13584 | auto DistrubuteAddAddVecReduce = [&](SDValue N0, SDValue N1) { |
13585 | // Distribute add(X, add(vecreduce(Y), vecreduce(Z))) -> |
13586 | // add(add(X, vecreduce(Y)), vecreduce(Z)) |
13587 | // to make better use of vaddva style instructions. |
13588 | if (VT == MVT::i32 && N1.getOpcode() == ISD::ADD && !IsVecReduce(N0) && |
13589 | IsVecReduce(N1.getOperand(i: 0)) && IsVecReduce(N1.getOperand(i: 1)) && |
13590 | !isa<ConstantSDNode>(Val: N0) && N1->hasOneUse()) { |
13591 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: N0, N2: N1.getOperand(i: 0)); |
13592 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N1.getOperand(i: 1)); |
13593 | } |
13594 | // And turn add(add(A, reduce(B)), add(C, reduce(D))) -> |
13595 | // add(add(add(A, C), reduce(B)), reduce(D)) |
13596 | if (VT == MVT::i32 && N0.getOpcode() == ISD::ADD && |
13597 | N1.getOpcode() == ISD::ADD && N0->hasOneUse() && N1->hasOneUse()) { |
13598 | unsigned N0RedOp = 0; |
13599 | if (!IsVecReduce(N0.getOperand(i: N0RedOp))) { |
13600 | N0RedOp = 1; |
13601 | if (!IsVecReduce(N0.getOperand(i: N0RedOp))) |
13602 | return SDValue(); |
13603 | } |
13604 | |
13605 | unsigned N1RedOp = 0; |
13606 | if (!IsVecReduce(N1.getOperand(i: N1RedOp))) |
13607 | N1RedOp = 1; |
13608 | if (!IsVecReduce(N1.getOperand(i: N1RedOp))) |
13609 | return SDValue(); |
13610 | |
13611 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: N0.getOperand(i: 1 - N0RedOp), |
13612 | N2: N1.getOperand(i: 1 - N1RedOp)); |
13613 | SDValue Add1 = |
13614 | DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N0.getOperand(i: N0RedOp)); |
13615 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add1, N2: N1.getOperand(i: N1RedOp)); |
13616 | } |
13617 | return SDValue(); |
13618 | }; |
13619 | if (SDValue R = DistrubuteAddAddVecReduce(N0, N1)) |
13620 | return R; |
13621 | if (SDValue R = DistrubuteAddAddVecReduce(N1, N0)) |
13622 | return R; |
13623 | |
13624 | // Distribute add(vecreduce(load(Y)), vecreduce(load(Z))) |
13625 | // Or add(add(X, vecreduce(load(Y))), vecreduce(load(Z))) |
13626 | // by ascending load offsets. This can help cores prefetch if the order of |
13627 | // loads is more predictable. |
13628 | auto DistrubuteVecReduceLoad = [&](SDValue N0, SDValue N1, bool IsForward) { |
13629 | // Check if two reductions are known to load data where one is before/after |
13630 | // another. Return negative if N0 loads data before N1, positive if N1 is |
13631 | // before N0, and 0 if nothing is known.
13632 | auto IsKnownOrderedLoad = [&](SDValue N0, SDValue N1) { |
13633 | // Look through to the first operand of a MUL, for the VMLA case. |
13634 | // Currently only looks at the first operand, in the hope they are equal. |
13635 | if (N0.getOpcode() == ISD::MUL) |
13636 | N0 = N0.getOperand(i: 0); |
13637 | if (N1.getOpcode() == ISD::MUL) |
13638 | N1 = N1.getOperand(i: 0); |
13639 | |
13640 | // Return true if the two operands are loads to the same object and the |
13641 | // offset of the first is known to be less than the offset of the second. |
13642 | LoadSDNode *Load0 = dyn_cast<LoadSDNode>(Val&: N0); |
13643 | LoadSDNode *Load1 = dyn_cast<LoadSDNode>(Val&: N1); |
13644 | if (!Load0 || !Load1 || Load0->getChain() != Load1->getChain() || |
13645 | !Load0->isSimple() || !Load1->isSimple() || Load0->isIndexed() || |
13646 | Load1->isIndexed()) |
13647 | return 0; |
13648 | |
13649 | auto BaseLocDecomp0 = BaseIndexOffset::match(N: Load0, DAG); |
13650 | auto BaseLocDecomp1 = BaseIndexOffset::match(N: Load1, DAG); |
13651 | |
13652 | if (!BaseLocDecomp0.getBase() || |
13653 | BaseLocDecomp0.getBase() != BaseLocDecomp1.getBase() || |
13654 | !BaseLocDecomp0.hasValidOffset() || !BaseLocDecomp1.hasValidOffset()) |
13655 | return 0; |
13656 | if (BaseLocDecomp0.getOffset() < BaseLocDecomp1.getOffset()) |
13657 | return -1; |
13658 | if (BaseLocDecomp0.getOffset() > BaseLocDecomp1.getOffset()) |
13659 | return 1; |
13660 | return 0; |
13661 | }; |
13662 | |
13663 | SDValue X; |
13664 | if (N0.getOpcode() == ISD::ADD && N0->hasOneUse()) { |
13665 | if (IsVecReduce(N0.getOperand(i: 0)) && IsVecReduce(N0.getOperand(i: 1))) { |
13666 | int IsBefore = IsKnownOrderedLoad(N0.getOperand(i: 0).getOperand(i: 0), |
13667 | N0.getOperand(i: 1).getOperand(i: 0)); |
13668 | if (IsBefore < 0) { |
13669 | X = N0.getOperand(i: 0); |
13670 | N0 = N0.getOperand(i: 1); |
13671 | } else if (IsBefore > 0) { |
13672 | X = N0.getOperand(i: 1); |
13673 | N0 = N0.getOperand(i: 0); |
13674 | } else |
13675 | return SDValue(); |
13676 | } else if (IsVecReduce(N0.getOperand(i: 0))) { |
13677 | X = N0.getOperand(i: 1); |
13678 | N0 = N0.getOperand(i: 0); |
13679 | } else if (IsVecReduce(N0.getOperand(i: 1))) { |
13680 | X = N0.getOperand(i: 0); |
13681 | N0 = N0.getOperand(i: 1); |
13682 | } else |
13683 | return SDValue(); |
13684 | } else if (IsForward && IsVecReduce(N0) && IsVecReduce(N1) && |
13685 | IsKnownOrderedLoad(N0.getOperand(i: 0), N1.getOperand(i: 0)) < 0) { |
13686 | // Note this is backward to how you would expect. We create
13687 | // add(reduce(load + 16), reduce(load + 0)) so that the
13688 | // add(reduce(load + 16), X) is combined into VADDVA(X, load + 16), leaving
13689 | // the X as VADDV(load + 0).
13690 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1, N2: N0); |
13691 | } else |
13692 | return SDValue(); |
13693 | |
13694 | if (!IsVecReduce(N0) || !IsVecReduce(N1)) |
13695 | return SDValue(); |
13696 | |
13697 | if (IsKnownOrderedLoad(N1.getOperand(i: 0), N0.getOperand(i: 0)) >= 0) |
13698 | return SDValue(); |
13699 | |
13700 | // Switch from add(add(X, N0), N1) to add(add(X, N1), N0) |
13701 | SDValue Add0 = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: X, N2: N1); |
13702 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT, N1: Add0, N2: N0); |
13703 | }; |
13704 | if (SDValue R = DistrubuteVecReduceLoad(N0, N1, true)) |
13705 | return R; |
13706 | if (SDValue R = DistrubuteVecReduceLoad(N1, N0, false)) |
13707 | return R; |
13708 | return SDValue(); |
13709 | } |
13710 | |
13711 | static SDValue PerformADDVecReduce(SDNode *N, SelectionDAG &DAG, |
13712 | const ARMSubtarget *Subtarget) { |
13713 | if (!Subtarget->hasMVEIntegerOps()) |
13714 | return SDValue(); |
13715 | |
13716 | if (SDValue R = TryDistrubutionADDVecReduce(N, DAG)) |
13717 | return R; |
13718 | |
13719 | EVT VT = N->getValueType(ResNo: 0); |
13720 | SDValue N0 = N->getOperand(Num: 0); |
13721 | SDValue N1 = N->getOperand(Num: 1); |
13722 | SDLoc dl(N); |
13723 | |
13724 | if (VT != MVT::i64) |
13725 | return SDValue(); |
13726 | |
13727 | // We are looking for an i64 add of a VADDLVx. Due to these being i64's, this
13728 | // will look like: |
13729 | // t1: i32,i32 = ARMISD::VADDLVs x |
13730 | // t2: i64 = build_pair t1, t1:1 |
13731 | // t3: i64 = add t2, y |
13732 | // Otherwise we try to push the add up above VADDLVAx, to potentially allow |
13733 | // the add to be simplified separately. |
13734 | // We also need to check for sext / zext and commutative adds.
13735 | auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA, |
13736 | SDValue NB) { |
13737 | if (NB->getOpcode() != ISD::BUILD_PAIR) |
13738 | return SDValue(); |
13739 | SDValue VecRed = NB->getOperand(Num: 0); |
13740 | if ((VecRed->getOpcode() != Opcode && VecRed->getOpcode() != OpcodeA) || |
13741 | VecRed.getResNo() != 0 || |
13742 | NB->getOperand(Num: 1) != SDValue(VecRed.getNode(), 1)) |
13743 | return SDValue(); |
13744 | |
13745 | if (VecRed->getOpcode() == OpcodeA) { |
13746 | // add(NA, VADDLVA(Acc, Vec)) -> VADDLVA(add(NA, Acc), Vec)
13747 | SDValue Inp = DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, |
13748 | N1: VecRed.getOperand(i: 0), N2: VecRed.getOperand(i: 1)); |
13749 | NA = DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: MVT::i64, N1: Inp, N2: NA); |
13750 | } |
13751 | |
13752 | SmallVector<SDValue, 4> Ops(2); |
13753 | std::tie(args&: Ops[0], args&: Ops[1]) = DAG.SplitScalar(N: NA, DL: dl, LoVT: MVT::i32, HiVT: MVT::i32); |
13754 | |
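// For the accumulating opcode, skip its first two operands (the old
// accumulator halves); they have already been folded into NA above.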
13755 | unsigned S = VecRed->getOpcode() == OpcodeA ? 2 : 0; |
13756 | for (unsigned I = S, E = VecRed.getNumOperands(); I < E; I++) |
13757 | Ops.push_back(Elt: VecRed->getOperand(Num: I)); |
13758 | SDValue Red = |
13759 | DAG.getNode(Opcode: OpcodeA, DL: dl, VTList: DAG.getVTList(VTs: {MVT::i32, MVT::i32}), Ops); |
13760 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Red, |
13761 | N2: SDValue(Red.getNode(), 1)); |
13762 | }; |
13763 | |
13764 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1)) |
13765 | return M; |
13766 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1)) |
13767 | return M; |
13768 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0)) |
13769 | return M; |
13770 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0)) |
13771 | return M; |
13772 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1)) |
13773 | return M; |
13774 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1)) |
13775 | return M; |
13776 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0)) |
13777 | return M; |
13778 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0)) |
13779 | return M; |
13780 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1)) |
13781 | return M; |
13782 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1)) |
13783 | return M; |
13784 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0)) |
13785 | return M; |
13786 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0)) |
13787 | return M; |
13788 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N0, N1)) |
13789 | return M; |
13790 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N0, N1)) |
13791 | return M; |
13792 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N1, N0)) |
13793 | return M; |
13794 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N1, N0)) |
13795 | return M; |
13796 | return SDValue(); |
13797 | } |
13798 | |
13799 | bool |
13800 | ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N, |
13801 | CombineLevel Level) const { |
13802 | assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA || |
13803 | N->getOpcode() == ISD::SRL) && |
13804 | "Expected shift op" ); |
13805 | |
13806 | if (Level == BeforeLegalizeTypes) |
13807 | return true; |
13808 | |
13809 | if (N->getOpcode() != ISD::SHL) |
13810 | return true; |
13811 | |
13812 | if (Subtarget->isThumb1Only()) { |
13813 | // Avoid making expensive immediates by commuting shifts. (This logic |
13814 | // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted |
13815 | // for free.) |
13816 | if (N->getOpcode() != ISD::SHL) |
13817 | return true; |
13818 | SDValue N1 = N->getOperand(Num: 0); |
13819 | if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND && |
13820 | N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR) |
13821 | return true; |
13822 | if (auto *Const = dyn_cast<ConstantSDNode>(Val: N1->getOperand(Num: 1))) { |
13823 | if (Const->getAPIntValue().ult(RHS: 256)) |
13824 | return false; |
13825 | if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(RHS: 0) && |
13826 | Const->getAPIntValue().sgt(RHS: -256)) |
13827 | return false; |
13828 | } |
13829 | return true; |
13830 | } |
13831 | |
13832 | // Turn off commute-with-shift transform after legalization, so it doesn't |
13833 | // conflict with PerformSHLSimplify. (We could try to detect when |
13834 | // PerformSHLSimplify would trigger more precisely, but it isn't |
13835 | // really necessary.) |
13836 | return false; |
13837 | } |
13838 | |
13839 | bool ARMTargetLowering::isDesirableToCommuteXorWithShift( |
13840 | const SDNode *N) const { |
13841 | assert(N->getOpcode() == ISD::XOR && |
13842 | (N->getOperand(0).getOpcode() == ISD::SHL || |
13843 | N->getOperand(0).getOpcode() == ISD::SRL) && |
13844 | "Expected XOR(SHIFT) pattern" ); |
13845 | |
13846 | // Only commute if the entire NOT mask is a hidden shifted mask. |
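// e.g. (xor (srl x, 16), 0x0000ffff) qualifies on i32: the mask covers
// exactly the low 16 bits that the shift can leave set.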
13847 | auto *XorC = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
13848 | auto *ShiftC = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 0).getOperand(i: 1)); |
13849 | if (XorC && ShiftC) { |
13850 | unsigned MaskIdx, MaskLen; |
13851 | if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) { |
13852 | unsigned ShiftAmt = ShiftC->getZExtValue(); |
13853 | unsigned BitWidth = N->getValueType(ResNo: 0).getScalarSizeInBits(); |
13854 | if (N->getOperand(Num: 0).getOpcode() == ISD::SHL) |
13855 | return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt); |
13856 | return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt); |
13857 | } |
13858 | } |
13859 | |
13860 | return false; |
13861 | } |
13862 | |
13863 | bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( |
13864 | const SDNode *N, CombineLevel Level) const { |
13865 | assert(((N->getOpcode() == ISD::SHL && |
13866 | N->getOperand(0).getOpcode() == ISD::SRL) || |
13867 | (N->getOpcode() == ISD::SRL && |
13868 | N->getOperand(0).getOpcode() == ISD::SHL)) && |
13869 | "Expected shift-shift mask" ); |
13870 | |
13871 | if (!Subtarget->isThumb1Only()) |
13872 | return true; |
13873 | |
13874 | if (Level == BeforeLegalizeTypes) |
13875 | return true; |
13876 | |
13877 | return false; |
13878 | } |
13879 | |
13880 | bool ARMTargetLowering::shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, |
13881 | EVT VT) const { |
13882 | return Subtarget->hasMVEIntegerOps() && isTypeLegal(VT); |
13883 | } |
13884 | |
13885 | bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { |
13886 | if (!Subtarget->hasNEON()) { |
13887 | if (Subtarget->isThumb1Only()) |
13888 | return VT.getScalarSizeInBits() <= 32; |
13889 | return true; |
13890 | } |
13891 | return VT.isScalarInteger(); |
13892 | } |
13893 | |
13894 | bool ARMTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT, |
13895 | EVT VT) const { |
13896 | if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple()) |
13897 | return false; |
13898 | |
13899 | switch (FPVT.getSimpleVT().SimpleTy) { |
13900 | case MVT::f16: |
13901 | return Subtarget->hasVFP2Base(); |
13902 | case MVT::f32: |
13903 | return Subtarget->hasVFP2Base(); |
13904 | case MVT::f64: |
13905 | return Subtarget->hasFP64(); |
13906 | case MVT::v4f32: |
13907 | case MVT::v8f16: |
13908 | return Subtarget->hasMVEFloatOps(); |
13909 | default: |
13910 | return false; |
13911 | } |
13912 | } |
13913 | |
13914 | static SDValue PerformSHLSimplify(SDNode *N, |
13915 | TargetLowering::DAGCombinerInfo &DCI, |
13916 | const ARMSubtarget *ST) { |
13917 | // Allow the generic combiner to identify potential bswaps. |
13918 | if (DCI.isBeforeLegalize()) |
13919 | return SDValue(); |
13920 | |
13921 | // DAG combiner will fold: |
13922 | // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) |
13923 | // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
13924 | // Other code patterns that can also be modified have the following form:
13925 | // b + ((a << 1) | 510) |
13926 | // b + ((a << 1) & 510) |
13927 | // b + ((a << 1) ^ 510) |
13928 | // b + ((a << 1) + 510) |
13929 | |
13930 | // Many instructions can perform the shift for free, but it requires both |
13931 | // the operands to be registers. If c1 << c2 is too large, a mov immediate |
13932 | // instruction will be needed. So, unfold back to the original pattern if:
13933 | // - c1 and c2 are small enough that they don't require mov imms.
13934 | // - the user(s) of the node can perform a shl
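// e.g. (add b, (or (shl a, 1), 510)) is turned back into
// (add b, (shl (or a, 255), 1)) so the user can fold the shift.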
13935 | |
13936 | // No shifted operands for 16-bit instructions. |
13937 | if (ST->isThumb() && ST->isThumb1Only()) |
13938 | return SDValue(); |
13939 | |
13940 | // Check that all the users could perform the shl themselves. |
13941 | for (auto *U : N->uses()) { |
13942 | switch(U->getOpcode()) { |
13943 | default: |
13944 | return SDValue(); |
13945 | case ISD::SUB: |
13946 | case ISD::ADD: |
13947 | case ISD::AND: |
13948 | case ISD::OR: |
13949 | case ISD::XOR: |
13950 | case ISD::SETCC: |
13951 | case ARMISD::CMP: |
13952 | // Check that the user isn't already using a constant because there |
13953 | // aren't any instructions that support an immediate operand and a |
13954 | // shifted operand. |
13955 | if (isa<ConstantSDNode>(Val: U->getOperand(Num: 0)) || |
13956 | isa<ConstantSDNode>(Val: U->getOperand(Num: 1))) |
13957 | return SDValue(); |
13958 | |
13959 | // Check that it's not already using a shift. |
13960 | if (U->getOperand(Num: 0).getOpcode() == ISD::SHL || |
13961 | U->getOperand(Num: 1).getOpcode() == ISD::SHL) |
13962 | return SDValue(); |
13963 | break; |
13964 | } |
13965 | } |
13966 | |
13967 | if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR && |
13968 | N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND) |
13969 | return SDValue(); |
13970 | |
13971 | if (N->getOperand(Num: 0).getOpcode() != ISD::SHL) |
13972 | return SDValue(); |
13973 | |
13974 | SDValue SHL = N->getOperand(Num: 0); |
13975 | |
13976 | auto *C1ShlC2 = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
13977 | auto *C2 = dyn_cast<ConstantSDNode>(Val: SHL.getOperand(i: 1)); |
13978 | if (!C1ShlC2 || !C2) |
13979 | return SDValue(); |
13980 | |
13981 | APInt C2Int = C2->getAPIntValue(); |
13982 | APInt C1Int = C1ShlC2->getAPIntValue(); |
13983 | unsigned C2Width = C2Int.getBitWidth(); |
13984 | if (C2Int.uge(RHS: C2Width)) |
13985 | return SDValue(); |
13986 | uint64_t C2Value = C2Int.getZExtValue(); |
13987 | |
13988 | // Check that performing a lshr will not lose any information. |
13989 | APInt Mask = APInt::getHighBitsSet(numBits: C2Width, hiBitsSet: C2Width - C2Value); |
13990 | if ((C1Int & Mask) != C1Int) |
13991 | return SDValue(); |
13992 | |
13993 | // Shift the first constant. |
13994 | C1Int.lshrInPlace(ShiftAmt: C2Int); |
13995 | |
13996 | // The immediates are encoded as an 8-bit value that can be rotated. |
13997 | auto LargeImm = [](const APInt &Imm) { |
13998 | unsigned Zeros = Imm.countl_zero() + Imm.countr_zero(); |
13999 | return Imm.getBitWidth() - Zeros > 8; |
14000 | }; |
14001 | |
14002 | if (LargeImm(C1Int) || LargeImm(C2Int)) |
14003 | return SDValue(); |
14004 | |
14005 | SelectionDAG &DAG = DCI.DAG; |
14006 | SDLoc dl(N); |
14007 | SDValue X = SHL.getOperand(i: 0); |
14008 | SDValue BinOp = DAG.getNode(Opcode: N->getOpcode(), DL: dl, VT: MVT::i32, N1: X, |
14009 | N2: DAG.getConstant(Val: C1Int, DL: dl, VT: MVT::i32)); |
14010 | // Shift left to compensate for the lshr of C1Int. |
14011 | SDValue Res = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT: MVT::i32, N1: BinOp, N2: SHL.getOperand(i: 1)); |
14012 | |
14013 | LLVM_DEBUG(dbgs() << "Simplify shl use:\n" ; SHL.getOperand(0).dump(); |
14014 | SHL.dump(); N->dump()); |
14015 | LLVM_DEBUG(dbgs() << "Into:\n" ; X.dump(); BinOp.dump(); Res.dump()); |
14016 | return Res; |
14017 | } |
14018 | |
14019 | |
14020 | /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. |
14021 | /// |
14022 | static SDValue PerformADDCombine(SDNode *N, |
14023 | TargetLowering::DAGCombinerInfo &DCI, |
14024 | const ARMSubtarget *Subtarget) { |
14025 | SDValue N0 = N->getOperand(Num: 0); |
14026 | SDValue N1 = N->getOperand(Num: 1); |
14027 | |
14028 | // Only works one way, because it needs an immediate operand. |
14029 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
14030 | return Result; |
14031 | |
14032 | if (SDValue Result = PerformADDVecReduce(N, DAG&: DCI.DAG, Subtarget)) |
14033 | return Result; |
14034 | |
14035 | // First try with the default operand order. |
14036 | if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget)) |
14037 | return Result; |
14038 | |
14039 | // If that didn't work, try again with the operands commuted. |
14040 | return PerformADDCombineWithOperands(N, N0: N1, N1: N0, DCI, Subtarget); |
14041 | } |
14042 | |
14043 | // Combine (sub 0, (csinc X, Y, CC)) -> (csinv -X, Y, CC) |
14044 | // providing -X is as cheap as X (currently, just a constant). |
14045 | static SDValue PerformSubCSINCCombine(SDNode *N, SelectionDAG &DAG) { |
14046 | if (N->getValueType(ResNo: 0) != MVT::i32 || !isNullConstant(V: N->getOperand(Num: 0))) |
14047 | return SDValue(); |
14048 | SDValue CSINC = N->getOperand(Num: 1); |
14049 | if (CSINC.getOpcode() != ARMISD::CSINC || !CSINC.hasOneUse()) |
14050 | return SDValue(); |
14051 | |
14052 | ConstantSDNode *X = dyn_cast<ConstantSDNode>(Val: CSINC.getOperand(i: 0)); |
14053 | if (!X) |
14054 | return SDValue(); |
14055 | |
14056 | return DAG.getNode(Opcode: ARMISD::CSINV, DL: SDLoc(N), VT: MVT::i32, |
14057 | N1: DAG.getNode(Opcode: ISD::SUB, DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
14058 | N2: CSINC.getOperand(i: 0)), |
14059 | N2: CSINC.getOperand(i: 1), N3: CSINC.getOperand(i: 2), |
14060 | N4: CSINC.getOperand(i: 3)); |
14061 | } |
14062 | |
14063 | /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. |
14064 | /// |
14065 | static SDValue PerformSUBCombine(SDNode *N, |
14066 | TargetLowering::DAGCombinerInfo &DCI, |
14067 | const ARMSubtarget *Subtarget) { |
14068 | SDValue N0 = N->getOperand(Num: 0); |
14069 | SDValue N1 = N->getOperand(Num: 1); |
14070 | |
14071 | // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
14072 | if (N1.getNode()->hasOneUse()) |
14073 | if (SDValue Result = combineSelectAndUse(N, Slct: N1, OtherOp: N0, DCI)) |
14074 | return Result; |
14075 | |
14076 | if (SDValue R = PerformSubCSINCCombine(N, DAG&: DCI.DAG)) |
14077 | return R; |
14078 | |
14079 | if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(ResNo: 0).isVector()) |
14080 | return SDValue(); |
14081 | |
14082 | // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x)) |
14083 | // so that we can readily pattern match more mve instructions which can use |
14084 | // a scalar operand. |
14085 | SDValue VDup = N->getOperand(Num: 1); |
14086 | if (VDup->getOpcode() != ARMISD::VDUP) |
14087 | return SDValue(); |
14088 | |
14089 | SDValue VMov = N->getOperand(Num: 0); |
14090 | if (VMov->getOpcode() == ISD::BITCAST) |
14091 | VMov = VMov->getOperand(Num: 0); |
14092 | |
14093 | if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(N: VMov)) |
14094 | return SDValue(); |
14095 | |
14096 | SDLoc dl(N); |
14097 | SDValue Negate = DCI.DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
14098 | N1: DCI.DAG.getConstant(Val: 0, DL: dl, VT: MVT::i32), |
14099 | N2: VDup->getOperand(Num: 0)); |
14100 | return DCI.DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), Operand: Negate); |
14101 | } |
14102 | |
14103 | /// PerformVMULCombine |
14104 | /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the |
14105 | /// special multiplier accumulator forwarding. |
14106 | /// vmul d3, d0, d2 |
14107 | /// vmla d3, d1, d2 |
14108 | /// is faster than |
14109 | /// vadd d3, d0, d1 |
14110 | /// vmul d3, d3, d2 |
14111 | // However, for (A + B) * (A + B), |
14112 | // vadd d2, d0, d1 |
14113 | // vmul d3, d0, d2 |
14114 | // vmla d3, d1, d2 |
14115 | // is slower than |
14116 | // vadd d2, d0, d1 |
14117 | // vmul d3, d2, d2 |
14118 | static SDValue PerformVMULCombine(SDNode *N, |
14119 | TargetLowering::DAGCombinerInfo &DCI, |
14120 | const ARMSubtarget *Subtarget) { |
14121 | if (!Subtarget->hasVMLxForwarding()) |
14122 | return SDValue(); |
14123 | |
14124 | SelectionDAG &DAG = DCI.DAG; |
14125 | SDValue N0 = N->getOperand(Num: 0); |
14126 | SDValue N1 = N->getOperand(Num: 1); |
14127 | unsigned Opcode = N0.getOpcode(); |
14128 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
14129 | Opcode != ISD::FADD && Opcode != ISD::FSUB) { |
14130 | Opcode = N1.getOpcode(); |
14131 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
14132 | Opcode != ISD::FADD && Opcode != ISD::FSUB) |
14133 | return SDValue(); |
14134 | std::swap(a&: N0, b&: N1); |
14135 | } |
14136 | |
14137 | if (N0 == N1) |
14138 | return SDValue(); |
14139 | |
14140 | EVT VT = N->getValueType(ResNo: 0); |
14141 | SDLoc DL(N); |
14142 | SDValue N00 = N0->getOperand(Num: 0); |
14143 | SDValue N01 = N0->getOperand(Num: 1); |
14144 | return DAG.getNode(Opcode, DL, VT, |
14145 | N1: DAG.getNode(Opcode: ISD::MUL, DL, VT, N1: N00, N2: N1), |
14146 | N2: DAG.getNode(Opcode: ISD::MUL, DL, VT, N1: N01, N2: N1)); |
14147 | } |
14148 | |
14149 | static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG, |
14150 | const ARMSubtarget *Subtarget) { |
14151 | EVT VT = N->getValueType(ResNo: 0); |
14152 | if (VT != MVT::v2i64) |
14153 | return SDValue(); |
14154 | |
14155 | SDValue N0 = N->getOperand(Num: 0); |
14156 | SDValue N1 = N->getOperand(Num: 1); |
14157 | |
14158 | auto IsSignExt = [&](SDValue Op) { |
14159 | if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG) |
14160 | return SDValue(); |
14161 | EVT VT = cast<VTSDNode>(Val: Op->getOperand(Num: 1))->getVT(); |
14162 | if (VT.getScalarSizeInBits() == 32) |
14163 | return Op->getOperand(Num: 0); |
14164 | return SDValue(); |
14165 | }; |
14166 | auto IsZeroExt = [&](SDValue Op) { |
14167 | // Zero extends are a little more awkward. At the point we are matching |
14168 | // this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask. |
14169 | // That might be before or after a bitcast depending on how the and is
14170 | // placed. Because this has to look through bitcasts, it is currently only |
14171 | // supported on LE. |
14172 | if (!Subtarget->isLittle()) |
14173 | return SDValue(); |
14174 | |
14175 | SDValue And = Op; |
14176 | if (And->getOpcode() == ISD::BITCAST) |
14177 | And = And->getOperand(Num: 0); |
14178 | if (And->getOpcode() != ISD::AND) |
14179 | return SDValue(); |
14180 | SDValue Mask = And->getOperand(Num: 1); |
14181 | if (Mask->getOpcode() == ISD::BITCAST) |
14182 | Mask = Mask->getOperand(Num: 0); |
14183 | |
14184 | if (Mask->getOpcode() != ISD::BUILD_VECTOR || |
14185 | Mask.getValueType() != MVT::v4i32) |
14186 | return SDValue(); |
14187 | if (isAllOnesConstant(V: Mask->getOperand(Num: 0)) && |
14188 | isNullConstant(V: Mask->getOperand(Num: 1)) && |
14189 | isAllOnesConstant(V: Mask->getOperand(Num: 2)) && |
14190 | isNullConstant(V: Mask->getOperand(Num: 3))) |
14191 | return And->getOperand(Num: 0); |
14192 | return SDValue(); |
14193 | }; |
14194 | |
14195 | SDLoc dl(N); |
14196 | if (SDValue Op0 = IsSignExt(N0)) { |
14197 | if (SDValue Op1 = IsSignExt(N1)) { |
14198 | SDValue New0a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op0); |
14199 | SDValue New1a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op1); |
14200 | return DAG.getNode(Opcode: ARMISD::VMULLs, DL: dl, VT, N1: New0a, N2: New1a); |
14201 | } |
14202 | } |
14203 | if (SDValue Op0 = IsZeroExt(N0)) { |
14204 | if (SDValue Op1 = IsZeroExt(N1)) { |
14205 | SDValue New0a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op0); |
14206 | SDValue New1a = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v4i32, Operand: Op1); |
14207 | return DAG.getNode(Opcode: ARMISD::VMULLu, DL: dl, VT, N1: New0a, N2: New1a); |
14208 | } |
14209 | } |
14210 | |
14211 | return SDValue(); |
14212 | } |
14213 | |
14214 | static SDValue PerformMULCombine(SDNode *N, |
14215 | TargetLowering::DAGCombinerInfo &DCI, |
14216 | const ARMSubtarget *Subtarget) { |
14217 | SelectionDAG &DAG = DCI.DAG; |
14218 | |
14219 | EVT VT = N->getValueType(ResNo: 0); |
14220 | if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64) |
14221 | return PerformMVEVMULLCombine(N, DAG, Subtarget); |
14222 | |
14223 | if (Subtarget->isThumb1Only()) |
14224 | return SDValue(); |
14225 | |
14226 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
14227 | return SDValue(); |
14228 | |
14229 | if (VT.is64BitVector() || VT.is128BitVector()) |
14230 | return PerformVMULCombine(N, DCI, Subtarget); |
14231 | if (VT != MVT::i32) |
14232 | return SDValue(); |
14233 | |
14234 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
14235 | if (!C) |
14236 | return SDValue(); |
14237 | |
14238 | int64_t MulAmt = C->getSExtValue(); |
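// Peel off the trailing zero bits of the multiplier; the equivalent left
// shift is re-applied to the result at the end.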
14239 | unsigned ShiftAmt = llvm::countr_zero<uint64_t>(Val: MulAmt); |
14240 | |
14241 | ShiftAmt = ShiftAmt & (32 - 1); |
14242 | SDValue V = N->getOperand(Num: 0); |
14243 | SDLoc DL(N); |
14244 | |
14245 | SDValue Res; |
14246 | MulAmt >>= ShiftAmt; |
14247 | |
14248 | if (MulAmt >= 0) { |
14249 | if (llvm::has_single_bit<uint32_t>(Value: MulAmt - 1)) { |
14250 | // (mul x, 2^N + 1) => (add (shl x, N), x) |
14251 | Res = DAG.getNode(Opcode: ISD::ADD, DL, VT, |
14252 | N1: V, |
14253 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
14254 | N1: V, |
14255 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmt - 1), DL, |
14256 | VT: MVT::i32))); |
14257 | } else if (llvm::has_single_bit<uint32_t>(Value: MulAmt + 1)) { |
14258 | // (mul x, 2^N - 1) => (sub (shl x, N), x) |
14259 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
14260 | N1: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
14261 | N1: V, |
14262 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmt + 1), DL, |
14263 | VT: MVT::i32)), |
14264 | N2: V); |
14265 | } else |
14266 | return SDValue(); |
14267 | } else { |
14268 | uint64_t MulAmtAbs = -MulAmt; |
14269 | if (llvm::has_single_bit<uint32_t>(Value: MulAmtAbs + 1)) { |
14270 | // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) |
14271 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
14272 | N1: V, |
14273 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
14274 | N1: V, |
14275 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmtAbs + 1), DL, |
14276 | VT: MVT::i32))); |
14277 | } else if (llvm::has_single_bit<uint32_t>(Value: MulAmtAbs - 1)) { |
14278 | // (mul x, -(2^N + 1)) => - (add (shl x, N), x) |
14279 | Res = DAG.getNode(Opcode: ISD::ADD, DL, VT, |
14280 | N1: V, |
14281 | N2: DAG.getNode(Opcode: ISD::SHL, DL, VT, |
14282 | N1: V, |
14283 | N2: DAG.getConstant(Val: Log2_32(Value: MulAmtAbs - 1), DL, |
14284 | VT: MVT::i32))); |
14285 | Res = DAG.getNode(Opcode: ISD::SUB, DL, VT, |
14286 | N1: DAG.getConstant(Val: 0, DL, VT: MVT::i32), N2: Res); |
14287 | } else |
14288 | return SDValue(); |
14289 | } |
14290 | |
14291 | if (ShiftAmt != 0) |
14292 | Res = DAG.getNode(Opcode: ISD::SHL, DL, VT, |
14293 | N1: Res, N2: DAG.getConstant(Val: ShiftAmt, DL, VT: MVT::i32)); |
14294 | |
14295 | // Do not add new nodes to DAG combiner worklist. |
14296 | DCI.CombineTo(N, Res, AddTo: false); |
14297 | return SDValue(); |
14298 | } |
14299 | |
14300 | static SDValue CombineANDShift(SDNode *N, |
14301 | TargetLowering::DAGCombinerInfo &DCI, |
14302 | const ARMSubtarget *Subtarget) { |
14303 | // Allow DAGCombine to pattern-match before we touch the canonical form. |
14304 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
14305 | return SDValue(); |
14306 | |
14307 | if (N->getValueType(ResNo: 0) != MVT::i32) |
14308 | return SDValue(); |
14309 | |
14310 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
14311 | if (!N1C) |
14312 | return SDValue(); |
14313 | |
14314 | uint32_t C1 = (uint32_t)N1C->getZExtValue(); |
14315 | // Don't transform uxtb/uxth. |
14316 | if (C1 == 255 || C1 == 65535) |
14317 | return SDValue(); |
14318 | |
14319 | SDNode *N0 = N->getOperand(Num: 0).getNode(); |
14320 | if (!N0->hasOneUse()) |
14321 | return SDValue(); |
14322 | |
14323 | if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL) |
14324 | return SDValue(); |
14325 | |
14326 | bool LeftShift = N0->getOpcode() == ISD::SHL; |
14327 | |
14328 | ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(Val: N0->getOperand(Num: 1)); |
14329 | if (!N01C) |
14330 | return SDValue(); |
14331 | |
14332 | uint32_t C2 = (uint32_t)N01C->getZExtValue(); |
14333 | if (!C2 || C2 >= 32) |
14334 | return SDValue(); |
14335 | |
14336 | // Clear irrelevant bits in the mask. |
14337 | if (LeftShift) |
14338 | C1 &= (-1U << C2); |
14339 | else |
14340 | C1 &= (-1U >> C2); |
14341 | |
14342 | SelectionDAG &DAG = DCI.DAG; |
14343 | SDLoc DL(N); |
14344 | |
14345 | // We have a pattern of the form "(and (shl x, c2) c1)" or |
14346 | // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to |
14347 | // transform to a pair of shifts, to save materializing c1. |
14348 | |
14349 | // First pattern: right shift, then mask off leading bits. |
14350 | // FIXME: Use demanded bits? |
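  // Illustrative example: for (and (srl x, 2), 0x3f), C3 = countl_zero(0x3f)
  // = 26 and C2 = 2 < C3, so the pair becomes (srl (shl x, 24), 26), which
  // extracts bits [7:2] of x without materializing the mask.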
14351 | if (!LeftShift && isMask_32(Value: C1)) { |
14352 | uint32_t C3 = llvm::countl_zero(Val: C1); |
14353 | if (C2 < C3) { |
14354 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
14355 | N2: DAG.getConstant(Val: C3 - C2, DL, VT: MVT::i32)); |
14356 | return DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
14357 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
14358 | } |
14359 | } |
14360 | |
14361 | // First pattern, reversed: left shift, then mask off trailing bits. |
14362 | if (LeftShift && isMask_32(Value: ~C1)) { |
14363 | uint32_t C3 = llvm::countr_zero(Val: C1); |
14364 | if (C2 < C3) { |
14365 | SDValue SHL = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
14366 | N2: DAG.getConstant(Val: C3 - C2, DL, VT: MVT::i32)); |
14367 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: SHL, |
14368 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
14369 | } |
14370 | } |
14371 | |
14372 | // Second pattern: left shift, then mask off leading bits. |
14373 | // FIXME: Use demanded bits? |
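  // Illustrative example: for (and (shl x, 4), 0x0ffffff0), Trailing = 4 == C2
  // and C3 = countl_zero(0x0ffffff0) = 4, so the pair becomes
  // (srl (shl x, 8), 4), again avoiding the constant.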
14374 | if (LeftShift && isShiftedMask_32(Value: C1)) { |
14375 | uint32_t Trailing = llvm::countr_zero(Val: C1); |
14376 | uint32_t C3 = llvm::countl_zero(Val: C1); |
14377 | if (Trailing == C2 && C2 + C3 < 32) { |
14378 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
14379 | N2: DAG.getConstant(Val: C2 + C3, DL, VT: MVT::i32)); |
14380 | return DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
14381 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
14382 | } |
14383 | } |
14384 | |
14385 | // Second pattern, reversed: right shift, then mask off trailing bits. |
14386 | // FIXME: Handle other patterns of known/demanded bits. |
14387 | if (!LeftShift && isShiftedMask_32(Value: C1)) { |
14388 | uint32_t Leading = llvm::countl_zero(Val: C1); |
14389 | uint32_t C3 = llvm::countr_zero(Val: C1); |
14390 | if (Leading == C2 && C2 + C3 < 32) { |
14391 | SDValue SHL = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
14392 | N2: DAG.getConstant(Val: C2 + C3, DL, VT: MVT::i32)); |
14393 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: SHL, |
14394 | N2: DAG.getConstant(Val: C3, DL, VT: MVT::i32)); |
14395 | } |
14396 | } |
14397 | |
14398 | // Transform "(and (shl x, c2) c1)" into "(shl (and x, c1>>c2), c2)" |
14399 | // if "c1 >> c2" is a cheaper immediate than "c1" |
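  // For instance, (and (shl x, 8), 0xa000) -> (shl (and x, 0xa0), 8),
  // assuming 0xa0 is cheaper to materialize than 0xa000 (typically true on
  // Thumb1, where 0xa0 fits a single MOV immediate).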
14400 | if (LeftShift && |
14401 | HasLowerConstantMaterializationCost(Val1: C1 >> C2, Val2: C1, Subtarget)) { |
14402 | |
14403 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
14404 | N2: DAG.getConstant(Val: C1 >> C2, DL, VT: MVT::i32)); |
14405 | return DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: And, |
14406 | N2: DAG.getConstant(Val: C2, DL, VT: MVT::i32)); |
14407 | } |
14408 | |
14409 | return SDValue(); |
14410 | } |
14411 | |
14412 | static SDValue PerformANDCombine(SDNode *N, |
14413 | TargetLowering::DAGCombinerInfo &DCI, |
14414 | const ARMSubtarget *Subtarget) { |
14415 | // Attempt to use immediate-form VBIC |
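  // For instance, an AND with a v4i32 splat of 0xffffff00 should become a
  // VBICIMM of the (encodable) modified immediate 0x000000ff, since VBIC
  // computes x & ~imm; the exact constant here is only illustrative.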
14416 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: N->getOperand(Num: 1)); |
14417 | SDLoc dl(N); |
14418 | EVT VT = N->getValueType(ResNo: 0); |
14419 | SelectionDAG &DAG = DCI.DAG; |
14420 | |
14421 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v2i1 || |
14422 | VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1) |
14423 | return SDValue(); |
14424 | |
14425 | APInt SplatBits, SplatUndef; |
14426 | unsigned SplatBitSize; |
14427 | bool HasAnyUndefs; |
14428 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && |
14429 | BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
14430 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || |
14431 | SplatBitSize == 64) { |
14432 | EVT VbicVT; |
14433 | SDValue Val = isVMOVModifiedImm(SplatBits: (~SplatBits).getZExtValue(), |
14434 | SplatUndef: SplatUndef.getZExtValue(), SplatBitSize, |
14435 | DAG, dl, VT&: VbicVT, VectorVT: VT, type: OtherModImm); |
14436 | if (Val.getNode()) { |
14437 | SDValue Input = |
14438 | DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VbicVT, Operand: N->getOperand(Num: 0)); |
14439 | SDValue Vbic = DAG.getNode(Opcode: ARMISD::VBICIMM, DL: dl, VT: VbicVT, N1: Input, N2: Val); |
14440 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vbic); |
14441 | } |
14442 | } |
14443 | } |
14444 | |
14445 | if (!Subtarget->isThumb1Only()) { |
14446 | // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) |
14447 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: true, DCI)) |
14448 | return Result; |
14449 | |
14450 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
14451 | return Result; |
14452 | } |
14453 | |
14454 | if (Subtarget->isThumb1Only()) |
14455 | if (SDValue Result = CombineANDShift(N, DCI, Subtarget)) |
14456 | return Result; |
14457 | |
14458 | return SDValue(); |
14459 | } |
14460 | |
14461 | // Try combining OR nodes to SMULWB, SMULWT. |
14462 | static SDValue PerformORCombineToSMULWBT(SDNode *OR, |
14463 | TargetLowering::DAGCombinerInfo &DCI, |
14464 | const ARMSubtarget *Subtarget) { |
14465 | if (!Subtarget->hasV6Ops() || |
14466 | (Subtarget->isThumb() && |
14467 | (!Subtarget->hasThumb2() || !Subtarget->hasDSP()))) |
14468 | return SDValue(); |
14469 | |
14470 | SDValue SRL = OR->getOperand(Num: 0); |
14471 | SDValue SHL = OR->getOperand(Num: 1); |
14472 | |
14473 | if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) { |
14474 | SRL = OR->getOperand(Num: 1); |
14475 | SHL = OR->getOperand(Num: 0); |
14476 | } |
14477 | if (!isSRL16(Op: SRL) || !isSHL16(Op: SHL)) |
14478 | return SDValue(); |
14479 | |
14480 | // The first operands to the shifts need to be the two results from the |
14481 | // same smul_lohi node. |
14482 | if ((SRL.getOperand(i: 0).getNode() != SHL.getOperand(i: 0).getNode()) || |
14483 | SRL.getOperand(i: 0).getOpcode() != ISD::SMUL_LOHI) |
14484 | return SDValue(); |
14485 | |
14486 | SDNode *SMULLOHI = SRL.getOperand(i: 0).getNode(); |
14487 | if (SRL.getOperand(i: 0) != SDValue(SMULLOHI, 0) || |
14488 | SHL.getOperand(i: 0) != SDValue(SMULLOHI, 1)) |
14489 | return SDValue(); |
14490 | |
14491 | // Now we have: |
14492 | // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))) |
  // For SMULW[B|T], smul_lohi takes one 32-bit and one 16-bit argument.
  // For SMULWB the 16-bit value must be sign extended in some way.
14495 | // For SMULWT only the SRA is required. |
14496 | // Check both sides of SMUL_LOHI |
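  // For reference, SMULWB computes (Rn * sext(Rm[15:0])) >> 16 and SMULWT
  // computes (Rn * sext(Rm[31:16])) >> 16, which matches the value
  // reassembled by the srl/shl pair over the smul_lohi halves above.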
14497 | SDValue OpS16 = SMULLOHI->getOperand(Num: 0); |
14498 | SDValue OpS32 = SMULLOHI->getOperand(Num: 1); |
14499 | |
14500 | SelectionDAG &DAG = DCI.DAG; |
14501 | if (!isS16(Op: OpS16, DAG) && !isSRA16(Op: OpS16)) { |
14502 | OpS16 = OpS32; |
14503 | OpS32 = SMULLOHI->getOperand(Num: 0); |
14504 | } |
14505 | |
14506 | SDLoc dl(OR); |
14507 | unsigned Opcode = 0; |
14508 | if (isS16(Op: OpS16, DAG)) |
14509 | Opcode = ARMISD::SMULWB; |
14510 | else if (isSRA16(Op: OpS16)) { |
14511 | Opcode = ARMISD::SMULWT; |
14512 | OpS16 = OpS16->getOperand(Num: 0); |
  } else
14515 | return SDValue(); |
14516 | |
14517 | SDValue Res = DAG.getNode(Opcode, DL: dl, VT: MVT::i32, N1: OpS32, N2: OpS16); |
14518 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(OR, 0), To: Res); |
14519 | return SDValue(OR, 0); |
14520 | } |
14521 | |
14522 | static SDValue PerformORCombineToBFI(SDNode *N, |
14523 | TargetLowering::DAGCombinerInfo &DCI, |
14524 | const ARMSubtarget *Subtarget) { |
14525 | // BFI is only available on V6T2+ |
14526 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) |
14527 | return SDValue(); |
14528 | |
14529 | EVT VT = N->getValueType(ResNo: 0); |
14530 | SDValue N0 = N->getOperand(Num: 0); |
14531 | SDValue N1 = N->getOperand(Num: 1); |
14532 | SelectionDAG &DAG = DCI.DAG; |
14533 | SDLoc DL(N); |
14534 | // 1) or (and A, mask), val => ARMbfi A, val, mask |
14535 | // iff (val & mask) == val |
14536 | // |
14537 | // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
14538 | // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) |
14539 | // && mask == ~mask2 |
14540 | // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) |
14541 | // && ~mask == mask2 |
14542 | // (i.e., copy a bitfield value into another bitfield of the same width) |
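  //
  // An illustrative example of (1):
  //   or (and A, 0xffffff00), 0xab  =>  ARMbfi A, 0xab, 0xffffff00
  // which inserts the value 0xab into bits [7:0] of A.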
14543 | |
14544 | if (VT != MVT::i32) |
14545 | return SDValue(); |
14546 | |
14547 | SDValue N00 = N0.getOperand(i: 0); |
14548 | |
14549 | // The value and the mask need to be constants so we can verify this is |
14550 | // actually a bitfield set. If the mask is 0xffff, we can do better |
14551 | // via a movt instruction, so don't use BFI in that case. |
14552 | SDValue MaskOp = N0.getOperand(i: 1); |
14553 | ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Val&: MaskOp); |
14554 | if (!MaskC) |
14555 | return SDValue(); |
14556 | unsigned Mask = MaskC->getZExtValue(); |
14557 | if (Mask == 0xffff) |
14558 | return SDValue(); |
14559 | SDValue Res; |
14560 | // Case (1): or (and A, mask), val => ARMbfi A, val, mask |
14561 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Val&: N1); |
14562 | if (N1C) { |
14563 | unsigned Val = N1C->getZExtValue(); |
14564 | if ((Val & ~Mask) != Val) |
14565 | return SDValue(); |
14566 | |
14567 | if (ARM::isBitFieldInvertedMask(v: Mask)) { |
14568 | Val >>= llvm::countr_zero(Val: ~Mask); |
14569 | |
14570 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N00, |
14571 | N2: DAG.getConstant(Val, DL, VT: MVT::i32), |
14572 | N3: DAG.getConstant(Val: Mask, DL, VT: MVT::i32)); |
14573 | |
14574 | DCI.CombineTo(N, Res, AddTo: false); |
      // Return value from the original node to inform the combiner that N is
      // now dead.
14577 | return SDValue(N, 0); |
14578 | } |
14579 | } else if (N1.getOpcode() == ISD::AND) { |
14580 | // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
14581 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(Val: N1.getOperand(i: 1)); |
14582 | if (!N11C) |
14583 | return SDValue(); |
14584 | unsigned Mask2 = N11C->getZExtValue(); |
14585 | |
    // Mask and ~Mask2 (or the reverse) must be equivalent for the BFI pattern
    // to match as-is.
14588 | if (ARM::isBitFieldInvertedMask(v: Mask) && |
14589 | (Mask == ~Mask2)) { |
14590 | // The pack halfword instruction works better for masks that fit it, |
14591 | // so use that when it's available. |
14592 | if (Subtarget->hasDSP() && |
14593 | (Mask == 0xffff || Mask == 0xffff0000)) |
14594 | return SDValue(); |
14595 | // 2a |
14596 | unsigned amt = llvm::countr_zero(Val: Mask2); |
14597 | Res = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: N1.getOperand(i: 0), |
14598 | N2: DAG.getConstant(Val: amt, DL, VT: MVT::i32)); |
14599 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N00, N2: Res, |
14600 | N3: DAG.getConstant(Val: Mask, DL, VT: MVT::i32)); |
14601 | DCI.CombineTo(N, Res, AddTo: false); |
      // Return value from the original node to inform the combiner that N is
      // now dead.
14604 | return SDValue(N, 0); |
14605 | } else if (ARM::isBitFieldInvertedMask(v: ~Mask) && |
14606 | (~Mask == Mask2)) { |
14607 | // The pack halfword instruction works better for masks that fit it, |
14608 | // so use that when it's available. |
14609 | if (Subtarget->hasDSP() && |
14610 | (Mask2 == 0xffff || Mask2 == 0xffff0000)) |
14611 | return SDValue(); |
14612 | // 2b |
14613 | unsigned lsb = llvm::countr_zero(Val: Mask); |
14614 | Res = DAG.getNode(Opcode: ISD::SRL, DL, VT, N1: N00, |
14615 | N2: DAG.getConstant(Val: lsb, DL, VT: MVT::i32)); |
14616 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1: N1.getOperand(i: 0), N2: Res, |
14617 | N3: DAG.getConstant(Val: Mask2, DL, VT: MVT::i32)); |
14618 | DCI.CombineTo(N, Res, AddTo: false); |
      // Return value from the original node to inform the combiner that N is
      // now dead.
14621 | return SDValue(N, 0); |
14622 | } |
14623 | } |
14624 | |
14625 | if (DAG.MaskedValueIsZero(Op: N1, Mask: MaskC->getAPIntValue()) && |
14626 | N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Val: N00.getOperand(i: 1)) && |
14627 | ARM::isBitFieldInvertedMask(v: ~Mask)) { |
14628 | // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask |
14629 | // where lsb(mask) == #shamt and masked bits of B are known zero. |
14630 | SDValue ShAmt = N00.getOperand(i: 1); |
14631 | unsigned ShAmtC = ShAmt->getAsZExtVal(); |
14632 | unsigned LSB = llvm::countr_zero(Val: Mask); |
14633 | if (ShAmtC != LSB) |
14634 | return SDValue(); |
14635 | |
14636 | Res = DAG.getNode(Opcode: ARMISD::BFI, DL, VT, N1, N2: N00.getOperand(i: 0), |
14637 | N3: DAG.getConstant(Val: ~Mask, DL, VT: MVT::i32)); |
14638 | |
14639 | DCI.CombineTo(N, Res, AddTo: false); |
    // Return value from the original node to inform the combiner that N is
    // now dead.
14642 | return SDValue(N, 0); |
14643 | } |
14644 | |
14645 | return SDValue(); |
14646 | } |
14647 | |
14648 | static bool isValidMVECond(unsigned CC, bool IsFloat) { |
14649 | switch (CC) { |
14650 | case ARMCC::EQ: |
14651 | case ARMCC::NE: |
14652 | case ARMCC::LE: |
14653 | case ARMCC::GT: |
14654 | case ARMCC::GE: |
14655 | case ARMCC::LT: |
14656 | return true; |
14657 | case ARMCC::HS: |
14658 | case ARMCC::HI: |
14659 | return !IsFloat; |
14660 | default: |
14661 | return false; |
14662 | }; |
14663 | } |
14664 | |
14665 | static ARMCC::CondCodes getVCMPCondCode(SDValue N) { |
14666 | if (N->getOpcode() == ARMISD::VCMP) |
14667 | return (ARMCC::CondCodes)N->getConstantOperandVal(Num: 2); |
14668 | else if (N->getOpcode() == ARMISD::VCMPZ) |
14669 | return (ARMCC::CondCodes)N->getConstantOperandVal(Num: 1); |
14670 | else |
    llvm_unreachable("Not a VCMP/VCMPZ!");
14672 | } |
14673 | |
14674 | static bool CanInvertMVEVCMP(SDValue N) { |
14675 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(CC: getVCMPCondCode(N)); |
14676 | return isValidMVECond(CC, IsFloat: N->getOperand(Num: 0).getValueType().isFloatingPoint()); |
14677 | } |
14678 | |
14679 | static SDValue PerformORCombine_i1(SDNode *N, SelectionDAG &DAG, |
14680 | const ARMSubtarget *Subtarget) { |
14681 | // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain |
14682 | // together with predicates |
14683 | EVT VT = N->getValueType(ResNo: 0); |
14684 | SDLoc DL(N); |
14685 | SDValue N0 = N->getOperand(Num: 0); |
14686 | SDValue N1 = N->getOperand(Num: 1); |
14687 | |
14688 | auto IsFreelyInvertable = [&](SDValue V) { |
14689 | if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ) |
14690 | return CanInvertMVEVCMP(N: V); |
14691 | return false; |
14692 | }; |
14693 | |
14694 | // At least one operand must be freely invertable. |
14695 | if (!(IsFreelyInvertable(N0) || IsFreelyInvertable(N1))) |
14696 | return SDValue(); |
14697 | |
14698 | SDValue NewN0 = DAG.getLogicalNOT(DL, Val: N0, VT); |
14699 | SDValue NewN1 = DAG.getLogicalNOT(DL, Val: N1, VT); |
14700 | SDValue And = DAG.getNode(Opcode: ISD::AND, DL, VT, N1: NewN0, N2: NewN1); |
14701 | return DAG.getLogicalNOT(DL, Val: And, VT); |
14702 | } |
14703 | |
14704 | /// PerformORCombine - Target-specific dag combine xforms for ISD::OR |
14705 | static SDValue PerformORCombine(SDNode *N, |
14706 | TargetLowering::DAGCombinerInfo &DCI, |
14707 | const ARMSubtarget *Subtarget) { |
14708 | // Attempt to use immediate-form VORR |
14709 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Val: N->getOperand(Num: 1)); |
14710 | SDLoc dl(N); |
14711 | EVT VT = N->getValueType(ResNo: 0); |
14712 | SelectionDAG &DAG = DCI.DAG; |
14713 | |
14714 | if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
14715 | return SDValue(); |
14716 | |
14717 | if (Subtarget->hasMVEIntegerOps() && (VT == MVT::v2i1 || VT == MVT::v4i1 || |
14718 | VT == MVT::v8i1 || VT == MVT::v16i1)) |
14719 | return PerformORCombine_i1(N, DAG, Subtarget); |
14720 | |
14721 | APInt SplatBits, SplatUndef; |
14722 | unsigned SplatBitSize; |
14723 | bool HasAnyUndefs; |
14724 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && |
14725 | BVN->isConstantSplat(SplatValue&: SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
14726 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || |
14727 | SplatBitSize == 64) { |
14728 | EVT VorrVT; |
14729 | SDValue Val = |
14730 | isVMOVModifiedImm(SplatBits: SplatBits.getZExtValue(), SplatUndef: SplatUndef.getZExtValue(), |
14731 | SplatBitSize, DAG, dl, VT&: VorrVT, VectorVT: VT, type: OtherModImm); |
14732 | if (Val.getNode()) { |
14733 | SDValue Input = |
14734 | DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VorrVT, Operand: N->getOperand(Num: 0)); |
14735 | SDValue Vorr = DAG.getNode(Opcode: ARMISD::VORRIMM, DL: dl, VT: VorrVT, N1: Input, N2: Val); |
14736 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vorr); |
14737 | } |
14738 | } |
14739 | } |
14740 | |
14741 | if (!Subtarget->isThumb1Only()) { |
14742 | // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) |
14743 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: false, DCI)) |
14744 | return Result; |
14745 | if (SDValue Result = PerformORCombineToSMULWBT(OR: N, DCI, Subtarget)) |
14746 | return Result; |
14747 | } |
14748 | |
14749 | SDValue N0 = N->getOperand(Num: 0); |
14750 | SDValue N1 = N->getOperand(Num: 1); |
14751 | |
14752 | // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. |
14753 | if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && |
14754 | DAG.getTargetLoweringInfo().isTypeLegal(VT)) { |
14755 | |
14756 | // The code below optimizes (or (and X, Y), Z). |
14757 | // The AND operand needs to have a single user to make these optimizations |
14758 | // profitable. |
14759 | if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) |
14760 | return SDValue(); |
14761 | |
14762 | APInt SplatUndef; |
14763 | unsigned SplatBitSize; |
14764 | bool HasAnyUndefs; |
14765 | |
14766 | APInt SplatBits0, SplatBits1; |
14767 | BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(Val: N0->getOperand(Num: 1)); |
14768 | BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(Val: N1->getOperand(Num: 1)); |
14769 | // Ensure that the second operand of both ands are constants |
14770 | if (BVN0 && BVN0->isConstantSplat(SplatValue&: SplatBits0, SplatUndef, SplatBitSize, |
14771 | HasAnyUndefs) && !HasAnyUndefs) { |
14772 | if (BVN1 && BVN1->isConstantSplat(SplatValue&: SplatBits1, SplatUndef, SplatBitSize, |
14773 | HasAnyUndefs) && !HasAnyUndefs) { |
14774 | // Ensure that the bit width of the constants are the same and that |
14775 | // the splat arguments are logical inverses as per the pattern we |
14776 | // are trying to simplify. |
14777 | if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && |
14778 | SplatBits0 == ~SplatBits1) { |
14779 | // Canonicalize the vector type to make instruction selection |
14780 | // simpler. |
14781 | EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
14782 | SDValue Result = DAG.getNode(Opcode: ARMISD::VBSP, DL: dl, VT: CanonicalVT, |
14783 | N1: N0->getOperand(Num: 1), |
14784 | N2: N0->getOperand(Num: 0), |
14785 | N3: N1->getOperand(Num: 0)); |
14786 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Result); |
14787 | } |
14788 | } |
14789 | } |
14790 | } |
14791 | |
14792 | // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when |
14793 | // reasonable. |
14794 | if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) { |
14795 | if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget)) |
14796 | return Res; |
14797 | } |
14798 | |
14799 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
14800 | return Result; |
14801 | |
14802 | return SDValue(); |
14803 | } |
14804 | |
14805 | static SDValue PerformXORCombine(SDNode *N, |
14806 | TargetLowering::DAGCombinerInfo &DCI, |
14807 | const ARMSubtarget *Subtarget) { |
14808 | EVT VT = N->getValueType(ResNo: 0); |
14809 | SelectionDAG &DAG = DCI.DAG; |
14810 | |
14811 | if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
14812 | return SDValue(); |
14813 | |
14814 | if (!Subtarget->isThumb1Only()) { |
14815 | // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) |
14816 | if (SDValue Result = combineSelectAndUseCommutative(N, AllOnes: false, DCI)) |
14817 | return Result; |
14818 | |
14819 | if (SDValue Result = PerformSHLSimplify(N, DCI, ST: Subtarget)) |
14820 | return Result; |
14821 | } |
14822 | |
14823 | if (Subtarget->hasMVEIntegerOps()) { |
14824 | // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition. |
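    // For instance, xor (VCMP a, b, EQ), (all-true predicate) becomes
    // VCMP a, b, NE, since the opposite of EQ is NE.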
14825 | SDValue N0 = N->getOperand(Num: 0); |
14826 | SDValue N1 = N->getOperand(Num: 1); |
14827 | const TargetLowering *TLI = Subtarget->getTargetLowering(); |
14828 | if (TLI->isConstTrueVal(N: N1) && |
14829 | (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) { |
14830 | if (CanInvertMVEVCMP(N: N0)) { |
14831 | SDLoc DL(N0); |
14832 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(CC: getVCMPCondCode(N: N0)); |
14833 | |
14834 | SmallVector<SDValue, 4> Ops; |
14835 | Ops.push_back(Elt: N0->getOperand(Num: 0)); |
14836 | if (N0->getOpcode() == ARMISD::VCMP) |
14837 | Ops.push_back(Elt: N0->getOperand(Num: 1)); |
14838 | Ops.push_back(Elt: DAG.getConstant(Val: CC, DL, VT: MVT::i32)); |
14839 | return DAG.getNode(Opcode: N0->getOpcode(), DL, VT: N0->getValueType(ResNo: 0), Ops); |
14840 | } |
14841 | } |
14842 | } |
14843 | |
14844 | return SDValue(); |
14845 | } |
14846 | |
// ParseBFI - given a BFI instruction in N, extract the "from" value (Rn)
// and return it, and fill in FromMask and ToMask with the (consecutive) bits
// in "from" to be extracted and their position in "to" (Rd).
14850 | static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { |
14851 | assert(N->getOpcode() == ARMISD::BFI); |
14852 | |
14853 | SDValue From = N->getOperand(Num: 1); |
14854 | ToMask = ~N->getConstantOperandAPInt(Num: 2); |
14855 | FromMask = APInt::getLowBitsSet(numBits: ToMask.getBitWidth(), loBitsSet: ToMask.popcount()); |
14856 | |
14857 | // If the Base came from a SHR #C, we can deduce that it is really testing bit |
14858 | // #C in the base of the SHR. |
14859 | if (From->getOpcode() == ISD::SRL && |
14860 | isa<ConstantSDNode>(Val: From->getOperand(Num: 1))) { |
14861 | APInt Shift = From->getConstantOperandAPInt(Num: 1); |
    assert(Shift.getLimitedValue() < 32 && "Shift too large!");
14863 | FromMask <<= Shift.getLimitedValue(Limit: 31); |
14864 | From = From->getOperand(Num: 0); |
14865 | } |
14866 | |
14867 | return From; |
14868 | } |
14869 | |
// If A and B each contain one contiguous set of bits, does A | B equal the
// concatenation A . B?
//
// Neither A nor B may be zero.
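//
// Illustrative example: for A = 0x000000f0 and B = 0x0000000f,
// LastActiveBitInA is 4 and FirstActiveBitInB is 3, so this returns true and
// A | B == 0xff is indeed the concatenation A . B.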
14873 | static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { |
14874 | unsigned LastActiveBitInA = A.countr_zero(); |
14875 | unsigned FirstActiveBitInB = B.getBitWidth() - B.countl_zero() - 1; |
14876 | return LastActiveBitInA - 1 == FirstActiveBitInB; |
14877 | } |
14878 | |
14879 | static SDValue FindBFIToCombineWith(SDNode *N) { |
14880 | // We have a BFI in N. Find a BFI it can combine with, if one exists. |
14881 | APInt ToMask, FromMask; |
14882 | SDValue From = ParseBFI(N, ToMask, FromMask); |
14883 | SDValue To = N->getOperand(Num: 0); |
14884 | |
14885 | SDValue V = To; |
14886 | if (V.getOpcode() != ARMISD::BFI) |
14887 | return SDValue(); |
14888 | |
14889 | APInt NewToMask, NewFromMask; |
14890 | SDValue NewFrom = ParseBFI(N: V.getNode(), ToMask&: NewToMask, FromMask&: NewFromMask); |
14891 | if (NewFrom != From) |
14892 | return SDValue(); |
14893 | |
14894 | // Do the written bits conflict with any we've seen so far? |
14895 | if ((NewToMask & ToMask).getBoolValue()) |
14896 | // Conflicting bits. |
14897 | return SDValue(); |
14898 | |
14899 | // Are the new bits contiguous when combined with the old bits? |
14900 | if (BitsProperlyConcatenate(A: ToMask, B: NewToMask) && |
14901 | BitsProperlyConcatenate(A: FromMask, B: NewFromMask)) |
14902 | return V; |
14903 | if (BitsProperlyConcatenate(A: NewToMask, B: ToMask) && |
14904 | BitsProperlyConcatenate(A: NewFromMask, B: FromMask)) |
14905 | return V; |
14906 | |
14907 | return SDValue(); |
14908 | } |
14909 | |
14910 | static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG) { |
14911 | SDValue N0 = N->getOperand(Num: 0); |
14912 | SDValue N1 = N->getOperand(Num: 1); |
14913 | |
14914 | if (N1.getOpcode() == ISD::AND) { |
14915 | // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff |
14916 | // the bits being cleared by the AND are not demanded by the BFI. |
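    // For instance, if the BFI's inverted-mask operand is 0xffffff00, only
    // bits [7:0] of the inserted value are demanded, so any Mask1 covering
    // those bits (e.g. 0xff or 0xffff) makes the AND redundant.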
14917 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(Val: N1.getOperand(i: 1)); |
14918 | if (!N11C) |
14919 | return SDValue(); |
14920 | unsigned InvMask = N->getConstantOperandVal(Num: 2); |
14921 | unsigned LSB = llvm::countr_zero(Val: ~InvMask); |
14922 | unsigned Width = llvm::bit_width<unsigned>(Value: ~InvMask) - LSB; |
14923 | assert(Width < |
14924 | static_cast<unsigned>(std::numeric_limits<unsigned>::digits) && |
14925 | "undefined behavior" ); |
14926 | unsigned Mask = (1u << Width) - 1; |
14927 | unsigned Mask2 = N11C->getZExtValue(); |
14928 | if ((Mask & (~Mask2)) == 0) |
14929 | return DAG.getNode(Opcode: ARMISD::BFI, DL: SDLoc(N), VT: N->getValueType(ResNo: 0), |
14930 | N1: N->getOperand(Num: 0), N2: N1.getOperand(i: 0), N3: N->getOperand(Num: 2)); |
14931 | return SDValue(); |
14932 | } |
14933 | |
14934 | // Look for another BFI to combine with. |
14935 | if (SDValue CombineBFI = FindBFIToCombineWith(N)) { |
14936 | // We've found a BFI. |
14937 | APInt ToMask1, FromMask1; |
14938 | SDValue From1 = ParseBFI(N, ToMask&: ToMask1, FromMask&: FromMask1); |
14939 | |
14940 | APInt ToMask2, FromMask2; |
14941 | SDValue From2 = ParseBFI(N: CombineBFI.getNode(), ToMask&: ToMask2, FromMask&: FromMask2); |
14942 | assert(From1 == From2); |
14943 | (void)From2; |
14944 | |
14945 | // Create a new BFI, combining the two together. |
14946 | APInt NewFromMask = FromMask1 | FromMask2; |
14947 | APInt NewToMask = ToMask1 | ToMask2; |
14948 | |
14949 | EVT VT = N->getValueType(ResNo: 0); |
14950 | SDLoc dl(N); |
14951 | |
14952 | if (NewFromMask[0] == 0) |
14953 | From1 = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: From1, |
14954 | N2: DAG.getConstant(Val: NewFromMask.countr_zero(), DL: dl, VT)); |
14955 | return DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: CombineBFI.getOperand(i: 0), N2: From1, |
14956 | N3: DAG.getConstant(Val: ~NewToMask, DL: dl, VT)); |
14957 | } |
14958 | |
  // Reassociate BFI(BFI (A, B, M1), C, M2) to BFI(BFI (A, C, M2), B, M1) so
  // that lower bit insertions are performed first, provided that M1 and M2
  // do not overlap. This can allow multiple BFI instructions to be combined
  // together by the other folds above.
14963 | if (N->getOperand(Num: 0).getOpcode() == ARMISD::BFI) { |
14964 | APInt ToMask1 = ~N->getConstantOperandAPInt(Num: 2); |
14965 | APInt ToMask2 = ~N0.getConstantOperandAPInt(i: 2); |
14966 | |
14967 | if (!N0.hasOneUse() || (ToMask1 & ToMask2) != 0 || |
14968 | ToMask1.countl_zero() < ToMask2.countl_zero()) |
14969 | return SDValue(); |
14970 | |
14971 | EVT VT = N->getValueType(ResNo: 0); |
14972 | SDLoc dl(N); |
14973 | SDValue BFI1 = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: N0.getOperand(i: 0), |
14974 | N2: N->getOperand(Num: 1), N3: N->getOperand(Num: 2)); |
14975 | return DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: BFI1, N2: N0.getOperand(i: 1), |
14976 | N3: N0.getOperand(i: 2)); |
14977 | } |
14978 | |
14979 | return SDValue(); |
14980 | } |
14981 | |
// Check that N is CMPZ(CSINC(0, 0, CC, X))
// or CMPZ(CMOV(1, 0, CC, $cpsr, X)),
// and return X if valid.
14985 | static SDValue IsCMPZCSINC(SDNode *Cmp, ARMCC::CondCodes &CC) { |
14986 | if (Cmp->getOpcode() != ARMISD::CMPZ || !isNullConstant(V: Cmp->getOperand(Num: 1))) |
14987 | return SDValue(); |
14988 | SDValue CSInc = Cmp->getOperand(Num: 0); |
14989 | |
14990 | // Ignore any `And 1` nodes that may not yet have been removed. We are |
14991 | // looking for a value that produces 1/0, so these have no effect on the |
14992 | // code. |
14993 | while (CSInc.getOpcode() == ISD::AND && |
14994 | isa<ConstantSDNode>(Val: CSInc.getOperand(i: 1)) && |
14995 | CSInc.getConstantOperandVal(i: 1) == 1 && CSInc->hasOneUse()) |
14996 | CSInc = CSInc.getOperand(i: 0); |
14997 | |
14998 | if (CSInc.getOpcode() == ARMISD::CSINC && |
14999 | isNullConstant(V: CSInc.getOperand(i: 0)) && |
15000 | isNullConstant(V: CSInc.getOperand(i: 1)) && CSInc->hasOneUse()) { |
15001 | CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2); |
15002 | return CSInc.getOperand(i: 3); |
15003 | } |
15004 | if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(V: CSInc.getOperand(i: 0)) && |
15005 | isNullConstant(V: CSInc.getOperand(i: 1)) && CSInc->hasOneUse()) { |
15006 | CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2); |
15007 | return CSInc.getOperand(i: 4); |
15008 | } |
15009 | if (CSInc.getOpcode() == ARMISD::CMOV && isOneConstant(V: CSInc.getOperand(i: 1)) && |
15010 | isNullConstant(V: CSInc.getOperand(i: 0)) && CSInc->hasOneUse()) { |
15011 | CC = ARMCC::getOppositeCondition( |
15012 | CC: (ARMCC::CondCodes)CSInc.getConstantOperandVal(i: 2)); |
15013 | return CSInc.getOperand(i: 4); |
15014 | } |
15015 | return SDValue(); |
15016 | } |
15017 | |
15018 | static SDValue PerformCMPZCombine(SDNode *N, SelectionDAG &DAG) { |
15019 | // Given CMPZ(CSINC(C, 0, 0, EQ), 0), we can just use C directly. As in |
15020 | // t92: glue = ARMISD::CMPZ t74, 0 |
15021 | // t93: i32 = ARMISD::CSINC 0, 0, 1, t92 |
15022 | // t96: glue = ARMISD::CMPZ t93, 0 |
15023 | // t114: i32 = ARMISD::CSINV 0, 0, 0, t96 |
15024 | ARMCC::CondCodes Cond; |
15025 | if (SDValue C = IsCMPZCSINC(Cmp: N, CC&: Cond)) |
15026 | if (Cond == ARMCC::EQ) |
15027 | return C; |
15028 | return SDValue(); |
15029 | } |
15030 | |
15031 | static SDValue PerformCSETCombine(SDNode *N, SelectionDAG &DAG) { |
  // Fold away an unnecessary CMPZ/CSINC
15033 | // CSXYZ A, B, C1 (CMPZ (CSINC 0, 0, C2, D), 0) -> |
15034 | // if C1==EQ -> CSXYZ A, B, C2, D |
15035 | // if C1==NE -> CSXYZ A, B, NOT(C2), D |
15036 | ARMCC::CondCodes Cond; |
15037 | if (SDValue C = IsCMPZCSINC(Cmp: N->getOperand(Num: 3).getNode(), CC&: Cond)) { |
15038 | if (N->getConstantOperandVal(Num: 2) == ARMCC::EQ) |
15039 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
15040 | N2: N->getOperand(Num: 1), |
15041 | N3: DAG.getConstant(Val: Cond, DL: SDLoc(N), VT: MVT::i32), N4: C); |
15042 | if (N->getConstantOperandVal(Num: 2) == ARMCC::NE) |
15043 | return DAG.getNode( |
15044 | Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
15045 | N2: N->getOperand(Num: 1), |
15046 | N3: DAG.getConstant(Val: ARMCC::getOppositeCondition(CC: Cond), DL: SDLoc(N), VT: MVT::i32), N4: C); |
15047 | } |
15048 | return SDValue(); |
15049 | } |
15050 | |
15051 | /// PerformVMOVRRDCombine - Target-specific dag combine xforms for |
15052 | /// ARMISD::VMOVRRD. |
15053 | static SDValue PerformVMOVRRDCombine(SDNode *N, |
15054 | TargetLowering::DAGCombinerInfo &DCI, |
15055 | const ARMSubtarget *Subtarget) { |
15056 | // vmovrrd(vmovdrr x, y) -> x,y |
15057 | SDValue InDouble = N->getOperand(Num: 0); |
15058 | if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64()) |
15059 | return DCI.CombineTo(N, Res0: InDouble.getOperand(i: 0), Res1: InDouble.getOperand(i: 1)); |
15060 | |
15061 | // vmovrrd(load f64) -> (load i32), (load i32) |
15062 | SDNode *InNode = InDouble.getNode(); |
15063 | if (ISD::isNormalLoad(N: InNode) && InNode->hasOneUse() && |
15064 | InNode->getValueType(ResNo: 0) == MVT::f64 && |
15065 | InNode->getOperand(Num: 1).getOpcode() == ISD::FrameIndex && |
15066 | !cast<LoadSDNode>(Val: InNode)->isVolatile()) { |
15067 | // TODO: Should this be done for non-FrameIndex operands? |
15068 | LoadSDNode *LD = cast<LoadSDNode>(Val: InNode); |
15069 | |
15070 | SelectionDAG &DAG = DCI.DAG; |
15071 | SDLoc DL(LD); |
15072 | SDValue BasePtr = LD->getBasePtr(); |
15073 | SDValue NewLD1 = |
15074 | DAG.getLoad(VT: MVT::i32, dl: DL, Chain: LD->getChain(), Ptr: BasePtr, PtrInfo: LD->getPointerInfo(), |
15075 | Alignment: LD->getAlign(), MMOFlags: LD->getMemOperand()->getFlags()); |
15076 | |
15077 | SDValue OffsetPtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: MVT::i32, N1: BasePtr, |
15078 | N2: DAG.getConstant(Val: 4, DL, VT: MVT::i32)); |
15079 | |
15080 | SDValue NewLD2 = DAG.getLoad(VT: MVT::i32, dl: DL, Chain: LD->getChain(), Ptr: OffsetPtr, |
15081 | PtrInfo: LD->getPointerInfo().getWithOffset(O: 4), |
15082 | Alignment: commonAlignment(A: LD->getAlign(), Offset: 4), |
15083 | MMOFlags: LD->getMemOperand()->getFlags()); |
15084 | |
15085 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewLD2.getValue(R: 1)); |
15086 | if (DCI.DAG.getDataLayout().isBigEndian()) |
      std::swap(a&: NewLD1, b&: NewLD2);
15088 | SDValue Result = DCI.CombineTo(N, Res0: NewLD1, Res1: NewLD2); |
15089 | return Result; |
15090 | } |
15091 | |
15092 | // VMOVRRD(extract(..(build_vector(a, b, c, d)))) -> a,b or c,d |
15093 | // VMOVRRD(extract(insert_vector(insert_vector(.., a, l1), b, l2))) -> a,b |
15094 | if (InDouble.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
15095 | isa<ConstantSDNode>(Val: InDouble.getOperand(i: 1))) { |
15096 | SDValue BV = InDouble.getOperand(i: 0); |
    // Look up through any nop bitcasts and vector_reg_casts. Bitcasts may
    // change lane order under big endian.
15099 | bool BVSwap = BV.getOpcode() == ISD::BITCAST; |
15100 | while ( |
15101 | (BV.getOpcode() == ISD::BITCAST || |
15102 | BV.getOpcode() == ARMISD::VECTOR_REG_CAST) && |
15103 | (BV.getValueType() == MVT::v2f64 || BV.getValueType() == MVT::v2i64)) { |
15104 | BVSwap = BV.getOpcode() == ISD::BITCAST; |
15105 | BV = BV.getOperand(i: 0); |
15106 | } |
15107 | if (BV.getValueType() != MVT::v4i32) |
15108 | return SDValue(); |
15109 | |
15110 | // Handle buildvectors, pulling out the correct lane depending on |
15111 | // endianness. |
15112 | unsigned Offset = InDouble.getConstantOperandVal(i: 1) == 1 ? 2 : 0; |
15113 | if (BV.getOpcode() == ISD::BUILD_VECTOR) { |
15114 | SDValue Op0 = BV.getOperand(i: Offset); |
15115 | SDValue Op1 = BV.getOperand(i: Offset + 1); |
15116 | if (!Subtarget->isLittle() && BVSwap) |
15117 | std::swap(a&: Op0, b&: Op1); |
15118 | |
15119 | return DCI.DAG.getMergeValues(Ops: {Op0, Op1}, dl: SDLoc(N)); |
15120 | } |
15121 | |
15122 | // A chain of insert_vectors, grabbing the correct value of the chain of |
15123 | // inserts. |
15124 | SDValue Op0, Op1; |
15125 | while (BV.getOpcode() == ISD::INSERT_VECTOR_ELT) { |
15126 | if (isa<ConstantSDNode>(Val: BV.getOperand(i: 2))) { |
15127 | if (BV.getConstantOperandVal(i: 2) == Offset) |
15128 | Op0 = BV.getOperand(i: 1); |
15129 | if (BV.getConstantOperandVal(i: 2) == Offset + 1) |
15130 | Op1 = BV.getOperand(i: 1); |
15131 | } |
15132 | BV = BV.getOperand(i: 0); |
15133 | } |
15134 | if (!Subtarget->isLittle() && BVSwap) |
15135 | std::swap(a&: Op0, b&: Op1); |
15136 | if (Op0 && Op1) |
15137 | return DCI.DAG.getMergeValues(Ops: {Op0, Op1}, dl: SDLoc(N)); |
15138 | } |
15139 | |
15140 | return SDValue(); |
15141 | } |
15142 | |
15143 | /// PerformVMOVDRRCombine - Target-specific dag combine xforms for |
15144 | /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. |
15145 | static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { |
15146 | // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) |
15147 | SDValue Op0 = N->getOperand(Num: 0); |
15148 | SDValue Op1 = N->getOperand(Num: 1); |
15149 | if (Op0.getOpcode() == ISD::BITCAST) |
15150 | Op0 = Op0.getOperand(i: 0); |
15151 | if (Op1.getOpcode() == ISD::BITCAST) |
15152 | Op1 = Op1.getOperand(i: 0); |
15153 | if (Op0.getOpcode() == ARMISD::VMOVRRD && |
15154 | Op0.getNode() == Op1.getNode() && |
15155 | Op0.getResNo() == 0 && Op1.getResNo() == 1) |
15156 | return DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(N), |
15157 | VT: N->getValueType(ResNo: 0), Operand: Op0.getOperand(i: 0)); |
15158 | return SDValue(); |
15159 | } |
15160 | |
15161 | static SDValue PerformVMOVhrCombine(SDNode *N, |
15162 | TargetLowering::DAGCombinerInfo &DCI) { |
15163 | SDValue Op0 = N->getOperand(Num: 0); |
15164 | |
15165 | // VMOVhr (VMOVrh (X)) -> X |
15166 | if (Op0->getOpcode() == ARMISD::VMOVrh) |
15167 | return Op0->getOperand(Num: 0); |
15168 | |
  // FullFP16: half values are passed in S-registers, and we don't
  // need any of the bitcasts and moves:
15171 | // |
15172 | // t2: f32,ch1,gl1? = CopyFromReg ch, Register:f32 %0, gl? |
15173 | // t5: i32 = bitcast t2 |
15174 | // t18: f16 = ARMISD::VMOVhr t5 |
15175 | // => |
15176 | // tN: f16,ch2,gl2? = CopyFromReg ch, Register::f32 %0, gl? |
15177 | if (Op0->getOpcode() == ISD::BITCAST) { |
15178 | SDValue Copy = Op0->getOperand(Num: 0); |
15179 | if (Copy.getValueType() == MVT::f32 && |
15180 | Copy->getOpcode() == ISD::CopyFromReg) { |
15181 | bool HasGlue = Copy->getNumOperands() == 3; |
15182 | SDValue Ops[] = {Copy->getOperand(Num: 0), Copy->getOperand(Num: 1), |
15183 | HasGlue ? Copy->getOperand(Num: 2) : SDValue()}; |
15184 | EVT OutTys[] = {N->getValueType(ResNo: 0), MVT::Other, MVT::Glue}; |
15185 | SDValue NewCopy = |
15186 | DCI.DAG.getNode(Opcode: ISD::CopyFromReg, DL: SDLoc(N), |
15187 | VTList: DCI.DAG.getVTList(VTs: ArrayRef(OutTys, HasGlue ? 3 : 2)), |
15188 | Ops: ArrayRef(Ops, HasGlue ? 3 : 2)); |
15189 | |
15190 | // Update Users, Chains, and Potential Glue. |
15191 | DCI.DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: NewCopy.getValue(R: 0)); |
15192 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Copy.getValue(R: 1), To: NewCopy.getValue(R: 1)); |
15193 | if (HasGlue) |
15194 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Copy.getValue(R: 2), |
15195 | To: NewCopy.getValue(R: 2)); |
15196 | |
15197 | return NewCopy; |
15198 | } |
15199 | } |
15200 | |
15201 | // fold (VMOVhr (load x)) -> (load (f16*)x) |
15202 | if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Val&: Op0)) { |
15203 | if (LN0->hasOneUse() && LN0->isUnindexed() && |
15204 | LN0->getMemoryVT() == MVT::i16) { |
15205 | SDValue Load = |
15206 | DCI.DAG.getLoad(VT: N->getValueType(ResNo: 0), dl: SDLoc(N), Chain: LN0->getChain(), |
15207 | Ptr: LN0->getBasePtr(), MMO: LN0->getMemOperand()); |
15208 | DCI.DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: Load.getValue(R: 0)); |
15209 | DCI.DAG.ReplaceAllUsesOfValueWith(From: Op0.getValue(R: 1), To: Load.getValue(R: 1)); |
15210 | return Load; |
15211 | } |
15212 | } |
15213 | |
15214 | // Only the bottom 16 bits of the source register are used. |
15215 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: 16); |
15216 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
15217 | if (TLI.SimplifyDemandedBits(Op: Op0, DemandedBits: DemandedMask, DCI)) |
15218 | return SDValue(N, 0); |
15219 | |
15220 | return SDValue(); |
15221 | } |
15222 | |
15223 | static SDValue PerformVMOVrhCombine(SDNode *N, SelectionDAG &DAG) { |
15224 | SDValue N0 = N->getOperand(Num: 0); |
15225 | EVT VT = N->getValueType(ResNo: 0); |
15226 | |
15227 | // fold (VMOVrh (fpconst x)) -> const x |
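  // For instance, a ConstantFP of 1.0 in half precision becomes the integer
  // constant 0x3c00, its raw f16 bit pattern.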
15228 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val&: N0)) { |
15229 | APFloat V = C->getValueAPF(); |
15230 | return DAG.getConstant(Val: V.bitcastToAPInt().getZExtValue(), DL: SDLoc(N), VT); |
15231 | } |
15232 | |
15233 | // fold (VMOVrh (load x)) -> (zextload (i16*)x) |
15234 | if (ISD::isNormalLoad(N: N0.getNode()) && N0.hasOneUse()) { |
15235 | LoadSDNode *LN0 = cast<LoadSDNode>(Val&: N0); |
15236 | |
15237 | SDValue Load = |
15238 | DAG.getExtLoad(ExtType: ISD::ZEXTLOAD, dl: SDLoc(N), VT, Chain: LN0->getChain(), |
15239 | Ptr: LN0->getBasePtr(), MemVT: MVT::i16, MMO: LN0->getMemOperand()); |
15240 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(N, 0), To: Load.getValue(R: 0)); |
15241 | DAG.ReplaceAllUsesOfValueWith(From: N0.getValue(R: 1), To: Load.getValue(R: 1)); |
15242 | return Load; |
15243 | } |
15244 | |
15245 | // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n) |
15246 | if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
15247 | isa<ConstantSDNode>(Val: N0->getOperand(Num: 1))) |
15248 | return DAG.getNode(Opcode: ARMISD::VGETLANEu, DL: SDLoc(N), VT, N1: N0->getOperand(Num: 0), |
15249 | N2: N0->getOperand(Num: 1)); |
15250 | |
15251 | return SDValue(); |
15252 | } |
15253 | |
15254 | /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node |
15255 | /// are normal, non-volatile loads. If so, it is profitable to bitcast an |
15256 | /// i64 vector to have f64 elements, since the value can then be loaded |
15257 | /// directly into a VFP register. |
15258 | static bool hasNormalLoadOperand(SDNode *N) { |
15259 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
15260 | for (unsigned i = 0; i < NumElts; ++i) { |
15261 | SDNode *Elt = N->getOperand(Num: i).getNode(); |
15262 | if (ISD::isNormalLoad(N: Elt) && !cast<LoadSDNode>(Val: Elt)->isVolatile()) |
15263 | return true; |
15264 | } |
15265 | return false; |
15266 | } |
15267 | |
15268 | /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for |
15269 | /// ISD::BUILD_VECTOR. |
15270 | static SDValue PerformBUILD_VECTORCombine(SDNode *N, |
15271 | TargetLowering::DAGCombinerInfo &DCI, |
15272 | const ARMSubtarget *Subtarget) { |
15273 | // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): |
15274 | // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value |
15275 | // into a pair of GPRs, which is fine when the value is used as a scalar, |
15276 | // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. |
15277 | SelectionDAG &DAG = DCI.DAG; |
15278 | if (N->getNumOperands() == 2) |
15279 | if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) |
15280 | return RV; |
15281 | |
15282 | // Load i64 elements as f64 values so that type legalization does not split |
15283 | // them up into i32 values. |
15284 | EVT VT = N->getValueType(ResNo: 0); |
15285 | if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) |
15286 | return SDValue(); |
15287 | SDLoc dl(N); |
15288 | SmallVector<SDValue, 8> Ops; |
15289 | unsigned NumElts = VT.getVectorNumElements(); |
15290 | for (unsigned i = 0; i < NumElts; ++i) { |
15291 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: N->getOperand(Num: i)); |
15292 | Ops.push_back(Elt: V); |
15293 | // Make the DAGCombiner fold the bitcast. |
15294 | DCI.AddToWorklist(N: V.getNode()); |
15295 | } |
15296 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, NumElements: NumElts); |
15297 | SDValue BV = DAG.getBuildVector(VT: FloatVT, DL: dl, Ops); |
15298 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: BV); |
15299 | } |
15300 | |
15301 | /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. |
15302 | static SDValue |
15303 | PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
15304 | // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. |
15305 | // At that time, we may have inserted bitcasts from integer to float. |
15306 | // If these bitcasts have survived DAGCombine, change the lowering of this |
  // BUILD_VECTOR into something more vector friendly, i.e., one that does not
  // force the use of floating point types.
15309 | |
15310 | // Make sure we can change the type of the vector. |
15311 | // This is possible iff: |
  // 1. The vector is only used in a bitcast to an integer type. I.e.,
15313 | // 1.1. Vector is used only once. |
15314 | // 1.2. Use is a bit convert to an integer type. |
15315 | // 2. The size of its operands are 32-bits (64-bits are not legal). |
15316 | EVT VT = N->getValueType(ResNo: 0); |
15317 | EVT EltVT = VT.getVectorElementType(); |
15318 | |
15319 | // Check 1.1. and 2. |
15320 | if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) |
15321 | return SDValue(); |
15322 | |
15323 | // By construction, the input type must be float. |
  assert(EltVT == MVT::f32 && "Unexpected type!");
15325 | |
15326 | // Check 1.2. |
15327 | SDNode *Use = *N->use_begin(); |
15328 | if (Use->getOpcode() != ISD::BITCAST || |
15329 | Use->getValueType(ResNo: 0).isFloatingPoint()) |
15330 | return SDValue(); |
15331 | |
15332 | // Check profitability. |
15333 | // Model is, if more than half of the relevant operands are bitcast from |
15334 | // i32, turn the build_vector into a sequence of insert_vector_elt. |
15335 | // Relevant operands are everything that is not statically |
15336 | // (i.e., at compile time) bitcasted. |
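  // For instance, a v4f32 ARMISD::BUILD_VECTOR with four relevant operands
  // needs at least three of them to be bitcasts from i32 before the rewrite
  // below is considered profitable.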
15337 | unsigned NumOfBitCastedElts = 0; |
15338 | unsigned NumElts = VT.getVectorNumElements(); |
15339 | unsigned NumOfRelevantElts = NumElts; |
15340 | for (unsigned Idx = 0; Idx < NumElts; ++Idx) { |
15341 | SDValue Elt = N->getOperand(Num: Idx); |
15342 | if (Elt->getOpcode() == ISD::BITCAST) { |
15343 | // Assume only bit cast to i32 will go away. |
15344 | if (Elt->getOperand(Num: 0).getValueType() == MVT::i32) |
15345 | ++NumOfBitCastedElts; |
15346 | } else if (Elt.isUndef() || isa<ConstantSDNode>(Val: Elt)) |
15347 | // Constants are statically casted, thus do not count them as |
15348 | // relevant operands. |
15349 | --NumOfRelevantElts; |
15350 | } |
15351 | |
15352 | // Check if more than half of the elements require a non-free bitcast. |
15353 | if (NumOfBitCastedElts <= NumOfRelevantElts / 2) |
15354 | return SDValue(); |
15355 | |
15356 | SelectionDAG &DAG = DCI.DAG; |
15357 | // Create the new vector type. |
15358 | EVT VecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::i32, NumElements: NumElts); |
15359 | // Check if the type is legal. |
15360 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
15361 | if (!TLI.isTypeLegal(VT: VecVT)) |
15362 | return SDValue(); |
15363 | |
15364 | // Combine: |
15365 | // ARMISD::BUILD_VECTOR E1, E2, ..., EN. |
15366 | // => BITCAST INSERT_VECTOR_ELT |
15367 | // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), |
15368 | // (BITCAST EN), N. |
15369 | SDValue Vec = DAG.getUNDEF(VT: VecVT); |
15370 | SDLoc dl(N); |
15371 | for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { |
15372 | SDValue V = N->getOperand(Num: Idx); |
15373 | if (V.isUndef()) |
15374 | continue; |
15375 | if (V.getOpcode() == ISD::BITCAST && |
15376 | V->getOperand(Num: 0).getValueType() == MVT::i32) |
15377 | // Fold obvious case. |
15378 | V = V.getOperand(i: 0); |
15379 | else { |
15380 | V = DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(V), VT: MVT::i32, Operand: V); |
15381 | // Make the DAGCombiner fold the bitcasts. |
15382 | DCI.AddToWorklist(N: V.getNode()); |
15383 | } |
15384 | SDValue LaneIdx = DAG.getConstant(Val: Idx, DL: dl, VT: MVT::i32); |
15385 | Vec = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: VecVT, N1: Vec, N2: V, N3: LaneIdx); |
15386 | } |
15387 | Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Vec); |
15388 | // Make the DAGCombiner fold the bitcasts. |
15389 | DCI.AddToWorklist(N: Vec.getNode()); |
15390 | return Vec; |
15391 | } |
15392 | |
15393 | static SDValue |
15394 | PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
15395 | EVT VT = N->getValueType(ResNo: 0); |
15396 | SDValue Op = N->getOperand(Num: 0); |
15397 | SDLoc dl(N); |
15398 | |
15399 | // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x) |
15400 | if (Op->getOpcode() == ARMISD::PREDICATE_CAST) { |
15401 | // If the valuetypes are the same, we can remove the cast entirely. |
15402 | if (Op->getOperand(Num: 0).getValueType() == VT) |
15403 | return Op->getOperand(Num: 0); |
15404 | return DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
15405 | } |
15406 | |
15407 | // Turn pred_cast(xor x, -1) into xor(pred_cast x, -1), in order to produce |
15408 | // more VPNOT which might get folded as else predicates. |
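  // The constant 65535 is the all-true value of the 16-bit predicate; since
  // only the bottom 16 bits of the i32 are live, xoring with 0xffff after the
  // cast is equivalent to the bitwise-not before it.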
15409 | if (Op.getValueType() == MVT::i32 && isBitwiseNot(V: Op)) { |
15410 | SDValue X = |
15411 | DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
15412 | SDValue C = DCI.DAG.getNode(Opcode: ARMISD::PREDICATE_CAST, DL: dl, VT, |
15413 | Operand: DCI.DAG.getConstant(Val: 65535, DL: dl, VT: MVT::i32)); |
15414 | return DCI.DAG.getNode(Opcode: ISD::XOR, DL: dl, VT, N1: X, N2: C); |
15415 | } |
15416 | |
15417 | // Only the bottom 16 bits of the source register are used. |
15418 | if (Op.getValueType() == MVT::i32) { |
15419 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: 16); |
15420 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
15421 | if (TLI.SimplifyDemandedBits(Op, DemandedBits: DemandedMask, DCI)) |
15422 | return SDValue(N, 0); |
15423 | } |
15424 | return SDValue(); |
15425 | } |
15426 | |
15427 | static SDValue PerformVECTOR_REG_CASTCombine(SDNode *N, SelectionDAG &DAG, |
15428 | const ARMSubtarget *ST) { |
15429 | EVT VT = N->getValueType(ResNo: 0); |
15430 | SDValue Op = N->getOperand(Num: 0); |
15431 | SDLoc dl(N); |
15432 | |
15433 | // Under Little endian, a VECTOR_REG_CAST is equivalent to a BITCAST |
15434 | if (ST->isLittle()) |
15435 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: Op); |
15436 | |
15437 | // VECTOR_REG_CAST undef -> undef |
15438 | if (Op.isUndef()) |
15439 | return DAG.getUNDEF(VT); |
15440 | |
15441 | // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x) |
15442 | if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) { |
15443 | // If the valuetypes are the same, we can remove the cast entirely. |
15444 | if (Op->getOperand(Num: 0).getValueType() == VT) |
15445 | return Op->getOperand(Num: 0); |
15446 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT, Operand: Op->getOperand(Num: 0)); |
15447 | } |
15448 | |
15449 | return SDValue(); |
15450 | } |
15451 | |
15452 | static SDValue PerformVCMPCombine(SDNode *N, SelectionDAG &DAG, |
15453 | const ARMSubtarget *Subtarget) { |
15454 | if (!Subtarget->hasMVEIntegerOps()) |
15455 | return SDValue(); |
15456 | |
15457 | EVT VT = N->getValueType(ResNo: 0); |
15458 | SDValue Op0 = N->getOperand(Num: 0); |
15459 | SDValue Op1 = N->getOperand(Num: 1); |
15460 | ARMCC::CondCodes Cond = (ARMCC::CondCodes)N->getConstantOperandVal(Num: 2); |
15461 | SDLoc dl(N); |
15462 | |
15463 | // vcmp X, 0, cc -> vcmpz X, cc |
15464 | if (isZeroVector(N: Op1)) |
15465 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Op0, N2: N->getOperand(Num: 2)); |
15466 | |
15467 | unsigned SwappedCond = getSwappedCondition(CC: Cond); |
15468 | if (isValidMVECond(CC: SwappedCond, IsFloat: VT.isFloatingPoint())) { |
15469 | // vcmp 0, X, cc -> vcmpz X, reversed(cc) |
15470 | if (isZeroVector(N: Op0)) |
15471 | return DAG.getNode(Opcode: ARMISD::VCMPZ, DL: dl, VT, N1: Op1, |
15472 | N2: DAG.getConstant(Val: SwappedCond, DL: dl, VT: MVT::i32)); |
15473 | // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc) |
15474 | if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP) |
15475 | return DAG.getNode(Opcode: ARMISD::VCMP, DL: dl, VT, N1: Op1, N2: Op0, |
15476 | N3: DAG.getConstant(Val: SwappedCond, DL: dl, VT: MVT::i32)); |
15477 | } |
15478 | |
15479 | return SDValue(); |
15480 | } |
15481 | |
15482 | /// PerformInsertEltCombine - Target-specific dag combine xforms for |
15483 | /// ISD::INSERT_VECTOR_ELT. |
15484 | static SDValue PerformInsertEltCombine(SDNode *N, |
15485 | TargetLowering::DAGCombinerInfo &DCI) { |
15486 | // Bitcast an i64 load inserted into a vector to f64. |
15487 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
15488 | EVT VT = N->getValueType(ResNo: 0); |
15489 | SDNode *Elt = N->getOperand(Num: 1).getNode(); |
15490 | if (VT.getVectorElementType() != MVT::i64 || |
15491 | !ISD::isNormalLoad(N: Elt) || cast<LoadSDNode>(Val: Elt)->isVolatile()) |
15492 | return SDValue(); |
15493 | |
15494 | SelectionDAG &DAG = DCI.DAG; |
15495 | SDLoc dl(N); |
15496 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, |
15497 | NumElements: VT.getVectorNumElements()); |
15498 | SDValue Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: FloatVT, Operand: N->getOperand(Num: 0)); |
15499 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::f64, Operand: N->getOperand(Num: 1)); |
15500 | // Make the DAGCombiner fold the bitcasts. |
15501 | DCI.AddToWorklist(N: Vec.getNode()); |
15502 | DCI.AddToWorklist(N: V.getNode()); |
15503 | SDValue InsElt = DAG.getNode(Opcode: ISD::INSERT_VECTOR_ELT, DL: dl, VT: FloatVT, |
15504 | N1: Vec, N2: V, N3: N->getOperand(Num: 2)); |
15505 | return DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: InsElt); |
15506 | } |
15507 | |
15508 | // Convert a pair of extracts from the same base vector to a VMOVRRD. Either |
15509 | // directly or bitcast to an integer if the original is a float vector. |
15510 | // extract(x, n); extract(x, n+1) -> VMOVRRD(extract v2f64 x, n/2) |
15511 | // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD(extract x, n/2) |
static SDValue
PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
15514 | EVT VT = N->getValueType(ResNo: 0); |
15515 | SDLoc dl(N); |
15516 | |
15517 | if (!DCI.isAfterLegalizeDAG() || VT != MVT::i32 || |
15518 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: MVT::f64)) |
15519 | return SDValue(); |
15520 | |
15521 | SDValue Ext = SDValue(N, 0); |
15522 | if (Ext.getOpcode() == ISD::BITCAST && |
15523 | Ext.getOperand(i: 0).getValueType() == MVT::f32) |
15524 | Ext = Ext.getOperand(i: 0); |
15525 | if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
15526 | !isa<ConstantSDNode>(Val: Ext.getOperand(i: 1)) || |
15527 | Ext.getConstantOperandVal(i: 1) % 2 != 0) |
15528 | return SDValue(); |
15529 | if (Ext->use_size() == 1 && |
15530 | (Ext->use_begin()->getOpcode() == ISD::SINT_TO_FP || |
15531 | Ext->use_begin()->getOpcode() == ISD::UINT_TO_FP)) |
15532 | return SDValue(); |
15533 | |
15534 | SDValue Op0 = Ext.getOperand(i: 0); |
15535 | EVT VecVT = Op0.getValueType(); |
15536 | unsigned ResNo = Op0.getResNo(); |
15537 | unsigned Lane = Ext.getConstantOperandVal(i: 1); |
15538 | if (VecVT.getVectorNumElements() != 4) |
15539 | return SDValue(); |
15540 | |
15541 | // Find another extract, of Lane + 1 |
15542 | auto OtherIt = find_if(Range: Op0->uses(), P: [&](SDNode *V) { |
15543 | return V->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
15544 | isa<ConstantSDNode>(Val: V->getOperand(Num: 1)) && |
15545 | V->getConstantOperandVal(Num: 1) == Lane + 1 && |
15546 | V->getOperand(Num: 0).getResNo() == ResNo; |
15547 | }); |
15548 | if (OtherIt == Op0->uses().end()) |
15549 | return SDValue(); |
15550 | |
  // For float extracts, we need to be converting to an i32 for both vector
  // lanes.
15553 | SDValue OtherExt(*OtherIt, 0); |
15554 | if (OtherExt.getValueType() != MVT::i32) { |
15555 | if (OtherExt->use_size() != 1 || |
15556 | OtherExt->use_begin()->getOpcode() != ISD::BITCAST || |
15557 | OtherExt->use_begin()->getValueType(ResNo: 0) != MVT::i32) |
15558 | return SDValue(); |
15559 | OtherExt = SDValue(*OtherExt->use_begin(), 0); |
15560 | } |
15561 | |
15562 | // Convert the type to a f64 and extract with a VMOVRRD. |
15563 | SDValue F64 = DCI.DAG.getNode( |
15564 | Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, |
15565 | N1: DCI.DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: dl, VT: MVT::v2f64, Operand: Op0), |
15566 | N2: DCI.DAG.getConstant(Val: Ext.getConstantOperandVal(i: 1) / 2, DL: dl, VT: MVT::i32)); |
15567 | SDValue VMOVRRD = |
15568 | DCI.DAG.getNode(Opcode: ARMISD::VMOVRRD, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops: F64); |
15569 | |
15570 | DCI.CombineTo(N: OtherExt.getNode(), Res: SDValue(VMOVRRD.getNode(), 1)); |
15571 | return VMOVRRD; |
15572 | } |
15573 | |
15574 | static SDValue PerformExtractEltCombine(SDNode *N,
15575 | TargetLowering::DAGCombinerInfo &DCI, |
15576 | const ARMSubtarget *ST) { |
15577 | SDValue Op0 = N->getOperand(Num: 0); |
15578 | EVT VT = N->getValueType(ResNo: 0); |
15579 | SDLoc dl(N); |
15580 | |
15581 | // extract (vdup x) -> x |
15582 | if (Op0->getOpcode() == ARMISD::VDUP) { |
15583 | SDValue X = Op0->getOperand(Num: 0); |
15584 | if (VT == MVT::f16 && X.getValueType() == MVT::i32) |
15585 | return DCI.DAG.getNode(Opcode: ARMISD::VMOVhr, DL: dl, VT, Operand: X); |
15586 | if (VT == MVT::i32 && X.getValueType() == MVT::f16) |
15587 | return DCI.DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, VT, Operand: X); |
15588 | if (VT == MVT::f32 && X.getValueType() == MVT::i32) |
15589 | return DCI.DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT, Operand: X); |
15590 | |
15591 | while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST) |
15592 | X = X->getOperand(Num: 0); |
15593 | if (X.getValueType() == VT) |
15594 | return X; |
15595 | } |
15596 | |
15597 | // extract ARM_BUILD_VECTOR -> x |
15598 | if (Op0->getOpcode() == ARMISD::BUILD_VECTOR && |
15599 | isa<ConstantSDNode>(Val: N->getOperand(Num: 1)) && |
15600 | N->getConstantOperandVal(Num: 1) < Op0.getNumOperands()) { |
15601 | return Op0.getOperand(i: N->getConstantOperandVal(Num: 1)); |
15602 | } |
15603 | |
15604 | // extract(bitcast(BUILD_VECTOR(VMOVDRR(a, b), ..))) -> a or b |
15605 | if (Op0.getValueType() == MVT::v4i32 && |
15606 | isa<ConstantSDNode>(Val: N->getOperand(Num: 1)) && |
15607 | Op0.getOpcode() == ISD::BITCAST && |
15608 | Op0.getOperand(i: 0).getOpcode() == ISD::BUILD_VECTOR && |
15609 | Op0.getOperand(i: 0).getValueType() == MVT::v2f64) { |
15610 | SDValue BV = Op0.getOperand(i: 0); |
15611 | unsigned Offset = N->getConstantOperandVal(Num: 1); |
15612 | SDValue MOV = BV.getOperand(i: Offset < 2 ? 0 : 1); |
15613 | if (MOV.getOpcode() == ARMISD::VMOVDRR) |
15614 | return MOV.getOperand(i: ST->isLittle() ? Offset % 2 : 1 - Offset % 2); |
15615 | } |
15616 | |
15617 | // extract x, n; extract x, n+1 -> VMOVRRD x |
15618 | if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) |
15619 | return R; |
15620 | |
15621 | // extract (MVETrunc(x)) -> extract x |
15622 | if (Op0->getOpcode() == ARMISD::MVETRUNC) { |
15623 | unsigned Idx = N->getConstantOperandVal(Num: 1); |
15624 | unsigned Vec = |
15625 | Idx / Op0->getOperand(Num: 0).getValueType().getVectorNumElements(); |
15626 | unsigned SubIdx = |
15627 | Idx % Op0->getOperand(Num: 0).getValueType().getVectorNumElements(); |
15628 | return DCI.DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT, N1: Op0.getOperand(i: Vec), |
15629 | N2: DCI.DAG.getConstant(Val: SubIdx, DL: dl, VT: MVT::i32)); |
15630 | } |
15631 | |
15632 | return SDValue(); |
15633 | } |
15634 | |
15635 | static SDValue PerformSignExtendInregCombine(SDNode *N, SelectionDAG &DAG) { |
15636 | SDValue Op = N->getOperand(Num: 0); |
15637 | EVT VT = N->getValueType(ResNo: 0); |
15638 | |
15639 | // sext_inreg(VGETLANEu) -> VGETLANEs |
15640 | if (Op.getOpcode() == ARMISD::VGETLANEu && |
15641 | cast<VTSDNode>(Val: N->getOperand(Num: 1))->getVT() == |
15642 | Op.getOperand(i: 0).getValueType().getScalarType()) |
15643 | return DAG.getNode(Opcode: ARMISD::VGETLANEs, DL: SDLoc(N), VT, N1: Op.getOperand(i: 0), |
15644 | N2: Op.getOperand(i: 1)); |
15645 | |
15646 | return SDValue(); |
15647 | } |
15648 | |
15649 | static SDValue |
15650 | PerformInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
15651 | SDValue Vec = N->getOperand(Num: 0); |
15652 | SDValue SubVec = N->getOperand(Num: 1); |
15653 | uint64_t IdxVal = N->getConstantOperandVal(Num: 2); |
15654 | EVT VecVT = Vec.getValueType(); |
15655 | EVT SubVT = SubVec.getValueType(); |
15656 | |
15657 | // Only do this for legal fixed vector types. |
15658 | if (!VecVT.isFixedLengthVector() || |
15659 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: VecVT) || |
15660 | !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: SubVT)) |
15661 | return SDValue(); |
15662 | |
15663 | // Ignore widening patterns. |
15664 | if (IdxVal == 0 && Vec.isUndef()) |
15665 | return SDValue(); |
15666 | |
15667 | // Subvector must be half the width and an "aligned" insertion. |
15668 | unsigned NumSubElts = SubVT.getVectorNumElements(); |
15669 | if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() || |
15670 | (IdxVal != 0 && IdxVal != NumSubElts)) |
15671 | return SDValue(); |
15672 | |
15673 | // Fold insert_subvector -> concat_vectors |
15674 | // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi)) |
15675 | // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub) |
15676 | SDLoc DL(N); |
15677 | SDValue Lo, Hi; |
15678 | if (IdxVal == 0) { |
15679 | Lo = SubVec; |
15680 | Hi = DCI.DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: SubVT, N1: Vec, |
15681 | N2: DCI.DAG.getVectorIdxConstant(Val: NumSubElts, DL)); |
15682 | } else { |
15683 | Lo = DCI.DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: SubVT, N1: Vec, |
15684 | N2: DCI.DAG.getVectorIdxConstant(Val: 0, DL)); |
15685 | Hi = SubVec; |
15686 | } |
15687 | return DCI.DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: VecVT, N1: Lo, N2: Hi); |
15688 | } |
15689 | |
15690 | // shuffle(MVETrunc(x, y)) -> VMOVN(x, y) |
15691 | static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N, |
15692 | SelectionDAG &DAG) { |
15693 | SDValue Trunc = N->getOperand(Num: 0); |
15694 | EVT VT = Trunc.getValueType(); |
15695 | if (Trunc.getOpcode() != ARMISD::MVETRUNC || !N->getOperand(Num: 1).isUndef()) |
15696 | return SDValue(); |
15697 | |
15698 | SDLoc DL(Trunc); |
15699 | if (isVMOVNTruncMask(M: N->getMask(), ToVT: VT, rev: false)) |
15700 | return DAG.getNode( |
15701 | Opcode: ARMISD::VMOVN, DL, VT, |
15702 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 0)), |
15703 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 1)), |
15704 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
15705 | else if (isVMOVNTruncMask(M: N->getMask(), ToVT: VT, rev: true)) |
15706 | return DAG.getNode( |
15707 | Opcode: ARMISD::VMOVN, DL, VT, |
15708 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 1)), |
15709 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: Trunc.getOperand(i: 0)), |
15710 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
15711 | return SDValue(); |
15712 | } |
15713 | |
15714 | /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for |
15715 | /// ISD::VECTOR_SHUFFLE. |
15716 | static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { |
15717 | if (SDValue R = PerformShuffleVMOVNCombine(N: cast<ShuffleVectorSDNode>(Val: N), DAG)) |
15718 | return R; |
15719 | |
15720 | // The LLVM shufflevector instruction does not require the shuffle mask |
15721 | // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does |
15722 | // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the |
15723 | // operands do not match the mask length, they are extended by concatenating |
15724 | // them with undef vectors. That is probably the right thing for other |
15725 | // targets, but for NEON it is better to concatenate two double-register |
15726 | // size vector operands into a single quad-register size vector. Do that |
15727 | // transformation here: |
15728 | // shuffle(concat(v1, undef), concat(v2, undef)) -> |
15729 | // shuffle(concat(v1, v2), undef) |
15730 | SDValue Op0 = N->getOperand(Num: 0); |
15731 | SDValue Op1 = N->getOperand(Num: 1); |
15732 | if (Op0.getOpcode() != ISD::CONCAT_VECTORS || |
15733 | Op1.getOpcode() != ISD::CONCAT_VECTORS || |
15734 | Op0.getNumOperands() != 2 || |
15735 | Op1.getNumOperands() != 2) |
15736 | return SDValue(); |
15737 | SDValue Concat0Op1 = Op0.getOperand(i: 1); |
15738 | SDValue Concat1Op1 = Op1.getOperand(i: 1); |
15739 | if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) |
15740 | return SDValue(); |
15741 | // Skip the transformation if any of the types are illegal. |
15742 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
15743 | EVT VT = N->getValueType(ResNo: 0); |
15744 | if (!TLI.isTypeLegal(VT) || |
15745 | !TLI.isTypeLegal(VT: Concat0Op1.getValueType()) || |
15746 | !TLI.isTypeLegal(VT: Concat1Op1.getValueType())) |
15747 | return SDValue(); |
15748 | |
15749 | SDValue NewConcat = DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL: SDLoc(N), VT, |
15750 | N1: Op0.getOperand(i: 0), N2: Op1.getOperand(i: 0)); |
15751 | // Translate the shuffle mask. |
15752 | SmallVector<int, 16> NewMask; |
15753 | unsigned NumElts = VT.getVectorNumElements(); |
15754 | unsigned HalfElts = NumElts/2; |
15755 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Val: N); |
15756 | for (unsigned n = 0; n < NumElts; ++n) { |
15757 | int MaskElt = SVN->getMaskElt(Idx: n); |
15758 | int NewElt = -1; |
15759 | if (MaskElt < (int)HalfElts) |
15760 | NewElt = MaskElt; |
15761 | else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) |
15762 | NewElt = HalfElts + MaskElt - NumElts; |
15763 | NewMask.push_back(Elt: NewElt); |
15764 | } |
15765 | return DAG.getVectorShuffle(VT, dl: SDLoc(N), N1: NewConcat, |
15766 | N2: DAG.getUNDEF(VT), Mask: NewMask); |
15767 | } |
15768 | |
15769 | /// Load/store instruction that can be merged with a base address |
15770 | /// update |
15771 | struct BaseUpdateTarget { |
15772 | SDNode *N; |
15773 | bool isIntrinsic; |
15774 | bool isStore; |
15775 | unsigned AddrOpIdx; |
15776 | }; |
15777 | |
15778 | struct BaseUpdateUser { |
15779 | /// Instruction that updates a pointer |
15780 | SDNode *N; |
15781 | /// Pointer increment operand |
15782 | SDValue Inc; |
15783 | /// Pointer increment value if it is a constant, or 0 otherwise |
15784 | unsigned ConstInc; |
15785 | }; |
15786 | |
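// Try to merge the address increment described by User into the load/store
// described by Target, producing one of the *_UPD (post-increment) nodes.
// Roughly (an illustrative sketch, not an exact DAG dump):
//   t1: v4i32,ch = load t0, tptr
//   t2: i32 = add tptr, Constant<16>
// becomes
//   t1, t2, ch = VLD1_UPD t0, tptr, Constant<16>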
15787 | static bool TryCombineBaseUpdate(struct BaseUpdateTarget &Target, |
15788 | struct BaseUpdateUser &User, |
15789 | bool SimpleConstIncOnly, |
15790 | TargetLowering::DAGCombinerInfo &DCI) { |
15791 | SelectionDAG &DAG = DCI.DAG; |
15792 | SDNode *N = Target.N; |
15793 | MemSDNode *MemN = cast<MemSDNode>(Val: N); |
15794 | SDLoc dl(N); |
15795 | |
15796 | // Find the new opcode for the updating load/store. |
15797 | bool isLoadOp = true; |
15798 | bool isLaneOp = false; |
15799 | // Workaround for vst1x and vld1x intrinsics which do not have alignment |
15800 | // as an operand. |
15801 | bool hasAlignment = true; |
15802 | unsigned NewOpc = 0; |
15803 | unsigned NumVecs = 0; |
15804 | if (Target.isIntrinsic) { |
15805 | unsigned IntNo = N->getConstantOperandVal(Num: 1); |
15806 | switch (IntNo) { |
15807 | default: |
15808 | llvm_unreachable("unexpected intrinsic for Neon base update");
15809 | case Intrinsic::arm_neon_vld1: |
15810 | NewOpc = ARMISD::VLD1_UPD; |
15811 | NumVecs = 1; |
15812 | break; |
15813 | case Intrinsic::arm_neon_vld2: |
15814 | NewOpc = ARMISD::VLD2_UPD; |
15815 | NumVecs = 2; |
15816 | break; |
15817 | case Intrinsic::arm_neon_vld3: |
15818 | NewOpc = ARMISD::VLD3_UPD; |
15819 | NumVecs = 3; |
15820 | break; |
15821 | case Intrinsic::arm_neon_vld4: |
15822 | NewOpc = ARMISD::VLD4_UPD; |
15823 | NumVecs = 4; |
15824 | break; |
15825 | case Intrinsic::arm_neon_vld1x2: |
15826 | NewOpc = ARMISD::VLD1x2_UPD; |
15827 | NumVecs = 2; |
15828 | hasAlignment = false; |
15829 | break; |
15830 | case Intrinsic::arm_neon_vld1x3: |
15831 | NewOpc = ARMISD::VLD1x3_UPD; |
15832 | NumVecs = 3; |
15833 | hasAlignment = false; |
15834 | break; |
15835 | case Intrinsic::arm_neon_vld1x4: |
15836 | NewOpc = ARMISD::VLD1x4_UPD; |
15837 | NumVecs = 4; |
15838 | hasAlignment = false; |
15839 | break; |
15840 | case Intrinsic::arm_neon_vld2dup: |
15841 | NewOpc = ARMISD::VLD2DUP_UPD; |
15842 | NumVecs = 2; |
15843 | break; |
15844 | case Intrinsic::arm_neon_vld3dup: |
15845 | NewOpc = ARMISD::VLD3DUP_UPD; |
15846 | NumVecs = 3; |
15847 | break; |
15848 | case Intrinsic::arm_neon_vld4dup: |
15849 | NewOpc = ARMISD::VLD4DUP_UPD; |
15850 | NumVecs = 4; |
15851 | break; |
15852 | case Intrinsic::arm_neon_vld2lane: |
15853 | NewOpc = ARMISD::VLD2LN_UPD; |
15854 | NumVecs = 2; |
15855 | isLaneOp = true; |
15856 | break; |
15857 | case Intrinsic::arm_neon_vld3lane: |
15858 | NewOpc = ARMISD::VLD3LN_UPD; |
15859 | NumVecs = 3; |
15860 | isLaneOp = true; |
15861 | break; |
15862 | case Intrinsic::arm_neon_vld4lane: |
15863 | NewOpc = ARMISD::VLD4LN_UPD; |
15864 | NumVecs = 4; |
15865 | isLaneOp = true; |
15866 | break; |
15867 | case Intrinsic::arm_neon_vst1: |
15868 | NewOpc = ARMISD::VST1_UPD; |
15869 | NumVecs = 1; |
15870 | isLoadOp = false; |
15871 | break; |
15872 | case Intrinsic::arm_neon_vst2: |
15873 | NewOpc = ARMISD::VST2_UPD; |
15874 | NumVecs = 2; |
15875 | isLoadOp = false; |
15876 | break; |
15877 | case Intrinsic::arm_neon_vst3: |
15878 | NewOpc = ARMISD::VST3_UPD; |
15879 | NumVecs = 3; |
15880 | isLoadOp = false; |
15881 | break; |
15882 | case Intrinsic::arm_neon_vst4: |
15883 | NewOpc = ARMISD::VST4_UPD; |
15884 | NumVecs = 4; |
15885 | isLoadOp = false; |
15886 | break; |
15887 | case Intrinsic::arm_neon_vst2lane: |
15888 | NewOpc = ARMISD::VST2LN_UPD; |
15889 | NumVecs = 2; |
15890 | isLoadOp = false; |
15891 | isLaneOp = true; |
15892 | break; |
15893 | case Intrinsic::arm_neon_vst3lane: |
15894 | NewOpc = ARMISD::VST3LN_UPD; |
15895 | NumVecs = 3; |
15896 | isLoadOp = false; |
15897 | isLaneOp = true; |
15898 | break; |
15899 | case Intrinsic::arm_neon_vst4lane: |
15900 | NewOpc = ARMISD::VST4LN_UPD; |
15901 | NumVecs = 4; |
15902 | isLoadOp = false; |
15903 | isLaneOp = true; |
15904 | break; |
15905 | case Intrinsic::arm_neon_vst1x2: |
15906 | NewOpc = ARMISD::VST1x2_UPD; |
15907 | NumVecs = 2; |
15908 | isLoadOp = false; |
15909 | hasAlignment = false; |
15910 | break; |
15911 | case Intrinsic::arm_neon_vst1x3: |
15912 | NewOpc = ARMISD::VST1x3_UPD; |
15913 | NumVecs = 3; |
15914 | isLoadOp = false; |
15915 | hasAlignment = false; |
15916 | break; |
15917 | case Intrinsic::arm_neon_vst1x4: |
15918 | NewOpc = ARMISD::VST1x4_UPD; |
15919 | NumVecs = 4; |
15920 | isLoadOp = false; |
15921 | hasAlignment = false; |
15922 | break; |
15923 | } |
15924 | } else { |
15925 | isLaneOp = true; |
15926 | switch (N->getOpcode()) { |
15927 | default: |
15928 | llvm_unreachable("unexpected opcode for Neon base update");
15929 | case ARMISD::VLD1DUP: |
15930 | NewOpc = ARMISD::VLD1DUP_UPD; |
15931 | NumVecs = 1; |
15932 | break; |
15933 | case ARMISD::VLD2DUP: |
15934 | NewOpc = ARMISD::VLD2DUP_UPD; |
15935 | NumVecs = 2; |
15936 | break; |
15937 | case ARMISD::VLD3DUP: |
15938 | NewOpc = ARMISD::VLD3DUP_UPD; |
15939 | NumVecs = 3; |
15940 | break; |
15941 | case ARMISD::VLD4DUP: |
15942 | NewOpc = ARMISD::VLD4DUP_UPD; |
15943 | NumVecs = 4; |
15944 | break; |
15945 | case ISD::LOAD: |
15946 | NewOpc = ARMISD::VLD1_UPD; |
15947 | NumVecs = 1; |
15948 | isLaneOp = false; |
15949 | break; |
15950 | case ISD::STORE: |
15951 | NewOpc = ARMISD::VST1_UPD; |
15952 | NumVecs = 1; |
15953 | isLaneOp = false; |
15954 | isLoadOp = false; |
15955 | break; |
15956 | } |
15957 | } |
15958 | |
15959 | // Find the size of memory referenced by the load/store. |
15960 | EVT VecTy; |
15961 | if (isLoadOp) { |
15962 | VecTy = N->getValueType(ResNo: 0); |
15963 | } else if (Target.isIntrinsic) { |
15964 | VecTy = N->getOperand(Num: Target.AddrOpIdx + 1).getValueType(); |
15965 | } else { |
15966 | assert(Target.isStore && |
15967 | "Node has to be a load, a store, or an intrinsic!" ); |
15968 | VecTy = N->getOperand(Num: 1).getValueType(); |
15969 | } |
15970 | |
15971 | bool isVLDDUPOp = |
15972 | NewOpc == ARMISD::VLD1DUP_UPD || NewOpc == ARMISD::VLD2DUP_UPD || |
15973 | NewOpc == ARMISD::VLD3DUP_UPD || NewOpc == ARMISD::VLD4DUP_UPD; |
15974 | |
15975 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
15976 | if (isLaneOp || isVLDDUPOp) |
15977 | NumBytes /= VecTy.getVectorNumElements(); |
15978 | |
15979 | if (NumBytes >= 3 * 16 && User.ConstInc != NumBytes) { |
15980 | // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two |
15981 | // separate instructions that make it harder to use a non-constant update. |
15982 | return false; |
15983 | } |
15984 | |
15985 | if (SimpleConstIncOnly && User.ConstInc != NumBytes) |
15986 | return false; |
15987 | |
15988 | // OK, we found an ADD we can fold into the base update. |
15989 | // Now, create a _UPD node, taking care of not breaking alignment. |
15990 | |
15991 | EVT AlignedVecTy = VecTy; |
15992 | Align Alignment = MemN->getAlign(); |
15993 | |
15994 | // If this is a less-than-standard-aligned load/store, change the type to |
15995 | // match the standard alignment. |
15996 | // The alignment is overlooked when selecting _UPD variants; and it's |
15997 | // easier to introduce bitcasts here than fix that. |
15998 | // There are 3 ways to get to this base-update combine: |
15999 | // - intrinsics: they are assumed to be properly aligned (to the standard |
16000 | // alignment of the memory type), so we don't need to do anything. |
16001 | // - ARMISD::VLDx nodes: they are only generated from the aforementioned |
16002 | // intrinsics, so, likewise, there's nothing to do. |
16003 | // - generic load/store instructions: the alignment is specified as an |
16004 | // explicit operand, rather than implicitly as the standard alignment |
16005 | //     of the memory type (like the intrinsics). We need to change the
16006 | // memory type to match the explicit alignment. That way, we don't |
16007 | // generate non-standard-aligned ARMISD::VLDx nodes. |
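// For example (illustrative): a v2i64 load with only 2-byte alignment is
// rebuilt below as a v8i16 VLD1_UPD, and bitcasts to/from v2i64 are added
// around the new node further down.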
16008 | if (isa<LSBaseSDNode>(Val: N)) { |
16009 | if (Alignment.value() < VecTy.getScalarSizeInBits() / 8) { |
16010 | MVT EltTy = MVT::getIntegerVT(BitWidth: Alignment.value() * 8); |
16011 | assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
16012 | assert(!isLaneOp && "Unexpected generic load/store lane.");
16013 | unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8); |
16014 | AlignedVecTy = MVT::getVectorVT(VT: EltTy, NumElements: NumElts); |
16015 | } |
16016 | // Don't set an explicit alignment on regular load/stores that we want |
16017 | // to transform to VLD/VST 1_UPD nodes. |
16018 | // This matches the behavior of regular load/stores, which only get an |
16019 | // explicit alignment if the MMO alignment is larger than the standard |
16020 | // alignment of the memory type. |
16021 | // Intrinsics, however, always get an explicit alignment, set to the |
16022 | // alignment of the MMO. |
16023 | Alignment = Align(1); |
16024 | } |
16025 | |
16026 | // Create the new updating load/store node. |
16027 | // First, create an SDVTList for the new updating node's results. |
16028 | EVT Tys[6]; |
16029 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
16030 | unsigned n; |
16031 | for (n = 0; n < NumResultVecs; ++n) |
16032 | Tys[n] = AlignedVecTy; |
16033 | Tys[n++] = MVT::i32; |
16034 | Tys[n] = MVT::Other; |
16035 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumResultVecs + 2)); |
16036 | |
16037 | // Then, gather the new node's operands. |
16038 | SmallVector<SDValue, 8> Ops; |
16039 | Ops.push_back(Elt: N->getOperand(Num: 0)); // incoming chain |
16040 | Ops.push_back(Elt: N->getOperand(Num: Target.AddrOpIdx)); |
16041 | Ops.push_back(Elt: User.Inc); |
16042 | |
16043 | if (StoreSDNode *StN = dyn_cast<StoreSDNode>(Val: N)) { |
16044 | // Try to match the intrinsic's signature |
16045 | Ops.push_back(Elt: StN->getValue()); |
16046 | } else { |
16047 | // Loads (and of course intrinsics) match the intrinsics' signature, |
16048 | // so just add all but the alignment operand. |
16049 | unsigned LastOperand = |
16050 | hasAlignment ? N->getNumOperands() - 1 : N->getNumOperands(); |
16051 | for (unsigned i = Target.AddrOpIdx + 1; i < LastOperand; ++i) |
16052 | Ops.push_back(Elt: N->getOperand(Num: i)); |
16053 | } |
16054 | |
16055 | // For all node types, the alignment operand is always the last one. |
16056 | Ops.push_back(Elt: DAG.getConstant(Val: Alignment.value(), DL: dl, VT: MVT::i32)); |
16057 | |
16058 | // If this is a non-standard-aligned STORE, the penultimate operand is the |
16059 | // stored value. Bitcast it to the aligned type. |
16060 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { |
16061 | SDValue &StVal = Ops[Ops.size() - 2]; |
16062 | StVal = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: AlignedVecTy, Operand: StVal); |
16063 | } |
16064 | |
16065 | EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy; |
16066 | SDValue UpdN = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl, VTList: SDTys, Ops, MemVT: LoadVT, |
16067 | MMO: MemN->getMemOperand()); |
16068 | |
16069 | // Update the uses. |
16070 | SmallVector<SDValue, 5> NewResults; |
16071 | for (unsigned i = 0; i < NumResultVecs; ++i) |
16072 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), i)); |
16073 | |
16074 | // If this is a non-standard-aligned LOAD, the first result is the loaded
16075 | // value. Bitcast it to the expected result type. |
16076 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { |
16077 | SDValue &LdVal = NewResults[0]; |
16078 | LdVal = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: VecTy, Operand: LdVal); |
16079 | } |
16080 | |
16081 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain |
16082 | DCI.CombineTo(N, To: NewResults); |
16083 | DCI.CombineTo(N: User.N, Res: SDValue(UpdN.getNode(), NumResultVecs)); |
16084 | |
16085 | return true; |
16086 | } |
16087 | |
16088 | // If (opcode ptr inc) is an ADD-like instruction, return the
16089 | // increment value. Otherwise return 0. |
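// For example (illustrative):
//   (ADD ptr, Constant<16>) --> 16
//   (OR  ptr, Constant<4>)  --> 4, but only if ptr and 4 share no set bits
//   (ADD ptr, %reg)         --> 0 (the increment is not a constant)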
16090 | static unsigned getPointerConstIncrement(unsigned Opcode, SDValue Ptr, |
16091 | SDValue Inc, const SelectionDAG &DAG) { |
16092 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Val: Inc.getNode()); |
16093 | if (!CInc) |
16094 | return 0; |
16095 | |
16096 | switch (Opcode) { |
16097 | case ARMISD::VLD1_UPD: |
16098 | case ISD::ADD: |
16099 | return CInc->getZExtValue(); |
16100 | case ISD::OR: { |
16101 | if (DAG.haveNoCommonBitsSet(A: Ptr, B: Inc)) { |
16102 | // (OR ptr inc) is the same as (ADD ptr inc) |
16103 | return CInc->getZExtValue(); |
16104 | } |
16105 | return 0; |
16106 | } |
16107 | default: |
16108 | return 0; |
16109 | } |
16110 | } |
16111 | |
16112 | static bool findPointerConstIncrement(SDNode *N, SDValue *Ptr, SDValue *CInc) { |
16113 | switch (N->getOpcode()) { |
16114 | case ISD::ADD: |
16115 | case ISD::OR: { |
16116 | if (isa<ConstantSDNode>(Val: N->getOperand(Num: 1))) { |
16117 | *Ptr = N->getOperand(Num: 0); |
16118 | *CInc = N->getOperand(Num: 1); |
16119 | return true; |
16120 | } |
16121 | return false; |
16122 | } |
16123 | case ARMISD::VLD1_UPD: { |
16124 | if (isa<ConstantSDNode>(Val: N->getOperand(Num: 2))) { |
16125 | *Ptr = N->getOperand(Num: 1); |
16126 | *CInc = N->getOperand(Num: 2); |
16127 | return true; |
16128 | } |
16129 | return false; |
16130 | } |
16131 | default: |
16132 | return false; |
16133 | } |
16134 | } |
16135 | |
16136 | static bool isValidBaseUpdate(SDNode *N, SDNode *User) { |
16137 | // Check that the add is independent of the load/store. |
16138 | // Otherwise, folding it would create a cycle. Search through Addr |
16139 | // as well, since the User may not be a direct user of Addr and |
16140 | // only share a base pointer. |
16141 | SmallPtrSet<const SDNode *, 32> Visited; |
16142 | SmallVector<const SDNode *, 16> Worklist; |
16143 | Worklist.push_back(Elt: N); |
16144 | Worklist.push_back(Elt: User); |
16145 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist) || |
16146 | SDNode::hasPredecessorHelper(N: User, Visited, Worklist)) |
16147 | return false; |
16148 | return true; |
16149 | } |
16150 | |
16151 | /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, |
16152 | /// NEON load/store intrinsics, and generic vector load/stores, to merge |
16153 | /// base address updates. |
16154 | /// For generic load/stores, the memory type is assumed to be a vector. |
16155 | /// The caller is assumed to have checked legality. |
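/// An illustrative example at the assembly level (assuming NEON):
///   vld1.32 {d16}, [r0]
///   add     r0, r0, #8
/// can be folded into the post-incrementing form
///   vld1.32 {d16}, [r0]!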
16156 | static SDValue CombineBaseUpdate(SDNode *N, |
16157 | TargetLowering::DAGCombinerInfo &DCI) { |
16158 | const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || |
16159 | N->getOpcode() == ISD::INTRINSIC_W_CHAIN); |
16160 | const bool isStore = N->getOpcode() == ISD::STORE; |
16161 | const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); |
16162 | BaseUpdateTarget Target = {.N: N, .isIntrinsic: isIntrinsic, .isStore: isStore, .AddrOpIdx: AddrOpIdx}; |
16163 | |
16164 | SDValue Addr = N->getOperand(Num: AddrOpIdx); |
16165 | |
16166 | SmallVector<BaseUpdateUser, 8> BaseUpdates; |
16167 | |
16168 | // Search for a use of the address operand that is an increment. |
16169 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), |
16170 | UE = Addr.getNode()->use_end(); UI != UE; ++UI) { |
16171 | SDNode *User = *UI; |
16172 | if (UI.getUse().getResNo() != Addr.getResNo() || |
16173 | User->getNumOperands() != 2) |
16174 | continue; |
16175 | |
16176 | SDValue Inc = User->getOperand(Num: UI.getOperandNo() == 1 ? 0 : 1); |
16177 | unsigned ConstInc = |
16178 | getPointerConstIncrement(Opcode: User->getOpcode(), Ptr: Addr, Inc, DAG: DCI.DAG); |
16179 | |
16180 | if (ConstInc || User->getOpcode() == ISD::ADD) |
16181 | BaseUpdates.push_back(Elt: {.N: User, .Inc: Inc, .ConstInc: ConstInc}); |
16182 | } |
16183 | |
16184 | // If the address is a constant pointer increment itself, find |
16185 | // another constant increment that has the same base operand |
16186 | SDValue Base; |
16187 | SDValue CInc; |
16188 | if (findPointerConstIncrement(N: Addr.getNode(), Ptr: &Base, CInc: &CInc)) { |
16189 | unsigned Offset = |
16190 | getPointerConstIncrement(Opcode: Addr->getOpcode(), Ptr: Base, Inc: CInc, DAG: DCI.DAG); |
16191 | for (SDNode::use_iterator UI = Base->use_begin(), UE = Base->use_end(); |
16192 | UI != UE; ++UI) { |
16193 | |
16194 | SDNode *User = *UI; |
16195 | if (UI.getUse().getResNo() != Base.getResNo() || User == Addr.getNode() || |
16196 | User->getNumOperands() != 2) |
16197 | continue; |
16198 | |
16199 | SDValue UserInc = User->getOperand(Num: UI.getOperandNo() == 0 ? 1 : 0); |
16200 | unsigned UserOffset = |
16201 | getPointerConstIncrement(Opcode: User->getOpcode(), Ptr: Base, Inc: UserInc, DAG: DCI.DAG); |
16202 | |
16203 | if (!UserOffset || UserOffset <= Offset) |
16204 | continue; |
16205 | |
16206 | unsigned NewConstInc = UserOffset - Offset; |
16207 | SDValue NewInc = DCI.DAG.getConstant(Val: NewConstInc, DL: SDLoc(N), VT: MVT::i32); |
16208 | BaseUpdates.push_back(Elt: {.N: User, .Inc: NewInc, .ConstInc: NewConstInc}); |
16209 | } |
16210 | } |
16211 | |
16212 | // Try to fold the load/store with an update that matches memory |
16213 | // access size. This should work well for sequential loads. |
16214 | // |
16215 | // Filter out invalid updates as well. |
16216 | unsigned NumValidUpd = BaseUpdates.size(); |
16217 | for (unsigned I = 0; I < NumValidUpd;) { |
16218 | BaseUpdateUser &User = BaseUpdates[I]; |
16219 | if (!isValidBaseUpdate(N, User: User.N)) { |
16220 | --NumValidUpd; |
16221 | std::swap(a&: BaseUpdates[I], b&: BaseUpdates[NumValidUpd]); |
16222 | continue; |
16223 | } |
16224 | |
16225 | if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/true, DCI)) |
16226 | return SDValue(); |
16227 | ++I; |
16228 | } |
16229 | BaseUpdates.resize(N: NumValidUpd); |
16230 | |
16231 | // Try to fold with other users. Non-constant updates are considered |
16232 | // first, and constant updates are sorted to not break a sequence of |
16233 | // strided accesses (if there is any). |
16234 | std::stable_sort(first: BaseUpdates.begin(), last: BaseUpdates.end(), |
16235 | comp: [](const BaseUpdateUser &LHS, const BaseUpdateUser &RHS) { |
16236 | return LHS.ConstInc < RHS.ConstInc; |
16237 | }); |
16238 | for (BaseUpdateUser &User : BaseUpdates) { |
16239 | if (TryCombineBaseUpdate(Target, User, /*SimpleConstIncOnly=*/false, DCI)) |
16240 | return SDValue(); |
16241 | } |
16242 | return SDValue(); |
16243 | } |
16244 | |
16245 | static SDValue PerformVLDCombine(SDNode *N, |
16246 | TargetLowering::DAGCombinerInfo &DCI) { |
16247 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
16248 | return SDValue(); |
16249 | |
16250 | return CombineBaseUpdate(N, DCI); |
16251 | } |
16252 | |
16253 | static SDValue PerformMVEVLDCombine(SDNode *N, |
16254 | TargetLowering::DAGCombinerInfo &DCI) { |
16255 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
16256 | return SDValue(); |
16257 | |
16258 | SelectionDAG &DAG = DCI.DAG; |
16259 | SDValue Addr = N->getOperand(Num: 2); |
16260 | MemSDNode *MemN = cast<MemSDNode>(Val: N); |
16261 | SDLoc dl(N); |
16262 | |
16263 | // For stores where there are multiple intrinsics, we only actually want
16264 | // to post-inc the last of them.
16265 | unsigned IntNo = N->getConstantOperandVal(Num: 1); |
16266 | if (IntNo == Intrinsic::arm_mve_vst2q && N->getConstantOperandVal(Num: 5) != 1) |
16267 | return SDValue(); |
16268 | if (IntNo == Intrinsic::arm_mve_vst4q && N->getConstantOperandVal(Num: 7) != 3) |
16269 | return SDValue(); |
16270 | |
16271 | // Search for a use of the address operand that is an increment. |
16272 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), |
16273 | UE = Addr.getNode()->use_end(); |
16274 | UI != UE; ++UI) { |
16275 | SDNode *User = *UI; |
16276 | if (User->getOpcode() != ISD::ADD || |
16277 | UI.getUse().getResNo() != Addr.getResNo()) |
16278 | continue; |
16279 | |
16280 | // Check that the add is independent of the load/store. Otherwise, folding |
16281 | // it would create a cycle. We can avoid searching through Addr as it's a |
16282 | // predecessor to both. |
16283 | SmallPtrSet<const SDNode *, 32> Visited; |
16284 | SmallVector<const SDNode *, 16> Worklist; |
16285 | Visited.insert(Ptr: Addr.getNode()); |
16286 | Worklist.push_back(Elt: N); |
16287 | Worklist.push_back(Elt: User); |
16288 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist) || |
16289 | SDNode::hasPredecessorHelper(N: User, Visited, Worklist)) |
16290 | continue; |
16291 | |
16292 | // Find the new opcode for the updating load/store. |
16293 | bool isLoadOp = true; |
16294 | unsigned NewOpc = 0; |
16295 | unsigned NumVecs = 0; |
16296 | switch (IntNo) { |
16297 | default: |
16298 | llvm_unreachable("unexpected intrinsic for MVE VLDn combine");
16299 | case Intrinsic::arm_mve_vld2q: |
16300 | NewOpc = ARMISD::VLD2_UPD; |
16301 | NumVecs = 2; |
16302 | break; |
16303 | case Intrinsic::arm_mve_vld4q: |
16304 | NewOpc = ARMISD::VLD4_UPD; |
16305 | NumVecs = 4; |
16306 | break; |
16307 | case Intrinsic::arm_mve_vst2q: |
16308 | NewOpc = ARMISD::VST2_UPD; |
16309 | NumVecs = 2; |
16310 | isLoadOp = false; |
16311 | break; |
16312 | case Intrinsic::arm_mve_vst4q: |
16313 | NewOpc = ARMISD::VST4_UPD; |
16314 | NumVecs = 4; |
16315 | isLoadOp = false; |
16316 | break; |
16317 | } |
16318 | |
16319 | // Find the size of memory referenced by the load/store. |
16320 | EVT VecTy; |
16321 | if (isLoadOp) { |
16322 | VecTy = N->getValueType(ResNo: 0); |
16323 | } else { |
16324 | VecTy = N->getOperand(Num: 3).getValueType(); |
16325 | } |
16326 | |
16327 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
16328 | |
16329 | // If the increment is a constant, it must match the memory ref size. |
16330 | SDValue Inc = User->getOperand(Num: User->getOperand(Num: 0) == Addr ? 1 : 0); |
16331 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Val: Inc.getNode()); |
16332 | if (!CInc || CInc->getZExtValue() != NumBytes) |
16333 | continue; |
16334 | |
16335 | // Create the new updating load/store node. |
16336 | // First, create an SDVTList for the new updating node's results. |
16337 | EVT Tys[6]; |
16338 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
16339 | unsigned n; |
16340 | for (n = 0; n < NumResultVecs; ++n) |
16341 | Tys[n] = VecTy; |
16342 | Tys[n++] = MVT::i32; |
16343 | Tys[n] = MVT::Other; |
16344 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumResultVecs + 2)); |
16345 | |
16346 | // Then, gather the new node's operands. |
16347 | SmallVector<SDValue, 8> Ops; |
16348 | Ops.push_back(Elt: N->getOperand(Num: 0)); // incoming chain |
16349 | Ops.push_back(Elt: N->getOperand(Num: 2)); // ptr |
16350 | Ops.push_back(Elt: Inc); |
16351 | |
16352 | for (unsigned i = 3; i < N->getNumOperands(); ++i) |
16353 | Ops.push_back(Elt: N->getOperand(Num: i)); |
16354 | |
16355 | SDValue UpdN = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl, VTList: SDTys, Ops, MemVT: VecTy, |
16356 | MMO: MemN->getMemOperand()); |
16357 | |
16358 | // Update the uses. |
16359 | SmallVector<SDValue, 5> NewResults; |
16360 | for (unsigned i = 0; i < NumResultVecs; ++i) |
16361 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), i)); |
16362 | |
16363 | NewResults.push_back(Elt: SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain |
16364 | DCI.CombineTo(N, To: NewResults); |
16365 | DCI.CombineTo(N: User, Res: SDValue(UpdN.getNode(), NumResultVecs)); |
16366 | |
16367 | break; |
16368 | } |
16369 | |
16370 | return SDValue(); |
16371 | } |
16372 | |
16373 | /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a |
16374 | /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic |
16375 | /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and |
16376 | /// return true. |
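/// Illustrative sketch: if %vld = vld2lane(..., lane 1) and every vector
/// result of %vld is only used by VDUPLANE(..., 1), the whole group can be
/// replaced by a single vld2dup that loads the two elements and replicates
/// them across all lanes of both result vectors.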
16377 | static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
16378 | SelectionDAG &DAG = DCI.DAG; |
16379 | EVT VT = N->getValueType(ResNo: 0); |
16380 | // vldN-dup instructions only support 64-bit vectors for N > 1. |
16381 | if (!VT.is64BitVector()) |
16382 | return false; |
16383 | |
16384 | // Check if the VDUPLANE operand is a vldN-dup intrinsic. |
16385 | SDNode *VLD = N->getOperand(Num: 0).getNode(); |
16386 | if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) |
16387 | return false; |
16388 | unsigned NumVecs = 0; |
16389 | unsigned NewOpc = 0; |
16390 | unsigned IntNo = VLD->getConstantOperandVal(Num: 1); |
16391 | if (IntNo == Intrinsic::arm_neon_vld2lane) { |
16392 | NumVecs = 2; |
16393 | NewOpc = ARMISD::VLD2DUP; |
16394 | } else if (IntNo == Intrinsic::arm_neon_vld3lane) { |
16395 | NumVecs = 3; |
16396 | NewOpc = ARMISD::VLD3DUP; |
16397 | } else if (IntNo == Intrinsic::arm_neon_vld4lane) { |
16398 | NumVecs = 4; |
16399 | NewOpc = ARMISD::VLD4DUP; |
16400 | } else { |
16401 | return false; |
16402 | } |
16403 | |
16404 | // First check that all the vldN-lane uses are VDUPLANEs and that the lane |
16405 | // numbers match the load. |
16406 | unsigned VLDLaneNo = VLD->getConstantOperandVal(Num: NumVecs + 3); |
16407 | for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); |
16408 | UI != UE; ++UI) { |
16409 | // Ignore uses of the chain result. |
16410 | if (UI.getUse().getResNo() == NumVecs) |
16411 | continue; |
16412 | SDNode *User = *UI; |
16413 | if (User->getOpcode() != ARMISD::VDUPLANE || |
16414 | VLDLaneNo != User->getConstantOperandVal(Num: 1)) |
16415 | return false; |
16416 | } |
16417 | |
16418 | // Create the vldN-dup node. |
16419 | EVT Tys[5]; |
16420 | unsigned n; |
16421 | for (n = 0; n < NumVecs; ++n) |
16422 | Tys[n] = VT; |
16423 | Tys[n] = MVT::Other; |
16424 | SDVTList SDTys = DAG.getVTList(VTs: ArrayRef(Tys, NumVecs + 1)); |
16425 | SDValue Ops[] = { VLD->getOperand(Num: 0), VLD->getOperand(Num: 2) }; |
16426 | MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(Val: VLD); |
16427 | SDValue VLDDup = DAG.getMemIntrinsicNode(Opcode: NewOpc, dl: SDLoc(VLD), VTList: SDTys, |
16428 | Ops, MemVT: VLDMemInt->getMemoryVT(), |
16429 | MMO: VLDMemInt->getMemOperand()); |
16430 | |
16431 | // Update the uses. |
16432 | for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); |
16433 | UI != UE; ++UI) { |
16434 | unsigned ResNo = UI.getUse().getResNo(); |
16435 | // Ignore uses of the chain result. |
16436 | if (ResNo == NumVecs) |
16437 | continue; |
16438 | SDNode *User = *UI; |
16439 | DCI.CombineTo(N: User, Res: SDValue(VLDDup.getNode(), ResNo)); |
16440 | } |
16441 | |
16442 | // Now the vldN-lane intrinsic is dead except for its chain result. |
16443 | // Update uses of the chain. |
16444 | std::vector<SDValue> VLDDupResults; |
16445 | for (unsigned n = 0; n < NumVecs; ++n) |
16446 | VLDDupResults.push_back(x: SDValue(VLDDup.getNode(), n)); |
16447 | VLDDupResults.push_back(x: SDValue(VLDDup.getNode(), NumVecs)); |
16448 | DCI.CombineTo(N: VLD, To: VLDDupResults); |
16449 | |
16450 | return true; |
16451 | } |
16452 | |
16453 | /// PerformVDUPLANECombine - Target-specific dag combine xforms for |
16454 | /// ARMISD::VDUPLANE. |
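/// For example (illustrative): under MVE a VDUPLANE is rewritten as
///   VDUP (extract_vector_elt %src, lane)
/// and under NEON a VDUPLANE of a VMOVIMM/VMVNIMM splat is folded into a
/// plain bitcast of that splat, since duplicating a splat is redundant.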
16455 | static SDValue PerformVDUPLANECombine(SDNode *N, |
16456 | TargetLowering::DAGCombinerInfo &DCI, |
16457 | const ARMSubtarget *Subtarget) { |
16458 | SDValue Op = N->getOperand(Num: 0); |
16459 | EVT VT = N->getValueType(ResNo: 0); |
16460 | |
16461 | // On MVE, we just convert the VDUPLANE to a VDUP with an extract. |
16462 | if (Subtarget->hasMVEIntegerOps()) { |
16463 | EVT ExtractVT = VT.getVectorElementType();
16464 | // We need to ensure we are creating a legal type. |
16465 | if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT: ExtractVT)) |
16466 | ExtractVT = MVT::i32; |
16467 | SDValue Extract = DCI.DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: SDLoc(N), VT: ExtractVT,
16468 | N1: N->getOperand(Num: 0), N2: N->getOperand(Num: 1)); |
16469 | return DCI.DAG.getNode(Opcode: ARMISD::VDUP, DL: SDLoc(N), VT, Operand: Extract); |
16470 | } |
16471 | |
16472 | // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses |
16473 | // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. |
16474 | if (CombineVLDDUP(N, DCI)) |
16475 | return SDValue(N, 0); |
16476 | |
16477 | // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is |
16478 | // redundant. Ignore bit_converts for now; element sizes are checked below. |
16479 | while (Op.getOpcode() == ISD::BITCAST) |
16480 | Op = Op.getOperand(i: 0); |
16481 | if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) |
16482 | return SDValue(); |
16483 | |
16484 | // Make sure the VMOV element size is not bigger than the VDUPLANE elements. |
16485 | unsigned EltSize = Op.getScalarValueSizeInBits(); |
16486 | // The canonical VMOV for a zero vector uses a 32-bit element size. |
16487 | unsigned Imm = Op.getConstantOperandVal(i: 0); |
16488 | unsigned EltBits; |
16489 | if (ARM_AM::decodeVMOVModImm(ModImm: Imm, EltBits) == 0) |
16490 | EltSize = 8; |
16491 | if (EltSize > VT.getScalarSizeInBits()) |
16492 | return SDValue(); |
16493 | |
16494 | return DCI.DAG.getNode(Opcode: ISD::BITCAST, DL: SDLoc(N), VT, Operand: Op); |
16495 | } |
16496 | |
16497 | /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP. |
16498 | static SDValue PerformVDUPCombine(SDNode *N, SelectionDAG &DAG, |
16499 | const ARMSubtarget *Subtarget) { |
16500 | SDValue Op = N->getOperand(Num: 0); |
16501 | SDLoc dl(N); |
16502 | |
16503 | if (Subtarget->hasMVEIntegerOps()) { |
16504 | // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will |
16505 | // need to come from a GPR. |
16506 | if (Op.getValueType() == MVT::f32) |
16507 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), |
16508 | Operand: DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i32, Operand: Op)); |
16509 | else if (Op.getValueType() == MVT::f16) |
16510 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: dl, VT: N->getValueType(ResNo: 0), |
16511 | Operand: DAG.getNode(Opcode: ARMISD::VMOVrh, DL: dl, VT: MVT::i32, Operand: Op)); |
16512 | } |
16513 | |
16514 | if (!Subtarget->hasNEON()) |
16515 | return SDValue(); |
16516 | |
16517 | // Match VDUP(LOAD) -> VLD1DUP. |
16518 | // We match this pattern here rather than waiting for isel because the |
16519 | // transform is only legal for unindexed loads. |
16520 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: Op.getNode()); |
16521 | if (LD && Op.hasOneUse() && LD->isUnindexed() && |
16522 | LD->getMemoryVT() == N->getValueType(ResNo: 0).getVectorElementType()) { |
16523 | SDValue Ops[] = {LD->getOperand(Num: 0), LD->getOperand(Num: 1), |
16524 | DAG.getConstant(Val: LD->getAlign().value(), DL: SDLoc(N), VT: MVT::i32)}; |
16525 | SDVTList SDTys = DAG.getVTList(VT1: N->getValueType(ResNo: 0), VT2: MVT::Other); |
16526 | SDValue VLDDup = |
16527 | DAG.getMemIntrinsicNode(Opcode: ARMISD::VLD1DUP, dl: SDLoc(N), VTList: SDTys, Ops, |
16528 | MemVT: LD->getMemoryVT(), MMO: LD->getMemOperand()); |
16529 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: VLDDup.getValue(R: 1)); |
16530 | return VLDDup; |
16531 | } |
16532 | |
16533 | return SDValue(); |
16534 | } |
16535 | |
16536 | static SDValue PerformLOADCombine(SDNode *N, |
16537 | TargetLowering::DAGCombinerInfo &DCI, |
16538 | const ARMSubtarget *Subtarget) { |
16539 | EVT VT = N->getValueType(ResNo: 0); |
16540 | |
16541 | // If this is a legal vector load, try to combine it into a VLD1_UPD. |
16542 | if (Subtarget->hasNEON() && ISD::isNormalLoad(N) && VT.isVector() && |
16543 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
16544 | return CombineBaseUpdate(N, DCI); |
16545 | |
16546 | return SDValue(); |
16547 | } |
16548 | |
16549 | // Optimize trunc store (of multiple scalars) to shuffle and store. First, |
16550 | // pack all of the elements in one place. Next, store to memory in fewer |
16551 | // chunks. |
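// For example (a sketch, assuming a v4i32 value truncated to v4i8 on a
// little-endian target): the value is bitcast to v16i8, shuffled so bytes
// 0, 4, 8 and 12 land in the low lanes, and the packed low 32 bits are
// written out with a single i32-sized store.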
16552 | static SDValue PerformTruncatingStoreCombine(StoreSDNode *St, |
16553 | SelectionDAG &DAG) { |
16554 | SDValue StVal = St->getValue(); |
16555 | EVT VT = StVal.getValueType(); |
16556 | if (!St->isTruncatingStore() || !VT.isVector()) |
16557 | return SDValue(); |
16558 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
16559 | EVT StVT = St->getMemoryVT(); |
16560 | unsigned NumElems = VT.getVectorNumElements(); |
16561 | assert(StVT != VT && "Cannot truncate to the same type");
16562 | unsigned FromEltSz = VT.getScalarSizeInBits(); |
16563 | unsigned ToEltSz = StVT.getScalarSizeInBits(); |
16564 | |
16565 | // From, To sizes and ElemCount must be pow of two |
16566 | if (!isPowerOf2_32(Value: NumElems * FromEltSz * ToEltSz)) |
16567 | return SDValue(); |
16568 | |
16569 | // We are going to use the original vector elt for storing. |
16570 | // Accumulated smaller vector elements must be a multiple of the store size. |
16571 | if (0 != (NumElems * FromEltSz) % ToEltSz) |
16572 | return SDValue(); |
16573 | |
16574 | unsigned SizeRatio = FromEltSz / ToEltSz; |
16575 | assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); |
16576 | |
16577 | // Create a type on which we perform the shuffle. |
16578 | EVT WideVecVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: StVT.getScalarType(), |
16579 | NumElements: NumElems * SizeRatio); |
16580 | assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); |
16581 | |
16582 | SDLoc DL(St); |
16583 | SDValue WideVec = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: WideVecVT, Operand: StVal); |
16584 | SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); |
16585 | for (unsigned i = 0; i < NumElems; ++i) |
16586 | ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1 |
16587 | : i * SizeRatio; |
16588 | |
16589 | // Can't shuffle using an illegal type. |
16590 | if (!TLI.isTypeLegal(VT: WideVecVT)) |
16591 | return SDValue(); |
16592 | |
16593 | SDValue Shuff = DAG.getVectorShuffle( |
16594 | VT: WideVecVT, dl: DL, N1: WideVec, N2: DAG.getUNDEF(VT: WideVec.getValueType()), Mask: ShuffleVec); |
16595 | // At this point all of the data is stored at the bottom of the |
16596 | // register. We now need to save it to mem. |
16597 | |
16598 | // Find the largest store unit |
16599 | MVT StoreType = MVT::i8; |
16600 | for (MVT Tp : MVT::integer_valuetypes()) { |
16601 | if (TLI.isTypeLegal(VT: Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) |
16602 | StoreType = Tp; |
16603 | } |
16604 | // Didn't find a legal store type. |
16605 | if (!TLI.isTypeLegal(VT: StoreType)) |
16606 | return SDValue(); |
16607 | |
16608 | // Bitcast the original vector into a vector of store-size units |
16609 | EVT StoreVecVT = |
16610 | EVT::getVectorVT(Context&: *DAG.getContext(), VT: StoreType, |
16611 | NumElements: VT.getSizeInBits() / EVT(StoreType).getSizeInBits()); |
16612 | assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); |
16613 | SDValue ShuffWide = DAG.getNode(Opcode: ISD::BITCAST, DL, VT: StoreVecVT, Operand: Shuff); |
16614 | SmallVector<SDValue, 8> Chains; |
16615 | SDValue Increment = DAG.getConstant(Val: StoreType.getSizeInBits() / 8, DL, |
16616 | VT: TLI.getPointerTy(DL: DAG.getDataLayout())); |
16617 | SDValue BasePtr = St->getBasePtr(); |
16618 | |
16619 | // Perform one or more big stores into memory. |
16620 | unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits(); |
16621 | for (unsigned I = 0; I < E; I++) { |
16622 | SDValue SubVec = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: StoreType, |
16623 | N1: ShuffWide, N2: DAG.getIntPtrConstant(Val: I, DL)); |
16624 | SDValue Ch = |
16625 | DAG.getStore(Chain: St->getChain(), dl: DL, Val: SubVec, Ptr: BasePtr, PtrInfo: St->getPointerInfo(), |
16626 | Alignment: St->getAlign(), MMOFlags: St->getMemOperand()->getFlags()); |
16627 | BasePtr = |
16628 | DAG.getNode(Opcode: ISD::ADD, DL, VT: BasePtr.getValueType(), N1: BasePtr, N2: Increment); |
16629 | Chains.push_back(Elt: Ch); |
16630 | } |
16631 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
16632 | } |
16633 | |
16634 | // Try taking a single vector store from an fpround (which would otherwise turn |
16635 | // into an expensive buildvector) and splitting it into a series of narrowing |
16636 | // stores. |
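// For example (a sketch): a store of (fp_round v8f32 %x to v8f16) can be
// split into two pieces; each v4f32 half is narrowed with VCVTN and stored
// as a 64-bit truncating integer store, instead of building the full v8f16
// vector first.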
16637 | static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St, |
16638 | SelectionDAG &DAG) { |
16639 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
16640 | return SDValue(); |
16641 | SDValue Trunc = St->getValue(); |
16642 | if (Trunc->getOpcode() != ISD::FP_ROUND) |
16643 | return SDValue(); |
16644 | EVT FromVT = Trunc->getOperand(Num: 0).getValueType(); |
16645 | EVT ToVT = Trunc.getValueType(); |
16646 | if (!ToVT.isVector()) |
16647 | return SDValue(); |
16648 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); |
16649 | EVT ToEltVT = ToVT.getVectorElementType(); |
16650 | EVT FromEltVT = FromVT.getVectorElementType(); |
16651 | |
16652 | if (FromEltVT != MVT::f32 || ToEltVT != MVT::f16) |
16653 | return SDValue(); |
16654 | |
16655 | unsigned NumElements = 4; |
16656 | if (FromVT.getVectorNumElements() % NumElements != 0) |
16657 | return SDValue(); |
16658 | |
16659 | // Test if the Trunc will be convertable to a VMOVN with a shuffle, and if so |
16660 | // use the VMOVN over splitting the store. We are looking for patterns of: |
16661 | // !rev: 0 N 1 N+1 2 N+2 ... |
16662 | // rev: N 0 N+1 1 N+2 2 ... |
16663 | // The shuffle may either be a single source (in which case N = NumElts/2) or |
16664 | // two inputs extended with concat to the same size (in which case N = |
16665 | // NumElts). |
16666 | auto isVMOVNShuffle = [&](ShuffleVectorSDNode *SVN, bool Rev) { |
16667 | ArrayRef<int> M = SVN->getMask(); |
16668 | unsigned NumElts = ToVT.getVectorNumElements(); |
16669 | if (SVN->getOperand(Num: 1).isUndef()) |
16670 | NumElts /= 2; |
16671 | |
16672 | unsigned Off0 = Rev ? NumElts : 0; |
16673 | unsigned Off1 = Rev ? 0 : NumElts; |
16674 | |
16675 | for (unsigned I = 0; I < NumElts; I += 2) { |
16676 | if (M[I] >= 0 && M[I] != (int)(Off0 + I / 2)) |
16677 | return false; |
16678 | if (M[I + 1] >= 0 && M[I + 1] != (int)(Off1 + I / 2)) |
16679 | return false; |
16680 | } |
16681 | |
16682 | return true; |
16683 | }; |
16684 | |
16685 | if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Val: Trunc.getOperand(i: 0))) |
16686 | if (isVMOVNShuffle(Shuffle, false) || isVMOVNShuffle(Shuffle, true)) |
16687 | return SDValue(); |
16688 | |
16689 | LLVMContext &C = *DAG.getContext(); |
16690 | SDLoc DL(St); |
16691 | // Details about the old store |
16692 | SDValue Ch = St->getChain(); |
16693 | SDValue BasePtr = St->getBasePtr(); |
16694 | Align Alignment = St->getOriginalAlign(); |
16695 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
16696 | AAMDNodes AAInfo = St->getAAInfo(); |
16697 | |
16698 | // We split the store into slices of NumElements. fp16 trunc stores are vcvt |
16699 | // and then stored as truncating integer stores. |
16700 | EVT NewFromVT = EVT::getVectorVT(Context&: C, VT: FromEltVT, NumElements); |
16701 | EVT NewToVT = EVT::getVectorVT( |
16702 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getSizeInBits()), NumElements); |
16703 | |
16704 | SmallVector<SDValue, 4> Stores; |
16705 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
16706 | unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8; |
16707 | SDValue NewPtr = |
16708 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
16709 | |
16710 | SDValue Extract =
16711 | DAG.getNode(Opcode: ISD::EXTRACT_SUBVECTOR, DL, VT: NewFromVT, N1: Trunc.getOperand(i: 0), |
16712 | N2: DAG.getConstant(Val: i * NumElements, DL, VT: MVT::i32)); |
16713 | |
16714 | SDValue FPTrunc = |
16715 | DAG.getNode(Opcode: ARMISD::VCVTN, DL, VT: MVT::v8f16, N1: DAG.getUNDEF(VT: MVT::v8f16), |
16716 | N2: Extract, N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
16717 | Extract = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: MVT::v4i32, Operand: FPTrunc); |
16718 | |
16719 | SDValue Store = DAG.getTruncStore( |
16720 | Chain: Ch, dl: DL, Val: Extract, Ptr: NewPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: NewOffset), |
16721 | SVT: NewToVT, Alignment, MMOFlags, AAInfo); |
16722 | Stores.push_back(Elt: Store); |
16723 | } |
16724 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Stores); |
16725 | } |
16726 | |
16727 | // Try taking a single vector store from an MVETRUNC (which would otherwise turn |
16728 | // into an expensive buildvector) and splitting it into a series of narrowing |
16729 | // stores. |
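// For example (a sketch): a store of (MVETRUNC v8i16 %a, v8i16 %b) to
// v16i8 can be emitted as two v8i16 -> v8i8 truncating stores, one per
// MVETRUNC operand, so the narrowed vector never has to be materialised.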
16730 | static SDValue PerformSplittingMVETruncToNarrowingStores(StoreSDNode *St, |
16731 | SelectionDAG &DAG) { |
16732 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
16733 | return SDValue(); |
16734 | SDValue Trunc = St->getValue(); |
16735 | if (Trunc->getOpcode() != ARMISD::MVETRUNC) |
16736 | return SDValue(); |
16737 | EVT FromVT = Trunc->getOperand(Num: 0).getValueType(); |
16738 | EVT ToVT = Trunc.getValueType(); |
16739 | |
16740 | LLVMContext &C = *DAG.getContext(); |
16741 | SDLoc DL(St); |
16742 | // Details about the old store |
16743 | SDValue Ch = St->getChain(); |
16744 | SDValue BasePtr = St->getBasePtr(); |
16745 | Align Alignment = St->getOriginalAlign(); |
16746 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
16747 | AAMDNodes AAInfo = St->getAAInfo(); |
16748 | |
16749 | EVT NewToVT = EVT::getVectorVT(Context&: C, VT: ToVT.getVectorElementType(), |
16750 | NumElements: FromVT.getVectorNumElements()); |
16751 | |
16752 | SmallVector<SDValue, 4> Stores; |
16753 | for (unsigned i = 0; i < Trunc.getNumOperands(); i++) { |
16754 | unsigned NewOffset = |
16755 | i * FromVT.getVectorNumElements() * ToVT.getScalarSizeInBits() / 8; |
16756 | SDValue NewPtr = |
16757 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
16758 | |
16759 | SDValue Extract = Trunc.getOperand(i);
16760 | SDValue Store = DAG.getTruncStore( |
16761 | Chain: Ch, dl: DL, Val: Extract, Ptr: NewPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: NewOffset), |
16762 | SVT: NewToVT, Alignment, MMOFlags, AAInfo); |
16763 | Stores.push_back(Elt: Store); |
16764 | } |
16765 | return DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Stores); |
16766 | } |
16767 | |
16768 | // Given a floating point store from an extracted vector, with an integer |
16769 | // VGETLANE that already exists, store the existing VGETLANEu directly. This can |
16770 | // help reduce fp register pressure, doesn't require the fp extract and allows |
16771 | // use of more integer post-inc stores not available with vstr. |
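// Illustrative sketch: if the DAG already contains
//   %lane = VGETLANEu %vec, %idx
// then a store of (f16 extract_vector_elt %vec, %idx) can be replaced by a
// 16-bit truncating integer store of %lane, skipping the fp-side extract.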
16772 | static SDValue PerformExtractFpToIntStores(StoreSDNode *St, SelectionDAG &DAG) {
16773 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
16774 | return SDValue(); |
16775 | SDValue Extract = St->getValue();
16776 | EVT VT = Extract.getValueType(); |
16777 | // For now only uses f16. This may be useful for f32 too, but that will |
16778 | // be bitcast(extract), not the VGETLANEu we currently check here. |
16779 | if (VT != MVT::f16 || Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
16780 | return SDValue(); |
16781 | |
16782 | SDNode *GetLane = |
16783 | DAG.getNodeIfExists(Opcode: ARMISD::VGETLANEu, VTList: DAG.getVTList(VT: MVT::i32), |
16784 | Ops: {Extract.getOperand(i: 0), Extract.getOperand(i: 1)}); |
16785 | if (!GetLane) |
16786 | return SDValue(); |
16787 | |
16788 | LLVMContext &C = *DAG.getContext(); |
16789 | SDLoc DL(St); |
16790 | // Create a new integer store to replace the existing floating point version. |
16791 | SDValue Ch = St->getChain(); |
16792 | SDValue BasePtr = St->getBasePtr(); |
16793 | Align Alignment = St->getOriginalAlign(); |
16794 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
16795 | AAMDNodes AAInfo = St->getAAInfo(); |
16796 | EVT NewToVT = EVT::getIntegerVT(Context&: C, BitWidth: VT.getSizeInBits()); |
16797 | SDValue Store = DAG.getTruncStore(Chain: Ch, dl: DL, Val: SDValue(GetLane, 0), Ptr: BasePtr, |
16798 | PtrInfo: St->getPointerInfo(), SVT: NewToVT, Alignment, |
16799 | MMOFlags, AAInfo); |
16800 | |
16801 | return Store; |
16802 | } |
16803 | |
16804 | /// PerformSTORECombine - Target-specific dag combine xforms for |
16805 | /// ISD::STORE. |
16806 | static SDValue PerformSTORECombine(SDNode *N, |
16807 | TargetLowering::DAGCombinerInfo &DCI, |
16808 | const ARMSubtarget *Subtarget) { |
16809 | StoreSDNode *St = cast<StoreSDNode>(Val: N); |
16810 | if (St->isVolatile()) |
16811 | return SDValue(); |
16812 | SDValue StVal = St->getValue(); |
16813 | EVT VT = StVal.getValueType(); |
16814 | |
16815 | if (Subtarget->hasNEON()) |
16816 | if (SDValue Store = PerformTruncatingStoreCombine(St, DAG&: DCI.DAG)) |
16817 | return Store; |
16818 | |
16819 | if (Subtarget->hasMVEFloatOps()) |
16820 | if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DAG&: DCI.DAG)) |
16821 | return NewToken; |
16822 | |
16823 | if (Subtarget->hasMVEIntegerOps()) { |
16824 | if (SDValue NewChain = PerformExtractFpToIntStores(St, DAG&: DCI.DAG)) |
16825 | return NewChain; |
16826 | if (SDValue NewToken = |
16827 | PerformSplittingMVETruncToNarrowingStores(St, DAG&: DCI.DAG)) |
16828 | return NewToken; |
16829 | } |
16830 | |
16831 | if (!ISD::isNormalStore(N: St)) |
16832 | return SDValue(); |
16833 | |
16834 | // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and |
16835 | // ARM stores of arguments in the same cache line. |
16836 | if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && |
16837 | StVal.getNode()->hasOneUse()) { |
16838 | SelectionDAG &DAG = DCI.DAG; |
16839 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
16840 | SDLoc DL(St); |
16841 | SDValue BasePtr = St->getBasePtr(); |
16842 | SDValue NewST1 = DAG.getStore( |
16843 | Chain: St->getChain(), dl: DL, Val: StVal.getNode()->getOperand(Num: isBigEndian ? 1 : 0), |
16844 | Ptr: BasePtr, PtrInfo: St->getPointerInfo(), Alignment: St->getOriginalAlign(), |
16845 | MMOFlags: St->getMemOperand()->getFlags()); |
16846 | |
16847 | SDValue OffsetPtr = DAG.getNode(Opcode: ISD::ADD, DL, VT: MVT::i32, N1: BasePtr, |
16848 | N2: DAG.getConstant(Val: 4, DL, VT: MVT::i32)); |
16849 | return DAG.getStore(Chain: NewST1.getValue(R: 0), dl: DL, |
16850 | Val: StVal.getNode()->getOperand(Num: isBigEndian ? 0 : 1), |
16851 | Ptr: OffsetPtr, PtrInfo: St->getPointerInfo().getWithOffset(O: 4), |
16852 | Alignment: St->getOriginalAlign(), |
16853 | MMOFlags: St->getMemOperand()->getFlags()); |
16854 | } |
16855 | |
16856 | if (StVal.getValueType() == MVT::i64 && |
16857 | StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
16858 | |
16859 | // Bitcast an i64 store extracted from a vector to f64. |
16860 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
16861 | SelectionDAG &DAG = DCI.DAG; |
16862 | SDLoc dl(StVal); |
16863 | SDValue IntVec = StVal.getOperand(i: 0); |
16864 | EVT FloatVT = EVT::getVectorVT(Context&: *DAG.getContext(), VT: MVT::f64, |
16865 | NumElements: IntVec.getValueType().getVectorNumElements()); |
16866 | SDValue Vec = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: FloatVT, Operand: IntVec); |
16867 | SDValue ExtElt = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL: dl, VT: MVT::f64, |
16868 | N1: Vec, N2: StVal.getOperand(i: 1)); |
16869 | dl = SDLoc(N); |
16870 | SDValue V = DAG.getNode(Opcode: ISD::BITCAST, DL: dl, VT: MVT::i64, Operand: ExtElt); |
16871 | // Make the DAGCombiner fold the bitcasts. |
16872 | DCI.AddToWorklist(N: Vec.getNode()); |
16873 | DCI.AddToWorklist(N: ExtElt.getNode()); |
16874 | DCI.AddToWorklist(N: V.getNode()); |
16875 | return DAG.getStore(Chain: St->getChain(), dl, Val: V, Ptr: St->getBasePtr(), |
16876 | PtrInfo: St->getPointerInfo(), Alignment: St->getAlign(), |
16877 | MMOFlags: St->getMemOperand()->getFlags(), AAInfo: St->getAAInfo()); |
16878 | } |
16879 | |
16880 | // If this is a legal vector store, try to combine it into a VST1_UPD. |
16881 | if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() && |
16882 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
16883 | return CombineBaseUpdate(N, DCI); |
16884 | |
16885 | return SDValue(); |
16886 | } |
16887 | |
16888 | /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) |
16889 | /// can replace combinations of VMUL and VCVT (floating-point to integer) |
16890 | /// when the VMUL has a constant operand that is a power of 2. |
16891 | /// |
16892 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): |
16893 | /// vmul.f32 d16, d17, d16 |
16894 | /// vcvt.s32.f32 d16, d16 |
16895 | /// becomes: |
16896 | /// vcvt.s32.f32 d16, d16, #3 |
16897 | static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, |
16898 | const ARMSubtarget *Subtarget) { |
16899 | if (!Subtarget->hasNEON()) |
16900 | return SDValue(); |
16901 | |
16902 | SDValue Op = N->getOperand(Num: 0); |
16903 | if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || |
16904 | Op.getOpcode() != ISD::FMUL) |
16905 | return SDValue(); |
16906 | |
16907 | SDValue ConstVec = Op->getOperand(Num: 1); |
16908 | if (!isa<BuildVectorSDNode>(Val: ConstVec)) |
16909 | return SDValue(); |
16910 | |
16911 | MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); |
16912 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
16913 | MVT IntTy = N->getSimpleValueType(ResNo: 0).getVectorElementType(); |
16914 | uint32_t IntBits = IntTy.getSizeInBits(); |
16915 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
16916 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
16917 | // These instructions only exist converting from f32 to i32. We can handle |
16918 | // smaller integers by generating an extra truncate, but larger ones would |
16919 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
    // these instructions only support v2i32/v4i32 types.
16921 | return SDValue(); |
16922 | } |
16923 | |
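  // The constant multiplicand must be a power-of-2 splat; its log2 becomes the
  // #fbits immediate of the fixed-point conversion and must lie in [1, 32].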
16924 | BitVector UndefElements; |
16925 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Val&: ConstVec); |
16926 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(UndefElements: &UndefElements, BitWidth: 33); |
16927 | if (C == -1 || C == 0 || C > 32) |
16928 | return SDValue(); |
16929 | |
16930 | SDLoc dl(N); |
16931 | bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; |
16932 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : |
16933 | Intrinsic::arm_neon_vcvtfp2fxu; |
16934 | SDValue FixConv = DAG.getNode( |
16935 | Opcode: ISD::INTRINSIC_WO_CHAIN, DL: dl, VT: NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, |
16936 | N1: DAG.getConstant(Val: IntrinsicOpcode, DL: dl, VT: MVT::i32), N2: Op->getOperand(Num: 0), |
16937 | N3: DAG.getConstant(Val: C, DL: dl, VT: MVT::i32)); |
16938 | |
16939 | if (IntBits < FloatBits) |
16940 | FixConv = DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: N->getValueType(ResNo: 0), Operand: FixConv); |
16941 | |
16942 | return FixConv; |
16943 | } |
16944 | |
16945 | static SDValue PerformFAddVSelectCombine(SDNode *N, SelectionDAG &DAG, |
16946 | const ARMSubtarget *Subtarget) { |
16947 | if (!Subtarget->hasMVEFloatOps()) |
16948 | return SDValue(); |
16949 | |
16950 | // Turn (fadd x, (vselect c, y, -0.0)) into (vselect c, (fadd x, y), x) |
16951 | // The second form can be more easily turned into a predicated vadd, and |
16952 | // possibly combined into a fma to become a predicated vfma. |
16953 | SDValue Op0 = N->getOperand(Num: 0); |
16954 | SDValue Op1 = N->getOperand(Num: 1); |
16955 | EVT VT = N->getValueType(ResNo: 0); |
16956 | SDLoc DL(N); |
16957 | |
16958 | // The identity element for a fadd is -0.0 or +0.0 when the nsz flag is set, |
16959 | // which these VMOV's represent. |
16960 | auto isIdentitySplat = [&](SDValue Op, bool NSZ) { |
16961 | if (Op.getOpcode() != ISD::BITCAST || |
16962 | Op.getOperand(i: 0).getOpcode() != ARMISD::VMOVIMM) |
16963 | return false; |
16964 | uint64_t ImmVal = Op.getOperand(i: 0).getConstantOperandVal(i: 0); |
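    // 1664 (0x680) and 2688 (0xa80) are the encoded VMOV immediates for a -0.0
    // splat in v4f32 and v8f16 respectively; an immediate of 0 gives +0.0,
    // which is only an identity when no-signed-zeros is set.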
16965 | if (VT == MVT::v4f32 && (ImmVal == 1664 || (ImmVal == 0 && NSZ))) |
16966 | return true; |
16967 | if (VT == MVT::v8f16 && (ImmVal == 2688 || (ImmVal == 0 && NSZ))) |
16968 | return true; |
16969 | return false; |
16970 | }; |
16971 | |
16972 | if (Op0.getOpcode() == ISD::VSELECT && Op1.getOpcode() != ISD::VSELECT) |
16973 | std::swap(a&: Op0, b&: Op1); |
16974 | |
16975 | if (Op1.getOpcode() != ISD::VSELECT) |
16976 | return SDValue(); |
16977 | |
16978 | SDNodeFlags FaddFlags = N->getFlags(); |
16979 | bool NSZ = FaddFlags.hasNoSignedZeros(); |
16980 | if (!isIdentitySplat(Op1.getOperand(i: 2), NSZ)) |
16981 | return SDValue(); |
16982 | |
16983 | SDValue FAdd = |
16984 | DAG.getNode(Opcode: ISD::FADD, DL, VT, N1: Op0, N2: Op1.getOperand(i: 1), Flags: FaddFlags); |
16985 | return DAG.getNode(Opcode: ISD::VSELECT, DL, VT, N1: Op1.getOperand(i: 0), N2: FAdd, N3: Op0, Flags: FaddFlags); |
16986 | } |
16987 | |
16988 | static SDValue PerformFADDVCMLACombine(SDNode *N, SelectionDAG &DAG) { |
16989 | SDValue LHS = N->getOperand(Num: 0); |
16990 | SDValue RHS = N->getOperand(Num: 1); |
16991 | EVT VT = N->getValueType(ResNo: 0); |
16992 | SDLoc DL(N); |
16993 | |
16994 | if (!N->getFlags().hasAllowReassociation()) |
16995 | return SDValue(); |
16996 | |
  // Combine fadd(a, vcmla(b, c, d)) -> vcmla(fadd(a, b), c, d)
16998 | auto ReassocComplex = [&](SDValue A, SDValue B) { |
16999 | if (A.getOpcode() != ISD::INTRINSIC_WO_CHAIN) |
17000 | return SDValue(); |
17001 | unsigned Opc = A.getConstantOperandVal(i: 0); |
17002 | if (Opc != Intrinsic::arm_mve_vcmlaq) |
17003 | return SDValue(); |
17004 | SDValue VCMLA = DAG.getNode( |
17005 | Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT, N1: A.getOperand(i: 0), N2: A.getOperand(i: 1), |
17006 | N3: DAG.getNode(Opcode: ISD::FADD, DL, VT, N1: A.getOperand(i: 2), N2: B, Flags: N->getFlags()), |
17007 | N4: A.getOperand(i: 3), N5: A.getOperand(i: 4)); |
17008 | VCMLA->setFlags(A->getFlags()); |
17009 | return VCMLA; |
17010 | }; |
17011 | if (SDValue R = ReassocComplex(LHS, RHS)) |
17012 | return R; |
17013 | if (SDValue R = ReassocComplex(RHS, LHS)) |
17014 | return R; |
17015 | |
17016 | return SDValue(); |
17017 | } |
17018 | |
17019 | static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, |
17020 | const ARMSubtarget *Subtarget) { |
17021 | if (SDValue S = PerformFAddVSelectCombine(N, DAG, Subtarget)) |
17022 | return S; |
17023 | if (SDValue S = PerformFADDVCMLACombine(N, DAG)) |
17024 | return S; |
17025 | return SDValue(); |
17026 | } |
17027 | |
17028 | /// PerformVMulVCTPCombine - VCVT (fixed-point to floating-point, Advanced SIMD) |
17029 | /// can replace combinations of VCVT (integer to floating-point) and VMUL |
17030 | /// when the VMUL has a constant operand that is a power of 2. |
17031 | /// |
17032 | /// Example (assume d17 = <float 0.125, float 0.125>): |
17033 | /// vcvt.f32.s32 d16, d16 |
17034 | /// vmul.f32 d16, d16, d17 |
17035 | /// becomes: |
17036 | /// vcvt.f32.s32 d16, d16, #3 |
17037 | static SDValue PerformVMulVCTPCombine(SDNode *N, SelectionDAG &DAG, |
17038 | const ARMSubtarget *Subtarget) { |
17039 | if (!Subtarget->hasNEON()) |
17040 | return SDValue(); |
17041 | |
17042 | SDValue Op = N->getOperand(Num: 0); |
17043 | unsigned OpOpcode = Op.getNode()->getOpcode(); |
17044 | if (!N->getValueType(ResNo: 0).isVector() || !N->getValueType(ResNo: 0).isSimple() || |
17045 | (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) |
17046 | return SDValue(); |
17047 | |
17048 | SDValue ConstVec = N->getOperand(Num: 1); |
17049 | if (!isa<BuildVectorSDNode>(Val: ConstVec)) |
17050 | return SDValue(); |
17051 | |
17052 | MVT FloatTy = N->getSimpleValueType(ResNo: 0).getVectorElementType(); |
17053 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
17054 | MVT IntTy = Op.getOperand(i: 0).getSimpleValueType().getVectorElementType(); |
17055 | uint32_t IntBits = IntTy.getSizeInBits(); |
17056 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
17057 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
17058 | // These instructions only exist converting from i32 to f32. We can handle |
17059 | // smaller integers by generating an extra extend, but larger ones would |
17060 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
    // these instructions only support v2i32/v4i32 types.
17062 | return SDValue(); |
17063 | } |
17064 | |
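  // The constant multiplicand must be the exact reciprocal of a power of two;
  // its log2 becomes the #fbits immediate of the fixed-point conversion and
  // must lie in [1, 32].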
17065 | ConstantFPSDNode *CN = isConstOrConstSplatFP(N: ConstVec, AllowUndefs: true); |
17066 | APFloat Recip(0.0f); |
17067 | if (!CN || !CN->getValueAPF().getExactInverse(inv: &Recip)) |
17068 | return SDValue(); |
17069 | |
17070 | bool IsExact; |
17071 | APSInt IntVal(33); |
17072 | if (Recip.convertToInteger(Result&: IntVal, RM: APFloat::rmTowardZero, IsExact: &IsExact) != |
17073 | APFloat::opOK || |
17074 | !IsExact) |
17075 | return SDValue(); |
17076 | |
17077 | int32_t C = IntVal.exactLogBase2(); |
17078 | if (C == -1 || C == 0 || C > 32) |
17079 | return SDValue(); |
17080 | |
17081 | SDLoc DL(N); |
17082 | bool isSigned = OpOpcode == ISD::SINT_TO_FP; |
17083 | SDValue ConvInput = Op.getOperand(i: 0); |
17084 | if (IntBits < FloatBits) |
17085 | ConvInput = DAG.getNode(Opcode: isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, |
17086 | VT: NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, Operand: ConvInput); |
17087 | |
17088 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp |
17089 | : Intrinsic::arm_neon_vcvtfxu2fp; |
17090 | return DAG.getNode(Opcode: ISD::INTRINSIC_WO_CHAIN, DL, VT: Op.getValueType(), |
17091 | N1: DAG.getConstant(Val: IntrinsicOpcode, DL, VT: MVT::i32), N2: ConvInput, |
17092 | N3: DAG.getConstant(Val: C, DL, VT: MVT::i32)); |
17093 | } |
17094 | |
17095 | static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG, |
17096 | const ARMSubtarget *ST) { |
17097 | if (!ST->hasMVEIntegerOps()) |
17098 | return SDValue(); |
17099 | |
17100 | assert(N->getOpcode() == ISD::VECREDUCE_ADD); |
17101 | EVT ResVT = N->getValueType(ResNo: 0); |
17102 | SDValue N0 = N->getOperand(Num: 0); |
17103 | SDLoc dl(N); |
17104 | |
17105 | // Try to turn vecreduce_add(add(x, y)) into vecreduce(x) + vecreduce(y) |
17106 | if (ResVT == MVT::i32 && N0.getOpcode() == ISD::ADD && |
17107 | (N0.getValueType() == MVT::v4i32 || N0.getValueType() == MVT::v8i16 || |
17108 | N0.getValueType() == MVT::v16i8)) { |
17109 | SDValue Red0 = DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: N0.getOperand(i: 0)); |
17110 | SDValue Red1 = DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: N0.getOperand(i: 1)); |
17111 | return DAG.getNode(Opcode: ISD::ADD, DL: dl, VT: ResVT, N1: Red0, N2: Red1); |
17112 | } |
17113 | |
17114 | // We are looking for something that will have illegal types if left alone, |
17115 | // but that we can convert to a single instruction under MVE. For example |
17116 | // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A |
17117 | // or |
17118 | // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B |
17119 | |
17120 | // The legal cases are: |
17121 | // VADDV u/s 8/16/32 |
17122 | // VMLAV u/s 8/16/32 |
17123 | // VADDLV u/s 32 |
17124 | // VMLALV u/s 16/32 |
17125 | |
17126 | // If the input vector is smaller than legal (v4i8/v4i16 for example) we can |
17127 | // extend it and use v4i32 instead. |
17128 | auto ExtTypeMatches = [](SDValue A, ArrayRef<MVT> ExtTypes) { |
17129 | EVT AVT = A.getValueType(); |
17130 | return any_of(Range&: ExtTypes, P: [&](MVT Ty) { |
17131 | return AVT.getVectorNumElements() == Ty.getVectorNumElements() && |
17132 | AVT.bitsLE(VT: Ty); |
17133 | }); |
17134 | }; |
17135 | auto ExtendIfNeeded = [&](SDValue A, unsigned ExtendCode) { |
17136 | EVT AVT = A.getValueType(); |
17137 | if (!AVT.is128BitVector()) |
17138 | A = DAG.getNode(Opcode: ExtendCode, DL: dl, |
17139 | VT: AVT.changeVectorElementType(EltVT: MVT::getIntegerVT( |
17140 | BitWidth: 128 / AVT.getVectorMinNumElements())), |
17141 | Operand: A); |
17142 | return A; |
17143 | }; |
17144 | auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) { |
17145 | if (ResVT != RetTy || N0->getOpcode() != ExtendCode) |
17146 | return SDValue(); |
17147 | SDValue A = N0->getOperand(Num: 0); |
17148 | if (ExtTypeMatches(A, ExtTypes)) |
17149 | return ExtendIfNeeded(A, ExtendCode); |
17150 | return SDValue(); |
17151 | }; |
17152 | auto IsPredVADDV = [&](MVT RetTy, unsigned ExtendCode, |
17153 | ArrayRef<MVT> ExtTypes, SDValue &Mask) { |
17154 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || |
17155 | !ISD::isBuildVectorAllZeros(N: N0->getOperand(Num: 2).getNode())) |
17156 | return SDValue(); |
17157 | Mask = N0->getOperand(Num: 0); |
17158 | SDValue Ext = N0->getOperand(Num: 1); |
17159 | if (Ext->getOpcode() != ExtendCode) |
17160 | return SDValue(); |
17161 | SDValue A = Ext->getOperand(Num: 0); |
17162 | if (ExtTypeMatches(A, ExtTypes)) |
17163 | return ExtendIfNeeded(A, ExtendCode); |
17164 | return SDValue(); |
17165 | }; |
17166 | auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, |
17167 | SDValue &A, SDValue &B) { |
17168 | // For a vmla we are trying to match a larger pattern: |
17169 | // ExtA = sext/zext A |
17170 | // ExtB = sext/zext B |
17171 | // Mul = mul ExtA, ExtB |
17172 | // vecreduce.add Mul |
    // There might also be an extra extend between the mul and the addreduce, so
17174 | // long as the bitwidth is high enough to make them equivalent (for example |
17175 | // original v8i16 might be mul at v8i32 and the reduce happens at v8i64). |
17176 | if (ResVT != RetTy) |
17177 | return false; |
17178 | SDValue Mul = N0; |
17179 | if (Mul->getOpcode() == ExtendCode && |
17180 | Mul->getOperand(Num: 0).getScalarValueSizeInBits() * 2 >= |
17181 | ResVT.getScalarSizeInBits()) |
17182 | Mul = Mul->getOperand(Num: 0); |
17183 | if (Mul->getOpcode() != ISD::MUL) |
17184 | return false; |
17185 | SDValue ExtA = Mul->getOperand(Num: 0); |
17186 | SDValue ExtB = Mul->getOperand(Num: 1); |
17187 | if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode) |
17188 | return false; |
17189 | A = ExtA->getOperand(Num: 0); |
17190 | B = ExtB->getOperand(Num: 0); |
17191 | if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) { |
17192 | A = ExtendIfNeeded(A, ExtendCode); |
17193 | B = ExtendIfNeeded(B, ExtendCode); |
17194 | return true; |
17195 | } |
17196 | return false; |
17197 | }; |
17198 | auto IsPredVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, |
17199 | SDValue &A, SDValue &B, SDValue &Mask) { |
17200 | // Same as the pattern above with a select for the zero predicated lanes |
17201 | // ExtA = sext/zext A |
17202 | // ExtB = sext/zext B |
17203 | // Mul = mul ExtA, ExtB |
17204 | // N0 = select Mask, Mul, 0 |
17205 | // vecreduce.add N0 |
17206 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || |
17207 | !ISD::isBuildVectorAllZeros(N: N0->getOperand(Num: 2).getNode())) |
17208 | return false; |
17209 | Mask = N0->getOperand(Num: 0); |
17210 | SDValue Mul = N0->getOperand(Num: 1); |
17211 | if (Mul->getOpcode() == ExtendCode && |
17212 | Mul->getOperand(Num: 0).getScalarValueSizeInBits() * 2 >= |
17213 | ResVT.getScalarSizeInBits()) |
17214 | Mul = Mul->getOperand(Num: 0); |
17215 | if (Mul->getOpcode() != ISD::MUL) |
17216 | return false; |
17217 | SDValue ExtA = Mul->getOperand(Num: 0); |
17218 | SDValue ExtB = Mul->getOperand(Num: 1); |
17219 | if (ExtA->getOpcode() != ExtendCode || ExtB->getOpcode() != ExtendCode) |
17220 | return false; |
17221 | A = ExtA->getOperand(Num: 0); |
17222 | B = ExtB->getOperand(Num: 0); |
17223 | if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) { |
17224 | A = ExtendIfNeeded(A, ExtendCode); |
17225 | B = ExtendIfNeeded(B, ExtendCode); |
17226 | return true; |
17227 | } |
17228 | return false; |
17229 | }; |
17230 | auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) { |
17231 | // Split illegal MVT::v16i8->i64 vector reductions into two legal v8i16->i64 |
17232 | // reductions. The operands are extended with MVEEXT, but as they are |
17233 | // reductions the lane orders do not matter. MVEEXT may be combined with |
17234 | // loads to produce two extending loads, or else they will be expanded to |
17235 | // VREV/VMOVL. |
17236 | EVT VT = Ops[0].getValueType(); |
17237 | if (VT == MVT::v16i8) { |
      assert((Opcode == ARMISD::VMLALVs || Opcode == ARMISD::VMLALVu) &&
             "Unexpected illegal long reduction opcode");
17240 | bool IsUnsigned = Opcode == ARMISD::VMLALVu; |
17241 | |
17242 | SDValue Ext0 = |
17243 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, DL: dl, |
17244 | VTList: DAG.getVTList(VT1: MVT::v8i16, VT2: MVT::v8i16), N: Ops[0]); |
17245 | SDValue Ext1 = |
17246 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::MVEZEXT : ARMISD::MVESEXT, DL: dl, |
17247 | VTList: DAG.getVTList(VT1: MVT::v8i16, VT2: MVT::v8i16), N: Ops[1]); |
17248 | |
17249 | SDValue MLA0 = DAG.getNode(Opcode, DL: dl, VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), |
17250 | N1: Ext0, N2: Ext1); |
17251 | SDValue MLA1 = |
17252 | DAG.getNode(Opcode: IsUnsigned ? ARMISD::VMLALVAu : ARMISD::VMLALVAs, DL: dl, |
17253 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::i32), N1: MLA0, N2: MLA0.getValue(R: 1), |
17254 | N3: Ext0.getValue(R: 1), N4: Ext1.getValue(R: 1)); |
17255 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: MLA1, N2: MLA1.getValue(R: 1)); |
17256 | } |
17257 | SDValue Node = DAG.getNode(Opcode, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops); |
17258 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: Node, |
17259 | N2: SDValue(Node.getNode(), 1)); |
17260 | }; |
17261 | |
17262 | SDValue A, B; |
17263 | SDValue Mask; |
17264 | if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) |
17265 | return DAG.getNode(Opcode: ARMISD::VMLAVs, DL: dl, VT: ResVT, N1: A, N2: B); |
17266 | if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) |
17267 | return DAG.getNode(Opcode: ARMISD::VMLAVu, DL: dl, VT: ResVT, N1: A, N2: B); |
17268 | if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, |
17269 | A, B)) |
17270 | return Create64bitNode(ARMISD::VMLALVs, {A, B}); |
17271 | if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, |
17272 | A, B)) |
17273 | return Create64bitNode(ARMISD::VMLALVu, {A, B}); |
17274 | if (IsVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B)) |
17275 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
17276 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVs, DL: dl, VT: MVT::i32, N1: A, N2: B)); |
17277 | if (IsVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B)) |
17278 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
17279 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVu, DL: dl, VT: MVT::i32, N1: A, N2: B)); |
17280 | |
17281 | if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, |
17282 | Mask)) |
17283 | return DAG.getNode(Opcode: ARMISD::VMLAVps, DL: dl, VT: ResVT, N1: A, N2: B, N3: Mask); |
17284 | if (IsPredVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, |
17285 | Mask)) |
17286 | return DAG.getNode(Opcode: ARMISD::VMLAVpu, DL: dl, VT: ResVT, N1: A, N2: B, N3: Mask); |
17287 | if (IsPredVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, |
17288 | Mask)) |
17289 | return Create64bitNode(ARMISD::VMLALVps, {A, B, Mask}); |
17290 | if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, |
17291 | Mask)) |
17292 | return Create64bitNode(ARMISD::VMLALVpu, {A, B, Mask}); |
17293 | if (IsPredVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B, Mask)) |
17294 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
17295 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVps, DL: dl, VT: MVT::i32, N1: A, N2: B, N3: Mask)); |
17296 | if (IsPredVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B, Mask)) |
17297 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
17298 | Operand: DAG.getNode(Opcode: ARMISD::VMLAVpu, DL: dl, VT: MVT::i32, N1: A, N2: B, N3: Mask)); |
17299 | |
17300 | if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8})) |
17301 | return DAG.getNode(Opcode: ARMISD::VADDVs, DL: dl, VT: ResVT, Operand: A); |
17302 | if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8})) |
17303 | return DAG.getNode(Opcode: ARMISD::VADDVu, DL: dl, VT: ResVT, Operand: A); |
17304 | if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32})) |
17305 | return Create64bitNode(ARMISD::VADDLVs, {A}); |
17306 | if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32})) |
17307 | return Create64bitNode(ARMISD::VADDLVu, {A}); |
17308 | if (SDValue A = IsVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8})) |
17309 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
17310 | Operand: DAG.getNode(Opcode: ARMISD::VADDVs, DL: dl, VT: MVT::i32, Operand: A)); |
17311 | if (SDValue A = IsVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8})) |
17312 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
17313 | Operand: DAG.getNode(Opcode: ARMISD::VADDVu, DL: dl, VT: MVT::i32, Operand: A)); |
17314 | |
17315 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) |
17316 | return DAG.getNode(Opcode: ARMISD::VADDVps, DL: dl, VT: ResVT, N1: A, N2: Mask); |
17317 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) |
17318 | return DAG.getNode(Opcode: ARMISD::VADDVpu, DL: dl, VT: ResVT, N1: A, N2: Mask); |
17319 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}, Mask)) |
17320 | return Create64bitNode(ARMISD::VADDLVps, {A, Mask}); |
17321 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}, Mask)) |
17322 | return Create64bitNode(ARMISD::VADDLVpu, {A, Mask}); |
17323 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, Mask)) |
17324 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
17325 | Operand: DAG.getNode(Opcode: ARMISD::VADDVps, DL: dl, VT: MVT::i32, N1: A, N2: Mask)); |
17326 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, Mask)) |
17327 | return DAG.getNode(Opcode: ISD::TRUNCATE, DL: dl, VT: ResVT, |
17328 | Operand: DAG.getNode(Opcode: ARMISD::VADDVpu, DL: dl, VT: MVT::i32, N1: A, N2: Mask)); |
17329 | |
17330 | // Some complications. We can get a case where the two inputs of the mul are |
17331 | // the same, then the output sext will have been helpfully converted to a |
17332 | // zext. Turn it back. |
17333 | SDValue Op = N0; |
17334 | if (Op->getOpcode() == ISD::VSELECT) |
17335 | Op = Op->getOperand(Num: 1); |
17336 | if (Op->getOpcode() == ISD::ZERO_EXTEND && |
17337 | Op->getOperand(Num: 0)->getOpcode() == ISD::MUL) { |
17338 | SDValue Mul = Op->getOperand(Num: 0); |
17339 | if (Mul->getOperand(Num: 0) == Mul->getOperand(Num: 1) && |
17340 | Mul->getOperand(Num: 0)->getOpcode() == ISD::SIGN_EXTEND) { |
17341 | SDValue Ext = DAG.getNode(Opcode: ISD::SIGN_EXTEND, DL: dl, VT: N0->getValueType(ResNo: 0), Operand: Mul); |
17342 | if (Op != N0) |
17343 | Ext = DAG.getNode(Opcode: ISD::VSELECT, DL: dl, VT: N0->getValueType(ResNo: 0), |
17344 | N1: N0->getOperand(Num: 0), N2: Ext, N3: N0->getOperand(Num: 2)); |
17345 | return DAG.getNode(Opcode: ISD::VECREDUCE_ADD, DL: dl, VT: ResVT, Operand: Ext); |
17346 | } |
17347 | } |
17348 | |
17349 | return SDValue(); |
17350 | } |
17351 | |
17352 | // Looks for vaddv(shuffle) or vmlav(shuffle, shuffle), with a shuffle where all |
17353 | // the lanes are used. Due to the reduction being commutative the shuffle can be |
17354 | // removed. |
17355 | static SDValue PerformReduceShuffleCombine(SDNode *N, SelectionDAG &DAG) { |
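  // The vector input is operand 0 for the plain reductions, or operand 2 for
  // the accumulating forms whose leading operands are the scalar accumulator.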
17356 | unsigned VecOp = N->getOperand(Num: 0).getValueType().isVector() ? 0 : 2; |
17357 | auto *Shuf = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: VecOp)); |
17358 | if (!Shuf || !Shuf->getOperand(Num: 1).isUndef()) |
17359 | return SDValue(); |
17360 | |
17361 | // Check all elements are used once in the mask. |
17362 | ArrayRef<int> Mask = Shuf->getMask(); |
17363 | APInt SetElts(Mask.size(), 0); |
17364 | for (int E : Mask) { |
17365 | if (E < 0 || E >= (int)Mask.size()) |
17366 | return SDValue(); |
17367 | SetElts.setBit(E); |
17368 | } |
17369 | if (!SetElts.isAllOnes()) |
17370 | return SDValue(); |
17371 | |
17372 | if (N->getNumOperands() != VecOp + 1) { |
17373 | auto *Shuf2 = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: VecOp + 1)); |
17374 | if (!Shuf2 || !Shuf2->getOperand(Num: 1).isUndef() || Shuf2->getMask() != Mask) |
17375 | return SDValue(); |
17376 | } |
17377 | |
17378 | SmallVector<SDValue> Ops; |
17379 | for (SDValue Op : N->ops()) { |
17380 | if (Op.getValueType().isVector()) |
17381 | Ops.push_back(Elt: Op.getOperand(i: 0)); |
17382 | else |
17383 | Ops.push_back(Elt: Op); |
17384 | } |
17385 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VTList: N->getVTList(), Ops); |
17386 | } |
17387 | |
17388 | static SDValue PerformVMOVNCombine(SDNode *N, |
17389 | TargetLowering::DAGCombinerInfo &DCI) { |
17390 | SDValue Op0 = N->getOperand(Num: 0); |
17391 | SDValue Op1 = N->getOperand(Num: 1); |
17392 | unsigned IsTop = N->getConstantOperandVal(Num: 2); |
17393 | |
17394 | // VMOVNT a undef -> a |
17395 | // VMOVNB a undef -> a |
17396 | // VMOVNB undef a -> a |
17397 | if (Op1->isUndef()) |
17398 | return Op0; |
17399 | if (Op0->isUndef() && !IsTop) |
17400 | return Op1; |
17401 | |
17402 | // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b) |
17403 | // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b) |
17404 | if ((Op1->getOpcode() == ARMISD::VQMOVNs || |
17405 | Op1->getOpcode() == ARMISD::VQMOVNu) && |
17406 | Op1->getConstantOperandVal(Num: 2) == 0) |
17407 | return DCI.DAG.getNode(Opcode: Op1->getOpcode(), DL: SDLoc(Op1), VT: N->getValueType(ResNo: 0), |
17408 | N1: Op0, N2: Op1->getOperand(Num: 1), N3: N->getOperand(Num: 2)); |
17409 | |
17410 | // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from |
17411 | // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting |
17412 | // into the top or bottom lanes. |
17413 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
17414 | APInt Op1DemandedElts = APInt::getSplat(NewLen: NumElts, V: APInt::getLowBitsSet(numBits: 2, loBitsSet: 1)); |
17415 | APInt Op0DemandedElts = |
17416 | IsTop ? Op1DemandedElts |
17417 | : APInt::getSplat(NewLen: NumElts, V: APInt::getHighBitsSet(numBits: 2, hiBitsSet: 1)); |
17418 | |
17419 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
17420 | if (TLI.SimplifyDemandedVectorElts(Op: Op0, DemandedElts: Op0DemandedElts, DCI)) |
17421 | return SDValue(N, 0); |
17422 | if (TLI.SimplifyDemandedVectorElts(Op: Op1, DemandedElts: Op1DemandedElts, DCI)) |
17423 | return SDValue(N, 0); |
17424 | |
17425 | return SDValue(); |
17426 | } |
17427 | |
17428 | static SDValue PerformVQMOVNCombine(SDNode *N, |
17429 | TargetLowering::DAGCombinerInfo &DCI) { |
17430 | SDValue Op0 = N->getOperand(Num: 0); |
17431 | unsigned IsTop = N->getConstantOperandVal(Num: 2); |
17432 | |
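  // Only the lanes of Qd (Op0) that the narrowed result is not written into
  // are demanded: the bottom lanes when inserting into the top, and the top
  // lanes when inserting into the bottom.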
17433 | unsigned NumElts = N->getValueType(ResNo: 0).getVectorNumElements(); |
17434 | APInt Op0DemandedElts = |
17435 | APInt::getSplat(NewLen: NumElts, V: IsTop ? APInt::getLowBitsSet(numBits: 2, loBitsSet: 1) |
17436 | : APInt::getHighBitsSet(numBits: 2, hiBitsSet: 1)); |
17437 | |
17438 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
17439 | if (TLI.SimplifyDemandedVectorElts(Op: Op0, DemandedElts: Op0DemandedElts, DCI)) |
17440 | return SDValue(N, 0); |
17441 | return SDValue(); |
17442 | } |
17443 | |
17444 | static SDValue PerformVQDMULHCombine(SDNode *N, |
17445 | TargetLowering::DAGCombinerInfo &DCI) { |
17446 | EVT VT = N->getValueType(ResNo: 0); |
17447 | SDValue LHS = N->getOperand(Num: 0); |
17448 | SDValue RHS = N->getOperand(Num: 1); |
17449 | |
17450 | auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(Val&: LHS); |
17451 | auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(Val&: RHS); |
17452 | // Turn VQDMULH(shuffle, shuffle) -> shuffle(VQDMULH) |
17453 | if (Shuf0 && Shuf1 && Shuf0->getMask().equals(RHS: Shuf1->getMask()) && |
17454 | LHS.getOperand(i: 1).isUndef() && RHS.getOperand(i: 1).isUndef() && |
17455 | (LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) { |
17456 | SDLoc DL(N); |
17457 | SDValue NewBinOp = DCI.DAG.getNode(Opcode: N->getOpcode(), DL, VT, |
17458 | N1: LHS.getOperand(i: 0), N2: RHS.getOperand(i: 0)); |
17459 | SDValue UndefV = LHS.getOperand(i: 1); |
17460 | return DCI.DAG.getVectorShuffle(VT, dl: DL, N1: NewBinOp, N2: UndefV, Mask: Shuf0->getMask()); |
17461 | } |
17462 | return SDValue(); |
17463 | } |
17464 | |
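// Combine ARMISD::LSLL/LSRL with a constant shift amount: a zero shift simply
// forwards the two input halves, and a negative amount becomes a shift in the
// opposite direction.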
17465 | static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) { |
17466 | SDLoc DL(N); |
17467 | SDValue Op0 = N->getOperand(Num: 0); |
17468 | SDValue Op1 = N->getOperand(Num: 1); |
17469 | |
  // Turn X << -C -> X >> C and vice versa. The negative shifts can come up from
17471 | // uses of the intrinsics. |
17472 | if (auto C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 2))) { |
17473 | int ShiftAmt = C->getSExtValue(); |
17474 | if (ShiftAmt == 0) { |
17475 | SDValue Merge = DAG.getMergeValues(Ops: {Op0, Op1}, dl: DL); |
17476 | DAG.ReplaceAllUsesWith(From: N, To: Merge.getNode()); |
17477 | return SDValue(); |
17478 | } |
17479 | |
17480 | if (ShiftAmt >= -32 && ShiftAmt < 0) { |
17481 | unsigned NewOpcode = |
17482 | N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL; |
17483 | SDValue NewShift = DAG.getNode(Opcode: NewOpcode, DL, VTList: N->getVTList(), N1: Op0, N2: Op1, |
17484 | N3: DAG.getConstant(Val: -ShiftAmt, DL, VT: MVT::i32)); |
17485 | DAG.ReplaceAllUsesWith(From: N, To: NewShift.getNode()); |
17486 | return NewShift; |
17487 | } |
17488 | } |
17489 | |
17490 | return SDValue(); |
17491 | } |
17492 | |
17493 | /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. |
17494 | SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N, |
17495 | DAGCombinerInfo &DCI) const { |
17496 | SelectionDAG &DAG = DCI.DAG; |
17497 | unsigned IntNo = N->getConstantOperandVal(Num: 0); |
17498 | switch (IntNo) { |
17499 | default: |
17500 | // Don't do anything for most intrinsics. |
17501 | break; |
17502 | |
17503 | // Vector shifts: check for immediate versions and lower them. |
17504 | // Note: This is done during DAG combining instead of DAG legalizing because |
17505 | // the build_vectors for 64-bit vector element shift counts are generally |
17506 | // not legal, and it is hard to see their values after they get legalized to |
17507 | // loads from a constant pool. |
17508 | case Intrinsic::arm_neon_vshifts: |
17509 | case Intrinsic::arm_neon_vshiftu: |
17510 | case Intrinsic::arm_neon_vrshifts: |
17511 | case Intrinsic::arm_neon_vrshiftu: |
17512 | case Intrinsic::arm_neon_vrshiftn: |
17513 | case Intrinsic::arm_neon_vqshifts: |
17514 | case Intrinsic::arm_neon_vqshiftu: |
17515 | case Intrinsic::arm_neon_vqshiftsu: |
17516 | case Intrinsic::arm_neon_vqshiftns: |
17517 | case Intrinsic::arm_neon_vqshiftnu: |
17518 | case Intrinsic::arm_neon_vqshiftnsu: |
17519 | case Intrinsic::arm_neon_vqrshiftns: |
17520 | case Intrinsic::arm_neon_vqrshiftnu: |
17521 | case Intrinsic::arm_neon_vqrshiftnsu: { |
17522 | EVT VT = N->getOperand(Num: 1).getValueType(); |
17523 | int64_t Cnt; |
17524 | unsigned VShiftOpc = 0; |
17525 | |
17526 | switch (IntNo) { |
17527 | case Intrinsic::arm_neon_vshifts: |
17528 | case Intrinsic::arm_neon_vshiftu: |
17529 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) { |
17530 | VShiftOpc = ARMISD::VSHLIMM; |
17531 | break; |
17532 | } |
17533 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: false, isIntrinsic: true, Cnt)) { |
17534 | VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM |
17535 | : ARMISD::VSHRuIMM); |
17536 | break; |
17537 | } |
17538 | return SDValue(); |
17539 | |
17540 | case Intrinsic::arm_neon_vrshifts: |
17541 | case Intrinsic::arm_neon_vrshiftu: |
17542 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: false, isIntrinsic: true, Cnt)) |
17543 | break; |
17544 | return SDValue(); |
17545 | |
17546 | case Intrinsic::arm_neon_vqshifts: |
17547 | case Intrinsic::arm_neon_vqshiftu: |
17548 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) |
17549 | break; |
17550 | return SDValue(); |
17551 | |
17552 | case Intrinsic::arm_neon_vqshiftsu: |
17553 | if (isVShiftLImm(Op: N->getOperand(Num: 2), VT, isLong: false, Cnt)) |
17554 | break; |
      llvm_unreachable("invalid shift count for vqshlu intrinsic");
17556 | |
17557 | case Intrinsic::arm_neon_vrshiftn: |
17558 | case Intrinsic::arm_neon_vqshiftns: |
17559 | case Intrinsic::arm_neon_vqshiftnu: |
17560 | case Intrinsic::arm_neon_vqshiftnsu: |
17561 | case Intrinsic::arm_neon_vqrshiftns: |
17562 | case Intrinsic::arm_neon_vqrshiftnu: |
17563 | case Intrinsic::arm_neon_vqrshiftnsu: |
17564 | // Narrowing shifts require an immediate right shift. |
17565 | if (isVShiftRImm(Op: N->getOperand(Num: 2), VT, isNarrow: true, isIntrinsic: true, Cnt)) |
17566 | break; |
      llvm_unreachable("invalid shift count for narrowing vector shift "
                       "intrinsic");
17569 | |
17570 | default: |
      llvm_unreachable("unhandled vector shift");
17572 | } |
17573 | |
17574 | switch (IntNo) { |
17575 | case Intrinsic::arm_neon_vshifts: |
17576 | case Intrinsic::arm_neon_vshiftu: |
17577 | // Opcode already set above. |
17578 | break; |
17579 | case Intrinsic::arm_neon_vrshifts: |
17580 | VShiftOpc = ARMISD::VRSHRsIMM; |
17581 | break; |
17582 | case Intrinsic::arm_neon_vrshiftu: |
17583 | VShiftOpc = ARMISD::VRSHRuIMM; |
17584 | break; |
17585 | case Intrinsic::arm_neon_vrshiftn: |
17586 | VShiftOpc = ARMISD::VRSHRNIMM; |
17587 | break; |
17588 | case Intrinsic::arm_neon_vqshifts: |
17589 | VShiftOpc = ARMISD::VQSHLsIMM; |
17590 | break; |
17591 | case Intrinsic::arm_neon_vqshiftu: |
17592 | VShiftOpc = ARMISD::VQSHLuIMM; |
17593 | break; |
17594 | case Intrinsic::arm_neon_vqshiftsu: |
17595 | VShiftOpc = ARMISD::VQSHLsuIMM; |
17596 | break; |
17597 | case Intrinsic::arm_neon_vqshiftns: |
17598 | VShiftOpc = ARMISD::VQSHRNsIMM; |
17599 | break; |
17600 | case Intrinsic::arm_neon_vqshiftnu: |
17601 | VShiftOpc = ARMISD::VQSHRNuIMM; |
17602 | break; |
17603 | case Intrinsic::arm_neon_vqshiftnsu: |
17604 | VShiftOpc = ARMISD::VQSHRNsuIMM; |
17605 | break; |
17606 | case Intrinsic::arm_neon_vqrshiftns: |
17607 | VShiftOpc = ARMISD::VQRSHRNsIMM; |
17608 | break; |
17609 | case Intrinsic::arm_neon_vqrshiftnu: |
17610 | VShiftOpc = ARMISD::VQRSHRNuIMM; |
17611 | break; |
17612 | case Intrinsic::arm_neon_vqrshiftnsu: |
17613 | VShiftOpc = ARMISD::VQRSHRNsuIMM; |
17614 | break; |
17615 | } |
17616 | |
17617 | SDLoc dl(N); |
17618 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT: N->getValueType(ResNo: 0), |
17619 | N1: N->getOperand(Num: 1), N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
17620 | } |
17621 | |
17622 | case Intrinsic::arm_neon_vshiftins: { |
17623 | EVT VT = N->getOperand(Num: 1).getValueType(); |
17624 | int64_t Cnt; |
17625 | unsigned VShiftOpc = 0; |
17626 | |
17627 | if (isVShiftLImm(Op: N->getOperand(Num: 3), VT, isLong: false, Cnt)) |
17628 | VShiftOpc = ARMISD::VSLIIMM; |
17629 | else if (isVShiftRImm(Op: N->getOperand(Num: 3), VT, isNarrow: false, isIntrinsic: true, Cnt)) |
17630 | VShiftOpc = ARMISD::VSRIIMM; |
17631 | else { |
      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
17633 | } |
17634 | |
17635 | SDLoc dl(N); |
17636 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT: N->getValueType(ResNo: 0), |
17637 | N1: N->getOperand(Num: 1), N2: N->getOperand(Num: 2), |
17638 | N3: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
17639 | } |
17640 | |
17641 | case Intrinsic::arm_neon_vqrshifts: |
17642 | case Intrinsic::arm_neon_vqrshiftu: |
17643 | // No immediate versions of these to check for. |
17644 | break; |
17645 | |
17646 | case Intrinsic::arm_mve_vqdmlah: |
17647 | case Intrinsic::arm_mve_vqdmlash: |
17648 | case Intrinsic::arm_mve_vqrdmlah: |
17649 | case Intrinsic::arm_mve_vqrdmlash: |
17650 | case Intrinsic::arm_mve_vmla_n_predicated: |
17651 | case Intrinsic::arm_mve_vmlas_n_predicated: |
17652 | case Intrinsic::arm_mve_vqdmlah_predicated: |
17653 | case Intrinsic::arm_mve_vqdmlash_predicated: |
17654 | case Intrinsic::arm_mve_vqrdmlah_predicated: |
17655 | case Intrinsic::arm_mve_vqrdmlash_predicated: { |
17656 | // These intrinsics all take an i32 scalar operand which is narrowed to the |
17657 | // size of a single lane of the vector type they return. So we don't need |
17658 | // any bits of that operand above that point, which allows us to eliminate |
17659 | // uxth/sxth. |
17660 | unsigned BitWidth = N->getValueType(ResNo: 0).getScalarSizeInBits(); |
17661 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: BitWidth); |
17662 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 3), DemandedBits: DemandedMask, DCI)) |
17663 | return SDValue(); |
17664 | break; |
17665 | } |
17666 | |
17667 | case Intrinsic::arm_mve_minv: |
17668 | case Intrinsic::arm_mve_maxv: |
17669 | case Intrinsic::arm_mve_minav: |
17670 | case Intrinsic::arm_mve_maxav: |
17671 | case Intrinsic::arm_mve_minv_predicated: |
17672 | case Intrinsic::arm_mve_maxv_predicated: |
17673 | case Intrinsic::arm_mve_minav_predicated: |
17674 | case Intrinsic::arm_mve_maxav_predicated: { |
17675 | // These intrinsics all take an i32 scalar operand which is narrowed to the |
17676 | // size of a single lane of the vector type they take as the other input. |
17677 | unsigned BitWidth = N->getOperand(Num: 2)->getValueType(ResNo: 0).getScalarSizeInBits(); |
17678 | APInt DemandedMask = APInt::getLowBitsSet(numBits: 32, loBitsSet: BitWidth); |
17679 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
17680 | return SDValue(); |
17681 | break; |
17682 | } |
17683 | |
17684 | case Intrinsic::arm_mve_addv: { |
17685 | // Turn this intrinsic straight into the appropriate ARMISD::VADDV node, |
    // which allows PerformADDVecReduce to turn it into VADDLV when possible.
17687 | bool Unsigned = N->getConstantOperandVal(Num: 2); |
17688 | unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs; |
17689 | return DAG.getNode(Opcode: Opc, DL: SDLoc(N), VTList: N->getVTList(), N: N->getOperand(Num: 1)); |
17690 | } |
17691 | |
17692 | case Intrinsic::arm_mve_addlv: |
17693 | case Intrinsic::arm_mve_addlv_predicated: { |
17694 | // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR |
    // which recombines the two outputs into an i64.
17696 | bool Unsigned = N->getConstantOperandVal(Num: 2); |
17697 | unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ? |
17698 | (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) : |
17699 | (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps); |
17700 | |
17701 | SmallVector<SDValue, 4> Ops; |
17702 | for (unsigned i = 1, e = N->getNumOperands(); i < e; i++) |
17703 | if (i != 2) // skip the unsigned flag |
17704 | Ops.push_back(Elt: N->getOperand(Num: i)); |
17705 | |
17706 | SDLoc dl(N); |
17707 | SDValue val = DAG.getNode(Opcode: Opc, DL: dl, ResultTys: {MVT::i32, MVT::i32}, Ops); |
17708 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT: MVT::i64, N1: val.getValue(R: 0), |
17709 | N2: val.getValue(R: 1)); |
17710 | } |
17711 | } |
17712 | |
17713 | return SDValue(); |
17714 | } |
17715 | |
17716 | /// PerformShiftCombine - Checks for immediate versions of vector shifts and |
17717 | /// lowers them. As with the vector shift intrinsics, this is done during DAG |
17718 | /// combining instead of DAG legalizing because the build_vectors for 64-bit |
17719 | /// vector element shift counts are generally not legal, and it is hard to see |
17720 | /// their values after they get legalized to loads from a constant pool. |
17721 | static SDValue PerformShiftCombine(SDNode *N, |
17722 | TargetLowering::DAGCombinerInfo &DCI, |
17723 | const ARMSubtarget *ST) { |
17724 | SelectionDAG &DAG = DCI.DAG; |
17725 | EVT VT = N->getValueType(ResNo: 0); |
17726 | |
17727 | if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 && |
17728 | N->getOperand(Num: 0)->getOpcode() == ISD::AND && |
17729 | N->getOperand(Num: 0)->hasOneUse()) { |
17730 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
17731 | return SDValue(); |
17732 | // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't |
17733 | // usually show up because instcombine prefers to canonicalize it to |
17734 | // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come |
17735 | // out of GEP lowering in some cases. |
17736 | SDValue N0 = N->getOperand(Num: 0); |
17737 | ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1)); |
17738 | if (!ShiftAmtNode) |
17739 | return SDValue(); |
17740 | uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue()); |
17741 | ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(Val: N0->getOperand(Num: 1)); |
17742 | if (!AndMaskNode) |
17743 | return SDValue(); |
17744 | uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue()); |
17745 | // Don't transform uxtb/uxth. |
17746 | if (AndMask == 255 || AndMask == 65535) |
17747 | return SDValue(); |
17748 | if (isMask_32(Value: AndMask)) { |
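      // (x & Mask) << ShiftAmt is equivalent to (x << MaskedBits) >>
      // (MaskedBits - ShiftAmt) when Mask keeps the low 32 - MaskedBits bits,
      // which avoids materializing the mask constant.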
17749 | uint32_t MaskedBits = llvm::countl_zero(Val: AndMask); |
17750 | if (MaskedBits > ShiftAmt) { |
17751 | SDLoc DL(N); |
17752 | SDValue SHL = DAG.getNode(Opcode: ISD::SHL, DL, VT: MVT::i32, N1: N0->getOperand(Num: 0), |
17753 | N2: DAG.getConstant(Val: MaskedBits, DL, VT: MVT::i32)); |
17754 | return DAG.getNode( |
17755 | Opcode: ISD::SRL, DL, VT: MVT::i32, N1: SHL, |
17756 | N2: DAG.getConstant(Val: MaskedBits - ShiftAmt, DL, VT: MVT::i32)); |
17757 | } |
17758 | } |
17759 | } |
17760 | |
17761 | // Nothing to be done for scalar shifts. |
17762 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
17763 | if (!VT.isVector() || !TLI.isTypeLegal(VT)) |
17764 | return SDValue(); |
17765 | if (ST->hasMVEIntegerOps()) |
17766 | return SDValue(); |
17767 | |
17768 | int64_t Cnt; |
17769 | |
17770 | switch (N->getOpcode()) { |
  default: llvm_unreachable("unexpected shift opcode");
17772 | |
17773 | case ISD::SHL: |
17774 | if (isVShiftLImm(Op: N->getOperand(Num: 1), VT, isLong: false, Cnt)) { |
17775 | SDLoc dl(N); |
17776 | return DAG.getNode(Opcode: ARMISD::VSHLIMM, DL: dl, VT, N1: N->getOperand(Num: 0), |
17777 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
17778 | } |
17779 | break; |
17780 | |
17781 | case ISD::SRA: |
17782 | case ISD::SRL: |
17783 | if (isVShiftRImm(Op: N->getOperand(Num: 1), VT, isNarrow: false, isIntrinsic: false, Cnt)) { |
17784 | unsigned VShiftOpc = |
17785 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); |
17786 | SDLoc dl(N); |
17787 | return DAG.getNode(Opcode: VShiftOpc, DL: dl, VT, N1: N->getOperand(Num: 0), |
17788 | N2: DAG.getConstant(Val: Cnt, DL: dl, VT: MVT::i32)); |
17789 | } |
17790 | } |
17791 | return SDValue(); |
17792 | } |
17793 | |
// Look for a sign/zero/fp extend of a larger than legal load. This can be
17795 | // split into multiple extending loads, which are simpler to deal with than an |
17796 | // arbitrary extend. For fp extends we use an integer extending load and a VCVTL |
17797 | // to convert the type to an f32. |
17798 | static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) { |
17799 | SDValue N0 = N->getOperand(Num: 0); |
17800 | if (N0.getOpcode() != ISD::LOAD) |
17801 | return SDValue(); |
17802 | LoadSDNode *LD = cast<LoadSDNode>(Val: N0.getNode()); |
17803 | if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() || |
17804 | LD->getExtensionType() != ISD::NON_EXTLOAD) |
17805 | return SDValue(); |
17806 | EVT FromVT = LD->getValueType(ResNo: 0); |
17807 | EVT ToVT = N->getValueType(ResNo: 0); |
17808 | if (!ToVT.isVector()) |
17809 | return SDValue(); |
17810 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); |
17811 | EVT ToEltVT = ToVT.getVectorElementType(); |
17812 | EVT FromEltVT = FromVT.getVectorElementType(); |
17813 | |
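  // Split into chunks that produce 4 elements at a time, the widest the
  // i32/f32 results allow. Integer sources that already have exactly
  // NumElements lanes need no splitting, but f16 sources are still handled so
  // the fp extend can be expanded with VCVTL below.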
17814 | unsigned NumElements = 0; |
17815 | if (ToEltVT == MVT::i32 && FromEltVT == MVT::i8) |
17816 | NumElements = 4; |
17817 | if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16) |
17818 | NumElements = 4; |
17819 | if (NumElements == 0 || |
17820 | (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) || |
17821 | FromVT.getVectorNumElements() % NumElements != 0 || |
17822 | !isPowerOf2_32(Value: NumElements)) |
17823 | return SDValue(); |
17824 | |
17825 | LLVMContext &C = *DAG.getContext(); |
17826 | SDLoc DL(LD); |
17827 | // Details about the old load |
17828 | SDValue Ch = LD->getChain(); |
17829 | SDValue BasePtr = LD->getBasePtr(); |
17830 | Align Alignment = LD->getOriginalAlign(); |
17831 | MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); |
17832 | AAMDNodes AAInfo = LD->getAAInfo(); |
17833 | |
17834 | ISD::LoadExtType NewExtType = |
17835 | N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD; |
17836 | SDValue Offset = DAG.getUNDEF(VT: BasePtr.getValueType()); |
17837 | EVT NewFromVT = EVT::getVectorVT( |
17838 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: FromEltVT.getScalarSizeInBits()), NumElements); |
17839 | EVT NewToVT = EVT::getVectorVT( |
17840 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getScalarSizeInBits()), NumElements); |
17841 | |
17842 | SmallVector<SDValue, 4> Loads; |
17843 | SmallVector<SDValue, 4> Chains; |
17844 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
17845 | unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; |
17846 | SDValue NewPtr = |
17847 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
17848 | |
17849 | SDValue NewLoad = |
17850 | DAG.getLoad(AM: ISD::UNINDEXED, ExtType: NewExtType, VT: NewToVT, dl: DL, Chain: Ch, Ptr: NewPtr, Offset, |
17851 | PtrInfo: LD->getPointerInfo().getWithOffset(O: NewOffset), MemVT: NewFromVT, |
17852 | Alignment, MMOFlags, AAInfo); |
17853 | Loads.push_back(Elt: NewLoad); |
17854 | Chains.push_back(Elt: SDValue(NewLoad.getNode(), 1)); |
17855 | } |
17856 | |
  // Float truncs need to be extended with VCVTB's into their floating point
  // types.
17858 | if (FromEltVT == MVT::f16) { |
17859 | SmallVector<SDValue, 4> Extends; |
17860 | |
17861 | for (unsigned i = 0; i < Loads.size(); i++) { |
17862 | SDValue LoadBC = |
17863 | DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT: MVT::v8f16, Operand: Loads[i]); |
17864 | SDValue FPExt = DAG.getNode(Opcode: ARMISD::VCVTL, DL, VT: MVT::v4f32, N1: LoadBC, |
17865 | N2: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
17866 | Extends.push_back(Elt: FPExt); |
17867 | } |
17868 | |
17869 | Loads = Extends; |
17870 | } |
17871 | |
17872 | SDValue NewChain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
17873 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewChain); |
17874 | return DAG.getNode(Opcode: ISD::CONCAT_VECTORS, DL, VT: ToVT, Ops: Loads); |
17875 | } |
17876 | |
17877 | /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, |
17878 | /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. |
17879 | static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, |
17880 | const ARMSubtarget *ST) { |
17881 | SDValue N0 = N->getOperand(Num: 0); |
17882 | |
17883 | // Check for sign- and zero-extensions of vector extract operations of 8- and |
17884 | // 16-bit vector elements. NEON and MVE support these directly. They are |
17885 | // handled during DAG combining because type legalization will promote them |
17886 | // to 32-bit types and it is messy to recognize the operations after that. |
17887 | if ((ST->hasNEON() || ST->hasMVEIntegerOps()) && |
17888 | N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
17889 | SDValue Vec = N0.getOperand(i: 0); |
17890 | SDValue Lane = N0.getOperand(i: 1); |
17891 | EVT VT = N->getValueType(ResNo: 0); |
17892 | EVT EltVT = N0.getValueType(); |
17893 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
17894 | |
17895 | if (VT == MVT::i32 && |
17896 | (EltVT == MVT::i8 || EltVT == MVT::i16) && |
17897 | TLI.isTypeLegal(VT: Vec.getValueType()) && |
17898 | isa<ConstantSDNode>(Val: Lane)) { |
17899 | |
17900 | unsigned Opc = 0; |
17901 | switch (N->getOpcode()) { |
      default: llvm_unreachable("unexpected opcode");
17903 | case ISD::SIGN_EXTEND: |
17904 | Opc = ARMISD::VGETLANEs; |
17905 | break; |
17906 | case ISD::ZERO_EXTEND: |
17907 | case ISD::ANY_EXTEND: |
17908 | Opc = ARMISD::VGETLANEu; |
17909 | break; |
17910 | } |
17911 | return DAG.getNode(Opcode: Opc, DL: SDLoc(N), VT, N1: Vec, N2: Lane); |
17912 | } |
17913 | } |
17914 | |
17915 | if (ST->hasMVEIntegerOps()) |
17916 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) |
17917 | return NewLoad; |
17918 | |
17919 | return SDValue(); |
17920 | } |
17921 | |
17922 | static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG, |
17923 | const ARMSubtarget *ST) { |
17924 | if (ST->hasMVEFloatOps()) |
17925 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) |
17926 | return NewLoad; |
17927 | |
17928 | return SDValue(); |
17929 | } |
17930 | |
17931 | // Lower smin(smax(x, C1), C2) to ssat or usat, if they have saturating |
17932 | // constant bounds. |
17933 | static SDValue PerformMinMaxToSatCombine(SDValue Op, SelectionDAG &DAG, |
17934 | const ARMSubtarget *Subtarget) { |
17935 | if ((Subtarget->isThumb() || !Subtarget->hasV6Ops()) && |
17936 | !Subtarget->isThumb2()) |
17937 | return SDValue(); |
17938 | |
17939 | EVT VT = Op.getValueType(); |
17940 | SDValue Op0 = Op.getOperand(i: 0); |
17941 | |
17942 | if (VT != MVT::i32 || |
17943 | (Op0.getOpcode() != ISD::SMIN && Op0.getOpcode() != ISD::SMAX) || |
17944 | !isa<ConstantSDNode>(Val: Op.getOperand(i: 1)) || |
17945 | !isa<ConstantSDNode>(Val: Op0.getOperand(i: 1))) |
17946 | return SDValue(); |
17947 | |
17948 | SDValue Min = Op; |
17949 | SDValue Max = Op0; |
17950 | SDValue Input = Op0.getOperand(i: 0); |
17951 | if (Min.getOpcode() == ISD::SMAX) |
17952 | std::swap(a&: Min, b&: Max); |
17953 | |
17954 | APInt MinC = Min.getConstantOperandAPInt(i: 1); |
17955 | APInt MaxC = Max.getConstantOperandAPInt(i: 1); |
17956 | |
17957 | if (Min.getOpcode() != ISD::SMIN || Max.getOpcode() != ISD::SMAX || |
17958 | !(MinC + 1).isPowerOf2()) |
17959 | return SDValue(); |
17960 | |
17961 | SDLoc DL(Op); |
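  // MinC + 1 is a power of two (2^k), so the clamp is either to the signed
  // range [-2^k, 2^k-1] (when MaxC == ~MinC), which maps to SSAT, or to the
  // unsigned range [0, 2^k-1] (when MaxC == 0), which maps to USAT.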
17962 | if (MinC == ~MaxC) |
17963 | return DAG.getNode(Opcode: ARMISD::SSAT, DL, VT, N1: Input, |
17964 | N2: DAG.getConstant(Val: MinC.countr_one(), DL, VT)); |
17965 | if (MaxC == 0) |
17966 | return DAG.getNode(Opcode: ARMISD::USAT, DL, VT, N1: Input, |
17967 | N2: DAG.getConstant(Val: MinC.countr_one(), DL, VT)); |
17968 | |
17969 | return SDValue(); |
17970 | } |
17971 | |
17972 | /// PerformMinMaxCombine - Target-specific DAG combining for creating truncating |
17973 | /// saturates. |
17974 | static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG, |
17975 | const ARMSubtarget *ST) { |
17976 | EVT VT = N->getValueType(ResNo: 0); |
17977 | SDValue N0 = N->getOperand(Num: 0); |
17978 | |
17979 | if (VT == MVT::i32) |
17980 | return PerformMinMaxToSatCombine(Op: SDValue(N, 0), DAG, Subtarget: ST); |
17981 | |
17982 | if (!ST->hasMVEIntegerOps()) |
17983 | return SDValue(); |
17984 | |
17985 | if (SDValue V = PerformVQDMULHCombine(N, DAG)) |
17986 | return V; |
17987 | |
17988 | if (VT != MVT::v4i32 && VT != MVT::v8i16) |
17989 | return SDValue(); |
17990 | |
17991 | auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) { |
17992 | // Check one is a smin and the other is a smax |
17993 | if (Min->getOpcode() != ISD::SMIN) |
17994 | std::swap(a&: Min, b&: Max); |
17995 | if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX) |
17996 | return false; |
17997 | |
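    // The splat constants must be the signed max/min of the half-width element
    // type: i16 for v4i32 and i8 for v8i16.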
17998 | APInt SaturateC; |
17999 | if (VT == MVT::v4i32) |
18000 | SaturateC = APInt(32, (1 << 15) - 1, true); |
18001 | else //if (VT == MVT::v8i16) |
18002 | SaturateC = APInt(16, (1 << 7) - 1, true); |
18003 | |
18004 | APInt MinC, MaxC; |
18005 | if (!ISD::isConstantSplatVector(N: Min->getOperand(Num: 1).getNode(), SplatValue&: MinC) || |
18006 | MinC != SaturateC) |
18007 | return false; |
18008 | if (!ISD::isConstantSplatVector(N: Max->getOperand(Num: 1).getNode(), SplatValue&: MaxC) || |
18009 | MaxC != ~SaturateC) |
18010 | return false; |
18011 | return true; |
18012 | }; |
18013 | |
18014 | if (IsSignedSaturate(N, N0.getNode())) { |
18015 | SDLoc DL(N); |
18016 | MVT ExtVT, HalfVT; |
18017 | if (VT == MVT::v4i32) { |
18018 | HalfVT = MVT::v8i16; |
18019 | ExtVT = MVT::v4i16; |
18020 | } else { // if (VT == MVT::v8i16) |
18021 | HalfVT = MVT::v16i8; |
18022 | ExtVT = MVT::v8i8; |
18023 | } |
18024 | |
    // Create a VQMOVNB with undef top lanes, then sign-extend it into the top
    // half. That extend will hopefully be removed if only the bottom bits are
    // demanded (through a truncating store, for example).
18028 | SDValue VQMOVN = |
18029 | DAG.getNode(Opcode: ARMISD::VQMOVNs, DL, VT: HalfVT, N1: DAG.getUNDEF(VT: HalfVT), |
18030 | N2: N0->getOperand(Num: 0), N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
18031 | SDValue Bitcast = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: VQMOVN); |
18032 | return DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT, N1: Bitcast, |
18033 | N2: DAG.getValueType(ExtVT)); |
18034 | } |
18035 | |
18036 | auto IsUnsignedSaturate = [&](SDNode *Min) { |
18037 | // For unsigned, we just need to check for <= 0xffff |
18038 | if (Min->getOpcode() != ISD::UMIN) |
18039 | return false; |
18040 | |
18041 | APInt SaturateC; |
18042 | if (VT == MVT::v4i32) |
18043 | SaturateC = APInt(32, (1 << 16) - 1, true); |
18044 | else //if (VT == MVT::v8i16) |
18045 | SaturateC = APInt(16, (1 << 8) - 1, true); |
18046 | |
18047 | APInt MinC; |
18048 | if (!ISD::isConstantSplatVector(N: Min->getOperand(Num: 1).getNode(), SplatValue&: MinC) || |
18049 | MinC != SaturateC) |
18050 | return false; |
18051 | return true; |
18052 | }; |
18053 | |
18054 | if (IsUnsignedSaturate(N)) { |
18055 | SDLoc DL(N); |
18056 | MVT HalfVT; |
18057 | unsigned ExtConst; |
18058 | if (VT == MVT::v4i32) { |
18059 | HalfVT = MVT::v8i16; |
18060 | ExtConst = 0x0000FFFF; |
18061 | } else { //if (VT == MVT::v8i16) |
18062 | HalfVT = MVT::v16i8; |
18063 | ExtConst = 0x00FF; |
18064 | } |
18065 | |
    // Create a VQMOVNB with undef top lanes, then ZExt into the top half with
    // an AND. That extend will hopefully be removed if only the bottom bits are
    // demanded (through a truncating store, for example).
18069 | SDValue VQMOVN = |
18070 | DAG.getNode(Opcode: ARMISD::VQMOVNu, DL, VT: HalfVT, N1: DAG.getUNDEF(VT: HalfVT), N2: N0, |
18071 | N3: DAG.getConstant(Val: 0, DL, VT: MVT::i32)); |
18072 | SDValue Bitcast = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: VQMOVN); |
18073 | return DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Bitcast, |
18074 | N2: DAG.getConstant(Val: ExtConst, DL, VT)); |
18075 | } |
18076 | |
18077 | return SDValue(); |
18078 | } |
18079 | |
18080 | static const APInt *isPowerOf2Constant(SDValue V) { |
18081 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: V); |
18082 | if (!C) |
18083 | return nullptr; |
18084 | const APInt *CV = &C->getAPIntValue(); |
18085 | return CV->isPowerOf2() ? CV : nullptr; |
18086 | } |
18087 | |
18088 | SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { |
18089 | // If we have a CMOV, OR and AND combination such as: |
18090 | // if (x & CN) |
18091 | // y |= CM; |
18092 | // |
18093 | // And: |
18094 | // * CN is a single bit; |
18095 | // * All bits covered by CM are known zero in y |
18096 | // |
18097 | // Then we can convert this into a sequence of BFI instructions. This will |
18098 | // always be a win if CM is a single bit, will always be no worse than the |
18099 | // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is |
18100 | // three bits (due to the extra IT instruction). |
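  //
  // Illustrative sketch: with CN == 0x4 and CM == 0x30, x is first shifted
  // right by 2 so the tested bit ends up in bit 0, and the loop at the end of
  // this function then emits one BFI per set bit of CM, copying that bit into
  // bits 4 and 5 of y.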
18101 | |
18102 | SDValue Op0 = CMOV->getOperand(Num: 0); |
18103 | SDValue Op1 = CMOV->getOperand(Num: 1); |
18104 | auto CC = CMOV->getConstantOperandAPInt(Num: 2).getLimitedValue(); |
18105 | SDValue CmpZ = CMOV->getOperand(Num: 4); |
18106 | |
18107 | // The compare must be against zero. |
18108 | if (!isNullConstant(V: CmpZ->getOperand(Num: 1))) |
18109 | return SDValue(); |
18110 | |
18111 | assert(CmpZ->getOpcode() == ARMISD::CMPZ); |
18112 | SDValue And = CmpZ->getOperand(Num: 0); |
18113 | if (And->getOpcode() != ISD::AND) |
18114 | return SDValue(); |
18115 | const APInt *AndC = isPowerOf2Constant(V: And->getOperand(Num: 1)); |
18116 | if (!AndC) |
18117 | return SDValue(); |
18118 | SDValue X = And->getOperand(Num: 0); |
18119 | |
18120 | if (CC == ARMCC::EQ) { |
18121 | // We're performing an "equal to zero" compare. Swap the operands so we |
18122 | // canonicalize on a "not equal to zero" compare. |
18123 | std::swap(a&: Op0, b&: Op1); |
18124 | } else { |
    assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
18126 | } |
18127 | |
18128 | if (Op1->getOpcode() != ISD::OR) |
18129 | return SDValue(); |
18130 | |
18131 | ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Val: Op1->getOperand(Num: 1)); |
18132 | if (!OrC) |
18133 | return SDValue(); |
18134 | SDValue Y = Op1->getOperand(Num: 0); |
18135 | |
18136 | if (Op0 != Y) |
18137 | return SDValue(); |
18138 | |
18139 | // Now, is it profitable to continue? |
18140 | APInt OrCI = OrC->getAPIntValue(); |
18141 | unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; |
18142 | if (OrCI.popcount() > Heuristic) |
18143 | return SDValue(); |
18144 | |
18145 | // Lastly, can we determine that the bits defined by OrCI |
18146 | // are zero in Y? |
18147 | KnownBits Known = DAG.computeKnownBits(Op: Y); |
18148 | if ((OrCI & Known.Zero) != OrCI) |
18149 | return SDValue(); |
18150 | |
18151 | // OK, we can do the combine. |
18152 | SDValue V = Y; |
18153 | SDLoc dl(X); |
18154 | EVT VT = X.getValueType(); |
18155 | unsigned BitInX = AndC->logBase2(); |
18156 | |
18157 | if (BitInX != 0) { |
18158 | // We must shift X first. |
18159 | X = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: X, |
18160 | N2: DAG.getConstant(Val: BitInX, DL: dl, VT)); |
18161 | } |
18162 | |
18163 | for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); |
18164 | BitInY < NumActiveBits; ++BitInY) { |
18165 | if (OrCI[BitInY] == 0) |
18166 | continue; |
18167 | APInt Mask(VT.getSizeInBits(), 0); |
18168 | Mask.setBit(BitInY); |
18169 | V = DAG.getNode(Opcode: ARMISD::BFI, DL: dl, VT, N1: V, N2: X, |
18170 | // Confusingly, the operand is an *inverted* mask. |
18171 | N3: DAG.getConstant(Val: ~Mask, DL: dl, VT)); |
18172 | } |
18173 | |
18174 | return V; |
18175 | } |
18176 | |
18177 | // Given N, the value controlling the conditional branch, search for the loop |
18178 | // intrinsic, returning it, along with how the value is used. We need to handle |
18179 | // patterns such as the following: |
18180 | // (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit) |
18181 | // (brcond (setcc (loop.decrement), 0, eq), exit) |
18182 | // (brcond (setcc (loop.decrement), 0, ne), header) |
18183 | static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, |
18184 | bool &Negate) { |
18185 | switch (N->getOpcode()) { |
18186 | default: |
18187 | break; |
18188 | case ISD::XOR: { |
18189 | if (!isa<ConstantSDNode>(Val: N.getOperand(i: 1))) |
18190 | return SDValue(); |
18191 | if (!cast<ConstantSDNode>(Val: N.getOperand(i: 1))->isOne()) |
18192 | return SDValue(); |
18193 | Negate = !Negate; |
18194 | return SearchLoopIntrinsic(N: N.getOperand(i: 0), CC, Imm, Negate); |
18195 | } |
18196 | case ISD::SETCC: { |
18197 | auto *Const = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1)); |
18198 | if (!Const) |
18199 | return SDValue(); |
18200 | if (Const->isZero()) |
18201 | Imm = 0; |
18202 | else if (Const->isOne()) |
18203 | Imm = 1; |
18204 | else |
18205 | return SDValue(); |
18206 | CC = cast<CondCodeSDNode>(Val: N.getOperand(i: 2))->get(); |
18207 | return SearchLoopIntrinsic(N: N->getOperand(Num: 0), CC, Imm, Negate); |
18208 | } |
18209 | case ISD::INTRINSIC_W_CHAIN: { |
18210 | unsigned IntOp = N.getConstantOperandVal(i: 1); |
18211 | if (IntOp != Intrinsic::test_start_loop_iterations && |
18212 | IntOp != Intrinsic::loop_decrement_reg) |
18213 | return SDValue(); |
18214 | return N; |
18215 | } |
18216 | } |
18217 | return SDValue(); |
18218 | } |
18219 | |
18220 | static SDValue PerformHWLoopCombine(SDNode *N, |
18221 | TargetLowering::DAGCombinerInfo &DCI, |
18222 | const ARMSubtarget *ST) { |
18223 | |
18224 | // The hwloop intrinsics that we're interested are used for control-flow, |
18225 | // either for entering or exiting the loop: |
18226 | // - test.start.loop.iterations will test whether its operand is zero. If it |
18227 | // is zero, the proceeding branch should not enter the loop. |
18228 | // - loop.decrement.reg also tests whether its operand is zero. If it is |
18229 | // zero, the proceeding branch should not branch back to the beginning of |
18230 | // the loop. |
  // So here, we need to check how the brcond is using the result of each of
  // the intrinsics to ensure that we're branching to the right place at the
  // right time.
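  //
  // As a rough sketch of the end result: a branch guarded by
  // test.start.loop.iterations is rewritten below into an ARMISD::WLSSETUP /
  // ARMISD::WLS pair (a while-loop start that branches past the loop when the
  // count is zero), and a branch guarded by loop.decrement.reg becomes an
  // ARMISD::LOOP_DEC feeding an ARMISD::LE that branches back to the header
  // while the count is non-zero.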
18234 | |
18235 | ISD::CondCode CC; |
18236 | SDValue Cond; |
18237 | int Imm = 1; |
18238 | bool Negate = false; |
18239 | SDValue Chain = N->getOperand(Num: 0); |
18240 | SDValue Dest; |
18241 | |
18242 | if (N->getOpcode() == ISD::BRCOND) { |
18243 | CC = ISD::SETEQ; |
18244 | Cond = N->getOperand(Num: 1); |
18245 | Dest = N->getOperand(Num: 2); |
18246 | } else { |
    assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!");
18248 | CC = cast<CondCodeSDNode>(Val: N->getOperand(Num: 1))->get(); |
18249 | Cond = N->getOperand(Num: 2); |
18250 | Dest = N->getOperand(Num: 4); |
18251 | if (auto *Const = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 3))) { |
18252 | if (!Const->isOne() && !Const->isZero()) |
18253 | return SDValue(); |
18254 | Imm = Const->getZExtValue(); |
18255 | } else |
18256 | return SDValue(); |
18257 | } |
18258 | |
18259 | SDValue Int = SearchLoopIntrinsic(N: Cond, CC, Imm, Negate); |
18260 | if (!Int) |
18261 | return SDValue(); |
18262 | |
18263 | if (Negate) |
18264 | CC = ISD::getSetCCInverse(Operation: CC, /* Integer inverse */ Type: MVT::i32); |
18265 | |
18266 | auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) { |
18267 | return (CC == ISD::SETEQ && Imm == 0) || |
18268 | (CC == ISD::SETNE && Imm == 1) || |
18269 | (CC == ISD::SETLT && Imm == 1) || |
18270 | (CC == ISD::SETULT && Imm == 1); |
18271 | }; |
18272 | |
18273 | auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) { |
18274 | return (CC == ISD::SETEQ && Imm == 1) || |
18275 | (CC == ISD::SETNE && Imm == 0) || |
18276 | (CC == ISD::SETGT && Imm == 0) || |
18277 | (CC == ISD::SETUGT && Imm == 0) || |
18278 | (CC == ISD::SETGE && Imm == 1) || |
18279 | (CC == ISD::SETUGE && Imm == 1); |
18280 | }; |
18281 | |
18282 | assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) && |
18283 | "unsupported condition" ); |
18284 | |
18285 | SDLoc dl(Int); |
18286 | SelectionDAG &DAG = DCI.DAG; |
18287 | SDValue Elements = Int.getOperand(i: 2); |
18288 | unsigned IntOp = Int->getConstantOperandVal(Num: 1); |
18289 | assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR) |
18290 | && "expected single br user" ); |
18291 | SDNode *Br = *N->use_begin(); |
18292 | SDValue OtherTarget = Br->getOperand(Num: 1); |
18293 | |
18294 | // Update the unconditional branch to branch to the given Dest. |
18295 | auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) { |
18296 | SDValue NewBrOps[] = { Br->getOperand(Num: 0), Dest }; |
18297 | SDValue NewBr = DAG.getNode(Opcode: ISD::BR, DL: SDLoc(Br), VT: MVT::Other, Ops: NewBrOps); |
18298 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(Br, 0), To: NewBr); |
18299 | }; |
18300 | |
18301 | if (IntOp == Intrinsic::test_start_loop_iterations) { |
18302 | SDValue Res; |
18303 | SDValue Setup = DAG.getNode(Opcode: ARMISD::WLSSETUP, DL: dl, VT: MVT::i32, Operand: Elements); |
18304 | // We expect this 'instruction' to branch when the counter is zero. |
18305 | if (IsTrueIfZero(CC, Imm)) { |
18306 | SDValue Ops[] = {Chain, Setup, Dest}; |
18307 | Res = DAG.getNode(Opcode: ARMISD::WLS, DL: dl, VT: MVT::Other, Ops); |
18308 | } else { |
18309 | // The logic is the reverse of what we need for WLS, so find the other |
18310 | // basic block target: the target of the proceeding br. |
18311 | UpdateUncondBr(Br, Dest, DAG); |
18312 | |
18313 | SDValue Ops[] = {Chain, Setup, OtherTarget}; |
18314 | Res = DAG.getNode(Opcode: ARMISD::WLS, DL: dl, VT: MVT::Other, Ops); |
18315 | } |
18316 | // Update LR count to the new value |
18317 | DAG.ReplaceAllUsesOfValueWith(From: Int.getValue(R: 0), To: Setup); |
18318 | // Update chain |
18319 | DAG.ReplaceAllUsesOfValueWith(From: Int.getValue(R: 2), To: Int.getOperand(i: 0)); |
18320 | return Res; |
18321 | } else { |
18322 | SDValue Size = |
18323 | DAG.getTargetConstant(Val: Int.getConstantOperandVal(i: 3), DL: dl, VT: MVT::i32); |
18324 | SDValue Args[] = { Int.getOperand(i: 0), Elements, Size, }; |
18325 | SDValue LoopDec = DAG.getNode(Opcode: ARMISD::LOOP_DEC, DL: dl, |
18326 | VTList: DAG.getVTList(VT1: MVT::i32, VT2: MVT::Other), Ops: Args); |
18327 | DAG.ReplaceAllUsesWith(From: Int.getNode(), To: LoopDec.getNode()); |
18328 | |
18329 | // We expect this instruction to branch when the count is not zero. |
18330 | SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget; |
18331 | |
18332 | // Update the unconditional branch to target the loop preheader if we've |
18333 | // found the condition has been reversed. |
18334 | if (Target == OtherTarget) |
18335 | UpdateUncondBr(Br, Dest, DAG); |
18336 | |
18337 | Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL: dl, VT: MVT::Other, |
18338 | N1: SDValue(LoopDec.getNode(), 1), N2: Chain); |
18339 | |
18340 | SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target }; |
18341 | return DAG.getNode(Opcode: ARMISD::LE, DL: dl, VT: MVT::Other, Ops: EndArgs); |
18342 | } |
18343 | return SDValue(); |
18344 | } |
18345 | |
18346 | /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. |
18347 | SDValue |
18348 | ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { |
18349 | SDValue Cmp = N->getOperand(Num: 4); |
18350 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
18351 | // Only looking at NE cases. |
18352 | return SDValue(); |
18353 | |
18354 | EVT VT = N->getValueType(ResNo: 0); |
18355 | SDLoc dl(N); |
18356 | SDValue LHS = Cmp.getOperand(i: 0); |
18357 | SDValue RHS = Cmp.getOperand(i: 1); |
18358 | SDValue Chain = N->getOperand(Num: 0); |
18359 | SDValue BB = N->getOperand(Num: 1); |
18360 | SDValue ARMcc = N->getOperand(Num: 2); |
18361 | ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal(); |
18362 | |
18363 | // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0)) |
18364 | // -> (brcond Chain BB CC CPSR Cmp) |
18365 | if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && |
18366 | LHS->getOperand(Num: 0)->getOpcode() == ARMISD::CMOV && |
18367 | LHS->getOperand(Num: 0)->hasOneUse() && |
18368 | isNullConstant(V: LHS->getOperand(Num: 0)->getOperand(Num: 0)) && |
18369 | isOneConstant(V: LHS->getOperand(Num: 0)->getOperand(Num: 1)) && |
18370 | isOneConstant(V: LHS->getOperand(Num: 1)) && isNullConstant(V: RHS)) { |
18371 | return DAG.getNode( |
18372 | Opcode: ARMISD::BRCOND, DL: dl, VT, N1: Chain, N2: BB, N3: LHS->getOperand(Num: 0)->getOperand(Num: 2), |
18373 | N4: LHS->getOperand(Num: 0)->getOperand(Num: 3), N5: LHS->getOperand(Num: 0)->getOperand(Num: 4)); |
18374 | } |
18375 | |
18376 | return SDValue(); |
18377 | } |
18378 | |
18379 | /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. |
18380 | SDValue |
18381 | ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { |
18382 | SDValue Cmp = N->getOperand(Num: 4); |
18383 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
18384 | // Only looking at EQ and NE cases. |
18385 | return SDValue(); |
18386 | |
18387 | EVT VT = N->getValueType(ResNo: 0); |
18388 | SDLoc dl(N); |
18389 | SDValue LHS = Cmp.getOperand(i: 0); |
18390 | SDValue RHS = Cmp.getOperand(i: 1); |
18391 | SDValue FalseVal = N->getOperand(Num: 0); |
18392 | SDValue TrueVal = N->getOperand(Num: 1); |
18393 | SDValue ARMcc = N->getOperand(Num: 2); |
18394 | ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal(); |
18395 | |
18396 | // BFI is only available on V6T2+. |
18397 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { |
18398 | SDValue R = PerformCMOVToBFICombine(CMOV: N, DAG); |
18399 | if (R) |
18400 | return R; |
18401 | } |
18402 | |
18403 | // Simplify |
18404 | // mov r1, r0 |
18405 | // cmp r1, x |
18406 | // mov r0, y |
18407 | // moveq r0, x |
18408 | // to |
18409 | // cmp r0, x |
18410 | // movne r0, y |
18411 | // |
18412 | // mov r1, r0 |
18413 | // cmp r1, x |
18414 | // mov r0, x |
18415 | // movne r0, y |
18416 | // to |
18417 | // cmp r0, x |
18418 | // movne r0, y |
18419 | /// FIXME: Turn this into a target neutral optimization? |
18420 | SDValue Res; |
18421 | if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { |
18422 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LHS, N2: TrueVal, N3: ARMcc, |
18423 | N4: N->getOperand(Num: 3), N5: Cmp); |
18424 | } else if (CC == ARMCC::EQ && TrueVal == RHS) { |
18425 | SDValue ARMcc; |
18426 | SDValue NewCmp = getARMCmp(LHS, RHS, CC: ISD::SETNE, ARMcc, DAG, dl); |
18427 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: LHS, N2: FalseVal, N3: ARMcc, |
18428 | N4: N->getOperand(Num: 3), N5: NewCmp); |
18429 | } |
18430 | |
18431 | // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0)) |
18432 | // -> (cmov F T CC CPSR Cmp) |
18433 | if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse() && |
18434 | isNullConstant(V: LHS->getOperand(Num: 0)) && isOneConstant(V: LHS->getOperand(Num: 1)) && |
18435 | isNullConstant(V: RHS)) { |
18436 | return DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: FalseVal, N2: TrueVal, |
18437 | N3: LHS->getOperand(Num: 2), N4: LHS->getOperand(Num: 3), |
18438 | N5: LHS->getOperand(Num: 4)); |
18439 | } |
18440 | |
18441 | if (!VT.isInteger()) |
18442 | return SDValue(); |
18443 | |
  // Fold away an unnecessary CMPZ/CMOV
18445 | // CMOV A, B, C1, $cpsr, (CMPZ (CMOV 1, 0, C2, D), 0) -> |
18446 | // if C1==EQ -> CMOV A, B, C2, $cpsr, D |
18447 | // if C1==NE -> CMOV A, B, NOT(C2), $cpsr, D |
18448 | if (N->getConstantOperandVal(Num: 2) == ARMCC::EQ || |
18449 | N->getConstantOperandVal(Num: 2) == ARMCC::NE) { |
18450 | ARMCC::CondCodes Cond; |
18451 | if (SDValue C = IsCMPZCSINC(Cmp: N->getOperand(Num: 4).getNode(), CC&: Cond)) { |
18452 | if (N->getConstantOperandVal(Num: 2) == ARMCC::NE) |
18453 | Cond = ARMCC::getOppositeCondition(CC: Cond); |
18454 | return DAG.getNode(Opcode: N->getOpcode(), DL: SDLoc(N), VT: MVT::i32, N1: N->getOperand(Num: 0), |
18455 | N2: N->getOperand(Num: 1), |
18456 | N3: DAG.getTargetConstant(Val: Cond, DL: SDLoc(N), VT: MVT::i32), |
18457 | N4: N->getOperand(Num: 3), N5: C); |
18458 | } |
18459 | } |
18460 | |
18461 | // Materialize a boolean comparison for integers so we can avoid branching. |
18462 | if (isNullConstant(V: FalseVal)) { |
18463 | if (CC == ARMCC::EQ && isOneConstant(V: TrueVal)) { |
18464 | if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) { |
18465 | // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it |
18466 | // right 5 bits will make that 32 be 1, otherwise it will be 0. |
18467 | // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5 |
18468 | SDValue Sub = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LHS, N2: RHS); |
18469 | Res = DAG.getNode(Opcode: ISD::SRL, DL: dl, VT, N1: DAG.getNode(Opcode: ISD::CTLZ, DL: dl, VT, Operand: Sub), |
18470 | N2: DAG.getConstant(Val: 5, DL: dl, VT: MVT::i32)); |
18471 | } else { |
18472 | // CMOV 0, 1, ==, (CMPZ x, y) -> |
18473 | // (UADDO_CARRY (SUB x, y), t:0, t:1) |
18474 | // where t = (USUBO_CARRY 0, (SUB x, y), 0) |
18475 | // |
18476 | // The USUBO_CARRY computes 0 - (x - y) and this will give a borrow when |
18477 | // x != y. In other words, a carry C == 1 when x == y, C == 0 |
18478 | // otherwise. |
18479 | // The final UADDO_CARRY computes |
18480 | // x - y + (0 - (x - y)) + C == C |
18481 | SDValue Sub = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: LHS, N2: RHS); |
18482 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
18483 | SDValue Neg = DAG.getNode(Opcode: ISD::USUBO, DL: dl, VTList: VTs, N1: FalseVal, N2: Sub); |
18484 | // ISD::USUBO_CARRY returns a borrow but we want the carry here |
18485 | // actually. |
18486 | SDValue Carry = |
18487 | DAG.getNode(Opcode: ISD::SUB, DL: dl, VT: MVT::i32, |
18488 | N1: DAG.getConstant(Val: 1, DL: dl, VT: MVT::i32), N2: Neg.getValue(R: 1)); |
18489 | Res = DAG.getNode(Opcode: ISD::UADDO_CARRY, DL: dl, VTList: VTs, N1: Sub, N2: Neg, N3: Carry); |
18490 | } |
18491 | } else if (CC == ARMCC::NE && !isNullConstant(V: RHS) && |
18492 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(V: TrueVal))) { |
18493 | // This seems pointless but will allow us to combine it further below. |
18494 | // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBC x, y), z, !=, (SUBC x, y):1 |
18495 | SDValue Sub = |
18496 | DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: LHS, N2: RHS); |
18497 | SDValue CPSRGlue = DAG.getCopyToReg(Chain: DAG.getEntryNode(), dl, Reg: ARM::CPSR, |
18498 | N: Sub.getValue(R: 1), Glue: SDValue()); |
18499 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: Sub, N2: TrueVal, N3: ARMcc, |
18500 | N4: N->getOperand(Num: 3), N5: CPSRGlue.getValue(R: 1)); |
18501 | FalseVal = Sub; |
18502 | } |
18503 | } else if (isNullConstant(V: TrueVal)) { |
18504 | if (CC == ARMCC::EQ && !isNullConstant(V: RHS) && |
18505 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(V: FalseVal))) { |
18506 | // This seems pointless but will allow us to combine it further below |
18507 | // Note that we change == for != as this is the dual for the case above. |
18508 | // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBC x, y), z, !=, (SUBC x, y):1 |
18509 | SDValue Sub = |
18510 | DAG.getNode(Opcode: ARMISD::SUBC, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: MVT::i32), N1: LHS, N2: RHS); |
18511 | SDValue CPSRGlue = DAG.getCopyToReg(Chain: DAG.getEntryNode(), dl, Reg: ARM::CPSR, |
18512 | N: Sub.getValue(R: 1), Glue: SDValue()); |
18513 | Res = DAG.getNode(Opcode: ARMISD::CMOV, DL: dl, VT, N1: Sub, N2: FalseVal, |
18514 | N3: DAG.getConstant(Val: ARMCC::NE, DL: dl, VT: MVT::i32), |
18515 | N4: N->getOperand(Num: 3), N5: CPSRGlue.getValue(R: 1)); |
18516 | FalseVal = Sub; |
18517 | } |
18518 | } |
18519 | |
18520 | // On Thumb1, the DAG above may be further combined if z is a power of 2 |
18521 | // (z == 2 ^ K). |
18522 | // CMOV (SUBC x, y), z, !=, (SUBC x, y):1 -> |
18523 | // t1 = (USUBO (SUB x, y), 1) |
18524 | // t2 = (USUBO_CARRY (SUB x, y), t1:0, t1:1) |
18525 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
18526 | // |
18527 | // This also handles the special case of comparing against zero; it's |
18528 | // essentially, the same pattern, except there's no SUBC: |
18529 | // CMOV x, z, !=, (CMPZ x, 0) -> |
18530 | // t1 = (USUBO x, 1) |
18531 | // t2 = (USUBO_CARRY x, t1:0, t1:1) |
18532 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
18533 | const APInt *TrueConst; |
18534 | if (Subtarget->isThumb1Only() && CC == ARMCC::NE && |
18535 | ((FalseVal.getOpcode() == ARMISD::SUBC && FalseVal.getOperand(i: 0) == LHS && |
18536 | FalseVal.getOperand(i: 1) == RHS) || |
18537 | (FalseVal == LHS && isNullConstant(V: RHS))) && |
18538 | (TrueConst = isPowerOf2Constant(V: TrueVal))) { |
18539 | SDVTList VTs = DAG.getVTList(VT1: VT, VT2: MVT::i32); |
18540 | unsigned ShiftAmount = TrueConst->logBase2(); |
18541 | if (ShiftAmount) |
18542 | TrueVal = DAG.getConstant(Val: 1, DL: dl, VT); |
18543 | SDValue Subc = DAG.getNode(Opcode: ISD::USUBO, DL: dl, VTList: VTs, N1: FalseVal, N2: TrueVal); |
18544 | Res = DAG.getNode(Opcode: ISD::USUBO_CARRY, DL: dl, VTList: VTs, N1: FalseVal, N2: Subc, |
18545 | N3: Subc.getValue(R: 1)); |
18546 | |
18547 | if (ShiftAmount) |
18548 | Res = DAG.getNode(Opcode: ISD::SHL, DL: dl, VT, N1: Res, |
18549 | N2: DAG.getConstant(Val: ShiftAmount, DL: dl, VT: MVT::i32)); |
18550 | } |
18551 | |
18552 | if (Res.getNode()) { |
18553 | KnownBits Known = DAG.computeKnownBits(Op: SDValue(N,0)); |
18554 | // Capture demanded bits information that would be otherwise lost. |
18555 | if (Known.Zero == 0xfffffffe) |
18556 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
18557 | N2: DAG.getValueType(MVT::i1)); |
18558 | else if (Known.Zero == 0xffffff00) |
18559 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
18560 | N2: DAG.getValueType(MVT::i8)); |
18561 | else if (Known.Zero == 0xffff0000) |
18562 | Res = DAG.getNode(Opcode: ISD::AssertZext, DL: dl, VT: MVT::i32, N1: Res, |
18563 | N2: DAG.getValueType(MVT::i16)); |
18564 | } |
18565 | |
18566 | return Res; |
18567 | } |
18568 | |
18569 | static SDValue PerformBITCASTCombine(SDNode *N, |
18570 | TargetLowering::DAGCombinerInfo &DCI, |
18571 | const ARMSubtarget *ST) { |
18572 | SelectionDAG &DAG = DCI.DAG; |
18573 | SDValue Src = N->getOperand(Num: 0); |
18574 | EVT DstVT = N->getValueType(ResNo: 0); |
18575 | |
18576 | // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE. |
18577 | if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) { |
18578 | EVT SrcVT = Src.getValueType(); |
18579 | if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits()) |
18580 | return DAG.getNode(Opcode: ARMISD::VDUP, DL: SDLoc(N), VT: DstVT, Operand: Src.getOperand(i: 0)); |
18581 | } |
18582 | |
18583 | // We may have a bitcast of something that has already had this bitcast |
18584 | // combine performed on it, so skip past any VECTOR_REG_CASTs. |
18585 | while (Src.getOpcode() == ARMISD::VECTOR_REG_CAST) |
18586 | Src = Src.getOperand(i: 0); |
18587 | |
18588 | // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that |
18589 | // would be generated is at least the width of the element type. |
18590 | EVT SrcVT = Src.getValueType(); |
18591 | if ((Src.getOpcode() == ARMISD::VMOVIMM || |
18592 | Src.getOpcode() == ARMISD::VMVNIMM || |
18593 | Src.getOpcode() == ARMISD::VMOVFPIMM) && |
18594 | SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() && |
18595 | DAG.getDataLayout().isBigEndian()) |
18596 | return DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL: SDLoc(N), VT: DstVT, Operand: Src); |
18597 | |
18598 | // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD x |
18599 | if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) |
18600 | return R; |
18601 | |
18602 | return SDValue(); |
18603 | } |
18604 | |
18605 | // Some combines for the MVETrunc truncations legalizer helper. Also lowers the |
18606 | // node into stack operations after legalizeOps. |
18607 | SDValue ARMTargetLowering::PerformMVETruncCombine( |
18608 | SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { |
18609 | SelectionDAG &DAG = DCI.DAG; |
18610 | EVT VT = N->getValueType(ResNo: 0); |
18611 | SDLoc DL(N); |
18612 | |
18613 | // MVETrunc(Undef, Undef) -> Undef |
18614 | if (all_of(Range: N->ops(), P: [](SDValue Op) { return Op.isUndef(); })) |
18615 | return DAG.getUNDEF(VT); |
18616 | |
18617 | // MVETrunc(MVETrunc a b, MVETrunc c, d) -> MVETrunc |
18618 | if (N->getNumOperands() == 2 && |
18619 | N->getOperand(Num: 0).getOpcode() == ARMISD::MVETRUNC && |
18620 | N->getOperand(Num: 1).getOpcode() == ARMISD::MVETRUNC) |
18621 | return DAG.getNode(Opcode: ARMISD::MVETRUNC, DL, VT, N1: N->getOperand(Num: 0).getOperand(i: 0), |
18622 | N2: N->getOperand(Num: 0).getOperand(i: 1), |
18623 | N3: N->getOperand(Num: 1).getOperand(i: 0), |
18624 | N4: N->getOperand(Num: 1).getOperand(i: 1)); |
18625 | |
18626 | // MVETrunc(shuffle, shuffle) -> VMOVN |
18627 | if (N->getNumOperands() == 2 && |
18628 | N->getOperand(Num: 0).getOpcode() == ISD::VECTOR_SHUFFLE && |
18629 | N->getOperand(Num: 1).getOpcode() == ISD::VECTOR_SHUFFLE) { |
18630 | auto *S0 = cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 0).getNode()); |
18631 | auto *S1 = cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 1).getNode()); |
18632 | |
18633 | if (S0->getOperand(Num: 0) == S1->getOperand(Num: 0) && |
18634 | S0->getOperand(Num: 1) == S1->getOperand(Num: 1)) { |
18635 | // Construct complete shuffle mask |
18636 | SmallVector<int, 8> Mask(S0->getMask()); |
18637 | Mask.append(in_start: S1->getMask().begin(), in_end: S1->getMask().end()); |
18638 | |
18639 | if (isVMOVNTruncMask(M: Mask, ToVT: VT, rev: false)) |
18640 | return DAG.getNode( |
18641 | Opcode: ARMISD::VMOVN, DL, VT, |
18642 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 0)), |
18643 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 1)), |
18644 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
18645 | if (isVMOVNTruncMask(M: Mask, ToVT: VT, rev: true)) |
18646 | return DAG.getNode( |
18647 | Opcode: ARMISD::VMOVN, DL, VT, |
18648 | N1: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 1)), |
18649 | N2: DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: S0->getOperand(Num: 0)), |
18650 | N3: DAG.getConstant(Val: 1, DL, VT: MVT::i32)); |
18651 | } |
18652 | } |
18653 | |
18654 | // For MVETrunc of a buildvector or shuffle, it can be beneficial to lower the |
18655 | // truncate to a buildvector to allow the generic optimisations to kick in. |
18656 | if (all_of(Range: N->ops(), P: [](SDValue Op) { |
18657 | return Op.getOpcode() == ISD::BUILD_VECTOR || |
18658 | Op.getOpcode() == ISD::VECTOR_SHUFFLE || |
18659 | (Op.getOpcode() == ISD::BITCAST && |
18660 | Op.getOperand(i: 0).getOpcode() == ISD::BUILD_VECTOR); |
18661 | })) { |
    SmallVector<SDValue, 8> Extracts;
18663 | for (unsigned Op = 0; Op < N->getNumOperands(); Op++) { |
18664 | SDValue O = N->getOperand(Num: Op); |
18665 | for (unsigned i = 0; i < O.getValueType().getVectorNumElements(); i++) { |
18666 | SDValue Ext = DAG.getNode(Opcode: ISD::EXTRACT_VECTOR_ELT, DL, VT: MVT::i32, N1: O, |
18667 | N2: DAG.getConstant(Val: i, DL, VT: MVT::i32)); |
18668 | Extracts.push_back(Elt: Ext); |
18669 | } |
18670 | } |
18671 | return DAG.getBuildVector(VT, DL, Ops: Extracts); |
18672 | } |
18673 | |
18674 | // If we are late in the legalization process and nothing has optimised |
18675 | // the trunc to anything better, lower it to a stack store and reload, |
18676 | // performing the truncation whilst keeping the lanes in the correct order: |
18677 | // VSTRH.32 a, stack; VSTRH.32 b, stack+8; VLDRW.32 stack; |
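  //
  // For example (a sketch of the v8i16 result case): each of the two v4i32
  // inputs is truncate-stored as v4i16 into one half of a 16-byte stack slot,
  // and the whole slot is then reloaded as a single v8i16, preserving the lane
  // order of a normal truncate without any lane-interleaving moves.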
18678 | if (!DCI.isAfterLegalizeDAG()) |
18679 | return SDValue(); |
18680 | |
18681 | SDValue StackPtr = DAG.CreateStackTemporary(Bytes: TypeSize::getFixed(ExactSize: 16), Alignment: Align(4)); |
18682 | int SPFI = cast<FrameIndexSDNode>(Val: StackPtr.getNode())->getIndex(); |
18683 | int NumIns = N->getNumOperands(); |
18684 | assert((NumIns == 2 || NumIns == 4) && |
18685 | "Expected 2 or 4 inputs to an MVETrunc" ); |
18686 | EVT StoreVT = VT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
18687 | if (N->getNumOperands() == 4) |
18688 | StoreVT = StoreVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
18689 | |
18690 | SmallVector<SDValue> Chains; |
18691 | for (int I = 0; I < NumIns; I++) { |
18692 | SDValue Ptr = DAG.getNode( |
18693 | Opcode: ISD::ADD, DL, VT: StackPtr.getValueType(), N1: StackPtr, |
18694 | N2: DAG.getConstant(Val: I * 16 / NumIns, DL, VT: StackPtr.getValueType())); |
18695 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack( |
18696 | MF&: DAG.getMachineFunction(), FI: SPFI, Offset: I * 16 / NumIns); |
18697 | SDValue Ch = DAG.getTruncStore(Chain: DAG.getEntryNode(), dl: DL, Val: N->getOperand(Num: I), |
18698 | Ptr, PtrInfo: MPI, SVT: StoreVT, Alignment: Align(4)); |
18699 | Chains.push_back(Elt: Ch); |
18700 | } |
18701 | |
18702 | SDValue Chain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
18703 | MachinePointerInfo MPI = |
18704 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI: SPFI, Offset: 0); |
18705 | return DAG.getLoad(VT, dl: DL, Chain, Ptr: StackPtr, PtrInfo: MPI, Alignment: Align(4)); |
18706 | } |
18707 | |
18708 | // Take a MVEEXT(load x) and split that into (extload x, extload x+8) |
18709 | static SDValue PerformSplittingMVEEXTToWideningLoad(SDNode *N, |
18710 | SelectionDAG &DAG) { |
18711 | SDValue N0 = N->getOperand(Num: 0); |
18712 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N0.getNode()); |
18713 | if (!LD || !LD->isSimple() || !N0.hasOneUse() || LD->isIndexed()) |
18714 | return SDValue(); |
18715 | |
18716 | EVT FromVT = LD->getMemoryVT(); |
18717 | EVT ToVT = N->getValueType(ResNo: 0); |
18718 | if (!ToVT.isVector()) |
18719 | return SDValue(); |
18720 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements() * 2); |
18721 | EVT ToEltVT = ToVT.getVectorElementType(); |
18722 | EVT FromEltVT = FromVT.getVectorElementType(); |
18723 | |
18724 | unsigned NumElements = 0; |
18725 | if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8)) |
18726 | NumElements = 4; |
18727 | if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8) |
18728 | NumElements = 8; |
18729 | assert(NumElements != 0); |
18730 | |
18731 | ISD::LoadExtType NewExtType = |
18732 | N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD; |
18733 | if (LD->getExtensionType() != ISD::NON_EXTLOAD && |
18734 | LD->getExtensionType() != ISD::EXTLOAD && |
18735 | LD->getExtensionType() != NewExtType) |
18736 | return SDValue(); |
18737 | |
18738 | LLVMContext &C = *DAG.getContext(); |
18739 | SDLoc DL(LD); |
18740 | // Details about the old load |
18741 | SDValue Ch = LD->getChain(); |
18742 | SDValue BasePtr = LD->getBasePtr(); |
18743 | Align Alignment = LD->getOriginalAlign(); |
18744 | MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); |
18745 | AAMDNodes AAInfo = LD->getAAInfo(); |
18746 | |
18747 | SDValue Offset = DAG.getUNDEF(VT: BasePtr.getValueType()); |
18748 | EVT NewFromVT = EVT::getVectorVT( |
18749 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: FromEltVT.getScalarSizeInBits()), NumElements); |
18750 | EVT NewToVT = EVT::getVectorVT( |
18751 | Context&: C, VT: EVT::getIntegerVT(Context&: C, BitWidth: ToEltVT.getScalarSizeInBits()), NumElements); |
18752 | |
18753 | SmallVector<SDValue, 4> Loads; |
18754 | SmallVector<SDValue, 4> Chains; |
18755 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
18756 | unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; |
18757 | SDValue NewPtr = |
18758 | DAG.getObjectPtrOffset(SL: DL, Ptr: BasePtr, Offset: TypeSize::getFixed(ExactSize: NewOffset)); |
18759 | |
18760 | SDValue NewLoad = |
18761 | DAG.getLoad(AM: ISD::UNINDEXED, ExtType: NewExtType, VT: NewToVT, dl: DL, Chain: Ch, Ptr: NewPtr, Offset, |
18762 | PtrInfo: LD->getPointerInfo().getWithOffset(O: NewOffset), MemVT: NewFromVT, |
18763 | Alignment, MMOFlags, AAInfo); |
18764 | Loads.push_back(Elt: NewLoad); |
18765 | Chains.push_back(Elt: SDValue(NewLoad.getNode(), 1)); |
18766 | } |
18767 | |
18768 | SDValue NewChain = DAG.getNode(Opcode: ISD::TokenFactor, DL, VT: MVT::Other, Ops: Chains); |
18769 | DAG.ReplaceAllUsesOfValueWith(From: SDValue(LD, 1), To: NewChain); |
18770 | return DAG.getMergeValues(Ops: Loads, dl: DL); |
18771 | } |
18772 | |
// Perform combines for MVEEXT. If it has not been optimized to anything better
// before lowering, it gets converted to a stack store and extloads performing
// the extend whilst still keeping the same lane ordering.
18776 | SDValue ARMTargetLowering::PerformMVEExtCombine( |
18777 | SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { |
18778 | SelectionDAG &DAG = DCI.DAG; |
18779 | EVT VT = N->getValueType(ResNo: 0); |
18780 | SDLoc DL(N); |
  assert(N->getNumValues() == 2 && "Expected MVEEXT with 2 elements");
  assert((VT == MVT::v4i32 || VT == MVT::v8i16) && "Unexpected MVEEXT type");
18783 | |
18784 | EVT ExtVT = N->getOperand(Num: 0).getValueType().getHalfNumVectorElementsVT( |
18785 | Context&: *DAG.getContext()); |
18786 | auto Extend = [&](SDValue V) { |
18787 | SDValue VVT = DAG.getNode(Opcode: ARMISD::VECTOR_REG_CAST, DL, VT, Operand: V); |
18788 | return N->getOpcode() == ARMISD::MVESEXT |
18789 | ? DAG.getNode(Opcode: ISD::SIGN_EXTEND_INREG, DL, VT, N1: VVT, |
18790 | N2: DAG.getValueType(ExtVT)) |
18791 | : DAG.getZeroExtendInReg(Op: VVT, DL, VT: ExtVT); |
18792 | }; |
18793 | |
18794 | // MVEEXT(VDUP) -> SIGN_EXTEND_INREG(VDUP) |
18795 | if (N->getOperand(Num: 0).getOpcode() == ARMISD::VDUP) { |
18796 | SDValue Ext = Extend(N->getOperand(Num: 0)); |
18797 | return DAG.getMergeValues(Ops: {Ext, Ext}, dl: DL); |
18798 | } |
18799 | |
18800 | // MVEEXT(shuffle) -> SIGN_EXTEND_INREG/ZERO_EXTEND_INREG |
18801 | if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Val: N->getOperand(Num: 0))) { |
18802 | ArrayRef<int> Mask = SVN->getMask(); |
18803 | assert(Mask.size() == 2 * VT.getVectorNumElements()); |
18804 | assert(Mask.size() == SVN->getValueType(0).getVectorNumElements()); |
18805 | unsigned Rev = VT == MVT::v4i32 ? ARMISD::VREV32 : ARMISD::VREV16; |
18806 | SDValue Op0 = SVN->getOperand(Num: 0); |
18807 | SDValue Op1 = SVN->getOperand(Num: 1); |
18808 | |
18809 | auto CheckInregMask = [&](int Start, int Offset) { |
18810 | for (int Idx = 0, E = VT.getVectorNumElements(); Idx < E; ++Idx) |
18811 | if (Mask[Start + Idx] >= 0 && Mask[Start + Idx] != Idx * 2 + Offset) |
18812 | return false; |
18813 | return true; |
18814 | }; |
18815 | SDValue V0 = SDValue(N, 0); |
18816 | SDValue V1 = SDValue(N, 1); |
18817 | if (CheckInregMask(0, 0)) |
18818 | V0 = Extend(Op0); |
18819 | else if (CheckInregMask(0, 1)) |
18820 | V0 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op0)); |
18821 | else if (CheckInregMask(0, Mask.size())) |
18822 | V0 = Extend(Op1); |
18823 | else if (CheckInregMask(0, Mask.size() + 1)) |
18824 | V0 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op1)); |
18825 | |
18826 | if (CheckInregMask(VT.getVectorNumElements(), Mask.size())) |
18827 | V1 = Extend(Op1); |
18828 | else if (CheckInregMask(VT.getVectorNumElements(), Mask.size() + 1)) |
18829 | V1 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op1)); |
18830 | else if (CheckInregMask(VT.getVectorNumElements(), 0)) |
18831 | V1 = Extend(Op0); |
18832 | else if (CheckInregMask(VT.getVectorNumElements(), 1)) |
18833 | V1 = Extend(DAG.getNode(Opcode: Rev, DL, VT: SVN->getValueType(ResNo: 0), Operand: Op0)); |
18834 | |
18835 | if (V0.getNode() != N || V1.getNode() != N) |
18836 | return DAG.getMergeValues(Ops: {V0, V1}, dl: DL); |
18837 | } |
18838 | |
18839 | // MVEEXT(load) -> extload, extload |
18840 | if (N->getOperand(Num: 0)->getOpcode() == ISD::LOAD) |
18841 | if (SDValue L = PerformSplittingMVEEXTToWideningLoad(N, DAG)) |
18842 | return L; |
18843 | |
18844 | if (!DCI.isAfterLegalizeDAG()) |
18845 | return SDValue(); |
18846 | |
18847 | // Lower to a stack store and reload: |
18848 | // VSTRW.32 a, stack; VLDRH.32 stack; VLDRH.32 stack+8; |
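  //
  // For example (a sketch of the v4i32 result case): the whole v8i16 input is
  // stored once, and each v4i32 result is reloaded with an extending VLDRH.32
  // (signed or unsigned to match MVESEXT/MVEZEXT) from offsets 0 and 8,
  // preserving the original lane order.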
18849 | SDValue StackPtr = DAG.CreateStackTemporary(Bytes: TypeSize::getFixed(ExactSize: 16), Alignment: Align(4)); |
18850 | int SPFI = cast<FrameIndexSDNode>(Val: StackPtr.getNode())->getIndex(); |
18851 | int NumOuts = N->getNumValues(); |
18852 | assert((NumOuts == 2 || NumOuts == 4) && |
18853 | "Expected 2 or 4 outputs to an MVEEXT" ); |
18854 | EVT LoadVT = N->getOperand(Num: 0).getValueType().getHalfNumVectorElementsVT( |
18855 | Context&: *DAG.getContext()); |
18856 | if (N->getNumOperands() == 4) |
18857 | LoadVT = LoadVT.getHalfNumVectorElementsVT(Context&: *DAG.getContext()); |
18858 | |
18859 | MachinePointerInfo MPI = |
18860 | MachinePointerInfo::getFixedStack(MF&: DAG.getMachineFunction(), FI: SPFI, Offset: 0); |
18861 | SDValue Chain = DAG.getStore(Chain: DAG.getEntryNode(), dl: DL, Val: N->getOperand(Num: 0), |
18862 | Ptr: StackPtr, PtrInfo: MPI, Alignment: Align(4)); |
18863 | |
18864 | SmallVector<SDValue> Loads; |
18865 | for (int I = 0; I < NumOuts; I++) { |
18866 | SDValue Ptr = DAG.getNode( |
18867 | Opcode: ISD::ADD, DL, VT: StackPtr.getValueType(), N1: StackPtr, |
18868 | N2: DAG.getConstant(Val: I * 16 / NumOuts, DL, VT: StackPtr.getValueType())); |
18869 | MachinePointerInfo MPI = MachinePointerInfo::getFixedStack( |
18870 | MF&: DAG.getMachineFunction(), FI: SPFI, Offset: I * 16 / NumOuts); |
18871 | SDValue Load = DAG.getExtLoad( |
18872 | ExtType: N->getOpcode() == ARMISD::MVESEXT ? ISD::SEXTLOAD : ISD::ZEXTLOAD, dl: DL, |
18873 | VT, Chain, Ptr, PtrInfo: MPI, MemVT: LoadVT, Alignment: Align(4)); |
18874 | Loads.push_back(Elt: Load); |
18875 | } |
18876 | |
18877 | return DAG.getMergeValues(Ops: Loads, dl: DL); |
18878 | } |
18879 | |
18880 | SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, |
18881 | DAGCombinerInfo &DCI) const { |
18882 | switch (N->getOpcode()) { |
18883 | default: break; |
18884 | case ISD::SELECT_CC: |
18885 | case ISD::SELECT: return PerformSELECTCombine(N, DCI, Subtarget); |
18886 | case ISD::VSELECT: return PerformVSELECTCombine(N, DCI, Subtarget); |
18887 | case ISD::SETCC: return PerformVSetCCToVCTPCombine(N, DCI, Subtarget); |
18888 | case ARMISD::ADDE: return PerformADDECombine(N, DCI, Subtarget); |
18889 | case ARMISD::UMLAL: return PerformUMLALCombine(N, DAG&: DCI.DAG, Subtarget); |
18890 | case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); |
18891 | case ISD::SUB: return PerformSUBCombine(N, DCI, Subtarget); |
18892 | case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); |
18893 | case ISD::OR: return PerformORCombine(N, DCI, Subtarget); |
18894 | case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); |
18895 | case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); |
18896 | case ISD::BRCOND: |
18897 | case ISD::BR_CC: return PerformHWLoopCombine(N, DCI, ST: Subtarget); |
18898 | case ARMISD::ADDC: |
18899 | case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI, Subtarget); |
18900 | case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI, Subtarget); |
18901 | case ARMISD::BFI: return PerformBFICombine(N, DAG&: DCI.DAG); |
18902 | case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); |
18903 | case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DAG&: DCI.DAG); |
18904 | case ARMISD::VMOVhr: return PerformVMOVhrCombine(N, DCI); |
18905 | case ARMISD::VMOVrh: return PerformVMOVrhCombine(N, DAG&: DCI.DAG); |
18906 | case ISD::STORE: return PerformSTORECombine(N, DCI, Subtarget); |
18907 | case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); |
18908 | case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); |
18909 | case ISD::EXTRACT_VECTOR_ELT: |
18910 | return PerformExtractEltCombine(N, DCI, ST: Subtarget); |
18911 | case ISD::SIGN_EXTEND_INREG: return PerformSignExtendInregCombine(N, DAG&: DCI.DAG); |
18912 | case ISD::INSERT_SUBVECTOR: return PerformInsertSubvectorCombine(N, DCI); |
18913 | case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DAG&: DCI.DAG); |
18914 | case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget); |
18915 | case ARMISD::VDUP: return PerformVDUPCombine(N, DAG&: DCI.DAG, Subtarget); |
18916 | case ISD::FP_TO_SINT: |
18917 | case ISD::FP_TO_UINT: |
18918 | return PerformVCVTCombine(N, DAG&: DCI.DAG, Subtarget); |
18919 | case ISD::FADD: |
18920 | return PerformFADDCombine(N, DAG&: DCI.DAG, Subtarget); |
18921 | case ISD::FMUL: |
18922 | return PerformVMulVCTPCombine(N, DAG&: DCI.DAG, Subtarget); |
18923 | case ISD::INTRINSIC_WO_CHAIN: |
18924 | return PerformIntrinsicCombine(N, DCI); |
18925 | case ISD::SHL: |
18926 | case ISD::SRA: |
18927 | case ISD::SRL: |
18928 | return PerformShiftCombine(N, DCI, ST: Subtarget); |
18929 | case ISD::SIGN_EXTEND: |
18930 | case ISD::ZERO_EXTEND: |
18931 | case ISD::ANY_EXTEND: |
18932 | return PerformExtendCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
18933 | case ISD::FP_EXTEND: |
18934 | return PerformFPExtendCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
18935 | case ISD::SMIN: |
18936 | case ISD::UMIN: |
18937 | case ISD::SMAX: |
18938 | case ISD::UMAX: |
18939 | return PerformMinMaxCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
18940 | case ARMISD::CMOV: |
18941 | return PerformCMOVCombine(N, DAG&: DCI.DAG); |
18942 | case ARMISD::BRCOND: |
18943 | return PerformBRCONDCombine(N, DAG&: DCI.DAG); |
18944 | case ARMISD::CMPZ: |
18945 | return PerformCMPZCombine(N, DAG&: DCI.DAG); |
18946 | case ARMISD::CSINC: |
18947 | case ARMISD::CSINV: |
18948 | case ARMISD::CSNEG: |
18949 | return PerformCSETCombine(N, DAG&: DCI.DAG); |
18950 | case ISD::LOAD: |
18951 | return PerformLOADCombine(N, DCI, Subtarget); |
18952 | case ARMISD::VLD1DUP: |
18953 | case ARMISD::VLD2DUP: |
18954 | case ARMISD::VLD3DUP: |
18955 | case ARMISD::VLD4DUP: |
18956 | return PerformVLDCombine(N, DCI); |
18957 | case ARMISD::BUILD_VECTOR: |
18958 | return PerformARMBUILD_VECTORCombine(N, DCI); |
18959 | case ISD::BITCAST: |
18960 | return PerformBITCASTCombine(N, DCI, ST: Subtarget); |
18961 | case ARMISD::PREDICATE_CAST: |
18962 | return PerformPREDICATE_CASTCombine(N, DCI); |
18963 | case ARMISD::VECTOR_REG_CAST: |
18964 | return PerformVECTOR_REG_CASTCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
18965 | case ARMISD::MVETRUNC: |
18966 | return PerformMVETruncCombine(N, DCI); |
18967 | case ARMISD::MVESEXT: |
18968 | case ARMISD::MVEZEXT: |
18969 | return PerformMVEExtCombine(N, DCI); |
18970 | case ARMISD::VCMP: |
18971 | return PerformVCMPCombine(N, DAG&: DCI.DAG, Subtarget); |
18972 | case ISD::VECREDUCE_ADD: |
18973 | return PerformVECREDUCE_ADDCombine(N, DAG&: DCI.DAG, ST: Subtarget); |
18974 | case ARMISD::VADDVs: |
18975 | case ARMISD::VADDVu: |
18976 | case ARMISD::VADDLVs: |
18977 | case ARMISD::VADDLVu: |
18978 | case ARMISD::VADDLVAs: |
18979 | case ARMISD::VADDLVAu: |
18980 | case ARMISD::VMLAVs: |
18981 | case ARMISD::VMLAVu: |
18982 | case ARMISD::VMLALVs: |
18983 | case ARMISD::VMLALVu: |
18984 | case ARMISD::VMLALVAs: |
18985 | case ARMISD::VMLALVAu: |
18986 | return PerformReduceShuffleCombine(N, DAG&: DCI.DAG); |
18987 | case ARMISD::VMOVN: |
18988 | return PerformVMOVNCombine(N, DCI); |
18989 | case ARMISD::VQMOVNs: |
18990 | case ARMISD::VQMOVNu: |
18991 | return PerformVQMOVNCombine(N, DCI); |
18992 | case ARMISD::VQDMULH: |
18993 | return PerformVQDMULHCombine(N, DCI); |
18994 | case ARMISD::ASRL: |
18995 | case ARMISD::LSRL: |
18996 | case ARMISD::LSLL: |
18997 | return PerformLongShiftCombine(N, DAG&: DCI.DAG); |
18998 | case ARMISD::SMULWB: { |
18999 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
19000 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 16); |
19001 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
19002 | return SDValue(); |
19003 | break; |
19004 | } |
19005 | case ARMISD::SMULWT: { |
19006 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
19007 | APInt DemandedMask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: 16); |
19008 | if (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI)) |
19009 | return SDValue(); |
19010 | break; |
19011 | } |
19012 | case ARMISD::SMLALBB: |
19013 | case ARMISD::QADD16b: |
19014 | case ARMISD::QSUB16b: |
19015 | case ARMISD::UQADD16b: |
19016 | case ARMISD::UQSUB16b: { |
19017 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
19018 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 16); |
19019 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
19020 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
19021 | return SDValue(); |
19022 | break; |
19023 | } |
19024 | case ARMISD::SMLALBT: { |
19025 | unsigned LowWidth = N->getOperand(Num: 0).getValueType().getSizeInBits(); |
19026 | APInt LowMask = APInt::getLowBitsSet(numBits: LowWidth, loBitsSet: 16); |
19027 | unsigned HighWidth = N->getOperand(Num: 1).getValueType().getSizeInBits(); |
19028 | APInt HighMask = APInt::getHighBitsSet(numBits: HighWidth, hiBitsSet: 16); |
19029 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: LowMask, DCI)) || |
19030 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: HighMask, DCI))) |
19031 | return SDValue(); |
19032 | break; |
19033 | } |
19034 | case ARMISD::SMLALTB: { |
19035 | unsigned HighWidth = N->getOperand(Num: 0).getValueType().getSizeInBits(); |
19036 | APInt HighMask = APInt::getHighBitsSet(numBits: HighWidth, hiBitsSet: 16); |
19037 | unsigned LowWidth = N->getOperand(Num: 1).getValueType().getSizeInBits(); |
19038 | APInt LowMask = APInt::getLowBitsSet(numBits: LowWidth, loBitsSet: 16); |
19039 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: HighMask, DCI)) || |
19040 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: LowMask, DCI))) |
19041 | return SDValue(); |
19042 | break; |
19043 | } |
19044 | case ARMISD::SMLALTT: { |
19045 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
19046 | APInt DemandedMask = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: 16); |
19047 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
19048 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
19049 | return SDValue(); |
19050 | break; |
19051 | } |
19052 | case ARMISD::QADD8b: |
19053 | case ARMISD::QSUB8b: |
19054 | case ARMISD::UQADD8b: |
19055 | case ARMISD::UQSUB8b: { |
19056 | unsigned BitWidth = N->getValueType(ResNo: 0).getSizeInBits(); |
19057 | APInt DemandedMask = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: 8); |
19058 | if ((SimplifyDemandedBits(Op: N->getOperand(Num: 0), DemandedBits: DemandedMask, DCI)) || |
19059 | (SimplifyDemandedBits(Op: N->getOperand(Num: 1), DemandedBits: DemandedMask, DCI))) |
19060 | return SDValue(); |
19061 | break; |
19062 | } |
19063 | case ISD::INTRINSIC_VOID: |
19064 | case ISD::INTRINSIC_W_CHAIN: |
19065 | switch (N->getConstantOperandVal(Num: 1)) { |
19066 | case Intrinsic::arm_neon_vld1: |
19067 | case Intrinsic::arm_neon_vld1x2: |
19068 | case Intrinsic::arm_neon_vld1x3: |
19069 | case Intrinsic::arm_neon_vld1x4: |
19070 | case Intrinsic::arm_neon_vld2: |
19071 | case Intrinsic::arm_neon_vld3: |
19072 | case Intrinsic::arm_neon_vld4: |
19073 | case Intrinsic::arm_neon_vld2lane: |
19074 | case Intrinsic::arm_neon_vld3lane: |
19075 | case Intrinsic::arm_neon_vld4lane: |
19076 | case Intrinsic::arm_neon_vld2dup: |
19077 | case Intrinsic::arm_neon_vld3dup: |
19078 | case Intrinsic::arm_neon_vld4dup: |
19079 | case Intrinsic::arm_neon_vst1: |
19080 | case Intrinsic::arm_neon_vst1x2: |
19081 | case Intrinsic::arm_neon_vst1x3: |
19082 | case Intrinsic::arm_neon_vst1x4: |
19083 | case Intrinsic::arm_neon_vst2: |
19084 | case Intrinsic::arm_neon_vst3: |
19085 | case Intrinsic::arm_neon_vst4: |
19086 | case Intrinsic::arm_neon_vst2lane: |
19087 | case Intrinsic::arm_neon_vst3lane: |
19088 | case Intrinsic::arm_neon_vst4lane: |
19089 | return PerformVLDCombine(N, DCI); |
19090 | case Intrinsic::arm_mve_vld2q: |
19091 | case Intrinsic::arm_mve_vld4q: |
19092 | case Intrinsic::arm_mve_vst2q: |
19093 | case Intrinsic::arm_mve_vst4q: |
19094 | return PerformMVEVLDCombine(N, DCI); |
19095 | default: break; |
19096 | } |
19097 | break; |
19098 | } |
19099 | return SDValue(); |
19100 | } |
19101 | |
19102 | bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, |
19103 | EVT VT) const { |
19104 | return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); |
19105 | } |
19106 | |
19107 | bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, |
19108 | Align Alignment, |
19109 | MachineMemOperand::Flags, |
19110 | unsigned *Fast) const { |
19111 | // Depends what it gets converted into if the type is weird. |
19112 | if (!VT.isSimple()) |
19113 | return false; |
19114 | |
19115 | // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus |
19116 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); |
19117 | auto Ty = VT.getSimpleVT().SimpleTy; |
19118 | |
19119 | if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) { |
    // Unaligned access can use (for example) LDRB, LDRH, LDR
19121 | if (AllowsUnaligned) { |
19122 | if (Fast) |
19123 | *Fast = Subtarget->hasV7Ops(); |
19124 | return true; |
19125 | } |
19126 | } |
19127 | |
19128 | if (Ty == MVT::f64 || Ty == MVT::v2f64) { |
19129 | // For any little-endian targets with neon, we can support unaligned ld/st |
19130 | // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8. |
19131 | // A big-endian target may also explicitly support unaligned accesses |
19132 | if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) { |
19133 | if (Fast) |
19134 | *Fast = 1; |
19135 | return true; |
19136 | } |
19137 | } |
19138 | |
19139 | if (!Subtarget->hasMVEIntegerOps()) |
19140 | return false; |
19141 | |
19142 | // These are for predicates |
19143 | if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1 || |
19144 | Ty == MVT::v2i1)) { |
19145 | if (Fast) |
19146 | *Fast = 1; |
19147 | return true; |
19148 | } |
19149 | |
19150 | // These are for truncated stores/narrowing loads. They are fine so long as |
19151 | // the alignment is at least the size of the item being loaded |
19152 | if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) && |
19153 | Alignment >= VT.getScalarSizeInBits() / 8) { |
19154 | if (Fast) |
19155 | *Fast = true; |
19156 | return true; |
19157 | } |
19158 | |
19159 | // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and |
19160 | // VSTRW.U32 all store the vector register in exactly the same format, and |
19161 | // differ only in the range of their immediate offset field and the required |
19162 | // alignment. So there is always a store that can be used, regardless of |
19163 | // actual type. |
19164 | // |
  // For big endian, that is not the case. But we can still emit a (VSTRB.U8;
19166 | // VREV64.8) pair and get the same effect. This will likely be better than |
19167 | // aligning the vector through the stack. |
19168 | if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 || |
19169 | Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 || |
19170 | Ty == MVT::v2f64) { |
19171 | if (Fast) |
19172 | *Fast = 1; |
19173 | return true; |
19174 | } |
19175 | |
19176 | return false; |
19177 | } |
19178 | |
19179 | |
19180 | EVT ARMTargetLowering::getOptimalMemOpType( |
19181 | const MemOp &Op, const AttributeList &FuncAttributes) const { |
19182 | // See if we can use NEON instructions for this... |
19183 | if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() && |
19184 | !FuncAttributes.hasFnAttr(Kind: Attribute::NoImplicitFloat)) { |
19185 | unsigned Fast; |
19186 | if (Op.size() >= 16 && |
19187 | (Op.isAligned(AlignCheck: Align(16)) || |
19188 | (allowsMisalignedMemoryAccesses(VT: MVT::v2f64, 0, Alignment: Align(1), |
19189 | MachineMemOperand::MONone, Fast: &Fast) && |
19190 | Fast))) { |
19191 | return MVT::v2f64; |
19192 | } else if (Op.size() >= 8 && |
19193 | (Op.isAligned(AlignCheck: Align(8)) || |
19194 | (allowsMisalignedMemoryAccesses( |
19195 | VT: MVT::f64, 0, Alignment: Align(1), MachineMemOperand::MONone, Fast: &Fast) && |
19196 | Fast))) { |
19197 | return MVT::f64; |
19198 | } |
19199 | } |
19200 | |
19201 | // Let the target-independent logic figure it out. |
19202 | return MVT::Other; |
19203 | } |
19204 | |
19205 | // 64-bit integers are split into their high and low parts and held in two |
19206 | // different registers, so the trunc is free since the low register can just |
19207 | // be used. |
19208 | bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { |
19209 | if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) |
19210 | return false; |
19211 | unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); |
19212 | unsigned DestBits = DstTy->getPrimitiveSizeInBits(); |
19213 | return (SrcBits == 64 && DestBits == 32); |
19214 | } |
19215 | |
19216 | bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { |
19217 | if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() || |
19218 | !DstVT.isInteger()) |
19219 | return false; |
19220 | unsigned SrcBits = SrcVT.getSizeInBits(); |
19221 | unsigned DestBits = DstVT.getSizeInBits(); |
19222 | return (SrcBits == 64 && DestBits == 32); |
19223 | } |
19224 | |
19225 | bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { |
19226 | if (Val.getOpcode() != ISD::LOAD) |
19227 | return false; |
19228 | |
19229 | EVT VT1 = Val.getValueType(); |
19230 | if (!VT1.isSimple() || !VT1.isInteger() || |
19231 | !VT2.isSimple() || !VT2.isInteger()) |
19232 | return false; |
19233 | |
19234 | switch (VT1.getSimpleVT().SimpleTy) { |
19235 | default: break; |
19236 | case MVT::i1: |
19237 | case MVT::i8: |
19238 | case MVT::i16: |
19239 | // 8-bit and 16-bit loads implicitly zero-extend to 32-bits. |
19240 | return true; |
19241 | } |
19242 | |
19243 | return false; |
19244 | } |
19245 | |
19246 | bool ARMTargetLowering::isFNegFree(EVT VT) const { |
19247 | if (!VT.isSimple()) |
19248 | return false; |
19249 | |
19250 | // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that |
19251 | // negate values directly (fneg is free). So, we don't want to let the DAG |
19252 | // combiner rewrite fneg into xors and some other instructions. For f16 and |
19253 | // FullFP16 argument passing, some bitcast nodes may be introduced, |
19254 | // triggering this DAG combine rewrite, so we are avoiding that with this. |
19255 | switch (VT.getSimpleVT().SimpleTy) { |
19256 | default: break; |
19257 | case MVT::f16: |
19258 | return Subtarget->hasFullFP16(); |
19259 | } |
19260 | |
19261 | return false; |
19262 | } |
19263 | |
19264 | /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth |
19265 | /// of the vector elements. |
static bool areExtractExts(Value *Ext1, Value *Ext2) {
19267 | auto areExtDoubled = [](Instruction *Ext) { |
19268 | return Ext->getType()->getScalarSizeInBits() == |
19269 | 2 * Ext->getOperand(i: 0)->getType()->getScalarSizeInBits(); |
19270 | }; |
19271 | |
19272 | if (!match(V: Ext1, P: m_ZExtOrSExt(Op: m_Value())) || |
19273 | !match(V: Ext2, P: m_ZExtOrSExt(Op: m_Value())) || |
19274 | !areExtDoubled(cast<Instruction>(Val: Ext1)) || |
19275 | !areExtDoubled(cast<Instruction>(Val: Ext2))) |
19276 | return false; |
19277 | |
19278 | return true; |
19279 | } |
19280 | |
19281 | /// Check if sinking \p I's operands to I's basic block is profitable, because |
19282 | /// the operands can be folded into a target instruction, e.g. |
19283 | /// sext/zext can be folded into vsubl. |
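/// For example (illustrative IR):
///   %xw = sext <8 x i8> %x to <8 x i16>
///   %yw = sext <8 x i8> %y to <8 x i16>
///   %d  = sub <8 x i16> %xw, %yw   ; selectable as a single VSUBL.S8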
19284 | bool ARMTargetLowering::shouldSinkOperands(Instruction *I, |
19285 | SmallVectorImpl<Use *> &Ops) const { |
19286 | if (!I->getType()->isVectorTy()) |
19287 | return false; |
19288 | |
19289 | if (Subtarget->hasNEON()) { |
19290 | switch (I->getOpcode()) { |
19291 | case Instruction::Sub: |
19292 | case Instruction::Add: { |
19293 | if (!areExtractExts(Ext1: I->getOperand(i: 0), Ext2: I->getOperand(i: 1))) |
19294 | return false; |
19295 | Ops.push_back(Elt: &I->getOperandUse(i: 0)); |
19296 | Ops.push_back(Elt: &I->getOperandUse(i: 1)); |
19297 | return true; |
19298 | } |
19299 | default: |
19300 | return false; |
19301 | } |
19302 | } |
19303 | |
19304 | if (!Subtarget->hasMVEIntegerOps()) |
19305 | return false; |
19306 | |
19307 | auto IsFMSMul = [&](Instruction *I) { |
19308 | if (!I->hasOneUse()) |
19309 | return false; |
19310 | auto *Sub = cast<Instruction>(Val: *I->users().begin()); |
19311 | return Sub->getOpcode() == Instruction::FSub && Sub->getOperand(i: 1) == I; |
19312 | }; |
19313 | auto IsFMS = [&](Instruction *I) { |
19314 | if (match(V: I->getOperand(i: 0), P: m_FNeg(X: m_Value())) || |
19315 | match(V: I->getOperand(i: 1), P: m_FNeg(X: m_Value()))) |
19316 | return true; |
19317 | return false; |
19318 | }; |
19319 | |
19320 | auto IsSinker = [&](Instruction *I, int Operand) { |
19321 | switch (I->getOpcode()) { |
19322 | case Instruction::Add: |
19323 | case Instruction::Mul: |
19324 | case Instruction::FAdd: |
19325 | case Instruction::ICmp: |
19326 | case Instruction::FCmp: |
19327 | return true; |
19328 | case Instruction::FMul: |
19329 | return !IsFMSMul(I); |
19330 | case Instruction::Sub: |
19331 | case Instruction::FSub: |
19332 | case Instruction::Shl: |
19333 | case Instruction::LShr: |
19334 | case Instruction::AShr: |
19335 | return Operand == 1; |
19336 | case Instruction::Call: |
19337 | if (auto *II = dyn_cast<IntrinsicInst>(Val: I)) { |
19338 | switch (II->getIntrinsicID()) { |
19339 | case Intrinsic::fma: |
19340 | return !IsFMS(I); |
19341 | case Intrinsic::sadd_sat: |
19342 | case Intrinsic::uadd_sat: |
19343 | case Intrinsic::arm_mve_add_predicated: |
19344 | case Intrinsic::arm_mve_mul_predicated: |
19345 | case Intrinsic::arm_mve_qadd_predicated: |
19346 | case Intrinsic::arm_mve_vhadd: |
19347 | case Intrinsic::arm_mve_hadd_predicated: |
19348 | case Intrinsic::arm_mve_vqdmull: |
19349 | case Intrinsic::arm_mve_vqdmull_predicated: |
19350 | case Intrinsic::arm_mve_vqdmulh: |
19351 | case Intrinsic::arm_mve_qdmulh_predicated: |
19352 | case Intrinsic::arm_mve_vqrdmulh: |
19353 | case Intrinsic::arm_mve_qrdmulh_predicated: |
19354 | case Intrinsic::arm_mve_fma_predicated: |
19355 | return true; |
19356 | case Intrinsic::ssub_sat: |
19357 | case Intrinsic::usub_sat: |
19358 | case Intrinsic::arm_mve_sub_predicated: |
19359 | case Intrinsic::arm_mve_qsub_predicated: |
19360 | case Intrinsic::arm_mve_hsub_predicated: |
19361 | case Intrinsic::arm_mve_vhsub: |
19362 | return Operand == 1; |
19363 | default: |
19364 | return false; |
19365 | } |
19366 | } |
19367 | return false; |
19368 | default: |
19369 | return false; |
19370 | } |
19371 | }; |
19372 | |
19373 | for (auto OpIdx : enumerate(First: I->operands())) { |
19374 | Instruction *Op = dyn_cast<Instruction>(Val: OpIdx.value().get()); |
19375 | // Make sure we are not already sinking this operand |
19376 | if (!Op || any_of(Range&: Ops, P: [&](Use *U) { return U->get() == Op; })) |
19377 | continue; |
19378 | |
19379 | Instruction *Shuffle = Op; |
19380 | if (Shuffle->getOpcode() == Instruction::BitCast) |
19381 | Shuffle = dyn_cast<Instruction>(Val: Shuffle->getOperand(i: 0)); |
19382 | // We are looking for a splat that can be sunk. |
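    // That is, a chain of the form (illustrative IR):
    //   %ins   = insertelement <4 x i32> undef, i32 %s, i32 0
    //   %splat = shufflevector <4 x i32> %ins, <4 x i32> undef,
    //                          <4 x i32> zeroinitializer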
19383 | if (!Shuffle || |
19384 | !match(V: Shuffle, P: m_Shuffle( |
19385 | v1: m_InsertElt(Val: m_Undef(), Elt: m_Value(), Idx: m_ZeroInt()), |
19386 | v2: m_Undef(), mask: m_ZeroMask()))) |
19387 | continue; |
19388 | if (!IsSinker(I, OpIdx.index())) |
19389 | continue; |
19390 | |
19391 | // All uses of the shuffle should be sunk to avoid duplicating it across gpr |
19392 | // and vector registers |
19393 | for (Use &U : Op->uses()) { |
19394 | Instruction *Insn = cast<Instruction>(Val: U.getUser()); |
19395 | if (!IsSinker(Insn, U.getOperandNo())) |
19396 | return false; |
19397 | } |
19398 | |
19399 | Ops.push_back(Elt: &Shuffle->getOperandUse(i: 0)); |
19400 | if (Shuffle != Op) |
19401 | Ops.push_back(Elt: &Op->getOperandUse(i: 0)); |
19402 | Ops.push_back(Elt: &OpIdx.value()); |
19403 | } |
19404 | return true; |
19405 | } |
19406 | |
19407 | Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const { |
19408 | if (!Subtarget->hasMVEIntegerOps()) |
19409 | return nullptr; |
19410 | Type *SVIType = SVI->getType(); |
19411 | Type *ScalarType = SVIType->getScalarType(); |
19412 | |
19413 | if (ScalarType->isFloatTy()) |
19414 | return Type::getInt32Ty(C&: SVIType->getContext()); |
19415 | if (ScalarType->isHalfTy()) |
19416 | return Type::getInt16Ty(C&: SVIType->getContext()); |
19417 | return nullptr; |
19418 | } |
19419 | |
19420 | bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { |
19421 | EVT VT = ExtVal.getValueType(); |
19422 | |
19423 | if (!isTypeLegal(VT)) |
19424 | return false; |
19425 | |
19426 | if (auto *Ld = dyn_cast<MaskedLoadSDNode>(Val: ExtVal.getOperand(i: 0))) { |
19427 | if (Ld->isExpandingLoad()) |
19428 | return false; |
19429 | } |
19430 | |
19431 | if (Subtarget->hasMVEIntegerOps()) |
19432 | return true; |
19433 | |
19434 | // Don't create a loadext if we can fold the extension into a wide/long |
19435 | // instruction. |
19436 | // If there's more than one user instruction, the loadext is desirable no |
19437 | // matter what. There can be two uses by the same instruction. |
19438 | if (ExtVal->use_empty() || |
19439 | !ExtVal->use_begin()->isOnlyUserOf(N: ExtVal.getNode())) |
19440 | return true; |
19441 | |
19442 | SDNode *U = *ExtVal->use_begin(); |
19443 | if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || |
19444 | U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM)) |
19445 | return false; |
19446 | |
19447 | return true; |
19448 | } |
19449 | |
19450 | bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { |
19451 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) |
19452 | return false; |
19453 | |
19454 | if (!isTypeLegal(VT: EVT::getEVT(Ty: Ty1))) |
19455 | return false; |
19456 | |
  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
19458 | |
19459 | // Assuming the caller doesn't have a zeroext or signext return parameter, |
19460 | // truncation all the way down to i1 is valid. |
19461 | return true; |
19462 | } |
19463 | |
19464 | /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster |
19465 | /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be |
19466 | /// expanded to FMAs when this method returns true, otherwise fmuladd is |
19467 | /// expanded to fmul + fadd. |
19468 | /// |
19469 | /// ARM supports both fused and unfused multiply-add operations; we already |
19470 | /// lower a pair of fmul and fadd to the latter so it's not clear that there |
19471 | /// would be a gain or that the gain would be worthwhile enough to risk |
19472 | /// correctness bugs. |
19473 | /// |
19474 | /// For MVE, we set this to true as it helps simplify the need for some |
19475 | /// patterns (and we don't have the non-fused floating point instruction). |
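/// For example, a v4f32 fmuladd can then be selected directly as a single
/// VFMA.F32 rather than being split into separate multiply and add steps.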
19476 | bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, |
19477 | EVT VT) const { |
19478 | if (!VT.isSimple()) |
19479 | return false; |
19480 | |
19481 | switch (VT.getSimpleVT().SimpleTy) { |
19482 | case MVT::v4f32: |
19483 | case MVT::v8f16: |
19484 | return Subtarget->hasMVEFloatOps(); |
19485 | case MVT::f16: |
19486 | return Subtarget->useFPVFMx16(); |
19487 | case MVT::f32: |
19488 | return Subtarget->useFPVFMx(); |
19489 | case MVT::f64: |
19490 | return Subtarget->useFPVFMx64(); |
19491 | default: |
19492 | break; |
19493 | } |
19494 | |
19495 | return false; |
19496 | } |
19497 | |
19498 | static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { |
19499 | if (V < 0) |
19500 | return false; |
19501 | |
19502 | unsigned Scale = 1; |
19503 | switch (VT.getSimpleVT().SimpleTy) { |
19504 | case MVT::i1: |
19505 | case MVT::i8: |
19506 | // Scale == 1; |
19507 | break; |
19508 | case MVT::i16: |
19509 | // Scale == 2; |
19510 | Scale = 2; |
19511 | break; |
19512 | default: |
19513 | // On thumb1 we load most things (i32, i64, floats, etc) with a LDR |
19514 | // Scale == 4; |
19515 | Scale = 4; |
19516 | break; |
19517 | } |
19518 | |
19519 | if ((V & (Scale - 1)) != 0) |
19520 | return false; |
19521 | return isUInt<5>(x: V / Scale); |
19522 | } |
19523 | |
19524 | static bool isLegalT2AddressImmediate(int64_t V, EVT VT, |
19525 | const ARMSubtarget *Subtarget) { |
19526 | if (!VT.isInteger() && !VT.isFloatingPoint()) |
19527 | return false; |
19528 | if (VT.isVector() && Subtarget->hasNEON()) |
19529 | return false; |
19530 | if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() && |
19531 | !Subtarget->hasMVEFloatOps()) |
19532 | return false; |
19533 | |
19534 | bool IsNeg = false; |
19535 | if (V < 0) { |
19536 | IsNeg = true; |
19537 | V = -V; |
19538 | } |
19539 | |
19540 | unsigned NumBytes = std::max(a: (unsigned)VT.getSizeInBits() / 8, b: 1U); |
19541 | |
19542 | // MVE: size * imm7 |
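  // For example, a v4i32/v4f32 access encodes a 7-bit immediate scaled by 4,
  // i.e. offsets of up to +/-508 bytes in steps of 4.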
19543 | if (VT.isVector() && Subtarget->hasMVEIntegerOps()) { |
19544 | switch (VT.getSimpleVT().getVectorElementType().SimpleTy) { |
19545 | case MVT::i32: |
19546 | case MVT::f32: |
19547 | return isShiftedUInt<7,2>(x: V); |
19548 | case MVT::i16: |
19549 | case MVT::f16: |
19550 | return isShiftedUInt<7,1>(x: V); |
19551 | case MVT::i8: |
19552 | return isUInt<7>(x: V); |
19553 | default: |
19554 | return false; |
19555 | } |
19556 | } |
19557 | |
19558 | // half VLDR: 2 * imm8 |
19559 | if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16()) |
19560 | return isShiftedUInt<8, 1>(x: V); |
19561 | // VLDR and LDRD: 4 * imm8 |
19562 | if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8) |
19563 | return isShiftedUInt<8, 2>(x: V); |
19564 | |
19565 | if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) { |
19566 | // + imm12 or - imm8 |
19567 | if (IsNeg) |
19568 | return isUInt<8>(x: V); |
19569 | return isUInt<12>(x: V); |
19570 | } |
19571 | |
19572 | return false; |
19573 | } |
19574 | |
19575 | /// isLegalAddressImmediate - Return true if the integer value can be used |
19576 | /// as the offset of the target addressing mode for load / store of the |
19577 | /// given type. |
19578 | static bool isLegalAddressImmediate(int64_t V, EVT VT, |
19579 | const ARMSubtarget *Subtarget) { |
19580 | if (V == 0) |
19581 | return true; |
19582 | |
19583 | if (!VT.isSimple()) |
19584 | return false; |
19585 | |
19586 | if (Subtarget->isThumb1Only()) |
19587 | return isLegalT1AddressImmediate(V, VT); |
19588 | else if (Subtarget->isThumb2()) |
19589 | return isLegalT2AddressImmediate(V, VT, Subtarget); |
19590 | |
19591 | // ARM mode. |
19592 | if (V < 0) |
19593 | V = - V; |
19594 | switch (VT.getSimpleVT().SimpleTy) { |
19595 | default: return false; |
19596 | case MVT::i1: |
19597 | case MVT::i8: |
19598 | case MVT::i32: |
19599 | // +- imm12 |
19600 | return isUInt<12>(x: V); |
19601 | case MVT::i16: |
19602 | // +- imm8 |
19603 | return isUInt<8>(x: V); |
19604 | case MVT::f32: |
19605 | case MVT::f64: |
19606 | if (!Subtarget->hasVFP2Base()) // FIXME: NEON? |
19607 | return false; |
19608 | return isShiftedUInt<8, 2>(x: V); |
19609 | } |
19610 | } |
19611 | |
19612 | bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, |
19613 | EVT VT) const { |
19614 | int Scale = AM.Scale; |
19615 | if (Scale < 0) |
19616 | return false; |
19617 | |
19618 | switch (VT.getSimpleVT().SimpleTy) { |
19619 | default: return false; |
19620 | case MVT::i1: |
19621 | case MVT::i8: |
19622 | case MVT::i16: |
19623 | case MVT::i32: |
19624 | if (Scale == 1) |
19625 | return true; |
19626 | // r + r << imm |
19627 | Scale = Scale & ~1; |
19628 | return Scale == 2 || Scale == 4 || Scale == 8; |
19629 | case MVT::i64: |
19630 | // FIXME: What are we trying to model here? ldrd doesn't have an r + r |
19631 | // version in Thumb mode. |
19632 | // r + r |
19633 | if (Scale == 1) |
19634 | return true; |
19635 | // r * 2 (this can be lowered to r + r). |
19636 | if (!AM.HasBaseReg && Scale == 2) |
19637 | return true; |
19638 | return false; |
19639 | case MVT::isVoid: |
19640 | // Note, we allow "void" uses (basically, uses that aren't loads or |
19641 | // stores), because arm allows folding a scale into many arithmetic |
19642 | // operations. This should be made more precise and revisited later. |
19643 | |
19644 | // Allow r << imm, but the imm has to be a multiple of two. |
19645 | if (Scale & 1) return false; |
19646 | return isPowerOf2_32(Value: Scale); |
19647 | } |
19648 | } |
19649 | |
19650 | bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM, |
19651 | EVT VT) const { |
19652 | const int Scale = AM.Scale; |
19653 | |
19654 | // Negative scales are not supported in Thumb1. |
19655 | if (Scale < 0) |
19656 | return false; |
19657 | |
19658 | // Thumb1 addressing modes do not support register scaling excepting the |
19659 | // following cases: |
19660 | // 1. Scale == 1 means no scaling. |
19661 | // 2. Scale == 2 this can be lowered to r + r if there is no base register. |
19662 | return (Scale == 1) || (!AM.HasBaseReg && Scale == 2); |
19663 | } |
19664 | |
19665 | /// isLegalAddressingMode - Return true if the addressing mode represented |
19666 | /// by AM is legal for this target, for a load/store of the specified type. |
19667 | bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL, |
19668 | const AddrMode &AM, Type *Ty, |
19669 | unsigned AS, Instruction *I) const { |
19670 | EVT VT = getValueType(DL, Ty, AllowUnknown: true); |
19671 | if (!isLegalAddressImmediate(V: AM.BaseOffs, VT, Subtarget)) |
19672 | return false; |
19673 | |
19674 | // Can never fold addr of global into load/store. |
19675 | if (AM.BaseGV) |
19676 | return false; |
19677 | |
19678 | switch (AM.Scale) { |
19679 | case 0: // no scale reg, must be "r+i" or "r", or "i". |
19680 | break; |
19681 | default: |
19682 | // ARM doesn't support any R+R*scale+imm addr modes. |
19683 | if (AM.BaseOffs) |
19684 | return false; |
19685 | |
19686 | if (!VT.isSimple()) |
19687 | return false; |
19688 | |
19689 | if (Subtarget->isThumb1Only()) |
19690 | return isLegalT1ScaledAddressingMode(AM, VT); |
19691 | |
19692 | if (Subtarget->isThumb2()) |
19693 | return isLegalT2ScaledAddressingMode(AM, VT); |
19694 | |
19695 | int Scale = AM.Scale; |
19696 | switch (VT.getSimpleVT().SimpleTy) { |
19697 | default: return false; |
19698 | case MVT::i1: |
19699 | case MVT::i8: |
19700 | case MVT::i32: |
19701 | if (Scale < 0) Scale = -Scale; |
19702 | if (Scale == 1) |
19703 | return true; |
19704 | // r + r << imm |
19705 | return isPowerOf2_32(Value: Scale & ~1); |
19706 | case MVT::i16: |
19707 | case MVT::i64: |
19708 | // r +/- r |
19709 | if (Scale == 1 || (AM.HasBaseReg && Scale == -1)) |
19710 | return true; |
19711 | // r * 2 (this can be lowered to r + r). |
19712 | if (!AM.HasBaseReg && Scale == 2) |
19713 | return true; |
19714 | return false; |
19715 | |
19716 | case MVT::isVoid: |
19717 | // Note, we allow "void" uses (basically, uses that aren't loads or |
19718 | // stores), because arm allows folding a scale into many arithmetic |
19719 | // operations. This should be made more precise and revisited later. |
19720 | |
19721 | // Allow r << imm, but the imm has to be a multiple of two. |
19722 | if (Scale & 1) return false; |
19723 | return isPowerOf2_32(Value: Scale); |
19724 | } |
19725 | } |
19726 | return true; |
19727 | } |
19728 | |
19729 | /// isLegalICmpImmediate - Return true if the specified immediate is legal |
19730 | /// icmp immediate, that is the target has icmp instructions which can compare |
19731 | /// a register against the immediate without having to materialize the |
19732 | /// immediate into a register. |
19733 | bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { |
19734 | // Thumb2 and ARM modes can use cmn for negative immediates. |
19735 | if (!Subtarget->isThumb()) |
19736 | return ARM_AM::getSOImmVal(Arg: (uint32_t)Imm) != -1 || |
19737 | ARM_AM::getSOImmVal(Arg: -(uint32_t)Imm) != -1; |
19738 | if (Subtarget->isThumb2()) |
19739 | return ARM_AM::getT2SOImmVal(Arg: (uint32_t)Imm) != -1 || |
19740 | ARM_AM::getT2SOImmVal(Arg: -(uint32_t)Imm) != -1; |
19741 | // Thumb1 doesn't have cmn, and only 8-bit immediates. |
19742 | return Imm >= 0 && Imm <= 255; |
19743 | } |
19744 | |
19745 | /// isLegalAddImmediate - Return true if the specified immediate is a legal add |
19746 | /// *or sub* immediate, that is the target has add or sub instructions which can |
19747 | /// add a register with the immediate without having to materialize the |
19748 | /// immediate into a register. |
19749 | bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { |
19750 | // Same encoding for add/sub, just flip the sign. |
19751 | int64_t AbsImm = std::abs(i: Imm); |
19752 | if (!Subtarget->isThumb()) |
19753 | return ARM_AM::getSOImmVal(Arg: AbsImm) != -1; |
19754 | if (Subtarget->isThumb2()) |
19755 | return ARM_AM::getT2SOImmVal(Arg: AbsImm) != -1; |
19756 | // Thumb1 only has 8-bit unsigned immediate. |
19757 | return AbsImm >= 0 && AbsImm <= 255; |
19758 | } |
19759 | |
19760 | // Return false to prevent folding |
19761 | // (mul (add r, c0), c1) -> (add (mul r, c1), c0*c1) in DAGCombine, |
19762 | // if the folding leads to worse code. |
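// For example, if c0 is a legal add immediate but c0*c1 takes two or more
// instructions to materialize, the folded form ends up more expensive than
// the original add followed by mul.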
19763 | bool ARMTargetLowering::isMulAddWithConstProfitable(SDValue AddNode, |
19764 | SDValue ConstNode) const { |
19765 | // Let the DAGCombiner decide for vector types and large types. |
19766 | const EVT VT = AddNode.getValueType(); |
19767 | if (VT.isVector() || VT.getScalarSizeInBits() > 32) |
19768 | return true; |
19769 | |
19770 | // It is worse if c0 is legal add immediate, while c1*c0 is not |
19771 | // and has to be composed by at least two instructions. |
19772 | const ConstantSDNode *C0Node = cast<ConstantSDNode>(Val: AddNode.getOperand(i: 1)); |
19773 | const ConstantSDNode *C1Node = cast<ConstantSDNode>(Val&: ConstNode); |
19774 | const int64_t C0 = C0Node->getSExtValue(); |
19775 | APInt CA = C0Node->getAPIntValue() * C1Node->getAPIntValue(); |
19776 | if (!isLegalAddImmediate(Imm: C0) || isLegalAddImmediate(Imm: CA.getSExtValue())) |
19777 | return true; |
19778 | if (ConstantMaterializationCost(Val: (unsigned)CA.getZExtValue(), Subtarget) > 1) |
19779 | return false; |
19780 | |
19781 | // Default to true and let the DAGCombiner decide. |
19782 | return true; |
19783 | } |
19784 | |
19785 | static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, |
19786 | bool isSEXTLoad, SDValue &Base, |
19787 | SDValue &Offset, bool &isInc, |
19788 | SelectionDAG &DAG) { |
19789 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
19790 | return false; |
19791 | |
19792 | if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { |
19793 | // AddressingMode 3 |
19794 | Base = Ptr->getOperand(Num: 0); |
19795 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
19796 | int RHSC = (int)RHS->getZExtValue(); |
19797 | if (RHSC < 0 && RHSC > -256) { |
19798 | assert(Ptr->getOpcode() == ISD::ADD); |
19799 | isInc = false; |
19800 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
19801 | return true; |
19802 | } |
19803 | } |
19804 | isInc = (Ptr->getOpcode() == ISD::ADD); |
19805 | Offset = Ptr->getOperand(Num: 1); |
19806 | return true; |
19807 | } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { |
19808 | // AddressingMode 2 |
19809 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
19810 | int RHSC = (int)RHS->getZExtValue(); |
19811 | if (RHSC < 0 && RHSC > -0x1000) { |
19812 | assert(Ptr->getOpcode() == ISD::ADD); |
19813 | isInc = false; |
19814 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
19815 | Base = Ptr->getOperand(Num: 0); |
19816 | return true; |
19817 | } |
19818 | } |
19819 | |
19820 | if (Ptr->getOpcode() == ISD::ADD) { |
19821 | isInc = true; |
19822 | ARM_AM::ShiftOpc ShOpcVal= |
19823 | ARM_AM::getShiftOpcForNode(Opcode: Ptr->getOperand(Num: 0).getOpcode()); |
19824 | if (ShOpcVal != ARM_AM::no_shift) { |
19825 | Base = Ptr->getOperand(Num: 1); |
19826 | Offset = Ptr->getOperand(Num: 0); |
19827 | } else { |
19828 | Base = Ptr->getOperand(Num: 0); |
19829 | Offset = Ptr->getOperand(Num: 1); |
19830 | } |
19831 | return true; |
19832 | } |
19833 | |
19834 | isInc = (Ptr->getOpcode() == ISD::ADD); |
19835 | Base = Ptr->getOperand(Num: 0); |
19836 | Offset = Ptr->getOperand(Num: 1); |
19837 | return true; |
19838 | } |
19839 | |
19840 | // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. |
19841 | return false; |
19842 | } |
19843 | |
19844 | static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, |
19845 | bool isSEXTLoad, SDValue &Base, |
19846 | SDValue &Offset, bool &isInc, |
19847 | SelectionDAG &DAG) { |
19848 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
19849 | return false; |
19850 | |
19851 | Base = Ptr->getOperand(Num: 0); |
19852 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) { |
19853 | int RHSC = (int)RHS->getZExtValue(); |
19854 | if (RHSC < 0 && RHSC > -0x100) { // 8 bits. |
19855 | assert(Ptr->getOpcode() == ISD::ADD); |
19856 | isInc = false; |
19857 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
19858 | return true; |
19859 | } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. |
19860 | isInc = Ptr->getOpcode() == ISD::ADD; |
19861 | Offset = DAG.getConstant(Val: RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
19862 | return true; |
19863 | } |
19864 | } |
19865 | |
19866 | return false; |
19867 | } |
19868 | |
19869 | static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment, |
19870 | bool isSEXTLoad, bool IsMasked, bool isLE, |
19871 | SDValue &Base, SDValue &Offset, |
19872 | bool &isInc, SelectionDAG &DAG) { |
19873 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
19874 | return false; |
19875 | if (!isa<ConstantSDNode>(Val: Ptr->getOperand(Num: 1))) |
19876 | return false; |
19877 | |
19878 | // We allow LE non-masked loads to change the type (for example use a vldrb.8 |
19879 | // as opposed to a vldrw.32). This can allow extra addressing modes or |
19880 | // alignments for what is otherwise an equivalent instruction. |
19881 | bool CanChangeType = isLE && !IsMasked; |
19882 | |
19883 | ConstantSDNode *RHS = cast<ConstantSDNode>(Val: Ptr->getOperand(Num: 1)); |
19884 | int RHSC = (int)RHS->getZExtValue(); |
19885 | |
19886 | auto IsInRange = [&](int RHSC, int Limit, int Scale) { |
19887 | if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) { |
19888 | assert(Ptr->getOpcode() == ISD::ADD); |
19889 | isInc = false; |
19890 | Offset = DAG.getConstant(Val: -RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
19891 | return true; |
19892 | } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) { |
19893 | isInc = Ptr->getOpcode() == ISD::ADD; |
19894 | Offset = DAG.getConstant(Val: RHSC, DL: SDLoc(Ptr), VT: RHS->getValueType(ResNo: 0)); |
19895 | return true; |
19896 | } |
19897 | return false; |
19898 | }; |
19899 | |
19900 | // Try to find a matching instruction based on s/zext, Alignment, Offset and |
19901 | // (in BE/masked) type. |
19902 | Base = Ptr->getOperand(Num: 0); |
19903 | if (VT == MVT::v4i16) { |
19904 | if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2)) |
19905 | return true; |
19906 | } else if (VT == MVT::v4i8 || VT == MVT::v8i8) { |
19907 | if (IsInRange(RHSC, 0x80, 1)) |
19908 | return true; |
19909 | } else if (Alignment >= 4 && |
19910 | (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) && |
19911 | IsInRange(RHSC, 0x80, 4)) |
19912 | return true; |
19913 | else if (Alignment >= 2 && |
19914 | (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) && |
19915 | IsInRange(RHSC, 0x80, 2)) |
19916 | return true; |
19917 | else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1)) |
19918 | return true; |
19919 | return false; |
19920 | } |
19921 | |
19922 | /// getPreIndexedAddressParts - returns true by value, base pointer and |
19923 | /// offset pointer and addressing mode by reference if the node's address |
19924 | /// can be legally represented as pre-indexed load / store address. |
19925 | bool |
19926 | ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, |
19927 | SDValue &Offset, |
19928 | ISD::MemIndexedMode &AM, |
19929 | SelectionDAG &DAG) const { |
19930 | if (Subtarget->isThumb1Only()) |
19931 | return false; |
19932 | |
19933 | EVT VT; |
19934 | SDValue Ptr; |
19935 | Align Alignment; |
19936 | bool isSEXTLoad = false; |
19937 | bool IsMasked = false; |
19938 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
19939 | Ptr = LD->getBasePtr(); |
19940 | VT = LD->getMemoryVT(); |
19941 | Alignment = LD->getAlign(); |
19942 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
19943 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Val: N)) { |
19944 | Ptr = ST->getBasePtr(); |
19945 | VT = ST->getMemoryVT(); |
19946 | Alignment = ST->getAlign(); |
19947 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Val: N)) { |
19948 | Ptr = LD->getBasePtr(); |
19949 | VT = LD->getMemoryVT(); |
19950 | Alignment = LD->getAlign(); |
19951 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
19952 | IsMasked = true; |
19953 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Val: N)) { |
19954 | Ptr = ST->getBasePtr(); |
19955 | VT = ST->getMemoryVT(); |
19956 | Alignment = ST->getAlign(); |
19957 | IsMasked = true; |
19958 | } else |
19959 | return false; |
19960 | |
19961 | bool isInc; |
19962 | bool isLegal = false; |
19963 | if (VT.isVector()) |
19964 | isLegal = Subtarget->hasMVEIntegerOps() && |
19965 | getMVEIndexedAddressParts( |
19966 | Ptr: Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked, |
19967 | isLE: Subtarget->isLittle(), Base, Offset, isInc, DAG); |
19968 | else { |
19969 | if (Subtarget->isThumb2()) |
19970 | isLegal = getT2IndexedAddressParts(Ptr: Ptr.getNode(), VT, isSEXTLoad, Base, |
19971 | Offset, isInc, DAG); |
19972 | else |
19973 | isLegal = getARMIndexedAddressParts(Ptr: Ptr.getNode(), VT, isSEXTLoad, Base, |
19974 | Offset, isInc, DAG); |
19975 | } |
19976 | if (!isLegal) |
19977 | return false; |
19978 | |
19979 | AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; |
19980 | return true; |
19981 | } |
19982 | |
19983 | /// getPostIndexedAddressParts - returns true by value, base pointer and |
19984 | /// offset pointer and addressing mode by reference if this node can be |
19985 | /// combined with a load / store to form a post-indexed load / store. |
19986 | bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, |
19987 | SDValue &Base, |
19988 | SDValue &Offset, |
19989 | ISD::MemIndexedMode &AM, |
19990 | SelectionDAG &DAG) const { |
19991 | EVT VT; |
19992 | SDValue Ptr; |
19993 | Align Alignment; |
19994 | bool isSEXTLoad = false, isNonExt; |
19995 | bool IsMasked = false; |
19996 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val: N)) { |
19997 | VT = LD->getMemoryVT(); |
19998 | Ptr = LD->getBasePtr(); |
19999 | Alignment = LD->getAlign(); |
20000 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
20001 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
20002 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Val: N)) { |
20003 | VT = ST->getMemoryVT(); |
20004 | Ptr = ST->getBasePtr(); |
20005 | Alignment = ST->getAlign(); |
20006 | isNonExt = !ST->isTruncatingStore(); |
20007 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Val: N)) { |
20008 | VT = LD->getMemoryVT(); |
20009 | Ptr = LD->getBasePtr(); |
20010 | Alignment = LD->getAlign(); |
20011 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
20012 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
20013 | IsMasked = true; |
20014 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Val: N)) { |
20015 | VT = ST->getMemoryVT(); |
20016 | Ptr = ST->getBasePtr(); |
20017 | Alignment = ST->getAlign(); |
20018 | isNonExt = !ST->isTruncatingStore(); |
20019 | IsMasked = true; |
20020 | } else |
20021 | return false; |
20022 | |
20023 | if (Subtarget->isThumb1Only()) { |
20024 | // Thumb-1 can do a limited post-inc load or store as an updating LDM. It |
20025 | // must be non-extending/truncating, i32, with an offset of 4. |
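    // For example, "ldm r0!, {r1}" loads a single i32 and post-increments the
    // base register r0 by 4.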
    assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
20027 | if (Op->getOpcode() != ISD::ADD || !isNonExt) |
20028 | return false; |
20029 | auto *RHS = dyn_cast<ConstantSDNode>(Val: Op->getOperand(Num: 1)); |
20030 | if (!RHS || RHS->getZExtValue() != 4) |
20031 | return false; |
20032 | if (Alignment < Align(4)) |
20033 | return false; |
20034 | |
20035 | Offset = Op->getOperand(Num: 1); |
20036 | Base = Op->getOperand(Num: 0); |
20037 | AM = ISD::POST_INC; |
20038 | return true; |
20039 | } |
20040 | |
20041 | bool isInc; |
20042 | bool isLegal = false; |
20043 | if (VT.isVector()) |
20044 | isLegal = Subtarget->hasMVEIntegerOps() && |
20045 | getMVEIndexedAddressParts(Ptr: Op, VT, Alignment, isSEXTLoad, IsMasked, |
20046 | isLE: Subtarget->isLittle(), Base, Offset, |
20047 | isInc, DAG); |
20048 | else { |
20049 | if (Subtarget->isThumb2()) |
20050 | isLegal = getT2IndexedAddressParts(Ptr: Op, VT, isSEXTLoad, Base, Offset, |
20051 | isInc, DAG); |
20052 | else |
20053 | isLegal = getARMIndexedAddressParts(Ptr: Op, VT, isSEXTLoad, Base, Offset, |
20054 | isInc, DAG); |
20055 | } |
20056 | if (!isLegal) |
20057 | return false; |
20058 | |
20059 | if (Ptr != Base) { |
20060 | // Swap base ptr and offset to catch more post-index load / store when |
20061 | // it's legal. In Thumb2 mode, offset must be an immediate. |
20062 | if (Ptr == Offset && Op->getOpcode() == ISD::ADD && |
20063 | !Subtarget->isThumb2()) |
20064 | std::swap(a&: Base, b&: Offset); |
20065 | |
20066 | // Post-indexed load / store update the base pointer. |
20067 | if (Ptr != Base) |
20068 | return false; |
20069 | } |
20070 | |
20071 | AM = isInc ? ISD::POST_INC : ISD::POST_DEC; |
20072 | return true; |
20073 | } |
20074 | |
20075 | void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, |
20076 | KnownBits &Known, |
20077 | const APInt &DemandedElts, |
20078 | const SelectionDAG &DAG, |
20079 | unsigned Depth) const { |
20080 | unsigned BitWidth = Known.getBitWidth(); |
20081 | Known.resetAll(); |
20082 | switch (Op.getOpcode()) { |
20083 | default: break; |
20084 | case ARMISD::ADDC: |
20085 | case ARMISD::ADDE: |
20086 | case ARMISD::SUBC: |
20087 | case ARMISD::SUBE: |
20088 | // Special cases when we convert a carry to a boolean. |
20089 | if (Op.getResNo() == 0) { |
20090 | SDValue LHS = Op.getOperand(i: 0); |
20091 | SDValue RHS = Op.getOperand(i: 1); |
20092 | // (ADDE 0, 0, C) will give us a single bit. |
20093 | if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(V: LHS) && |
20094 | isNullConstant(V: RHS)) { |
20095 | Known.Zero |= APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - 1); |
20096 | return; |
20097 | } |
20098 | } |
20099 | break; |
20100 | case ARMISD::CMOV: { |
20101 | // Bits are known zero/one if known on the LHS and RHS. |
20102 | Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth+1); |
20103 | if (Known.isUnknown()) |
20104 | return; |
20105 | |
20106 | KnownBits KnownRHS = DAG.computeKnownBits(Op: Op.getOperand(i: 1), Depth: Depth+1); |
20107 | Known = Known.intersectWith(RHS: KnownRHS); |
20108 | return; |
20109 | } |
20110 | case ISD::INTRINSIC_W_CHAIN: { |
20111 | Intrinsic::ID IntID = |
20112 | static_cast<Intrinsic::ID>(Op->getConstantOperandVal(Num: 1)); |
20113 | switch (IntID) { |
20114 | default: return; |
20115 | case Intrinsic::arm_ldaex: |
20116 | case Intrinsic::arm_ldrex: { |
20117 | EVT VT = cast<MemIntrinsicSDNode>(Val: Op)->getMemoryVT(); |
20118 | unsigned MemBits = VT.getScalarSizeInBits(); |
20119 | Known.Zero |= APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - MemBits); |
20120 | return; |
20121 | } |
20122 | } |
20123 | } |
20124 | case ARMISD::BFI: { |
20125 | // Conservatively, we can recurse down the first operand |
20126 | // and just mask out all affected bits. |
20127 | Known = DAG.computeKnownBits(Op: Op.getOperand(i: 0), Depth: Depth + 1); |
20128 | |
20129 | // The operand to BFI is already a mask suitable for removing the bits it |
20130 | // sets. |
20131 | const APInt &Mask = Op.getConstantOperandAPInt(i: 2); |
20132 | Known.Zero &= Mask; |
20133 | Known.One &= Mask; |
20134 | return; |
20135 | } |
20136 | case ARMISD::VGETLANEs: |
20137 | case ARMISD::VGETLANEu: { |
20138 | const SDValue &SrcSV = Op.getOperand(i: 0); |
20139 | EVT VecVT = SrcSV.getValueType(); |
    assert(VecVT.isVector() && "VGETLANE expected a vector type");
    const unsigned NumSrcElts = VecVT.getVectorNumElements();
    ConstantSDNode *Pos = cast<ConstantSDNode>(Val: Op.getOperand(i: 1).getNode());
    assert(Pos->getAPIntValue().ult(NumSrcElts) &&
           "VGETLANE index out of bounds");
20145 | unsigned Idx = Pos->getZExtValue(); |
20146 | APInt DemandedElt = APInt::getOneBitSet(numBits: NumSrcElts, BitNo: Idx); |
20147 | Known = DAG.computeKnownBits(Op: SrcSV, DemandedElts: DemandedElt, Depth: Depth + 1); |
20148 | |
20149 | EVT VT = Op.getValueType(); |
20150 | const unsigned DstSz = VT.getScalarSizeInBits(); |
20151 | const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits(); |
20152 | (void)SrcSz; |
20153 | assert(SrcSz == Known.getBitWidth()); |
20154 | assert(DstSz > SrcSz); |
20155 | if (Op.getOpcode() == ARMISD::VGETLANEs) |
20156 | Known = Known.sext(BitWidth: DstSz); |
20157 | else { |
20158 | Known = Known.zext(BitWidth: DstSz); |
20159 | } |
20160 | assert(DstSz == Known.getBitWidth()); |
20161 | break; |
20162 | } |
20163 | case ARMISD::VMOVrh: { |
20164 | KnownBits KnownOp = DAG.computeKnownBits(Op: Op->getOperand(Num: 0), Depth: Depth + 1); |
20165 | assert(KnownOp.getBitWidth() == 16); |
20166 | Known = KnownOp.zext(BitWidth: 32); |
20167 | break; |
20168 | } |
20169 | case ARMISD::CSINC: |
20170 | case ARMISD::CSINV: |
20171 | case ARMISD::CSNEG: { |
20172 | KnownBits KnownOp0 = DAG.computeKnownBits(Op: Op->getOperand(Num: 0), Depth: Depth + 1); |
20173 | KnownBits KnownOp1 = DAG.computeKnownBits(Op: Op->getOperand(Num: 1), Depth: Depth + 1); |
20174 | |
20175 | // The result is either: |
20176 | // CSINC: KnownOp0 or KnownOp1 + 1 |
20177 | // CSINV: KnownOp0 or ~KnownOp1 |
20178 | // CSNEG: KnownOp0 or KnownOp1 * -1 |
20179 | if (Op.getOpcode() == ARMISD::CSINC) |
20180 | KnownOp1 = KnownBits::computeForAddSub( |
20181 | /*Add=*/true, /*NSW=*/false, /*NUW=*/false, LHS: KnownOp1, |
20182 | RHS: KnownBits::makeConstant(C: APInt(32, 1))); |
20183 | else if (Op.getOpcode() == ARMISD::CSINV) |
20184 | std::swap(a&: KnownOp1.Zero, b&: KnownOp1.One); |
20185 | else if (Op.getOpcode() == ARMISD::CSNEG) |
20186 | KnownOp1 = KnownBits::mul( |
20187 | LHS: KnownOp1, RHS: KnownBits::makeConstant(C: APInt(32, -1))); |
20188 | |
20189 | Known = KnownOp0.intersectWith(RHS: KnownOp1); |
20190 | break; |
20191 | } |
20192 | } |
20193 | } |
20194 | |
20195 | bool ARMTargetLowering::targetShrinkDemandedConstant( |
20196 | SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, |
20197 | TargetLoweringOpt &TLO) const { |
20198 | // Delay optimization, so we don't have to deal with illegal types, or block |
20199 | // optimizations. |
20200 | if (!TLO.LegalOps) |
20201 | return false; |
20202 | |
20203 | // Only optimize AND for now. |
20204 | if (Op.getOpcode() != ISD::AND) |
20205 | return false; |
20206 | |
20207 | EVT VT = Op.getValueType(); |
20208 | |
20209 | // Ignore vectors. |
20210 | if (VT.isVector()) |
20211 | return false; |
20212 | |
  assert(VT == MVT::i32 && "Unexpected integer type");
20214 | |
20215 | // Make sure the RHS really is a constant. |
20216 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: Op.getOperand(i: 1)); |
20217 | if (!C) |
20218 | return false; |
20219 | |
20220 | unsigned Mask = C->getZExtValue(); |
20221 | |
20222 | unsigned Demanded = DemandedBits.getZExtValue(); |
20223 | unsigned ShrunkMask = Mask & Demanded; |
20224 | unsigned ExpandedMask = Mask | ~Demanded; |
20225 | |
20226 | // If the mask is all zeros, let the target-independent code replace the |
20227 | // result with zero. |
20228 | if (ShrunkMask == 0) |
20229 | return false; |
20230 | |
20231 | // If the mask is all ones, erase the AND. (Currently, the target-independent |
20232 | // code won't do this, so we have to do it explicitly to avoid an infinite |
20233 | // loop in obscure cases.) |
20234 | if (ExpandedMask == ~0U) |
20235 | return TLO.CombineTo(O: Op, N: Op.getOperand(i: 0)); |
20236 | |
20237 | auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool { |
20238 | return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0; |
20239 | }; |
20240 | auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool { |
20241 | if (NewMask == Mask) |
20242 | return true; |
20243 | SDLoc DL(Op); |
20244 | SDValue NewC = TLO.DAG.getConstant(Val: NewMask, DL, VT); |
20245 | SDValue NewOp = TLO.DAG.getNode(Opcode: ISD::AND, DL, VT, N1: Op.getOperand(i: 0), N2: NewC); |
20246 | return TLO.CombineTo(O: Op, N: NewOp); |
20247 | }; |
20248 | |
20249 | // Prefer uxtb mask. |
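  // For example, if only the low 16 bits of (and X, 0xFF0000FF) are demanded,
  // the mask can be shrunk to 0xFF, which selects as a single UXTB instead of
  // materializing the wide constant.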
20250 | if (IsLegalMask(0xFF)) |
20251 | return UseMask(0xFF); |
20252 | |
20253 | // Prefer uxth mask. |
20254 | if (IsLegalMask(0xFFFF)) |
20255 | return UseMask(0xFFFF); |
20256 | |
20257 | // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2. |
20258 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
20259 | if (ShrunkMask < 256) |
20260 | return UseMask(ShrunkMask); |
20261 | |
20262 | // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2. |
20263 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
20264 | if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256) |
20265 | return UseMask(ExpandedMask); |
20266 | |
20267 | // Potential improvements: |
20268 | // |
20269 | // We could try to recognize lsls+lsrs or lsrs+lsls pairs here. |
20270 | // We could try to prefer Thumb1 immediates which can be lowered to a |
20271 | // two-instruction sequence. |
20272 | // We could try to recognize more legal ARM/Thumb2 immediates here. |
20273 | |
20274 | return false; |
20275 | } |
20276 | |
20277 | bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode( |
20278 | SDValue Op, const APInt &OriginalDemandedBits, |
20279 | const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, |
20280 | unsigned Depth) const { |
20281 | unsigned Opc = Op.getOpcode(); |
20282 | |
20283 | switch (Opc) { |
20284 | case ARMISD::ASRL: |
20285 | case ARMISD::LSRL: { |
20286 | // If this is result 0 and the other result is unused, see if the demand |
20287 | // bits allow us to shrink this long shift into a standard small shift in |
20288 | // the opposite direction. |
20289 | if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(Value: 1) && |
20290 | isa<ConstantSDNode>(Val: Op->getOperand(Num: 2))) { |
20291 | unsigned ShAmt = Op->getConstantOperandVal(Num: 2); |
20292 | if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(RHS: APInt::getAllOnes(numBits: 32) |
20293 | << (32 - ShAmt))) |
20294 | return TLO.CombineTo( |
20295 | O: Op, N: TLO.DAG.getNode( |
20296 | Opcode: ISD::SHL, DL: SDLoc(Op), VT: MVT::i32, N1: Op.getOperand(i: 1), |
20297 | N2: TLO.DAG.getConstant(Val: 32 - ShAmt, DL: SDLoc(Op), VT: MVT::i32))); |
20298 | } |
20299 | break; |
20300 | } |
20301 | case ARMISD::VBICIMM: { |
20302 | SDValue Op0 = Op.getOperand(i: 0); |
20303 | unsigned ModImm = Op.getConstantOperandVal(i: 1); |
20304 | unsigned EltBits = 0; |
20305 | uint64_t Mask = ARM_AM::decodeVMOVModImm(ModImm, EltBits); |
20306 | if ((OriginalDemandedBits & Mask) == 0) |
20307 | return TLO.CombineTo(O: Op, N: Op0); |
20308 | } |
20309 | } |
20310 | |
20311 | return TargetLowering::SimplifyDemandedBitsForTargetNode( |
20312 | Op, DemandedBits: OriginalDemandedBits, DemandedElts: OriginalDemandedElts, Known, TLO, Depth); |
20313 | } |
20314 | |
20315 | //===----------------------------------------------------------------------===// |
20316 | // ARM Inline Assembly Support |
20317 | //===----------------------------------------------------------------------===// |
20318 | |
20319 | bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { |
20320 | // Looking for "rev" which is V6+. |
20321 | if (!Subtarget->hasV6Ops()) |
20322 | return false; |
20323 | |
20324 | InlineAsm *IA = cast<InlineAsm>(Val: CI->getCalledOperand()); |
20325 | StringRef AsmStr = IA->getAsmString(); |
20326 | SmallVector<StringRef, 4> AsmPieces; |
  SplitString(Source: AsmStr, OutFragments&: AsmPieces, Delimiters: ";\n");
20328 | |
20329 | switch (AsmPieces.size()) { |
20330 | default: return false; |
20331 | case 1: |
20332 | AsmStr = AsmPieces[0]; |
20333 | AsmPieces.clear(); |
    SplitString(Source: AsmStr, OutFragments&: AsmPieces, Delimiters: " \t,");
20335 | |
20336 | // rev $0, $1 |
20337 | if (AsmPieces.size() == 3 && |
20338 | AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && |
        IA->getConstraintString().compare(pos: 0, n1: 4, s: "=l,l") == 0) {
20340 | IntegerType *Ty = dyn_cast<IntegerType>(Val: CI->getType()); |
20341 | if (Ty && Ty->getBitWidth() == 32) |
20342 | return IntrinsicLowering::LowerToByteSwap(CI); |
20343 | } |
20344 | break; |
20345 | } |
20346 | |
20347 | return false; |
20348 | } |
20349 | |
20350 | const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { |
20351 | // At this point, we have to lower this constraint to something else, so we |
20352 | // lower it to an "r" or "w". However, by doing this we will force the result |
20353 | // to be in register, while the X constraint is much more permissive. |
20354 | // |
20355 | // Although we are correct (we are free to emit anything, without |
20356 | // constraints), we might break use cases that would expect us to be more |
20357 | // efficient and emit something else. |
  if (!Subtarget->hasVFP2Base())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "w";
  if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
      (ConstraintVT.getSizeInBits() == 64 ||
       ConstraintVT.getSizeInBits() == 128))
    return "w";

  return "r";
20368 | } |
20369 | |
20370 | /// getConstraintType - Given a constraint letter, return the type of |
20371 | /// constraint it is for this target. |
20372 | ARMTargetLowering::ConstraintType |
20373 | ARMTargetLowering::getConstraintType(StringRef Constraint) const { |
20374 | unsigned S = Constraint.size(); |
20375 | if (S == 1) { |
20376 | switch (Constraint[0]) { |
20377 | default: break; |
20378 | case 'l': return C_RegisterClass; |
20379 | case 'w': return C_RegisterClass; |
20380 | case 'h': return C_RegisterClass; |
20381 | case 'x': return C_RegisterClass; |
20382 | case 't': return C_RegisterClass; |
20383 | case 'j': return C_Immediate; // Constant for movw. |
20384 | // An address with a single base register. Due to the way we |
20385 | // currently handle addresses it is the same as an 'r' memory constraint. |
20386 | case 'Q': return C_Memory; |
20387 | } |
20388 | } else if (S == 2) { |
20389 | switch (Constraint[0]) { |
20390 | default: break; |
20391 | case 'T': return C_RegisterClass; |
20392 | // All 'U+' constraints are addresses. |
20393 | case 'U': return C_Memory; |
20394 | } |
20395 | } |
20396 | return TargetLowering::getConstraintType(Constraint); |
20397 | } |
20398 | |
20399 | /// Examine constraint type and operand type and determine a weight value. |
20400 | /// This object must already have been set up with the operand type |
20401 | /// and the current alternative constraint selected. |
20402 | TargetLowering::ConstraintWeight |
20403 | ARMTargetLowering::getSingleConstraintMatchWeight( |
20404 | AsmOperandInfo &info, const char *constraint) const { |
20405 | ConstraintWeight weight = CW_Invalid; |
20406 | Value *CallOperandVal = info.CallOperandVal; |
20407 | // If we don't have a value, we can't do a match, |
20408 | // but allow it at the lowest weight. |
20409 | if (!CallOperandVal) |
20410 | return CW_Default; |
20411 | Type *type = CallOperandVal->getType(); |
20412 | // Look at the constraint type. |
20413 | switch (*constraint) { |
20414 | default: |
20415 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); |
20416 | break; |
20417 | case 'l': |
20418 | if (type->isIntegerTy()) { |
20419 | if (Subtarget->isThumb()) |
20420 | weight = CW_SpecificReg; |
20421 | else |
20422 | weight = CW_Register; |
20423 | } |
20424 | break; |
20425 | case 'w': |
20426 | if (type->isFloatingPointTy()) |
20427 | weight = CW_Register; |
20428 | break; |
20429 | } |
20430 | return weight; |
20431 | } |
20432 | |
20433 | using RCPair = std::pair<unsigned, const TargetRegisterClass *>; |
20434 | |
20435 | RCPair ARMTargetLowering::getRegForInlineAsmConstraint( |
20436 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { |
20437 | switch (Constraint.size()) { |
20438 | case 1: |
20439 | // GCC ARM Constraint Letters |
20440 | switch (Constraint[0]) { |
20441 | case 'l': // Low regs or general regs. |
20442 | if (Subtarget->isThumb()) |
20443 | return RCPair(0U, &ARM::tGPRRegClass); |
20444 | return RCPair(0U, &ARM::GPRRegClass); |
20445 | case 'h': // High regs or no regs. |
20446 | if (Subtarget->isThumb()) |
20447 | return RCPair(0U, &ARM::hGPRRegClass); |
20448 | break; |
20449 | case 'r': |
20450 | if (Subtarget->isThumb1Only()) |
20451 | return RCPair(0U, &ARM::tGPRRegClass); |
20452 | return RCPair(0U, &ARM::GPRRegClass); |
20453 | case 'w': |
20454 | if (VT == MVT::Other) |
20455 | break; |
20456 | if (VT == MVT::f32 || VT == MVT::f16 || VT == MVT::bf16) |
20457 | return RCPair(0U, &ARM::SPRRegClass); |
20458 | if (VT.getSizeInBits() == 64) |
20459 | return RCPair(0U, &ARM::DPRRegClass); |
20460 | if (VT.getSizeInBits() == 128) |
20461 | return RCPair(0U, &ARM::QPRRegClass); |
20462 | break; |
20463 | case 'x': |
20464 | if (VT == MVT::Other) |
20465 | break; |
20466 | if (VT == MVT::f32 || VT == MVT::f16 || VT == MVT::bf16) |
20467 | return RCPair(0U, &ARM::SPR_8RegClass); |
20468 | if (VT.getSizeInBits() == 64) |
20469 | return RCPair(0U, &ARM::DPR_8RegClass); |
20470 | if (VT.getSizeInBits() == 128) |
20471 | return RCPair(0U, &ARM::QPR_8RegClass); |
20472 | break; |
20473 | case 't': |
20474 | if (VT == MVT::Other) |
20475 | break; |
20476 | if (VT == MVT::f32 || VT == MVT::i32 || VT == MVT::f16 || VT == MVT::bf16) |
20477 | return RCPair(0U, &ARM::SPRRegClass); |
20478 | if (VT.getSizeInBits() == 64) |
20479 | return RCPair(0U, &ARM::DPR_VFP2RegClass); |
20480 | if (VT.getSizeInBits() == 128) |
20481 | return RCPair(0U, &ARM::QPR_VFP2RegClass); |
20482 | break; |
20483 | } |
20484 | break; |
20485 | |
20486 | case 2: |
20487 | if (Constraint[0] == 'T') { |
20488 | switch (Constraint[1]) { |
20489 | default: |
20490 | break; |
20491 | case 'e': |
20492 | return RCPair(0U, &ARM::tGPREvenRegClass); |
20493 | case 'o': |
20494 | return RCPair(0U, &ARM::tGPROddRegClass); |
20495 | } |
20496 | } |
20497 | break; |
20498 | |
20499 | default: |
20500 | break; |
20501 | } |
20502 | |
  if (StringRef("{cc}").equals_insensitive(RHS: Constraint))
20504 | return std::make_pair(x: unsigned(ARM::CPSR), y: &ARM::CCRRegClass); |
20505 | |
20506 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
20507 | } |
20508 | |
20509 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops |
20510 | /// vector. If it is invalid, don't add anything to Ops. |
20511 | void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, |
20512 | StringRef Constraint, |
20513 | std::vector<SDValue> &Ops, |
20514 | SelectionDAG &DAG) const { |
20515 | SDValue Result; |
20516 | |
20517 | // Currently only support length 1 constraints. |
20518 | if (Constraint.size() != 1) |
20519 | return; |
20520 | |
20521 | char ConstraintLetter = Constraint[0]; |
20522 | switch (ConstraintLetter) { |
20523 | default: break; |
20524 | case 'j': |
20525 | case 'I': case 'J': case 'K': case 'L': |
20526 | case 'M': case 'N': case 'O': |
20527 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op); |
20528 | if (!C) |
20529 | return; |
20530 | |
20531 | int64_t CVal64 = C->getSExtValue(); |
20532 | int CVal = (int) CVal64; |
20533 | // None of these constraints allow values larger than 32 bits. Check |
20534 | // that the value fits in an int. |
20535 | if (CVal != CVal64) |
20536 | return; |
20537 | |
20538 | switch (ConstraintLetter) { |
20539 | case 'j': |
20540 | // Constant suitable for movw, must be between 0 and |
20541 | // 65535. |
20542 | if (Subtarget->hasV6T2Ops() || (Subtarget->hasV8MBaselineOps())) |
20543 | if (CVal >= 0 && CVal <= 65535) |
20544 | break; |
20545 | return; |
20546 | case 'I': |
20547 | if (Subtarget->isThumb1Only()) { |
20548 | // This must be a constant between 0 and 255, for ADD |
20549 | // immediates. |
20550 | if (CVal >= 0 && CVal <= 255) |
20551 | break; |
20552 | } else if (Subtarget->isThumb2()) { |
20553 | // A constant that can be used as an immediate value in a |
20554 | // data-processing instruction. |
20555 | if (ARM_AM::getT2SOImmVal(Arg: CVal) != -1) |
20556 | break; |
20557 | } else { |
20558 | // A constant that can be used as an immediate value in a |
20559 | // data-processing instruction. |
20560 | if (ARM_AM::getSOImmVal(Arg: CVal) != -1) |
20561 | break; |
20562 | } |
20563 | return; |
20564 | |
20565 | case 'J': |
20566 | if (Subtarget->isThumb1Only()) { |
20567 | // This must be a constant between -255 and -1, for negated ADD |
20568 | // immediates. This can be used in GCC with an "n" modifier that |
20569 | // prints the negated value, for use with SUB instructions. It is |
20570 | // not useful otherwise but is implemented for compatibility. |
20571 | if (CVal >= -255 && CVal <= -1) |
20572 | break; |
20573 | } else { |
20574 | // This must be a constant between -4095 and 4095. It is not clear |
20575 | // what this constraint is intended for. Implemented for |
20576 | // compatibility with GCC. |
20577 | if (CVal >= -4095 && CVal <= 4095) |
20578 | break; |
20579 | } |
20580 | return; |
20581 | |
20582 | case 'K': |
20583 | if (Subtarget->isThumb1Only()) { |
20584 | // A 32-bit value where only one byte has a nonzero value. Exclude |
20585 | // zero to match GCC. This constraint is used by GCC internally for |
20586 | // constants that can be loaded with a move/shift combination. |
20587 | // It is not useful otherwise but is implemented for compatibility. |
20588 | if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(V: CVal)) |
20589 | break; |
20590 | } else if (Subtarget->isThumb2()) { |
20591 | // A constant whose bitwise inverse can be used as an immediate |
20592 | // value in a data-processing instruction. This can be used in GCC |
20593 | // with a "B" modifier that prints the inverted value, for use with |
20594 | // BIC and MVN instructions. It is not useful otherwise but is |
20595 | // implemented for compatibility. |
20596 | if (ARM_AM::getT2SOImmVal(Arg: ~CVal) != -1) |
20597 | break; |
20598 | } else { |
20599 | // A constant whose bitwise inverse can be used as an immediate |
20600 | // value in a data-processing instruction. This can be used in GCC |
20601 | // with a "B" modifier that prints the inverted value, for use with |
20602 | // BIC and MVN instructions. It is not useful otherwise but is |
20603 | // implemented for compatibility. |
20604 | if (ARM_AM::getSOImmVal(Arg: ~CVal) != -1) |
20605 | break; |
20606 | } |
20607 | return; |
20608 | |
20609 | case 'L': |
20610 | if (Subtarget->isThumb1Only()) { |
20611 | // This must be a constant between -7 and 7, |
20612 | // for 3-operand ADD/SUB immediate instructions. |
20613 | if (CVal >= -7 && CVal < 7) |
20614 | break; |
20615 | } else if (Subtarget->isThumb2()) { |
20616 | // A constant whose negation can be used as an immediate value in a |
20617 | // data-processing instruction. This can be used in GCC with an "n" |
20618 | // modifier that prints the negated value, for use with SUB |
20619 | // instructions. It is not useful otherwise but is implemented for |
20620 | // compatibility. |
20621 | if (ARM_AM::getT2SOImmVal(Arg: -CVal) != -1) |
20622 | break; |
20623 | } else { |
20624 | // A constant whose negation can be used as an immediate value in a |
20625 | // data-processing instruction. This can be used in GCC with an "n" |
20626 | // modifier that prints the negated value, for use with SUB |
20627 | // instructions. It is not useful otherwise but is implemented for |
20628 | // compatibility. |
20629 | if (ARM_AM::getSOImmVal(Arg: -CVal) != -1) |
20630 | break; |
20631 | } |
20632 | return; |
20633 | |
20634 | case 'M': |
20635 | if (Subtarget->isThumb1Only()) { |
20636 | // This must be a multiple of 4 between 0 and 1020, for |
20637 | // ADD sp + immediate. |
20638 | if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) |
20639 | break; |
20640 | } else { |
20641 | // A power of two or a constant between 0 and 32. This is used in |
20642 | // GCC for the shift amount on shifted register operands, but it is |
20643 | // useful in general for any shift amounts. |
20644 | if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) |
20645 | break; |
20646 | } |
20647 | return; |
20648 | |
20649 | case 'N': |
20650 | if (Subtarget->isThumb1Only()) { |
20651 | // This must be a constant between 0 and 31, for shift amounts. |
20652 | if (CVal >= 0 && CVal <= 31) |
20653 | break; |
20654 | } |
20655 | return; |
20656 | |
20657 | case 'O': |
20658 | if (Subtarget->isThumb1Only()) { |
20659 | // This must be a multiple of 4 between -508 and 508, for |
20660 | // ADD/SUB sp = sp + immediate. |
20661 | if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) |
20662 | break; |
20663 | } |
20664 | return; |
20665 | } |
20666 | Result = DAG.getTargetConstant(Val: CVal, DL: SDLoc(Op), VT: Op.getValueType()); |
20667 | break; |
20668 | } |
20669 | |
20670 | if (Result.getNode()) { |
20671 | Ops.push_back(x: Result); |
20672 | return; |
20673 | } |
20674 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); |
20675 | } |
20676 | |
20677 | static RTLIB::Libcall getDivRemLibcall( |
20678 | const SDNode *N, MVT::SimpleValueType SVT) { |
20679 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
20680 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
         "Unhandled Opcode in getDivRemLibcall");
20682 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
20683 | N->getOpcode() == ISD::SREM; |
20684 | RTLIB::Libcall LC; |
20685 | switch (SVT) { |
  default: llvm_unreachable("Unexpected request for libcall!");
20687 | case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; |
20688 | case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; |
20689 | case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; |
20690 | case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; |
20691 | } |
20692 | return LC; |
20693 | } |
20694 | |
20695 | static TargetLowering::ArgListTy getDivRemArgList( |
20696 | const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) { |
20697 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
20698 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
20699 | "Unhandled Opcode in getDivRemArgList" ); |
20700 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
20701 | N->getOpcode() == ISD::SREM; |
20702 | TargetLowering::ArgListTy Args; |
20703 | TargetLowering::ArgListEntry Entry; |
20704 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
20705 | EVT ArgVT = N->getOperand(Num: i).getValueType(); |
20706 | Type *ArgTy = ArgVT.getTypeForEVT(Context&: *Context); |
20707 | Entry.Node = N->getOperand(Num: i); |
20708 | Entry.Ty = ArgTy; |
20709 | Entry.IsSExt = isSigned; |
20710 | Entry.IsZExt = !isSigned; |
20711 | Args.push_back(x: Entry); |
20712 | } |
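      |   // Note: the Windows ARM division helpers (__rt_sdiv and friends) expect |
      |   // the divisor as their first argument, hence the operand swap below. |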
20713 | if (Subtarget->isTargetWindows() && Args.size() >= 2) |
20714 | std::swap(a&: Args[0], b&: Args[1]); |
20715 | return Args; |
20716 | } |
20717 | |
20718 | SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { |
20719 | assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || |
20720 | Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || |
20721 | Subtarget->isTargetWindows()) && |
20722 | "Register-based DivRem lowering only" ); |
20723 | unsigned Opcode = Op->getOpcode(); |
20724 | assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && |
20725 | "Invalid opcode for Div/Rem lowering" ); |
20726 | bool isSigned = (Opcode == ISD::SDIVREM); |
20727 | EVT VT = Op->getValueType(ResNo: 0); |
20728 | SDLoc dl(Op); |
20729 | |
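      |   // For an i64 divrem by a constant, try to expand into 32-bit operations |
      |   // (a multiply/shift based sequence) and avoid the libcall entirely. |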
20730 | if (VT == MVT::i64 && isa<ConstantSDNode>(Val: Op.getOperand(i: 1))) { |
20731 | SmallVector<SDValue> Result; |
20732 | if (expandDIVREMByConstant(N: Op.getNode(), Result, HiLoVT: MVT::i32, DAG)) { |
20733 | SDValue Res0 = |
20734 | DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT, N1: Result[0], N2: Result[1]); |
20735 | SDValue Res1 = |
20736 | DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: dl, VT, N1: Result[2], N2: Result[3]); |
20737 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: Op->getVTList(), |
20738 | Ops: {Res0, Res1}); |
20739 | } |
20740 | } |
20741 | |
20742 | Type *Ty = VT.getTypeForEVT(Context&: *DAG.getContext()); |
20743 | |
20744 | // If the target has hardware divide, use divide + multiply + subtract: |
20745 | // div = a / b |
20746 | // rem = a - b * div |
20747 | // return {div, rem} |
20748 | // This should be lowered into UDIV/SDIV + MLS later on. |
20749 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
20750 | : Subtarget->hasDivideInARMMode(); |
20751 | if (hasDivide && Op->getValueType(ResNo: 0).isSimple() && |
20752 | Op->getSimpleValueType(ResNo: 0) == MVT::i32) { |
20753 | unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV; |
20754 | const SDValue Dividend = Op->getOperand(Num: 0); |
20755 | const SDValue Divisor = Op->getOperand(Num: 1); |
20756 | SDValue Div = DAG.getNode(Opcode: DivOpcode, DL: dl, VT, N1: Dividend, N2: Divisor); |
20757 | SDValue Mul = DAG.getNode(Opcode: ISD::MUL, DL: dl, VT, N1: Div, N2: Divisor); |
20758 | SDValue Rem = DAG.getNode(Opcode: ISD::SUB, DL: dl, VT, N1: Dividend, N2: Mul); |
20759 | |
20760 | SDValue Values[2] = {Div, Rem}; |
20761 | return DAG.getNode(Opcode: ISD::MERGE_VALUES, DL: dl, VTList: DAG.getVTList(VT1: VT, VT2: VT), Ops: Values); |
20762 | } |
20763 | |
20764 | RTLIB::Libcall LC = getDivRemLibcall(N: Op.getNode(), |
20765 | SVT: VT.getSimpleVT().SimpleTy); |
20766 | SDValue InChain = DAG.getEntryNode(); |
20767 | |
20768 | TargetLowering::ArgListTy Args = getDivRemArgList(N: Op.getNode(), |
20769 | Context: DAG.getContext(), |
20770 | Subtarget); |
20771 | |
20772 | SDValue Callee = DAG.getExternalSymbol(Sym: getLibcallName(Call: LC), |
20773 | VT: getPointerTy(DL: DAG.getDataLayout())); |
20774 | |
20775 | Type *RetTy = StructType::get(elt1: Ty, elts: Ty); |
20776 | |
20777 | if (Subtarget->isTargetWindows()) |
20778 | InChain = WinDBZCheckDenominator(DAG, N: Op.getNode(), InChain); |
20779 | |
20780 | TargetLowering::CallLoweringInfo CLI(DAG); |
20781 | CLI.setDebugLoc(dl).setChain(InChain) |
20782 | .setCallee(CC: getLibcallCallingConv(Call: LC), ResultType: RetTy, Target: Callee, ArgsList: std::move(Args)) |
20783 | .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned); |
20784 | |
20785 | std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); |
20786 | return CallInfo.first; |
20787 | } |
20788 | |
20789 | // Lowers REM using the divmod helpers. |
20790 | // See RTABI sections 4.2/4.3. |
20791 | SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { |
20792 | EVT VT = N->getValueType(ResNo: 0); |
20793 | |
20794 | if (VT == MVT::i64 && isa<ConstantSDNode>(Val: N->getOperand(Num: 1))) { |
20795 | SmallVector<SDValue> Result; |
20796 | if (expandDIVREMByConstant(N, Result, HiLoVT: MVT::i32, DAG)) |
20797 | return DAG.getNode(Opcode: ISD::BUILD_PAIR, DL: SDLoc(N), VT: N->getValueType(ResNo: 0), |
20798 | N1: Result[0], N2: Result[1]); |
20799 | } |
20800 | |
20801 | // Build return types (div and rem) |
20802 | std::vector<Type*> RetTyParams; |
20803 | Type *RetTyElement; |
20804 | |
20805 | switch (VT.getSimpleVT().SimpleTy) { |
20806 | default: llvm_unreachable("Unexpected request for libcall!" ); |
20807 | case MVT::i8: RetTyElement = Type::getInt8Ty(C&: *DAG.getContext()); break; |
20808 | case MVT::i16: RetTyElement = Type::getInt16Ty(C&: *DAG.getContext()); break; |
20809 | case MVT::i32: RetTyElement = Type::getInt32Ty(C&: *DAG.getContext()); break; |
20810 | case MVT::i64: RetTyElement = Type::getInt64Ty(C&: *DAG.getContext()); break; |
20811 | } |
20812 | |
20813 | RetTyParams.push_back(x: RetTyElement); |
20814 | RetTyParams.push_back(x: RetTyElement); |
20815 | ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams); |
20816 | Type *RetTy = StructType::get(Context&: *DAG.getContext(), Elements: ret); |
20817 | |
20818 | RTLIB::Libcall LC = getDivRemLibcall(N, SVT: N->getValueType(ResNo: 0).getSimpleVT(). |
20819 | SimpleTy); |
20820 | SDValue InChain = DAG.getEntryNode(); |
20821 | TargetLowering::ArgListTy Args = getDivRemArgList(N, Context: DAG.getContext(), |
20822 | Subtarget); |
20823 | bool isSigned = N->getOpcode() == ISD::SREM; |
20824 | SDValue Callee = DAG.getExternalSymbol(Sym: getLibcallName(Call: LC), |
20825 | VT: getPointerTy(DL: DAG.getDataLayout())); |
20826 | |
20827 | if (Subtarget->isTargetWindows()) |
20828 | InChain = WinDBZCheckDenominator(DAG, N, InChain); |
20829 | |
20830 | // Lower call |
20831 | CallLoweringInfo CLI(DAG); |
20832 | CLI.setChain(InChain) |
20833 | .setCallee(CC: CallingConv::ARM_AAPCS, ResultType: RetTy, Target: Callee, ArgsList: std::move(Args)) |
20834 | .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N)); |
20835 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
20836 | |
20837 | // Return second (rem) result operand (first contains div) |
20838 | SDNode *ResNode = CallResult.first.getNode(); |
20839 | assert(ResNode->getNumOperands() == 2 && "divmod should return two operands" ); |
20840 | return ResNode->getOperand(Num: 1); |
20841 | } |
20842 | |
20843 | SDValue |
20844 | ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { |
20845 | assert(Subtarget->isTargetWindows() && "unsupported target platform" ); |
20846 | SDLoc DL(Op); |
20847 | |
20848 | // Get the inputs. |
20849 | SDValue Chain = Op.getOperand(i: 0); |
20850 | SDValue Size = Op.getOperand(i: 1); |
20851 | |
20852 | if (DAG.getMachineFunction().getFunction().hasFnAttribute( |
20853 | Kind: "no-stack-arg-probe" )) { |
20854 | MaybeAlign Align = |
20855 | cast<ConstantSDNode>(Val: Op.getOperand(i: 2))->getMaybeAlignValue(); |
20856 | SDValue SP = DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::SP, VT: MVT::i32); |
20857 | Chain = SP.getValue(R: 1); |
20858 | SP = DAG.getNode(Opcode: ISD::SUB, DL, VT: MVT::i32, N1: SP, N2: Size); |
20859 | if (Align) |
20860 | SP = |
20861 | DAG.getNode(Opcode: ISD::AND, DL, VT: MVT::i32, N1: SP.getValue(R: 0), |
20862 | N2: DAG.getConstant(Val: -(uint64_t)Align->value(), DL, VT: MVT::i32)); |
20863 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::SP, N: SP); |
20864 | SDValue Ops[2] = { SP, Chain }; |
20865 | return DAG.getMergeValues(Ops, dl: DL); |
20866 | } |
20867 | |
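      |   // Otherwise probe the stack. The Windows __chkstk helper expects the |
      |   // allocation size in 4-byte words in r4; the WIN__CHKSTK pseudo expands |
      |   // to the helper call together with the actual SP adjustment. |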
20868 | SDValue Words = DAG.getNode(Opcode: ISD::SRL, DL, VT: MVT::i32, N1: Size, |
20869 | N2: DAG.getConstant(Val: 2, DL, VT: MVT::i32)); |
20870 | |
20871 | SDValue Glue; |
20872 | Chain = DAG.getCopyToReg(Chain, dl: DL, Reg: ARM::R4, N: Words, Glue); |
20873 | Glue = Chain.getValue(R: 1); |
20874 | |
20875 | SDVTList NodeTys = DAG.getVTList(VT1: MVT::Other, VT2: MVT::Glue); |
20876 | Chain = DAG.getNode(Opcode: ARMISD::WIN__CHKSTK, DL, VTList: NodeTys, N1: Chain, N2: Glue); |
20877 | |
20878 | SDValue NewSP = DAG.getCopyFromReg(Chain, dl: DL, Reg: ARM::SP, VT: MVT::i32); |
20879 | Chain = NewSP.getValue(R: 1); |
20880 | |
20881 | SDValue Ops[2] = { NewSP, Chain }; |
20882 | return DAG.getMergeValues(Ops, dl: DL); |
20883 | } |
20884 | |
20885 | SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { |
20886 | bool IsStrict = Op->isStrictFPOpcode(); |
20887 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
20888 | const unsigned DstSz = Op.getValueType().getSizeInBits(); |
20889 | const unsigned SrcSz = SrcVal.getValueType().getSizeInBits(); |
20890 | assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 && |
20891 | "Unexpected type for custom-lowering FP_EXTEND" ); |
20892 | |
20893 | assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && |
20894 | "With both FP DP and 16, any FP conversion is legal!" ); |
20895 | |
20896 | assert(!(DstSz == 32 && Subtarget->hasFP16()) && |
20897 | "With FP16, 16 to 32 conversion is legal!" ); |
20898 | |
20899 | // Converting from 32 -> 64 is valid if we have FP64. |
20900 | if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) { |
20901 | // FIXME: Remove this when we have strict fp instruction selection patterns |
20902 | if (IsStrict) { |
20903 | SDLoc Loc(Op); |
20904 | SDValue Result = DAG.getNode(Opcode: ISD::FP_EXTEND, |
20905 | DL: Loc, VT: Op.getValueType(), Operand: SrcVal); |
20906 | return DAG.getMergeValues(Ops: {Result, Op.getOperand(i: 0)}, dl: Loc); |
20907 | } |
20908 | return Op; |
20909 | } |
20910 | |
20911 |   // Otherwise, either we are converting from 16 -> 64 (without FP16 and/or |
20912 |   // double precision, or without Armv8-FP), in which case the extension is |
20913 |   // done in two steps; or we are converting from 32 -> 64 without double |
20914 |   // precision, or from 16 -> 32 without FP16, in which case we must make a |
20915 |   // libcall. |
20916 | SDLoc Loc(Op); |
20917 | RTLIB::Libcall LC; |
20918 | MakeLibCallOptions CallOptions; |
20919 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
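      |   // Extend one step at a time (16 -> 32, then 32 -> 64), using a native |
      |   // instruction when the subtarget supports that step and a libcall |
      |   // otherwise. |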
20920 | for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) { |
20921 | bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64()); |
20922 | MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32); |
20923 | MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64); |
20924 | if (Supported) { |
20925 | if (IsStrict) { |
20926 | SrcVal = DAG.getNode(Opcode: ISD::STRICT_FP_EXTEND, DL: Loc, |
20927 | ResultTys: {DstVT, MVT::Other}, Ops: {Chain, SrcVal}); |
20928 | Chain = SrcVal.getValue(R: 1); |
20929 | } else { |
20930 | SrcVal = DAG.getNode(Opcode: ISD::FP_EXTEND, DL: Loc, VT: DstVT, Operand: SrcVal); |
20931 | } |
20932 | } else { |
20933 | LC = RTLIB::getFPEXT(OpVT: SrcVT, RetVT: DstVT); |
20934 | assert(LC != RTLIB::UNKNOWN_LIBCALL && |
20935 | "Unexpected type for custom-lowering FP_EXTEND" ); |
20936 | std::tie(args&: SrcVal, args&: Chain) = makeLibCall(DAG, LC, RetVT: DstVT, Ops: SrcVal, CallOptions, |
20937 | dl: Loc, Chain); |
20938 | } |
20939 | } |
20940 | |
20941 | return IsStrict ? DAG.getMergeValues(Ops: {SrcVal, Chain}, dl: Loc) : SrcVal; |
20942 | } |
20943 | |
20944 | SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { |
20945 | bool IsStrict = Op->isStrictFPOpcode(); |
20946 | |
20947 | SDValue SrcVal = Op.getOperand(i: IsStrict ? 1 : 0); |
20948 | EVT SrcVT = SrcVal.getValueType(); |
20949 | EVT DstVT = Op.getValueType(); |
20950 | const unsigned DstSz = Op.getValueType().getSizeInBits(); |
20951 | const unsigned SrcSz = SrcVT.getSizeInBits(); |
20952 | (void)DstSz; |
20953 | assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 && |
20954 | "Unexpected type for custom-lowering FP_ROUND" ); |
20955 | |
20956 | assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && |
20957 | "With both FP DP and 16, any FP conversion is legal!" ); |
20958 | |
20959 | SDLoc Loc(Op); |
20960 | |
20961 |   // A single instruction handles 32 -> 16 if we have FP16. |
20962 | if (SrcSz == 32 && Subtarget->hasFP16()) |
20963 | return Op; |
20964 | |
20965 |   // Otherwise use a libcall: 32 -> 16, 64 -> 32, or 64 -> 16. |
20966 | RTLIB::Libcall LC = RTLIB::getFPROUND(OpVT: SrcVT, RetVT: DstVT); |
20967 | assert(LC != RTLIB::UNKNOWN_LIBCALL && |
20968 | "Unexpected type for custom-lowering FP_ROUND" ); |
20969 | MakeLibCallOptions CallOptions; |
20970 | SDValue Chain = IsStrict ? Op.getOperand(i: 0) : SDValue(); |
20971 | SDValue Result; |
20972 | std::tie(args&: Result, args&: Chain) = makeLibCall(DAG, LC, RetVT: DstVT, Ops: SrcVal, CallOptions, |
20973 | dl: Loc, Chain); |
20974 | return IsStrict ? DAG.getMergeValues(Ops: {Result, Chain}, dl: Loc) : Result; |
20975 | } |
20976 | |
20977 | bool |
20978 | ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { |
20979 | // The ARM target isn't yet aware of offsets. |
20980 | return false; |
20981 | } |
20982 | |
20983 | bool ARM::isBitFieldInvertedMask(unsigned v) { |
20984 | if (v == 0xffffffff) |
20985 | return false; |
20986 | |
20987 |   // There can be 1s on either or both "outsides"; all the "inside" bits |
20988 |   // must be 0s. |
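      |   // For example, 0xff0000ff is such a mask (~v == 0x00ffff00, a shifted |
      |   // mask of ones), while 0xff00ff00 is not (~v == 0x00ff00ff contains two |
      |   // separate runs of ones). |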
20989 | return isShiftedMask_32(Value: ~v); |
20990 | } |
20991 | |
20992 | /// isFPImmLegal - Returns true if the target can instruction select the |
20993 | /// specified FP immediate natively. If false, the legalizer will |
20994 | /// materialize the FP immediate as a load from a constant pool. |
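      | /// For example, immediates such as 1.0, 0.5 or -2.0 fit the 8-bit VFP |
      | /// immediate encoding, while a value like 0.1 does not and is instead |
      | /// loaded from the constant pool. |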
20995 | bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, |
20996 | bool ForCodeSize) const { |
20997 | if (!Subtarget->hasVFP3Base()) |
20998 | return false; |
20999 | if (VT == MVT::f16 && Subtarget->hasFullFP16()) |
21000 | return ARM_AM::getFP16Imm(FPImm: Imm) != -1; |
21001 | if (VT == MVT::f32 && Subtarget->hasFullFP16() && |
21002 | ARM_AM::getFP32FP16Imm(FPImm: Imm) != -1) |
21003 | return true; |
21004 | if (VT == MVT::f32) |
21005 | return ARM_AM::getFP32Imm(FPImm: Imm) != -1; |
21006 | if (VT == MVT::f64 && Subtarget->hasFP64()) |
21007 | return ARM_AM::getFP64Imm(FPImm: Imm) != -1; |
21008 | return false; |
21009 | } |
21010 | |
21011 | /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as |
21012 | /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment |
21013 | /// specified in the intrinsic calls. |
21014 | bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, |
21015 | const CallInst &I, |
21016 | MachineFunction &MF, |
21017 | unsigned Intrinsic) const { |
21018 | switch (Intrinsic) { |
21019 | case Intrinsic::arm_neon_vld1: |
21020 | case Intrinsic::arm_neon_vld2: |
21021 | case Intrinsic::arm_neon_vld3: |
21022 | case Intrinsic::arm_neon_vld4: |
21023 | case Intrinsic::arm_neon_vld2lane: |
21024 | case Intrinsic::arm_neon_vld3lane: |
21025 | case Intrinsic::arm_neon_vld4lane: |
21026 | case Intrinsic::arm_neon_vld2dup: |
21027 | case Intrinsic::arm_neon_vld3dup: |
21028 | case Intrinsic::arm_neon_vld4dup: { |
21029 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
21030 | // Conservatively set memVT to the entire set of vectors loaded. |
21031 | auto &DL = I.getDataLayout(); |
21032 | uint64_t NumElts = DL.getTypeSizeInBits(Ty: I.getType()) / 64; |
21033 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
21034 | Info.ptrVal = I.getArgOperand(i: 0); |
21035 | Info.offset = 0; |
21036 | Value *AlignArg = I.getArgOperand(i: I.arg_size() - 1); |
21037 | Info.align = cast<ConstantInt>(Val: AlignArg)->getMaybeAlignValue(); |
21038 | // volatile loads with NEON intrinsics not supported |
21039 | Info.flags = MachineMemOperand::MOLoad; |
21040 | return true; |
21041 | } |
21042 | case Intrinsic::arm_neon_vld1x2: |
21043 | case Intrinsic::arm_neon_vld1x3: |
21044 | case Intrinsic::arm_neon_vld1x4: { |
21045 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
21046 | // Conservatively set memVT to the entire set of vectors loaded. |
21047 | auto &DL = I.getDataLayout(); |
21048 | uint64_t NumElts = DL.getTypeSizeInBits(Ty: I.getType()) / 64; |
21049 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
21050 | Info.ptrVal = I.getArgOperand(i: I.arg_size() - 1); |
21051 | Info.offset = 0; |
21052 | Info.align.reset(); |
21053 | // volatile loads with NEON intrinsics not supported |
21054 | Info.flags = MachineMemOperand::MOLoad; |
21055 | return true; |
21056 | } |
21057 | case Intrinsic::arm_neon_vst1: |
21058 | case Intrinsic::arm_neon_vst2: |
21059 | case Intrinsic::arm_neon_vst3: |
21060 | case Intrinsic::arm_neon_vst4: |
21061 | case Intrinsic::arm_neon_vst2lane: |
21062 | case Intrinsic::arm_neon_vst3lane: |
21063 | case Intrinsic::arm_neon_vst4lane: { |
21064 | Info.opc = ISD::INTRINSIC_VOID; |
21065 | // Conservatively set memVT to the entire set of vectors stored. |
21066 | auto &DL = I.getDataLayout(); |
21067 | unsigned NumElts = 0; |
21068 | for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) { |
21069 | Type *ArgTy = I.getArgOperand(i: ArgI)->getType(); |
21070 | if (!ArgTy->isVectorTy()) |
21071 | break; |
21072 | NumElts += DL.getTypeSizeInBits(Ty: ArgTy) / 64; |
21073 | } |
21074 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
21075 | Info.ptrVal = I.getArgOperand(i: 0); |
21076 | Info.offset = 0; |
21077 | Value *AlignArg = I.getArgOperand(i: I.arg_size() - 1); |
21078 | Info.align = cast<ConstantInt>(Val: AlignArg)->getMaybeAlignValue(); |
21079 | // volatile stores with NEON intrinsics not supported |
21080 | Info.flags = MachineMemOperand::MOStore; |
21081 | return true; |
21082 | } |
21083 | case Intrinsic::arm_neon_vst1x2: |
21084 | case Intrinsic::arm_neon_vst1x3: |
21085 | case Intrinsic::arm_neon_vst1x4: { |
21086 | Info.opc = ISD::INTRINSIC_VOID; |
21087 | // Conservatively set memVT to the entire set of vectors stored. |
21088 | auto &DL = I.getDataLayout(); |
21089 | unsigned NumElts = 0; |
21090 | for (unsigned ArgI = 1, ArgE = I.arg_size(); ArgI < ArgE; ++ArgI) { |
21091 | Type *ArgTy = I.getArgOperand(i: ArgI)->getType(); |
21092 | if (!ArgTy->isVectorTy()) |
21093 | break; |
21094 | NumElts += DL.getTypeSizeInBits(Ty: ArgTy) / 64; |
21095 | } |
21096 | Info.memVT = EVT::getVectorVT(Context&: I.getType()->getContext(), VT: MVT::i64, NumElements: NumElts); |
21097 | Info.ptrVal = I.getArgOperand(i: 0); |
21098 | Info.offset = 0; |
21099 | Info.align.reset(); |
21100 | // volatile stores with NEON intrinsics not supported |
21101 | Info.flags = MachineMemOperand::MOStore; |
21102 | return true; |
21103 | } |
21104 | case Intrinsic::arm_mve_vld2q: |
21105 | case Intrinsic::arm_mve_vld4q: { |
21106 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
21107 | // Conservatively set memVT to the entire set of vectors loaded. |
21108 | Type *VecTy = cast<StructType>(Val: I.getType())->getElementType(N: 1); |
21109 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4; |
21110 | Info.memVT = EVT::getVectorVT(Context&: VecTy->getContext(), VT: MVT::i64, NumElements: Factor * 2); |
21111 | Info.ptrVal = I.getArgOperand(i: 0); |
21112 | Info.offset = 0; |
21113 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); |
21114 | // volatile loads with MVE intrinsics not supported |
21115 | Info.flags = MachineMemOperand::MOLoad; |
21116 | return true; |
21117 | } |
21118 | case Intrinsic::arm_mve_vst2q: |
21119 | case Intrinsic::arm_mve_vst4q: { |
21120 | Info.opc = ISD::INTRINSIC_VOID; |
21121 | // Conservatively set memVT to the entire set of vectors stored. |
21122 | Type *VecTy = I.getArgOperand(i: 1)->getType(); |
21123 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4; |
21124 | Info.memVT = EVT::getVectorVT(Context&: VecTy->getContext(), VT: MVT::i64, NumElements: Factor * 2); |
21125 | Info.ptrVal = I.getArgOperand(i: 0); |
21126 | Info.offset = 0; |
21127 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); |
21128 | // volatile stores with MVE intrinsics not supported |
21129 | Info.flags = MachineMemOperand::MOStore; |
21130 | return true; |
21131 | } |
21132 | case Intrinsic::arm_mve_vldr_gather_base: |
21133 | case Intrinsic::arm_mve_vldr_gather_base_predicated: { |
21134 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
21135 | Info.ptrVal = nullptr; |
21136 | Info.memVT = MVT::getVT(Ty: I.getType()); |
21137 | Info.align = Align(1); |
21138 | Info.flags |= MachineMemOperand::MOLoad; |
21139 | return true; |
21140 | } |
21141 | case Intrinsic::arm_mve_vldr_gather_base_wb: |
21142 | case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: { |
21143 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
21144 | Info.ptrVal = nullptr; |
21145 | Info.memVT = MVT::getVT(Ty: I.getType()->getContainedType(i: 0)); |
21146 | Info.align = Align(1); |
21147 | Info.flags |= MachineMemOperand::MOLoad; |
21148 | return true; |
21149 | } |
21150 | case Intrinsic::arm_mve_vldr_gather_offset: |
21151 | case Intrinsic::arm_mve_vldr_gather_offset_predicated: { |
21152 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
21153 | Info.ptrVal = nullptr; |
21154 | MVT DataVT = MVT::getVT(Ty: I.getType()); |
21155 | unsigned MemSize = cast<ConstantInt>(Val: I.getArgOperand(i: 2))->getZExtValue(); |
21156 | Info.memVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: MemSize), |
21157 | NumElements: DataVT.getVectorNumElements()); |
21158 | Info.align = Align(1); |
21159 | Info.flags |= MachineMemOperand::MOLoad; |
21160 | return true; |
21161 | } |
21162 | case Intrinsic::arm_mve_vstr_scatter_base: |
21163 | case Intrinsic::arm_mve_vstr_scatter_base_predicated: { |
21164 | Info.opc = ISD::INTRINSIC_VOID; |
21165 | Info.ptrVal = nullptr; |
21166 | Info.memVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
21167 | Info.align = Align(1); |
21168 | Info.flags |= MachineMemOperand::MOStore; |
21169 | return true; |
21170 | } |
21171 | case Intrinsic::arm_mve_vstr_scatter_base_wb: |
21172 | case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated: { |
21173 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
21174 | Info.ptrVal = nullptr; |
21175 | Info.memVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
21176 | Info.align = Align(1); |
21177 | Info.flags |= MachineMemOperand::MOStore; |
21178 | return true; |
21179 | } |
21180 | case Intrinsic::arm_mve_vstr_scatter_offset: |
21181 | case Intrinsic::arm_mve_vstr_scatter_offset_predicated: { |
21182 | Info.opc = ISD::INTRINSIC_VOID; |
21183 | Info.ptrVal = nullptr; |
21184 | MVT DataVT = MVT::getVT(Ty: I.getArgOperand(i: 2)->getType()); |
21185 | unsigned MemSize = cast<ConstantInt>(Val: I.getArgOperand(i: 3))->getZExtValue(); |
21186 | Info.memVT = MVT::getVectorVT(VT: MVT::getIntegerVT(BitWidth: MemSize), |
21187 | NumElements: DataVT.getVectorNumElements()); |
21188 | Info.align = Align(1); |
21189 | Info.flags |= MachineMemOperand::MOStore; |
21190 | return true; |
21191 | } |
21192 | case Intrinsic::arm_ldaex: |
21193 | case Intrinsic::arm_ldrex: { |
21194 | auto &DL = I.getDataLayout(); |
21195 | Type *ValTy = I.getParamElementType(ArgNo: 0); |
21196 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
21197 | Info.memVT = MVT::getVT(Ty: ValTy); |
21198 | Info.ptrVal = I.getArgOperand(i: 0); |
21199 | Info.offset = 0; |
21200 | Info.align = DL.getABITypeAlign(Ty: ValTy); |
21201 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
21202 | return true; |
21203 | } |
21204 | case Intrinsic::arm_stlex: |
21205 | case Intrinsic::arm_strex: { |
21206 | auto &DL = I.getDataLayout(); |
21207 | Type *ValTy = I.getParamElementType(ArgNo: 1); |
21208 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
21209 | Info.memVT = MVT::getVT(Ty: ValTy); |
21210 | Info.ptrVal = I.getArgOperand(i: 1); |
21211 | Info.offset = 0; |
21212 | Info.align = DL.getABITypeAlign(Ty: ValTy); |
21213 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
21214 | return true; |
21215 | } |
21216 | case Intrinsic::arm_stlexd: |
21217 | case Intrinsic::arm_strexd: |
21218 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
21219 | Info.memVT = MVT::i64; |
21220 | Info.ptrVal = I.getArgOperand(i: 2); |
21221 | Info.offset = 0; |
21222 | Info.align = Align(8); |
21223 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
21224 | return true; |
21225 | |
21226 | case Intrinsic::arm_ldaexd: |
21227 | case Intrinsic::arm_ldrexd: |
21228 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
21229 | Info.memVT = MVT::i64; |
21230 | Info.ptrVal = I.getArgOperand(i: 0); |
21231 | Info.offset = 0; |
21232 | Info.align = Align(8); |
21233 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
21234 | return true; |
21235 | |
21236 | default: |
21237 | break; |
21238 | } |
21239 | |
21240 | return false; |
21241 | } |
21242 | |
21243 | /// Returns true if it is beneficial to convert a load of a constant |
21244 | /// to just the constant itself. |
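      | /// On ARM, any integer constant of at most 32 bits can usually be |
      | /// materialized with a short instruction sequence (e.g. MOVW/MOVT on v6T2 |
      | /// and later), which is preferable to a load from the constant pool. |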
21245 | bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
21246 | Type *Ty) const { |
21247 | assert(Ty->isIntegerTy()); |
21248 | |
21249 | unsigned Bits = Ty->getPrimitiveSizeInBits(); |
21250 | if (Bits == 0 || Bits > 32) |
21251 | return false; |
21252 | return true; |
21253 | } |
21254 | |
21255 | bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, |
21256 |                                                 unsigned Index) const { |
21257 | if (!isOperationLegalOrCustom(Op: ISD::EXTRACT_SUBVECTOR, VT: ResVT)) |
21258 | return false; |
21259 | |
21260 | return (Index == 0 || Index == ResVT.getVectorNumElements()); |
21261 | } |
21262 | |
21263 | Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder, |
21264 | ARM_MB::MemBOpt Domain) const { |
21265 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
21266 | |
21267 | // First, if the target has no DMB, see what fallback we can use. |
21268 | if (!Subtarget->hasDataBarrier()) { |
21269 | // Some ARMv6 cpus can support data barriers with an mcr instruction. |
21270 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
21271 | // here. |
21272 | if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) { |
21273 | Function *MCR = Intrinsic::getDeclaration(M, id: Intrinsic::arm_mcr); |
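      |       // This encodes "mcr p15, 0, <Rt>, c7, c10, 5", the CP15 operation |
      |       // that acts as a data memory barrier on ARMv6. |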
21274 | Value* args[6] = {Builder.getInt32(C: 15), Builder.getInt32(C: 0), |
21275 | Builder.getInt32(C: 0), Builder.getInt32(C: 7), |
21276 | Builder.getInt32(C: 10), Builder.getInt32(C: 5)}; |
21277 | return Builder.CreateCall(Callee: MCR, Args: args); |
21278 | } else { |
21279 | // Instead of using barriers, atomic accesses on these subtargets use |
21280 | // libcalls. |
21281 | llvm_unreachable("makeDMB on a target so old that it has no barriers" ); |
21282 | } |
21283 | } else { |
21284 | Function *DMB = Intrinsic::getDeclaration(M, id: Intrinsic::arm_dmb); |
21285 | // Only a full system barrier exists in the M-class architectures. |
21286 | Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain; |
21287 | Constant *CDomain = Builder.getInt32(C: Domain); |
21288 | return Builder.CreateCall(Callee: DMB, Args: CDomain); |
21289 | } |
21290 | } |
21291 | |
21292 | // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html |
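      | // For example, on ARMv7 that mapping turns a seq_cst store into |
      | // "dmb ish; str; dmb ish" and an acquire load into "ldr; dmb ish". |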
21293 | Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder, |
21294 | Instruction *Inst, |
21295 | AtomicOrdering Ord) const { |
21296 | switch (Ord) { |
21297 | case AtomicOrdering::NotAtomic: |
21298 | case AtomicOrdering::Unordered: |
21299 | llvm_unreachable("Invalid fence: unordered/non-atomic" ); |
21300 | case AtomicOrdering::Monotonic: |
21301 | case AtomicOrdering::Acquire: |
21302 | return nullptr; // Nothing to do |
21303 | case AtomicOrdering::SequentiallyConsistent: |
21304 | if (!Inst->hasAtomicStore()) |
21305 | return nullptr; // Nothing to do |
21306 | [[fallthrough]]; |
21307 | case AtomicOrdering::Release: |
21308 | case AtomicOrdering::AcquireRelease: |
21309 | if (Subtarget->preferISHSTBarriers()) |
21310 | return makeDMB(Builder, Domain: ARM_MB::ISHST); |
21311 | // FIXME: add a comment with a link to documentation justifying this. |
21312 | else |
21313 | return makeDMB(Builder, Domain: ARM_MB::ISH); |
21314 | } |
21315 | llvm_unreachable("Unknown fence ordering in emitLeadingFence" ); |
21316 | } |
21317 | |
21318 | Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder, |
21319 | Instruction *Inst, |
21320 | AtomicOrdering Ord) const { |
21321 | switch (Ord) { |
21322 | case AtomicOrdering::NotAtomic: |
21323 | case AtomicOrdering::Unordered: |
21324 | llvm_unreachable("Invalid fence: unordered/not-atomic" ); |
21325 | case AtomicOrdering::Monotonic: |
21326 | case AtomicOrdering::Release: |
21327 | return nullptr; // Nothing to do |
21328 | case AtomicOrdering::Acquire: |
21329 | case AtomicOrdering::AcquireRelease: |
21330 | case AtomicOrdering::SequentiallyConsistent: |
21331 | return makeDMB(Builder, Domain: ARM_MB::ISH); |
21332 | } |
21333 | llvm_unreachable("Unknown fence ordering in emitTrailingFence" ); |
21334 | } |
21335 | |
21336 | // Loads and stores less than 64 bits are already atomic; ones above that |
21337 | // are doomed anyway, so defer to the default libcall and blame the OS when |
21338 | // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit |
21339 | // anything for those. |
21340 | TargetLoweringBase::AtomicExpansionKind |
21341 | ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { |
21342 | bool has64BitAtomicStore; |
21343 | if (Subtarget->isMClass()) |
21344 | has64BitAtomicStore = false; |
21345 | else if (Subtarget->isThumb()) |
21346 | has64BitAtomicStore = Subtarget->hasV7Ops(); |
21347 | else |
21348 | has64BitAtomicStore = Subtarget->hasV6Ops(); |
21349 | |
21350 | unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); |
21351 | return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::Expand |
21352 | : AtomicExpansionKind::None; |
21353 | } |
21354 | |
21355 | // Loads and stores less than 64 bits are already atomic; ones above that |
21356 | // are doomed anyway, so defer to the default libcall and blame the OS when |
21357 | // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit |
21358 | // anything for those. |
21359 | // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that |
21360 | // guarantee, see DDI0406C ARM architecture reference manual, |
21361 | // sections A8.8.72-74 LDRD) |
21362 | TargetLowering::AtomicExpansionKind |
21363 | ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { |
21364 | bool has64BitAtomicLoad; |
21365 | if (Subtarget->isMClass()) |
21366 | has64BitAtomicLoad = false; |
21367 | else if (Subtarget->isThumb()) |
21368 | has64BitAtomicLoad = Subtarget->hasV7Ops(); |
21369 | else |
21370 | has64BitAtomicLoad = Subtarget->hasV6Ops(); |
21371 | |
21372 | unsigned Size = LI->getType()->getPrimitiveSizeInBits(); |
21373 | return (Size == 64 && has64BitAtomicLoad) ? AtomicExpansionKind::LLOnly |
21374 | : AtomicExpansionKind::None; |
21375 | } |
21376 | |
21377 | // For the real atomic operations, we have ldrex/strex up to 32 bits, |
21378 | // and up to 64 bits on the non-M profiles |
21379 | TargetLowering::AtomicExpansionKind |
21380 | ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { |
21381 | if (AI->isFloatingPointOperation()) |
21382 | return AtomicExpansionKind::CmpXChg; |
21383 | |
21384 | unsigned Size = AI->getType()->getPrimitiveSizeInBits(); |
21385 | bool hasAtomicRMW; |
21386 | if (Subtarget->isMClass()) |
21387 | hasAtomicRMW = Subtarget->hasV8MBaselineOps(); |
21388 | else if (Subtarget->isThumb()) |
21389 | hasAtomicRMW = Subtarget->hasV7Ops(); |
21390 | else |
21391 | hasAtomicRMW = Subtarget->hasV6Ops(); |
21392 | if (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW) { |
21393 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to |
21394 | // implement atomicrmw without spilling. If the target address is also on |
21395 | // the stack and close enough to the spill slot, this can lead to a |
21396 | // situation where the monitor always gets cleared and the atomic operation |
21397 | // can never succeed. So at -O0 lower this operation to a CAS loop. |
21398 | if (getTargetMachine().getOptLevel() == CodeGenOptLevel::None) |
21399 | return AtomicExpansionKind::CmpXChg; |
21400 | return AtomicExpansionKind::LLSC; |
21401 | } |
21402 | return AtomicExpansionKind::None; |
21403 | } |
21404 | |
21405 | // Similar to shouldExpandAtomicRMWInIR, ldrex/strex can be used up to 32 |
21406 | // bits, and up to 64 bits on the non-M profiles. |
21407 | TargetLowering::AtomicExpansionKind |
21408 | ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const { |
21409 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to |
21410 | // implement cmpxchg without spilling. If the address being exchanged is also |
21411 | // on the stack and close enough to the spill slot, this can lead to a |
21412 | // situation where the monitor always gets cleared and the atomic operation |
21413 | // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. |
21414 | unsigned Size = AI->getOperand(i_nocapture: 1)->getType()->getPrimitiveSizeInBits(); |
21415 | bool HasAtomicCmpXchg; |
21416 | if (Subtarget->isMClass()) |
21417 | HasAtomicCmpXchg = Subtarget->hasV8MBaselineOps(); |
21418 | else if (Subtarget->isThumb()) |
21419 | HasAtomicCmpXchg = Subtarget->hasV7Ops(); |
21420 | else |
21421 | HasAtomicCmpXchg = Subtarget->hasV6Ops(); |
21422 | if (getTargetMachine().getOptLevel() != CodeGenOptLevel::None && |
21423 | HasAtomicCmpXchg && Size <= (Subtarget->isMClass() ? 32U : 64U)) |
21424 | return AtomicExpansionKind::LLSC; |
21425 | return AtomicExpansionKind::None; |
21426 | } |
21427 | |
21428 | bool ARMTargetLowering::shouldInsertFencesForAtomic( |
21429 | const Instruction *I) const { |
21430 | return InsertFencesForAtomic; |
21431 | } |
21432 | |
21433 | bool ARMTargetLowering::useLoadStackGuardNode() const { |
21434 | // ROPI/RWPI are not supported currently. |
21435 | return !Subtarget->isROPI() && !Subtarget->isRWPI(); |
21436 | } |
21437 | |
21438 | void ARMTargetLowering::insertSSPDeclarations(Module &M) const { |
21439 | if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) |
21440 | return TargetLowering::insertSSPDeclarations(M); |
21441 | |
21442 | // MSVC CRT has a global variable holding security cookie. |
21443 | M.getOrInsertGlobal(Name: "__security_cookie" , |
21444 | Ty: PointerType::getUnqual(C&: M.getContext())); |
21445 | |
21446 | // MSVC CRT has a function to validate security cookie. |
21447 | FunctionCallee SecurityCheckCookie = M.getOrInsertFunction( |
21448 | Name: "__security_check_cookie" , RetTy: Type::getVoidTy(C&: M.getContext()), |
21449 | Args: PointerType::getUnqual(C&: M.getContext())); |
21450 | if (Function *F = dyn_cast<Function>(Val: SecurityCheckCookie.getCallee())) |
21451 | F->addParamAttr(ArgNo: 0, Kind: Attribute::AttrKind::InReg); |
21452 | } |
21453 | |
21454 | Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const { |
21455 | // MSVC CRT has a global variable holding security cookie. |
21456 | if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) |
21457 | return M.getGlobalVariable(Name: "__security_cookie" ); |
21458 | return TargetLowering::getSDagStackGuard(M); |
21459 | } |
21460 | |
21461 | Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const { |
21462 | // MSVC CRT has a function to validate security cookie. |
21463 | if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) |
21464 | return M.getFunction(Name: "__security_check_cookie" ); |
21465 | return TargetLowering::getSSPStackGuardCheck(M); |
21466 | } |
21467 | |
21468 | bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, |
21469 | unsigned &Cost) const { |
21470 | // If we do not have NEON, vector types are not natively supported. |
21471 | if (!Subtarget->hasNEON()) |
21472 | return false; |
21473 | |
21474 | // Floating point values and vector values map to the same register file. |
21475 | // Therefore, although we could do a store extract of a vector type, this is |
21476 | // better to leave at float as we have more freedom in the addressing mode for |
21477 | // those. |
21478 | if (VectorTy->isFPOrFPVectorTy()) |
21479 | return false; |
21480 | |
21481 | // If the index is unknown at compile time, this is very expensive to lower |
21482 | // and it is not possible to combine the store with the extract. |
21483 | if (!isa<ConstantInt>(Val: Idx)) |
21484 | return false; |
21485 | |
21486 | assert(VectorTy->isVectorTy() && "VectorTy is not a vector type" ); |
21487 | unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedValue(); |
21488 | // We can do a store + vector extract on any vector that fits perfectly in a D |
21489 | // or Q register. |
21490 | if (BitWidth == 64 || BitWidth == 128) { |
21491 | Cost = 0; |
21492 | return true; |
21493 | } |
21494 | return false; |
21495 | } |
21496 | |
21497 | bool ARMTargetLowering::isCheapToSpeculateCttz(Type *Ty) const { |
21498 | return Subtarget->hasV6T2Ops(); |
21499 | } |
21500 | |
21501 | bool ARMTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const { |
21502 | return Subtarget->hasV6T2Ops(); |
21503 | } |
21504 | |
21505 | bool ARMTargetLowering::isMaskAndCmp0FoldingBeneficial( |
21506 | const Instruction &AndI) const { |
21507 | if (!Subtarget->hasV7Ops()) |
21508 | return false; |
21509 | |
21510 | // Sink the `and` instruction only if the mask would fit into a modified |
21511 | // immediate operand. |
21512 | ConstantInt *Mask = dyn_cast<ConstantInt>(Val: AndI.getOperand(i: 1)); |
21513 | if (!Mask || Mask->getValue().getBitWidth() > 32u) |
21514 | return false; |
21515 | auto MaskVal = unsigned(Mask->getValue().getZExtValue()); |
21516 | return (Subtarget->isThumb2() ? ARM_AM::getT2SOImmVal(Arg: MaskVal) |
21517 | : ARM_AM::getSOImmVal(Arg: MaskVal)) != -1; |
21518 | } |
21519 | |
21520 | TargetLowering::ShiftLegalizationStrategy |
21521 | ARMTargetLowering::preferredShiftLegalizationStrategy( |
21522 | SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const { |
21523 | if (Subtarget->hasMinSize() && !Subtarget->isTargetWindows()) |
21524 | return ShiftLegalizationStrategy::LowerToLibcall; |
21525 | return TargetLowering::preferredShiftLegalizationStrategy(DAG, N, |
21526 | ExpansionFactor); |
21527 | } |
21528 | |
21529 | Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, |
21530 | Value *Addr, |
21531 | AtomicOrdering Ord) const { |
21532 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
21533 | bool IsAcquire = isAcquireOrStronger(AO: Ord); |
21534 | |
21535 | // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd |
21536 | // intrinsic must return {i32, i32} and we have to recombine them into a |
21537 | // single i64 here. |
21538 | if (ValueTy->getPrimitiveSizeInBits() == 64) { |
21539 | Intrinsic::ID Int = |
21540 | IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; |
21541 | Function *Ldrex = Intrinsic::getDeclaration(M, id: Int); |
21542 | |
21543 | Value *LoHi = Builder.CreateCall(Callee: Ldrex, Args: Addr, Name: "lohi" ); |
21544 | |
21545 | Value *Lo = Builder.CreateExtractValue(Agg: LoHi, Idxs: 0, Name: "lo" ); |
21546 | Value *Hi = Builder.CreateExtractValue(Agg: LoHi, Idxs: 1, Name: "hi" ); |
21547 | if (!Subtarget->isLittle()) |
21548 | std::swap (a&: Lo, b&: Hi); |
21549 | Lo = Builder.CreateZExt(V: Lo, DestTy: ValueTy, Name: "lo64" ); |
21550 | Hi = Builder.CreateZExt(V: Hi, DestTy: ValueTy, Name: "hi64" ); |
21551 | return Builder.CreateOr( |
21552 | LHS: Lo, RHS: Builder.CreateShl(LHS: Hi, RHS: ConstantInt::get(Ty: ValueTy, V: 32)), Name: "val64" ); |
21553 | } |
21554 | |
21555 | Type *Tys[] = { Addr->getType() }; |
21556 | Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex; |
21557 | Function *Ldrex = Intrinsic::getDeclaration(M, id: Int, Tys); |
21558 | CallInst *CI = Builder.CreateCall(Callee: Ldrex, Args: Addr); |
21559 | |
21560 | CI->addParamAttr( |
21561 | ArgNo: 0, Attr: Attribute::get(Context&: M->getContext(), Kind: Attribute::ElementType, Ty: ValueTy)); |
21562 | return Builder.CreateTruncOrBitCast(V: CI, DestTy: ValueTy); |
21563 | } |
21564 | |
21565 | void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance( |
21566 | IRBuilderBase &Builder) const { |
21567 | if (!Subtarget->hasV7Ops()) |
21568 | return; |
21569 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
21570 | Builder.CreateCall(Callee: Intrinsic::getDeclaration(M, id: Intrinsic::arm_clrex)); |
21571 | } |
21572 | |
21573 | Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder, |
21574 | Value *Val, Value *Addr, |
21575 | AtomicOrdering Ord) const { |
21576 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
21577 | bool IsRelease = isReleaseOrStronger(AO: Ord); |
21578 | |
21579 | // Since the intrinsics must have legal type, the i64 intrinsics take two |
21580 | // parameters: "i32, i32". We must marshal Val into the appropriate form |
21581 | // before the call. |
21582 | if (Val->getType()->getPrimitiveSizeInBits() == 64) { |
21583 | Intrinsic::ID Int = |
21584 | IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd; |
21585 | Function *Strex = Intrinsic::getDeclaration(M, id: Int); |
21586 | Type *Int32Ty = Type::getInt32Ty(C&: M->getContext()); |
21587 | |
21588 | Value *Lo = Builder.CreateTrunc(V: Val, DestTy: Int32Ty, Name: "lo" ); |
21589 | Value *Hi = Builder.CreateTrunc(V: Builder.CreateLShr(LHS: Val, RHS: 32), DestTy: Int32Ty, Name: "hi" ); |
21590 | if (!Subtarget->isLittle()) |
21591 | std::swap(a&: Lo, b&: Hi); |
21592 | return Builder.CreateCall(Callee: Strex, Args: {Lo, Hi, Addr}); |
21593 | } |
21594 | |
21595 | Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex; |
21596 | Type *Tys[] = { Addr->getType() }; |
21597 | Function *Strex = Intrinsic::getDeclaration(M, id: Int, Tys); |
21598 | |
21599 | CallInst *CI = Builder.CreateCall( |
21600 | Callee: Strex, Args: {Builder.CreateZExtOrBitCast( |
21601 | V: Val, DestTy: Strex->getFunctionType()->getParamType(i: 0)), |
21602 | Addr}); |
21603 | CI->addParamAttr(ArgNo: 1, Attr: Attribute::get(Context&: M->getContext(), Kind: Attribute::ElementType, |
21604 | Ty: Val->getType())); |
21605 | return CI; |
21606 | } |
21607 | |
21608 | |
21609 | bool ARMTargetLowering::alignLoopsWithOptSize() const { |
21610 | return Subtarget->isMClass(); |
21611 | } |
21612 | |
21613 | /// A helper function for determining the number of interleaved accesses we |
21614 | /// will generate when lowering accesses of the given type. |
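      | /// For example, a 512-bit <16 x i32> counts as four 128-bit accesses, while |
      | /// a 64-bit <8 x i8> counts as one. |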
21615 | unsigned |
21616 | ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy, |
21617 | const DataLayout &DL) const { |
21618 | return (DL.getTypeSizeInBits(Ty: VecTy) + 127) / 128; |
21619 | } |
21620 | |
21621 | bool ARMTargetLowering::isLegalInterleavedAccessType( |
21622 | unsigned Factor, FixedVectorType *VecTy, Align Alignment, |
21623 | const DataLayout &DL) const { |
21624 | |
21625 | unsigned VecSize = DL.getTypeSizeInBits(Ty: VecTy); |
21626 | unsigned ElSize = DL.getTypeSizeInBits(Ty: VecTy->getElementType()); |
21627 | |
21628 | if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps()) |
21629 | return false; |
21630 | |
21631 | // Ensure the vector doesn't have f16 elements. Even though we could do an |
21632 | // i16 vldN, we can't hold the f16 vectors and will end up converting via |
21633 | // f32. |
21634 | if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy()) |
21635 | return false; |
21636 | if (Subtarget->hasMVEIntegerOps() && Factor == 3) |
21637 | return false; |
21638 | |
21639 | // Ensure the number of vector elements is greater than 1. |
21640 | if (VecTy->getNumElements() < 2) |
21641 | return false; |
21642 | |
21643 | // Ensure the element type is legal. |
21644 | if (ElSize != 8 && ElSize != 16 && ElSize != 32) |
21645 | return false; |
21646 |   // And that the alignment is high enough under MVE. |
21647 | if (Subtarget->hasMVEIntegerOps() && Alignment < ElSize / 8) |
21648 | return false; |
21649 | |
21650 | // Ensure the total vector size is 64 or a multiple of 128. Types larger than |
21651 | // 128 will be split into multiple interleaved accesses. |
21652 | if (Subtarget->hasNEON() && VecSize == 64) |
21653 | return true; |
21654 | return VecSize % 128 == 0; |
21655 | } |
21656 | |
21657 | unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const { |
21658 | if (Subtarget->hasNEON()) |
21659 | return 4; |
21660 | if (Subtarget->hasMVEIntegerOps()) |
21661 | return MVEMaxSupportedInterleaveFactor; |
21662 | return TargetLoweringBase::getMaxSupportedInterleaveFactor(); |
21663 | } |
21664 | |
21665 | /// Lower an interleaved load into a vldN intrinsic. |
21666 | /// |
21667 | /// E.g. Lower an interleaved load (Factor = 2): |
21668 | /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 |
21669 | /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements |
21670 | /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements |
21671 | /// |
21672 | /// Into: |
21673 | /// %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4) |
21674 | /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0 |
21675 | /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1 |
21676 | bool ARMTargetLowering::lowerInterleavedLoad( |
21677 | LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, |
21678 | ArrayRef<unsigned> Indices, unsigned Factor) const { |
21679 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && |
21680 | "Invalid interleave factor" ); |
21681 | assert(!Shuffles.empty() && "Empty shufflevector input" ); |
21682 | assert(Shuffles.size() == Indices.size() && |
21683 | "Unmatched number of shufflevectors and indices" ); |
21684 | |
21685 | auto *VecTy = cast<FixedVectorType>(Val: Shuffles[0]->getType()); |
21686 | Type *EltTy = VecTy->getElementType(); |
21687 | |
21688 | const DataLayout &DL = LI->getDataLayout(); |
21689 | Align Alignment = LI->getAlign(); |
21690 | |
21691 |   // Skip if we do not have NEON/MVE or if the vector type is illegal. We can |
21692 |   // "legalize" wide vector types into multiple interleaved accesses as long |
21693 |   // as the vector size is divisible by 128 bits. |
21694 | if (!isLegalInterleavedAccessType(Factor, VecTy, Alignment, DL)) |
21695 | return false; |
21696 | |
21697 | unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL); |
21698 | |
21699 |   // A vector of pointers cannot be the return type of the ldN intrinsics, so |
21700 |   // load integer vectors first and then convert them to pointer vectors. |
21701 | if (EltTy->isPointerTy()) |
21702 | VecTy = FixedVectorType::get(ElementType: DL.getIntPtrType(EltTy), FVTy: VecTy); |
21703 | |
21704 | IRBuilder<> Builder(LI); |
21705 | |
21706 | // The base address of the load. |
21707 | Value *BaseAddr = LI->getPointerOperand(); |
21708 | |
21709 | if (NumLoads > 1) { |
21710 | // If we're going to generate more than one load, reset the sub-vector type |
21711 | // to something legal. |
21712 | VecTy = FixedVectorType::get(ElementType: VecTy->getElementType(), |
21713 | NumElts: VecTy->getNumElements() / NumLoads); |
21714 | } |
21715 | |
21716 | assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!" ); |
21717 | |
21718 | auto createLoadIntrinsic = [&](Value *BaseAddr) { |
21719 | if (Subtarget->hasNEON()) { |
21720 | Type *PtrTy = Builder.getPtrTy(AddrSpace: LI->getPointerAddressSpace()); |
21721 | Type *Tys[] = {VecTy, PtrTy}; |
21722 | static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2, |
21723 | Intrinsic::arm_neon_vld3, |
21724 | Intrinsic::arm_neon_vld4}; |
21725 | Function *VldnFunc = |
21726 | Intrinsic::getDeclaration(M: LI->getModule(), id: LoadInts[Factor - 2], Tys); |
21727 | |
21728 | SmallVector<Value *, 2> Ops; |
21729 | Ops.push_back(Elt: BaseAddr); |
21730 | Ops.push_back(Elt: Builder.getInt32(C: LI->getAlign().value())); |
21731 | |
21732 | return Builder.CreateCall(Callee: VldnFunc, Args: Ops, Name: "vldN" ); |
21733 | } else { |
21734 | assert((Factor == 2 || Factor == 4) && |
21735 | "expected interleave factor of 2 or 4 for MVE" ); |
21736 | Intrinsic::ID LoadInts = |
21737 | Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q; |
21738 | Type *PtrTy = Builder.getPtrTy(AddrSpace: LI->getPointerAddressSpace()); |
21739 | Type *Tys[] = {VecTy, PtrTy}; |
21740 | Function *VldnFunc = |
21741 | Intrinsic::getDeclaration(M: LI->getModule(), id: LoadInts, Tys); |
21742 | |
21743 | SmallVector<Value *, 2> Ops; |
21744 | Ops.push_back(Elt: BaseAddr); |
21745 | return Builder.CreateCall(Callee: VldnFunc, Args: Ops, Name: "vldN" ); |
21746 | } |
21747 | }; |
21748 | |
21749 | // Holds sub-vectors extracted from the load intrinsic return values. The |
21750 | // sub-vectors are associated with the shufflevector instructions they will |
21751 | // replace. |
21752 | DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs; |
21753 | |
21754 | for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { |
21755 | // If we're generating more than one load, compute the base address of |
21756 | // subsequent loads as an offset from the previous. |
21757 | if (LoadCount > 0) |
21758 | BaseAddr = Builder.CreateConstGEP1_32(Ty: VecTy->getElementType(), Ptr: BaseAddr, |
21759 | Idx0: VecTy->getNumElements() * Factor); |
21760 | |
21761 | CallInst *VldN = createLoadIntrinsic(BaseAddr); |
21762 | |
21763 | // Replace uses of each shufflevector with the corresponding vector loaded |
21764 | // by ldN. |
21765 | for (unsigned i = 0; i < Shuffles.size(); i++) { |
21766 | ShuffleVectorInst *SV = Shuffles[i]; |
21767 | unsigned Index = Indices[i]; |
21768 | |
21769 | Value *SubVec = Builder.CreateExtractValue(Agg: VldN, Idxs: Index); |
21770 | |
21771 | // Convert the integer vector to pointer vector if the element is pointer. |
21772 | if (EltTy->isPointerTy()) |
21773 | SubVec = Builder.CreateIntToPtr( |
21774 | V: SubVec, |
21775 | DestTy: FixedVectorType::get(ElementType: SV->getType()->getElementType(), FVTy: VecTy)); |
21776 | |
21777 | SubVecs[SV].push_back(Elt: SubVec); |
21778 | } |
21779 | } |
21780 | |
21781 | // Replace uses of the shufflevector instructions with the sub-vectors |
21782 | // returned by the load intrinsic. If a shufflevector instruction is |
21783 | // associated with more than one sub-vector, those sub-vectors will be |
21784 | // concatenated into a single wide vector. |
21785 | for (ShuffleVectorInst *SVI : Shuffles) { |
21786 | auto &SubVec = SubVecs[SVI]; |
21787 | auto *WideVec = |
21788 | SubVec.size() > 1 ? concatenateVectors(Builder, Vecs: SubVec) : SubVec[0]; |
21789 | SVI->replaceAllUsesWith(V: WideVec); |
21790 | } |
21791 | |
21792 | return true; |
21793 | } |
21794 | |
21795 | /// Lower an interleaved store into a vstN intrinsic. |
21796 | /// |
21797 | /// E.g. Lower an interleaved store (Factor = 3): |
21798 | /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, |
21799 | /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> |
21800 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4 |
21801 | /// |
21802 | /// Into: |
21803 | /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> |
21804 | /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> |
21805 | /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> |
21806 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
21807 | /// |
21808 | /// Note that the new shufflevectors will be removed and we'll only generate one |
21809 | /// vst3 instruction in CodeGen. |
21810 | /// |
21811 | /// Example for a more general valid mask (Factor 3). Lower: |
21812 | /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, |
21813 | /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> |
21814 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr |
21815 | /// |
21816 | /// Into: |
21817 | /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> |
21818 | /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> |
21819 | /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> |
21820 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
21821 | bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI, |
21822 | ShuffleVectorInst *SVI, |
21823 | unsigned Factor) const { |
21824 | assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && |
21825 | "Invalid interleave factor" ); |
21826 | |
21827 | auto *VecTy = cast<FixedVectorType>(Val: SVI->getType()); |
21828 | assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store" ); |
21829 | |
21830 | unsigned LaneLen = VecTy->getNumElements() / Factor; |
21831 | Type *EltTy = VecTy->getElementType(); |
21832 | auto *SubVecTy = FixedVectorType::get(ElementType: EltTy, NumElts: LaneLen); |
21833 | |
21834 | const DataLayout &DL = SI->getDataLayout(); |
21835 | Align Alignment = SI->getAlign(); |
21836 | |
21837 |   // Skip if we do not have NEON/MVE or if the vector type is illegal. We can |
21838 |   // "legalize" wide vector types into multiple interleaved accesses as long |
21839 |   // as the vector size is divisible by 128 bits. |
21840 | if (!isLegalInterleavedAccessType(Factor, VecTy: SubVecTy, Alignment, DL)) |
21841 | return false; |
21842 | |
21843 | unsigned NumStores = getNumInterleavedAccesses(VecTy: SubVecTy, DL); |
21844 | |
21845 | Value *Op0 = SVI->getOperand(i_nocapture: 0); |
21846 | Value *Op1 = SVI->getOperand(i_nocapture: 1); |
21847 | IRBuilder<> Builder(SI); |
21848 | |
21849 | // StN intrinsics don't support pointer vectors as arguments. Convert pointer |
21850 | // vectors to integer vectors. |
21851 | if (EltTy->isPointerTy()) { |
21852 | Type *IntTy = DL.getIntPtrType(EltTy); |
21853 | |
21854 | // Convert to the corresponding integer vector. |
21855 | auto *IntVecTy = |
21856 | FixedVectorType::get(ElementType: IntTy, FVTy: cast<FixedVectorType>(Val: Op0->getType())); |
21857 | Op0 = Builder.CreatePtrToInt(V: Op0, DestTy: IntVecTy); |
21858 | Op1 = Builder.CreatePtrToInt(V: Op1, DestTy: IntVecTy); |
21859 | |
21860 | SubVecTy = FixedVectorType::get(ElementType: IntTy, NumElts: LaneLen); |
21861 | } |
21862 | |
21863 | // The base address of the store. |
21864 | Value *BaseAddr = SI->getPointerOperand(); |
21865 | |
21866 | if (NumStores > 1) { |
21867 | // If we're going to generate more than one store, reset the lane length |
21868 | // and sub-vector type to something legal. |
21869 | LaneLen /= NumStores; |
21870 | SubVecTy = FixedVectorType::get(ElementType: SubVecTy->getElementType(), NumElts: LaneLen); |
21871 | } |
21872 | |
21873 | assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!" ); |
21874 | |
21875 | auto Mask = SVI->getShuffleMask(); |
21876 | |
21877 | auto createStoreIntrinsic = [&](Value *BaseAddr, |
21878 | SmallVectorImpl<Value *> &Shuffles) { |
21879 | if (Subtarget->hasNEON()) { |
21880 | static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2, |
21881 | Intrinsic::arm_neon_vst3, |
21882 | Intrinsic::arm_neon_vst4}; |
21883 | Type *PtrTy = Builder.getPtrTy(AddrSpace: SI->getPointerAddressSpace()); |
21884 | Type *Tys[] = {PtrTy, SubVecTy}; |
21885 | |
21886 | Function *VstNFunc = Intrinsic::getDeclaration( |
21887 | M: SI->getModule(), id: StoreInts[Factor - 2], Tys); |
21888 | |
21889 | SmallVector<Value *, 6> Ops; |
21890 | Ops.push_back(Elt: BaseAddr); |
21891 | append_range(C&: Ops, R&: Shuffles); |
21892 | Ops.push_back(Elt: Builder.getInt32(C: SI->getAlign().value())); |
21893 | Builder.CreateCall(Callee: VstNFunc, Args: Ops); |
21894 | } else { |
21895 | assert((Factor == 2 || Factor == 4) && |
21896 | "expected interleave factor of 2 or 4 for MVE" ); |
      Intrinsic::ID StoreInts =
          Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
      Type *PtrTy = Builder.getPtrTy(SI->getPointerAddressSpace());
      Type *Tys[] = {PtrTy, SubVecTy};
      Function *VstNFunc =
          Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys);

      SmallVector<Value *, 6> Ops;
      Ops.push_back(BaseAddr);
      append_range(Ops, Shuffles);
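      // The MVE vst2q/vst4q intrinsics take a trailing "stage" operand; emit
      // one call per stage so that, together, the calls store the whole
      // interleaved structure.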
      for (unsigned F = 0; F < Factor; F++) {
        Ops.push_back(Builder.getInt32(F));
        Builder.CreateCall(VstNFunc, Ops);
        Ops.pop_back();
      }
    }
  };

  for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
    // If we're generating more than one store, we compute the base address of
    // subsequent stores as an offset from the previous one.
    if (StoreCount > 0)
      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
                                            BaseAddr, LaneLen * Factor);

    SmallVector<Value *, 4> Shuffles;

    // Split the shufflevector operands into sub vectors for the new vstN call.
    for (unsigned i = 0; i < Factor; i++) {
      unsigned IdxI = StoreCount * LaneLen * Factor + i;
      if (Mask[IdxI] >= 0) {
        Shuffles.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0)));
      } else {
        unsigned StartMask = 0;
        for (unsigned j = 1; j < LaneLen; j++) {
          unsigned IdxJ = StoreCount * LaneLen * Factor + j;
          if (Mask[IdxJ * Factor + IdxI] >= 0) {
            StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
            break;
          }
        }
        // Note: If all elements in a chunk are undef, StartMask stays 0.
        // Note: Filling undef gaps with arbitrary elements is OK, since those
        // elements were being written anyway (with undefs); when the whole
        // chunk is undef we default to using elements starting at index 0.
        // Note: StartMask cannot be negative; that is checked in
        // isReInterleaveMask.
        Shuffles.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(StartMask, LaneLen, 0)));
      }
    }

    createStoreIntrinsic(BaseAddr, Shuffles);
  }
  return true;
}

enum HABaseType {
  HA_UNKNOWN = 0,
  HA_FLOAT,
  HA_DOUBLE,
  HA_VECT64,
  HA_VECT128
};

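/// Return true if \p Ty is a homogeneous aggregate in the AAPCS-VFP sense:
/// one to four members of a single base type (float, double, or a 64- or
/// 128-bit vector), possibly nested inside structs and arrays. \p Base and
/// \p Members are updated to describe the aggregate as it is walked.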
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) {
  if (auto *ST = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0; i < ST->getNumElements(); ++i) {
      uint64_t SubMembers = 0;
      if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
        return false;
      Members += SubMembers;
    }
  } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
    uint64_t SubMembers = 0;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
      return false;
    Members += SubMembers * AT->getNumElements();
  } else if (Ty->isFloatTy()) {
    if (Base != HA_UNKNOWN && Base != HA_FLOAT)
      return false;
    Members = 1;
    Base = HA_FLOAT;
  } else if (Ty->isDoubleTy()) {
    if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
      return false;
    Members = 1;
    Base = HA_DOUBLE;
  } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
    Members = 1;
    switch (Base) {
    case HA_FLOAT:
    case HA_DOUBLE:
      return false;
    case HA_VECT64:
      return VT->getPrimitiveSizeInBits().getFixedValue() == 64;
    case HA_VECT128:
      return VT->getPrimitiveSizeInBits().getFixedValue() == 128;
    case HA_UNKNOWN:
      switch (VT->getPrimitiveSizeInBits().getFixedValue()) {
      case 64:
        Base = HA_VECT64;
        return true;
      case 128:
        Base = HA_VECT128;
        return true;
      default:
        return false;
      }
    }
  }

  return (Members > 0 && Members <= 4);
}

/// Return the correct alignment for the current calling convention.
Align ARMTargetLowering::getABIAlignmentForCallingConv(
    Type *ArgTy, const DataLayout &DL) const {
  const Align ABITypeAlign = DL.getABITypeAlign(ArgTy);
  if (!ArgTy->isVectorTy())
    return ABITypeAlign;

  // Avoid over-aligning vector parameters. It would require realigning the
  // stack and waste space for no real benefit.
  return std::min(ABITypeAlign, DL.getStackAlignment());
}

/// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
/// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
/// passing according to AAPCS rules.
bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg,
    const DataLayout &DL) const {
  if (getEffectiveCallingConv(CallConv, isVarArg) !=
      CallingConv::ARM_AAPCS_VFP)
    return false;

  HABaseType Base = HA_UNKNOWN;
  uint64_t Members = 0;
  bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
  LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());

  bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
  return IsHA || IsIntArray;
}

Register ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? Register() : ARM::R0;
}

Register ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? Register() : ARM::R1;
}

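// Mark this function as using "split CSR" saving: the callee-saved registers
// reported by getCalleeSavedRegsViaCopy are preserved via the register copies
// inserted by insertCopiesSplitCSR rather than the usual prologue/epilogue
// spills (used for the CXX_FAST_TLS calling convention).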
void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in ARMFunctionInfo.
  ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
  AFI->setIsSplitCSR(true);
}

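// For each callee-saved register handled via split CSR, copy it into a fresh
// virtual register at the start of the entry block and copy it back into the
// physical register just before every return.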
void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (ARM::GPRRegClass.contains(*I))
      RC = &ARM::GPRRegClass;
    else if (ARM::DPRRegClass.contains(*I))
      RC = &ARM::DPRRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
  MF.getFrameInfo().computeMaxCallFrameSize(MF);
  TargetLoweringBase::finalizeLowering(MF);
}

bool ARMTargetLowering::isComplexDeinterleavingSupported() const {
  return Subtarget->hasMVEIntegerOps();
}

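// Complex deinterleaving is only supported on fixed vectors whose total width
// is a power of two and at least 128 bits (one MVE register). f16/f32 element
// types are supported for all operations when MVE floating-point ops are
// available; i8/i16/i32 element types are only supported for complex adds.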
bool ARMTargetLowering::isComplexDeinterleavingOperationSupported(
    ComplexDeinterleavingOperation Operation, Type *Ty) const {
  auto *VTy = dyn_cast<FixedVectorType>(Ty);
  if (!VTy)
    return false;

  auto *ScalarTy = VTy->getScalarType();
  unsigned NumElements = VTy->getNumElements();

  unsigned VTyWidth = VTy->getScalarSizeInBits() * NumElements;
  if (VTyWidth < 128 || !llvm::isPowerOf2_32(VTyWidth))
    return false;

  // Both VCADD and VCMUL/VCMLA support the same types, F16 and F32
  if (ScalarTy->isHalfTy() || ScalarTy->isFloatTy())
    return Subtarget->hasMVEFloatOps();

  if (Operation != ComplexDeinterleavingOperation::CAdd)
    return false;

  return Subtarget->hasMVEIntegerOps() &&
         (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
          ScalarTy->isIntegerTy(32));
}

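// Emit the MVE intrinsic implementing the requested complex operation on the
// (already deinterleaved) inputs: CMulPartial maps to vcmulq/vcmlaq and CAdd
// maps to vcaddq. Vectors wider than 128 bits are split in half, handled
// recursively, and the results concatenated.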
Value *ARMTargetLowering::createComplexDeinterleavingIR(
    IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
    ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
    Value *Accumulator) const {

  FixedVectorType *Ty = cast<FixedVectorType>(InputA->getType());

  unsigned TyWidth = Ty->getScalarSizeInBits() * Ty->getNumElements();

  assert(TyWidth >= 128 && "Width of vector type must be at least 128 bits");

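  // Vectors wider than a single 128-bit MVE register are not handled directly
  // by the intrinsics: split each operand in half, recurse on the halves, and
  // concatenate the two results.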
  if (TyWidth > 128) {
    int Stride = Ty->getNumElements() / 2;
    auto SplitSeq = llvm::seq<int>(0, Ty->getNumElements());
    auto SplitSeqVec = llvm::to_vector(SplitSeq);
    ArrayRef<int> LowerSplitMask(&SplitSeqVec[0], Stride);
    ArrayRef<int> UpperSplitMask(&SplitSeqVec[Stride], Stride);

    auto *LowerSplitA = B.CreateShuffleVector(InputA, LowerSplitMask);
    auto *LowerSplitB = B.CreateShuffleVector(InputB, LowerSplitMask);
    auto *UpperSplitA = B.CreateShuffleVector(InputA, UpperSplitMask);
    auto *UpperSplitB = B.CreateShuffleVector(InputB, UpperSplitMask);
    Value *LowerSplitAcc = nullptr;
    Value *UpperSplitAcc = nullptr;

    if (Accumulator) {
      LowerSplitAcc = B.CreateShuffleVector(Accumulator, LowerSplitMask);
      UpperSplitAcc = B.CreateShuffleVector(Accumulator, UpperSplitMask);
    }

    auto *LowerSplitInt = createComplexDeinterleavingIR(
        B, OperationType, Rotation, LowerSplitA, LowerSplitB, LowerSplitAcc);
    auto *UpperSplitInt = createComplexDeinterleavingIR(
        B, OperationType, Rotation, UpperSplitA, UpperSplitB, UpperSplitAcc);

    ArrayRef<int> JoinMask(&SplitSeqVec[0], Ty->getNumElements());
    return B.CreateShuffleVector(LowerSplitInt, UpperSplitInt, JoinMask);
  }

  auto *IntTy = Type::getInt32Ty(B.getContext());

  ConstantInt *ConstRotation = nullptr;
  if (OperationType == ComplexDeinterleavingOperation::CMulPartial) {
    ConstRotation = ConstantInt::get(IntTy, (int)Rotation);

    if (Accumulator)
      return B.CreateIntrinsic(Intrinsic::arm_mve_vcmlaq, Ty,
                               {ConstRotation, Accumulator, InputB, InputA});
    return B.CreateIntrinsic(Intrinsic::arm_mve_vcmulq, Ty,
                             {ConstRotation, InputB, InputA});
  }

  if (OperationType == ComplexDeinterleavingOperation::CAdd) {
    // 1 means the value is not halved.
    auto *ConstHalving = ConstantInt::get(IntTy, 1);

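    // vcaddq only supports rotations of 90 and 270 degrees; its rotation
    // immediate encodes them as 0 and 1 respectively.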
    if (Rotation == ComplexDeinterleavingRotation::Rotation_90)
      ConstRotation = ConstantInt::get(IntTy, 0);
    else if (Rotation == ComplexDeinterleavingRotation::Rotation_270)
      ConstRotation = ConstantInt::get(IntTy, 1);

    if (!ConstRotation)
      return nullptr; // Invalid rotation for arm_mve_vcaddq

    return B.CreateIntrinsic(Intrinsic::arm_mve_vcaddq, Ty,
                             {ConstHalving, ConstRotation, InputA, InputB});
  }

  return nullptr;
}
