//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"
#define PASS_NAME "AArch64 Instruction Selection"

// https://github.com/llvm/llvm-project/issues/114425
#if defined(_MSC_VER) && !defined(__clang__) && !defined(NDEBUG)
#pragma inline_depth(0)
#endif

//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

public:
  AArch64DAGToDAGISel() = delete;

  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOptLevel OptLevel)
      : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  void Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    InlineAsm::ConstraintCode ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  template <signed Low, signed High, signed Scale>
  bool SelectRDVLImm(SDValue N, SDValue &Imm);

  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithUXTXRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexedS9S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexedBitWidth(N, true, 9, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexedU6S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexedBitWidth(N, false, 6, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }
  template <unsigned Size, unsigned Max>
  bool SelectAddrModeIndexedUImm(SDValue N, SDValue &Base, SDValue &OffImm) {
    // Test if there is an appropriate addressing mode and check if the
    // immediate fits.
    bool Found = SelectAddrModeIndexed(N, Size, Base, OffImm);
    if (Found) {
      if (auto *CI = dyn_cast<ConstantSDNode>(OffImm)) {
        int64_t C = CI->getSExtValue();
        if (C <= Max)
          return true;
      }
    }

    // Otherwise, base only, materialize address in register.
    Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
    return true;
  }

  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

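  // Checks whether N extracts the high 64-bit half of a 128-bit vector
  // (looking through a bitcast on little-endian targets), returning the full
  // 128-bit source vector in Res.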
  bool SelectExtractHigh(SDValue N, SDValue &Res) {
    if (Subtarget->isLittleEndian() && N->getOpcode() == ISD::BITCAST)
      N = N->getOperand(0);
    if (N->getOpcode() != ISD::EXTRACT_SUBVECTOR ||
        !isa<ConstantSDNode>(N->getOperand(1)))
      return false;
    EVT VT = N->getValueType(0);
    EVT LVT = N->getOperand(0).getValueType();
    unsigned Index = N->getConstantOperandVal(1);
    if (!VT.is64BitVector() || !LVT.is128BitVector() ||
        Index != VT.getVectorNumElements())
      return false;
    Res = N->getOperand(0);
    return true;
  }

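  // Matches a vector logical shift right whose input is an add of the rounding
  // constant 1 << (ShtAmt - 1), as used to form rounding shift instructions.
  // On success, Res1 is the unrounded input and Res2 the shift amount.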
  bool SelectRoundingVLShr(SDValue N, SDValue &Res1, SDValue &Res2) {
    if (N.getOpcode() != AArch64ISD::VLSHR)
      return false;
    SDValue Op = N->getOperand(0);
    EVT VT = Op.getValueType();
    unsigned ShtAmt = N->getConstantOperandVal(1);
    if (ShtAmt > VT.getScalarSizeInBits() / 2 || Op.getOpcode() != ISD::ADD)
      return false;

    APInt Imm;
    if (Op.getOperand(1).getOpcode() == AArch64ISD::MOVIshift)
      Imm = APInt(VT.getScalarSizeInBits(),
                  Op.getOperand(1).getConstantOperandVal(0)
                      << Op.getOperand(1).getConstantOperandVal(1));
    else if (Op.getOperand(1).getOpcode() == AArch64ISD::DUP &&
             isa<ConstantSDNode>(Op.getOperand(1).getOperand(0)))
      Imm = APInt(VT.getScalarSizeInBits(),
                  Op.getOperand(1).getConstantOperandVal(0));
    else
      return false;

    if (Imm != 1ULL << (ShtAmt - 1))
      return false;

    Res1 = Op.getOperand(0);
    Res2 = CurDAG->getTargetConstant(ShtAmt, SDLoc(N), MVT::i32);
    return true;
  }

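  // Returns true if N is undef or a DUP/splat of constant zero (integer or FP).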
  bool SelectDupZeroOrUndef(SDValue N) {
    switch(N->getOpcode()) {
    case ISD::UNDEF:
      return true;
    case AArch64ISD::DUP:
    case ISD::SPLAT_VECTOR: {
      auto Opnd0 = N->getOperand(0);
      if (isNullConstant(Opnd0))
        return true;
      if (isNullFPConstant(Opnd0))
        return true;
      break;
    }
    default:
      break;
    }

    return false;
  }

  bool SelectAny(SDValue) { return true; }

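  // Like SelectDupZeroOrUndef, but does not accept undef.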
  bool SelectDupZero(SDValue N) {
    switch(N->getOpcode()) {
    case AArch64ISD::DUP:
    case ISD::SPLAT_VECTOR: {
      auto Opnd0 = N->getOperand(0);
      if (isNullConstant(Opnd0))
        return true;
      if (isNullFPConstant(Opnd0))
        return true;
      break;
    }
    }

    return false;
  }

  template<MVT::SimpleValueType VT>
  bool SelectSVEAddSubImm(SDValue N, SDValue &Imm, SDValue &Shift) {
    return SelectSVEAddSubImm(N, VT, Imm, Shift);
  }

  template <MVT::SimpleValueType VT, bool Negate>
  bool SelectSVEAddSubSSatImm(SDValue N, SDValue &Imm, SDValue &Shift) {
    return SelectSVEAddSubSSatImm(N, VT, Imm, Shift, Negate);
  }

  template <MVT::SimpleValueType VT>
  bool SelectSVECpyDupImm(SDValue N, SDValue &Imm, SDValue &Shift) {
    return SelectSVECpyDupImm(N, VT, Imm, Shift);
  }

  template <MVT::SimpleValueType VT, bool Invert = false>
  bool SelectSVELogicalImm(SDValue N, SDValue &Imm) {
    return SelectSVELogicalImm(N, VT, Imm, Invert);
  }

  template <MVT::SimpleValueType VT>
  bool SelectSVEArithImm(SDValue N, SDValue &Imm) {
    return SelectSVEArithImm(N, VT, Imm);
  }

  template <unsigned Low, unsigned High, bool AllowSaturation = false>
  bool SelectSVEShiftImm(SDValue N, SDValue &Imm) {
    return SelectSVEShiftImm(N, Low, High, AllowSaturation, Imm);
  }

  bool SelectSVEShiftSplatImmR(SDValue N, SDValue &Imm) {
    if (N->getOpcode() != ISD::SPLAT_VECTOR)
      return false;

    EVT EltVT = N->getValueType(0).getVectorElementType();
    return SelectSVEShiftImm(N->getOperand(0), /* Low */ 1,
                             /* High */ EltVT.getFixedSizeInBits(),
                             /* AllowSaturation */ true, Imm);
  }

  // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
  template<signed Min, signed Max, signed Scale, bool Shift>
  bool SelectCntImm(SDValue N, SDValue &Imm) {
    if (!isa<ConstantSDNode>(N))
      return false;

    int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
    if (Shift)
      MulImm = 1LL << MulImm;

    if ((MulImm % std::abs(Scale)) != 0)
      return false;

    MulImm /= Scale;
    if ((MulImm >= Min) && (MulImm <= Max)) {
      Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
      return true;
    }

    return false;
  }

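  // Checks that N is a constant in the range [0, Max] and, if so, returns it
  // multiplied by Scale as an i32 target constant.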
  template <signed Max, signed Scale>
  bool SelectEXTImm(SDValue N, SDValue &Imm) {
    if (!isa<ConstantSDNode>(N))
      return false;

    int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();

    if (MulImm >= 0 && MulImm <= Max) {
      MulImm *= Scale;
      Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
      return true;
    }

    return false;
  }

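  // Maps a constant C in [0, Max] to the register BaseReg + C, e.g. to select
  // one of a contiguous block of target registers by immediate index.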
  template <unsigned BaseReg, unsigned Max>
  bool ImmToReg(SDValue N, SDValue &Imm) {
    if (auto *CI = dyn_cast<ConstantSDNode>(N)) {
      uint64_t C = CI->getZExtValue();

      if (C > Max)
        return false;

      Imm = CurDAG->getRegister(BaseReg + C, MVT::Other);
      return true;
    }
    return false;
  }

  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);
  // Form a sequence of SVE registers for instructions using list of vectors,
  // e.g. structured loads and stores (ldN, stN).
  SDValue createZTuple(ArrayRef<SDValue> Vecs);

  // Similar to above, except the register must start at a multiple of the
  // tuple, e.g. z2 for a 2-tuple, or z8 for a 4-tuple.
  SDValue createZMulTuple(ArrayRef<SDValue> Regs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);

  void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  bool tryIndexedLoad(SDNode *N);

  void SelectPtrauthAuth(SDNode *N);
  void SelectPtrauthResign(SDNode *N);

  bool trySelectStackSlotTagP(SDNode *N);
  void SelectTagP(SDNode *N);

  void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                  unsigned SubRegIdx);
  void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                      unsigned SubRegIdx);
  void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, unsigned Scale,
                            unsigned Opc_rr, unsigned Opc_ri,
                            bool IsIntr = false);
  void SelectContiguousMultiVectorLoad(SDNode *N, unsigned NumVecs,
                                       unsigned Scale, unsigned Opc_ri,
                                       unsigned Opc_rr);
  void SelectDestructiveMultiIntrinsic(SDNode *N, unsigned NumVecs,
                                       bool IsZmMulti, unsigned Opcode,
                                       bool HasPred = false);
  void SelectPExtPair(SDNode *N, unsigned Opc);
  void SelectWhilePair(SDNode *N, unsigned Opc);
  void SelectCVTIntrinsic(SDNode *N, unsigned NumVecs, unsigned Opcode);
  void SelectCVTIntrinsicFP8(SDNode *N, unsigned NumVecs, unsigned Opcode);
  void SelectClamp(SDNode *N, unsigned NumVecs, unsigned Opcode);
  void SelectUnaryMultiIntrinsic(SDNode *N, unsigned NumOutVecs,
                                 bool IsTupleInput, unsigned Opc);
  void SelectFrintFromVT(SDNode *N, unsigned NumVecs, unsigned Opcode);

  template <unsigned MaxIdx, unsigned Scale>
  void SelectMultiVectorMove(SDNode *N, unsigned NumVecs, unsigned BaseReg,
                             unsigned Op);
  void SelectMultiVectorMoveZ(SDNode *N, unsigned NumVecs,
                              unsigned Op, unsigned MaxIdx, unsigned Scale,
                              unsigned BaseReg = 0);
  bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
  /// SVE Reg+Imm addressing mode.
  template <int64_t Min, int64_t Max>
  bool SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, SDValue &Base,
                                SDValue &OffImm);
  /// SVE Reg+Reg address mode.
  template <unsigned Scale>
  bool SelectSVERegRegAddrMode(SDValue N, SDValue &Base, SDValue &Offset) {
    return SelectSVERegRegAddrMode(N, Scale, Base, Offset);
  }

  void SelectMultiVectorLutiLane(SDNode *Node, unsigned NumOutVecs,
                                 unsigned Opc, uint32_t MaxImm);

  void SelectMultiVectorLuti(SDNode *Node, unsigned NumOutVecs, unsigned Opc);

  template <unsigned MaxIdx, unsigned Scale>
  bool SelectSMETileSlice(SDValue N, SDValue &Vector, SDValue &Offset) {
    return SelectSMETileSlice(N, MaxIdx, Vector, Offset, Scale);
  }

  void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPredicatedStore(SDNode *N, unsigned NumVecs, unsigned Scale,
                             unsigned Opc_rr, unsigned Opc_ri);
  std::tuple<unsigned, SDValue, SDValue>
  findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr, unsigned Opc_ri,
                           const SDValue &OldBase, const SDValue &OldOffset,
                           unsigned Scale);

  bool tryBitfieldExtractOp(SDNode *N);
  bool tryBitfieldExtractOpFromSExt(SDNode *N);
  bool tryBitfieldInsertOp(SDNode *N);
  bool tryBitfieldInsertInZeroOp(SDNode *N);
  bool tryShiftAmountMod(SDNode *N);

  bool tryReadRegister(SDNode *N);
  bool tryWriteRegister(SDNode *N);

  bool trySelectCastFixedLengthToScalableVector(SDNode *N);
  bool trySelectCastScalableToFixedLengthVector(SDNode *N);

  bool trySelectXAR(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectShiftedRegisterFromAnd(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
                               SDValue &OffImm) {
    return SelectAddrModeIndexedBitWidth(N, true, 7, Size, Base, OffImm);
  }
  bool SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm, unsigned BW,
                                     unsigned Size, SDValue &Base,
                                     SDValue &OffImm);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFoldingALU(SDValue V, bool LSL = false) const;
  bool isWorthFoldingAddr(SDValue V, unsigned Size) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosRecipOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosRecipOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosRecipOperand(SDValue N, SDValue &FixedPos,
                                     unsigned Width);

  bool SelectCMP_SWAP(SDNode *N);

  bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
  bool SelectSVEAddSubSSatImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift,
                              bool Negate);
  bool SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
  bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm, bool Invert);

  bool SelectSVESignedArithImm(SDValue N, SDValue &Imm);
  bool SelectSVEShiftImm(SDValue N, uint64_t Low, uint64_t High,
                         bool AllowSaturation, SDValue &Imm);

  bool SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm);
  bool SelectSVERegRegAddrMode(SDValue N, unsigned Scale, SDValue &Base,
                               SDValue &Offset);
  bool SelectSMETileSlice(SDValue N, unsigned MaxSize, SDValue &Vector,
                          SDValue &Offset, unsigned Scale = 1);

  bool SelectAllActivePredicate(SDValue N);
  bool SelectAnyPredicate(SDValue N);

  bool SelectCmpBranchUImm6Operand(SDNode *P, SDValue N, SDValue &Imm);
};

class AArch64DAGToDAGISelLegacy : public SelectionDAGISelLegacy {
public:
  static char ID;
  explicit AArch64DAGToDAGISelLegacy(AArch64TargetMachine &tm,
                                     CodeGenOptLevel OptLevel)
      : SelectionDAGISelLegacy(
            ID, std::make_unique<AArch64DAGToDAGISel>(tm, OptLevel)) {}
};
} // end anonymous namespace

char AArch64DAGToDAGISelLegacy::ID = 0;

INITIALIZE_PASS(AArch64DAGToDAGISelLegacy, DEBUG_TYPE, PASS_NAME, false, false)

/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the 32-bit value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node has the
// specified opcode and an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}

// isIntImmediateEq - This method tests to see if N is a constant operand that
// is equivalent to 'ImmExpected'.
#ifndef NDEBUG
static bool isIntImmediateEq(SDValue N, const uint64_t ImmExpected) {
  uint64_t Imm;
  if (!isIntImmediate(N.getNode(), Imm))
    return false;
  return Imm == ImmExpected;
}
#endif

bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, const InlineAsm::ConstraintCode ConstraintID,
    std::vector<SDValue> &OutOps) {
  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::ConstraintCode::m:
  case InlineAsm::ConstraintCode::o:
  case InlineAsm::ConstraintCode::Q:
    // We need to make sure that this one operand does not end up in XZR, thus
    // require the address to be in a PointerRegClass register.
    const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
    const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF);
    SDLoc dl(Op);
    SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i64);
    SDValue NewOp =
        SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                       dl, Op.getValueType(),
                                       Op, RC), 0);
    OutOps.push_back(NewOp);
    return false;
  }
  return true;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = N.getNode()->getAsZExtVal();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  SDLoc dl(N);
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
  return true;
}

/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = N.getNode()->getAsZExtVal();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

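  // Negate in the operand's width (two's complement of the low 32 bits for
  // i32, of the full 64 bits otherwise), then require the result to fit in
  // 24 bits before handing it back to SelectArithImmed.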
  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
                          Shift);
}

/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}

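// Returns true if N is a memory access or an AArch64 prefetch node; these are
// the only users for which folding an address computation is profitable.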
static bool isMemOpOrPrefetch(SDNode *N) {
  return isa<MemSDNode>(*N) || N->getOpcode() == AArch64ISD::PREFETCH;
}

/// Determine whether it is worth it to fold SHL into the addressing
/// mode.
static bool isWorthFoldingSHL(SDValue V) {
  assert(V.getOpcode() == ISD::SHL && "invalid opcode");
  // It is worth folding logical shift of up to three places.
  auto *CSD = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (!CSD)
    return false;
  unsigned ShiftVal = CSD->getZExtValue();
  if (ShiftVal > 3)
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = V.getNode();
  for (SDNode *UI : Node->users())
    if (!isMemOpOrPrefetch(UI))
      for (SDNode *UII : UI->users())
        if (!isMemOpOrPrefetch(UII))
          return false;
  return true;
}

/// Determine whether it is worth it to fold V into an extended register
/// addressing mode.
bool AArch64DAGToDAGISel::isWorthFoldingAddr(SDValue V, unsigned Size) const {
  // Trivial if we are optimizing for code size or if there is only
  // one use of the value.
  if (CurDAG->shouldOptForSize() || V.hasOneUse())
    return true;

  // If a subtarget has a slow shift, folding a shift into multiple loads
  // costs additional micro-ops.
  if (Subtarget->hasAddrLSLSlow14() && (Size == 2 || Size == 16))
    return false;

  // Check whether we're going to emit the address arithmetic anyway because
  // it's used by a non-address operation.
  if (V.getOpcode() == ISD::SHL && isWorthFoldingSHL(V))
    return true;
  if (V.getOpcode() == ISD::ADD) {
    const SDValue LHS = V.getOperand(0);
    const SDValue RHS = V.getOperand(1);
    if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(LHS))
      return true;
    if (RHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(RHS))
      return true;
  }

  // It hurts otherwise, since the value will be reused.
  return false;
}

/// and (shl/srl/sra, x, c), mask --> shl (srl/sra, x, c1), c2
/// Attempt this transform so that more shifted-register operands can be
/// selected.
bool AArch64DAGToDAGISel::SelectShiftedRegisterFromAnd(SDValue N, SDValue &Reg,
                                                       SDValue &Shift) {
  EVT VT = N.getValueType();
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  if (N->getOpcode() != ISD::AND || !N->hasOneUse())
    return false;
  SDValue LHS = N.getOperand(0);
  if (!LHS->hasOneUse())
    return false;

  unsigned LHSOpcode = LHS->getOpcode();
  if (LHSOpcode != ISD::SHL && LHSOpcode != ISD::SRL && LHSOpcode != ISD::SRA)
    return false;

  ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
  if (!ShiftAmtNode)
    return false;

  uint64_t ShiftAmtC = ShiftAmtNode->getZExtValue();
  ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHSC)
    return false;

  APInt AndMask = RHSC->getAPIntValue();
  unsigned LowZBits, MaskLen;
  if (!AndMask.isShiftedMask(LowZBits, MaskLen))
    return false;

  unsigned BitWidth = N.getValueSizeInBits();
  SDLoc DL(LHS);
  uint64_t NewShiftC;
  unsigned NewShiftOp;
  if (LHSOpcode == ISD::SHL) {
    // LowZBits <= ShiftAmtC will fall into isBitfieldPositioningOp
    // BitWidth != LowZBits + MaskLen doesn't match the pattern
    if (LowZBits <= ShiftAmtC || (BitWidth != LowZBits + MaskLen))
      return false;

    NewShiftC = LowZBits - ShiftAmtC;
    NewShiftOp = VT == MVT::i64 ? AArch64::UBFMXri : AArch64::UBFMWri;
  } else {
    if (LowZBits == 0)
      return false;

    // NewShiftC >= BitWidth will fall into isBitfieldExtractOp
    NewShiftC = LowZBits + ShiftAmtC;
    if (NewShiftC >= BitWidth)
      return false;

    // SRA needs all high bits
    if (LHSOpcode == ISD::SRA && (BitWidth != (LowZBits + MaskLen)))
      return false;

    // SRL high bits can be 0 or 1
    if (LHSOpcode == ISD::SRL && (BitWidth > (NewShiftC + MaskLen)))
      return false;

    if (LHSOpcode == ISD::SRL)
      NewShiftOp = VT == MVT::i64 ? AArch64::UBFMXri : AArch64::UBFMWri;
    else
      NewShiftOp = VT == MVT::i64 ? AArch64::SBFMXri : AArch64::SBFMWri;
  }

  assert(NewShiftC < BitWidth && "Invalid shift amount");
  SDValue NewShiftAmt = CurDAG->getTargetConstant(NewShiftC, DL, VT);
  SDValue BitWidthMinus1 = CurDAG->getTargetConstant(BitWidth - 1, DL, VT);
  Reg = SDValue(CurDAG->getMachineNode(NewShiftOp, DL, VT, LHS->getOperand(0),
                                       NewShiftAmt, BitWidthMinus1),
                0);
  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, LowZBits);
  Shift = CurDAG->getTargetConstant(ShVal, DL, MVT::i32);
  return true;
}

/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}

/// Determine whether it is worth it to fold V into an extended register of an
/// Add/Sub. LSL means we are folding into an `add w0, w1, w2, lsl #N`
/// instruction, and the shift should be treated as worth folding even if it
/// has multiple uses.
bool AArch64DAGToDAGISel::isWorthFoldingALU(SDValue V, bool LSL) const {
  // Trivial if we are optimizing for code size or if there is only
  // one use of the value.
  if (CurDAG->shouldOptForSize() || V.hasOneUse())
    return true;

  // If a subtarget has a fastpath LSL we can fold a logical shift into
  // the add/sub and save a cycle.
  if (LSL && Subtarget->hasALULSLFast() && V.getOpcode() == ISD::SHL &&
      V.getConstantOperandVal(1) <= 4 &&
      getExtendTypeForNode(V.getOperand(0)) == AArch64_AM::InvalidShiftExtend)
    return true;

  // It hurts otherwise, since the value will be reused.
  return false;
}

/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// supported.
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  if (SelectShiftedRegisterFromAnd(N, Reg, Shift))
    return true;

  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
    return isWorthFoldingALU(N, true);
  }

  return false;
}

/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDLoc dl(N);
  return CurDAG->getTargetExtractSubreg(AArch64::sub_32, dl, MVT::i32, N);
}

// Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
template<signed Low, signed High, signed Scale>
bool AArch64DAGToDAGISel::SelectRDVLImm(SDValue N, SDValue &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
  if ((MulImm % std::abs(Scale)) == 0) {
    int64_t RDVLImm = MulImm / Scale;
    if ((RDVLImm >= Low) && (RDVLImm <= High)) {
      Imm = CurDAG->getSignedTargetConstant(RDVLImm, SDLoc(N), MVT::i32);
      return true;
    }
  }

  return false;
}

/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);

    // Don't match if a free 32-bit -> 64-bit zext can be used instead. Use
    // isDef32 as a heuristic for when the operand is likely to be a 32-bit def.
    auto isDef32 = [](SDValue N) {
      unsigned Opc = N.getOpcode();
      return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
             Opc != ISD::CopyFromReg && Opc != ISD::AssertSext &&
             Opc != ISD::AssertZext && Opc != ISD::AssertAlign &&
             Opc != ISD::FREEZE;
    };
    if (Ext == AArch64_AM::UXTW && Reg->getValueType(0).getSizeInBits() == 32 &&
        isDef32(Reg))
      return false;
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFoldingALU(N);
}

/// SelectArithUXTXRegister - Select a "UXTX register" operand. This
/// operand is used by instructions that have an SP operand.
bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg,
                                                  SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() != ISD::SHL)
    return false;

  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD)
    return false;
  ShiftVal = CSD->getZExtValue();
  if (ShiftVal > 4)
    return false;

  Ext = AArch64_AM::UXTX;
  Reg = N.getOperand(0);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFoldingALU(N);
}

/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto *User : N->users()) {
    if (User->getOpcode() != ISD::LOAD && User->getOpcode() != ISD::STORE &&
        User->getOpcode() != ISD::ATOMIC_LOAD &&
        User->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (isStrongerThanMonotonic(cast<MemSDNode>(User)->getSuccessOrdering()))
      return false;
  }

  return true;
}

/// Check if the immediate offset is valid as a scaled immediate.
static bool isValidAsScaledImmediate(int64_t Offset, unsigned Range,
                                     unsigned Size) {
  if ((Offset & (Size - 1)) == 0 && Offset >= 0 &&
      Offset < (Range << Log2_32(Size)))
    return true;
  return false;
}

/// SelectAddrModeIndexedBitWidth - Select a "register plus scaled (un)signed BW-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm,
                                                        unsigned BW, unsigned Size,
                                                        SDValue &Base,
                                                        SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  // As opposed to the (12-bit) Indexed addressing mode below, the 7/9-bit
  // signed addressing modes selected here don't support labels/immediates,
  // only base+offset.
  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      if (IsSignedImm) {
        int64_t RHSC = RHS->getSExtValue();
        unsigned Scale = Log2_32(Size);
        int64_t Range = 0x1LL << (BW - 1);

        if ((RHSC & (Size - 1)) == 0 && RHSC >= -(Range << Scale) &&
            RHSC < (Range << Scale)) {
          Base = N.getOperand(0);
          if (Base.getOpcode() == ISD::FrameIndex) {
            int FI = cast<FrameIndexSDNode>(Base)->getIndex();
            Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
          }
          OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
          return true;
        }
      } else {
        // unsigned Immediate
        uint64_t RHSC = RHS->getZExtValue();
        unsigned Scale = Log2_32(Size);
        uint64_t Range = 0x1ULL << BW;

        if ((RHSC & (Size - 1)) == 0 && RHSC < (Range << Scale)) {
          Base = N.getOperand(0);
          if (Base.getOpcode() == ISD::FrameIndex) {
            int FI = cast<FrameIndexSDNode>(Base)->getIndex();
            Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
          }
          OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
          return true;
        }
      }
    }
  }
  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  // add x0, Xbase, #offset
  // stp x1, x2, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}

/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base, SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    if (GAN->getOffset() % Size == 0 &&
        GAN->getGlobal()->getPointerAlignment(DL) >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if (isValidAsScaledImmediate(RHSC, 0x1000, Size)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  // add x0, Xbase, #offset
  // ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}

/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
      return true;
    }
  }
  return false;
}

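// Widen a 32-bit value to 64 bits by inserting it into the low half (sub_32)
// of an IMPLICIT_DEF i64 value.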
static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDLoc dl(N);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
  return CurDAG->getTargetInsertSubreg(AArch64::sub_32, dl, MVT::i64, ImpDef,
                                       N);
}

/// Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  SDLoc dl(N);
  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
  }

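  // The only shift amounts that can be folded are 0 (no shift) or exactly
  // log2 of the access size.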
  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  return isWorthFoldingAddr(N, Size);
}

bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc dl(N);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->users()) {
    if (!isMemOpOrPrefetch(UI))
      return false;
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(N, Size);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFoldingAddr(LHS, Size))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFoldingAddr(RHS, Size))
      return true;
  }

  return false;
}

// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
// encoded by one MOVZ, return true.
static bool isPreferredADD(int64_t ImmOff) {
  // Constant in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD LSL #12", ignore such constants.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}

bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc DL(N);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->users()) {
    if (!isMemOpOrPrefetch(UI))
      return false;
  }

  // Watch out if RHS is a wide immediate: it cannot be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
  // either. Instead it will use the [BaseReg + 0] address mode and generate
  // instructions like:
  //   MOV  X0, WideImmediate
  //   ADD  X1, BaseReg, X0
  //   LDR  X2, [X1, 0]
  // For such a situation, using the [BaseReg, XReg] addressing mode saves one
  // ADD/SUB:
  //   MOV  X0, WideImmediate
  //   LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)RHS->getAsZExtVal();
    // Skip if the immediate can be selected by a load/store addressing mode.
    // Also skip if the immediate can be encoded by a single ADD (SUB is also
    // checked by using -ImmOff).
    if (isValidAsScaledImmediate(ImmOff, 0x1000, Size) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(N, Size);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}

SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
                                     AArch64::dsub2, AArch64::dsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
                                     AArch64::qsub2, AArch64::qsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createZTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {AArch64::ZPR2RegClassID,
                                         AArch64::ZPR3RegClassID,
                                         AArch64::ZPR4RegClassID};
  static const unsigned SubRegs[] = {AArch64::zsub0, AArch64::zsub1,
                                     AArch64::zsub2, AArch64::zsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createZMulTuple(ArrayRef<SDValue> Regs) {
  assert(Regs.size() == 2 || Regs.size() == 4);

  // The createTuple interface requires 3 RegClassIDs for each possible
  // tuple type even though we only have them for ZPR2 and ZPR4.
  static const unsigned RegClassIDs[] = {AArch64::ZPR2Mul2RegClassID, 0,
                                         AArch64::ZPR4Mul4RegClassID};
  static const unsigned SubRegs[] = {AArch64::zsub0, AArch64::zsub1,
                                     AArch64::zsub2, AArch64::zsub3};
  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0]);

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

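  // For TBX (isExt) operand 1 is the fallback vector, so the table vectors
  // start one operand later than for TBL.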
1475 unsigned ExtOff = isExt;
1476
1477 // Form a REG_SEQUENCE to force register allocation.
1478 unsigned Vec0Off = ExtOff + 1;
1479 SmallVector<SDValue, 4> Regs(N->ops().slice(N: Vec0Off, M: NumVecs));
1480 SDValue RegSeq = createQTuple(Regs);
1481
1482 SmallVector<SDValue, 6> Ops;
1483 if (isExt)
1484 Ops.push_back(Elt: N->getOperand(Num: 1));
1485 Ops.push_back(Elt: RegSeq);
1486 Ops.push_back(Elt: N->getOperand(Num: NumVecs + ExtOff + 1));
1487 ReplaceNode(F: N, T: CurDAG->getMachineNode(Opcode: Opc, dl, VT, Ops));
1488}
1489
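/// Split the discriminator \p Disc into the (constant, address) operand pair
/// expected by the AUT/AUTPAC pseudos. A ptrauth.blend intrinsic is split into
/// its two components, a plain 16-bit constant becomes (constant, XZR), and
/// anything else is returned as (0, Disc) so it gets computed separately.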
1490static std::tuple<SDValue, SDValue>
1491extractPtrauthBlendDiscriminators(SDValue Disc, SelectionDAG *DAG) {
1492 SDLoc DL(Disc);
1493 SDValue AddrDisc;
1494 SDValue ConstDisc;
1495
1496 // If this is a blend, remember the constant and address discriminators.
1497 // Otherwise, it's either a constant discriminator, or a non-blended
1498 // address discriminator.
1499 if (Disc->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
1500 Disc->getConstantOperandVal(Num: 0) == Intrinsic::ptrauth_blend) {
1501 AddrDisc = Disc->getOperand(Num: 1);
1502 ConstDisc = Disc->getOperand(Num: 2);
1503 } else {
1504 ConstDisc = Disc;
1505 }
1506
1507 // If the constant discriminator (either the blend RHS, or the entire
1508 // discriminator value) isn't a 16-bit constant, bail out, and let the
1509 // discriminator be computed separately.
1510 auto *ConstDiscN = dyn_cast<ConstantSDNode>(Val&: ConstDisc);
1511 if (!ConstDiscN || !isUInt<16>(x: ConstDiscN->getZExtValue()))
1512 return std::make_tuple(args: DAG->getTargetConstant(Val: 0, DL, VT: MVT::i64), args&: Disc);
1513
1514 // If there's no address discriminator, use XZR directly.
1515 if (!AddrDisc)
1516 AddrDisc = DAG->getRegister(Reg: AArch64::XZR, VT: MVT::i64);
1517
1518 return std::make_tuple(
1519 args: DAG->getTargetConstant(Val: ConstDiscN->getZExtValue(), DL, VT: MVT::i64),
1520 args&: AddrDisc);
1521}
1522
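/// Select the @llvm.ptrauth.auth intrinsic. The value to authenticate is
/// copied into X16, which the AUT pseudo takes as an implicit input, and the
/// discriminator is split into its constant and address components.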
1523void AArch64DAGToDAGISel::SelectPtrauthAuth(SDNode *N) {
1524 SDLoc DL(N);
1525 // IntrinsicID is operand #0
1526 SDValue Val = N->getOperand(Num: 1);
1527 SDValue AUTKey = N->getOperand(Num: 2);
1528 SDValue AUTDisc = N->getOperand(Num: 3);
1529
1530 unsigned AUTKeyC = cast<ConstantSDNode>(Val&: AUTKey)->getZExtValue();
1531 AUTKey = CurDAG->getTargetConstant(Val: AUTKeyC, DL, VT: MVT::i64);
1532
1533 SDValue AUTAddrDisc, AUTConstDisc;
1534 std::tie(args&: AUTConstDisc, args&: AUTAddrDisc) =
1535 extractPtrauthBlendDiscriminators(Disc: AUTDisc, DAG: CurDAG);
1536
1537 SDValue X16Copy = CurDAG->getCopyToReg(Chain: CurDAG->getEntryNode(), dl: DL,
1538 Reg: AArch64::X16, N: Val, Glue: SDValue());
1539 SDValue Ops[] = {AUTKey, AUTConstDisc, AUTAddrDisc, X16Copy.getValue(R: 1)};
1540
1541 SDNode *AUT = CurDAG->getMachineNode(Opcode: AArch64::AUT, dl: DL, VT: MVT::i64, Ops);
1542 ReplaceNode(F: N, T: AUT);
1543}
1544
1545void AArch64DAGToDAGISel::SelectPtrauthResign(SDNode *N) {
1546 SDLoc DL(N);
1547 // IntrinsicID is operand #0
1548 SDValue Val = N->getOperand(Num: 1);
1549 SDValue AUTKey = N->getOperand(Num: 2);
1550 SDValue AUTDisc = N->getOperand(Num: 3);
1551 SDValue PACKey = N->getOperand(Num: 4);
1552 SDValue PACDisc = N->getOperand(Num: 5);
1553
1554 unsigned AUTKeyC = cast<ConstantSDNode>(Val&: AUTKey)->getZExtValue();
1555 unsigned PACKeyC = cast<ConstantSDNode>(Val&: PACKey)->getZExtValue();
1556
1557 AUTKey = CurDAG->getTargetConstant(Val: AUTKeyC, DL, VT: MVT::i64);
1558 PACKey = CurDAG->getTargetConstant(Val: PACKeyC, DL, VT: MVT::i64);
1559
1560 SDValue AUTAddrDisc, AUTConstDisc;
1561 std::tie(args&: AUTConstDisc, args&: AUTAddrDisc) =
1562 extractPtrauthBlendDiscriminators(Disc: AUTDisc, DAG: CurDAG);
1563
1564 SDValue PACAddrDisc, PACConstDisc;
1565 std::tie(args&: PACConstDisc, args&: PACAddrDisc) =
1566 extractPtrauthBlendDiscriminators(Disc: PACDisc, DAG: CurDAG);
1567
1568 SDValue X16Copy = CurDAG->getCopyToReg(Chain: CurDAG->getEntryNode(), dl: DL,
1569 Reg: AArch64::X16, N: Val, Glue: SDValue());
1570
1571 SDValue Ops[] = {AUTKey, AUTConstDisc, AUTAddrDisc, PACKey,
1572 PACConstDisc, PACAddrDisc, X16Copy.getValue(R: 1)};
1573
1574 SDNode *AUTPAC = CurDAG->getMachineNode(Opcode: AArch64::AUTPAC, dl: DL, VT: MVT::i64, Ops);
1575 ReplaceNode(F: N, T: AUTPAC);
1576}
1577
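/// Try to select a pre/post-indexed load into the corresponding LDR*pre/post
/// instruction (or, for big-endian vectors, an LD1 post-increment form).
/// Returns true if \p N was replaced. Zero/any-extending loads produce an i32
/// result that is widened with SUBREG_TO_REG when an i64 destination is
/// required.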
1578bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
1579 LoadSDNode *LD = cast<LoadSDNode>(Val: N);
1580 if (LD->isUnindexed())
1581 return false;
1582 EVT VT = LD->getMemoryVT();
1583 EVT DstVT = N->getValueType(ResNo: 0);
1584 ISD::MemIndexedMode AM = LD->getAddressingMode();
1585 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
1586 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(Val: LD->getOffset());
1587 int OffsetVal = (int)OffsetOp->getZExtValue();
1588
1589 // We're not doing validity checking here. That was done when checking
1590 // if we should mark the load as indexed or not. We're just selecting
1591 // the right instruction.
1592 unsigned Opcode = 0;
1593
1594 ISD::LoadExtType ExtType = LD->getExtensionType();
1595 bool InsertTo64 = false;
1596 if (VT == MVT::i64)
1597 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
1598 else if (VT == MVT::i32) {
1599 if (ExtType == ISD::NON_EXTLOAD)
1600 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1601 else if (ExtType == ISD::SEXTLOAD)
1602 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
1603 else {
1604 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1605 InsertTo64 = true;
1606 // The result of the load is only i32. It's the subreg_to_reg that makes
1607 // it into an i64.
1608 DstVT = MVT::i32;
1609 }
1610 } else if (VT == MVT::i16) {
1611 if (ExtType == ISD::SEXTLOAD) {
1612 if (DstVT == MVT::i64)
1613 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
1614 else
1615 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
1616 } else {
1617 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
1618 InsertTo64 = DstVT == MVT::i64;
1619 // The result of the load is only i32. It's the subreg_to_reg that makes
1620 // it into an i64.
1621 DstVT = MVT::i32;
1622 }
1623 } else if (VT == MVT::i8) {
1624 if (ExtType == ISD::SEXTLOAD) {
1625 if (DstVT == MVT::i64)
1626 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1627 else
1628 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1629 } else {
1630 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1631 InsertTo64 = DstVT == MVT::i64;
1632 // The result of the load is only i32. It's the subreg_to_reg that makes
1633 // it into an i64.
1634 DstVT = MVT::i32;
1635 }
1636 } else if (VT == MVT::f16) {
1637 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1638 } else if (VT == MVT::bf16) {
1639 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1640 } else if (VT == MVT::f32) {
1641 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1642 } else if (VT == MVT::f64 ||
1643 (VT.is64BitVector() && Subtarget->isLittleEndian())) {
1644 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1645 } else if (VT.is128BitVector() && Subtarget->isLittleEndian()) {
1646 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1647 } else if (VT.is64BitVector()) {
1648 if (IsPre || OffsetVal != 8)
1649 return false;
1650 switch (VT.getScalarSizeInBits()) {
1651 case 8:
1652 Opcode = AArch64::LD1Onev8b_POST;
1653 break;
1654 case 16:
1655 Opcode = AArch64::LD1Onev4h_POST;
1656 break;
1657 case 32:
1658 Opcode = AArch64::LD1Onev2s_POST;
1659 break;
1660 case 64:
1661 Opcode = AArch64::LD1Onev1d_POST;
1662 break;
1663 default:
1664 llvm_unreachable("Expected vector element to be a power of 2");
1665 }
1666 } else if (VT.is128BitVector()) {
1667 if (IsPre || OffsetVal != 16)
1668 return false;
1669 switch (VT.getScalarSizeInBits()) {
1670 case 8:
1671 Opcode = AArch64::LD1Onev16b_POST;
1672 break;
1673 case 16:
1674 Opcode = AArch64::LD1Onev8h_POST;
1675 break;
1676 case 32:
1677 Opcode = AArch64::LD1Onev4s_POST;
1678 break;
1679 case 64:
1680 Opcode = AArch64::LD1Onev2d_POST;
1681 break;
1682 default:
1683 llvm_unreachable("Expected vector element to be a power of 2");
1684 }
1685 } else
1686 return false;
1687 SDValue Chain = LD->getChain();
1688 SDValue Base = LD->getBasePtr();
1689 SDLoc dl(N);
1690 // LD1 encodes an immediate offset by using XZR as the offset register.
1691 SDValue Offset = (VT.isVector() && !Subtarget->isLittleEndian())
1692 ? CurDAG->getRegister(Reg: AArch64::XZR, VT: MVT::i64)
1693 : CurDAG->getTargetConstant(Val: OffsetVal, DL: dl, VT: MVT::i64);
1694 SDValue Ops[] = { Base, Offset, Chain };
1695 SDNode *Res = CurDAG->getMachineNode(Opcode, dl, VT1: MVT::i64, VT2: DstVT,
1696 VT3: MVT::Other, Ops);
1697
1698 // Transfer memoperands.
1699 MachineMemOperand *MemOp = cast<MemSDNode>(Val: N)->getMemOperand();
1700 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: Res), NewMemRefs: {MemOp});
1701
1702 // Either way, we're replacing the node, so tell the caller that.
1703 SDValue LoadedVal = SDValue(Res, 1);
1704 if (InsertTo64) {
1705 SDValue SubReg = CurDAG->getTargetConstant(Val: AArch64::sub_32, DL: dl, VT: MVT::i32);
1706 LoadedVal =
1707 SDValue(CurDAG->getMachineNode(
1708 Opcode: AArch64::SUBREG_TO_REG, dl, VT: MVT::i64,
1709 Op1: CurDAG->getTargetConstant(Val: 0, DL: dl, VT: MVT::i64), Op2: LoadedVal,
1710 Op3: SubReg),
1711 0);
1712 }
1713
1714 ReplaceUses(F: SDValue(N, 0), T: LoadedVal);
1715 ReplaceUses(F: SDValue(N, 1), T: SDValue(Res, 0));
1716 ReplaceUses(F: SDValue(N, 2), T: SDValue(Res, 2));
1717 CurDAG->RemoveDeadNode(N);
1718 return true;
1719}
1720
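/// SelectLoad - Select a multi-vector structure load. The machine instruction
/// produces a single Untyped super-register, and each of the NumVecs results
/// of \p N is replaced with the sub-register at SubRegIdx + i.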
1721void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
1722 unsigned SubRegIdx) {
1723 SDLoc dl(N);
1724 EVT VT = N->getValueType(ResNo: 0);
1725 SDValue Chain = N->getOperand(Num: 0);
1726
1727 SDValue Ops[] = {N->getOperand(Num: 2), // Mem operand;
1728 Chain};
1729
1730 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1731
1732 SDNode *Ld = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
1733 SDValue SuperReg = SDValue(Ld, 0);
1734 for (unsigned i = 0; i < NumVecs; ++i)
1735 ReplaceUses(F: SDValue(N, i),
1736 T: CurDAG->getTargetExtractSubreg(SRIdx: SubRegIdx + i, DL: dl, VT, Operand: SuperReg));
1737
1738 ReplaceUses(F: SDValue(N, NumVecs), T: SDValue(Ld, 1));
1739
1740 // Transfer memoperands. In the case of AArch64::LD64B, there won't be one,
1741 // because it's too simple to have needed special treatment during lowering.
1742 if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(Val: N)) {
1743 MachineMemOperand *MemOp = MemIntr->getMemOperand();
1744 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: Ld), NewMemRefs: {MemOp});
1745 }
1746
1747 CurDAG->RemoveDeadNode(N);
1748}
1749
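/// SelectPostLoad - Like SelectLoad, but for post-incremented loads: result 0
/// of the machine node is the updated base register, which replaces the
/// write-back result of \p N.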
1750void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1751 unsigned Opc, unsigned SubRegIdx) {
1752 SDLoc dl(N);
1753 EVT VT = N->getValueType(ResNo: 0);
1754 SDValue Chain = N->getOperand(Num: 0);
1755
1756 SDValue Ops[] = {N->getOperand(Num: 1), // Mem operand
1757 N->getOperand(Num: 2), // Incremental
1758 Chain};
1759
1760 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1761 MVT::Untyped, MVT::Other};
1762
1763 SDNode *Ld = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
1764
1765 // Update uses of write back register
1766 ReplaceUses(F: SDValue(N, NumVecs), T: SDValue(Ld, 0));
1767
1768 // Update uses of vector list
1769 SDValue SuperReg = SDValue(Ld, 1);
1770 if (NumVecs == 1)
1771 ReplaceUses(F: SDValue(N, 0), T: SuperReg);
1772 else
1773 for (unsigned i = 0; i < NumVecs; ++i)
1774 ReplaceUses(F: SDValue(N, i),
1775 T: CurDAG->getTargetExtractSubreg(SRIdx: SubRegIdx + i, DL: dl, VT, Operand: SuperReg));
1776
1777 // Update the chain
1778 ReplaceUses(F: SDValue(N, NumVecs + 1), T: SDValue(Ld, 2));
1779 CurDAG->RemoveDeadNode(N);
1780}
1781
1782/// Optimize \param OldBase and \param OldOffset selecting the best addressing
1783/// mode. Returns a tuple consisting of an Opcode, an SDValue representing the
1784/// new Base and an SDValue representing the new offset.
1785std::tuple<unsigned, SDValue, SDValue>
1786AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr,
1787 unsigned Opc_ri,
1788 const SDValue &OldBase,
1789 const SDValue &OldOffset,
1790 unsigned Scale) {
1791 SDValue NewBase = OldBase;
1792 SDValue NewOffset = OldOffset;
1793 // Detect a possible Reg+Imm addressing mode.
1794 const bool IsRegImm = SelectAddrModeIndexedSVE</*Min=*/-8, /*Max=*/7>(
1795 Root: N, N: OldBase, Base&: NewBase, OffImm&: NewOffset);
1796
1797 // Detect a possible reg+reg addressing mode, but only if we haven't already
1798 // detected a Reg+Imm one.
1799 const bool IsRegReg =
1800 !IsRegImm && SelectSVERegRegAddrMode(N: OldBase, Scale, Base&: NewBase, Offset&: NewOffset);
1801
1802 // Select the instruction.
1803 return std::make_tuple(args&: IsRegReg ? Opc_rr : Opc_ri, args&: NewBase, args&: NewOffset);
1804}
1805
1806enum class SelectTypeKind {
1807 Int1 = 0,
1808 Int = 1,
1809 FP = 2,
1810 AnyType = 3,
1811};
1812
1813/// This function selects an opcode from a list of opcodes, which is
1814/// expected to contain the opcodes for { 8-bit, 16-bit, 32-bit, 64-bit }
1815/// element types, in this order.
1816template <SelectTypeKind Kind>
1817static unsigned SelectOpcodeFromVT(EVT VT, ArrayRef<unsigned> Opcodes) {
1818 // Only match scalable vector VTs
1819 if (!VT.isScalableVector())
1820 return 0;
1821
1822 EVT EltVT = VT.getVectorElementType();
1823 unsigned Key = VT.getVectorMinNumElements();
1824 switch (Kind) {
1825 case SelectTypeKind::AnyType:
1826 break;
1827 case SelectTypeKind::Int:
1828 if (EltVT != MVT::i8 && EltVT != MVT::i16 && EltVT != MVT::i32 &&
1829 EltVT != MVT::i64)
1830 return 0;
1831 break;
1832 case SelectTypeKind::Int1:
1833 if (EltVT != MVT::i1)
1834 return 0;
1835 break;
1836 case SelectTypeKind::FP:
1837 if (EltVT == MVT::bf16)
1838 Key = 16;
1839 else if (EltVT != MVT::bf16 && EltVT != MVT::f16 && EltVT != MVT::f32 &&
1840 EltVT != MVT::f64)
1841 return 0;
1842 break;
1843 }
1844
1845 unsigned Offset;
1846 switch (Key) {
1847 case 16: // 8-bit or bf16
1848 Offset = 0;
1849 break;
1850 case 8: // 16-bit
1851 Offset = 1;
1852 break;
1853 case 4: // 32-bit
1854 Offset = 2;
1855 break;
1856 case 2: // 64-bit
1857 Offset = 3;
1858 break;
1859 default:
1860 return 0;
1861 }
1862
1863 return (Opcodes.size() <= Offset) ? 0 : Opcodes[Offset];
1864}
1865
1866// This function is almost identical to SelectWhilePair, but has an
1867// extra check on the range of the immediate operand.
1868// TODO: Merge these two functions together at some point?
1869void AArch64DAGToDAGISel::SelectPExtPair(SDNode *N, unsigned Opc) {
1870 // Immediate can be either 0 or 1.
1871 if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 2)))
1872 if (Imm->getZExtValue() > 1)
1873 return;
1874
1875 SDLoc DL(N);
1876 EVT VT = N->getValueType(ResNo: 0);
1877 SDValue Ops[] = {N->getOperand(Num: 1), N->getOperand(Num: 2)};
1878 SDNode *WhilePair = CurDAG->getMachineNode(Opcode: Opc, dl: DL, VT: MVT::Untyped, Ops);
1879 SDValue SuperReg = SDValue(WhilePair, 0);
1880
1881 for (unsigned I = 0; I < 2; ++I)
1882 ReplaceUses(F: SDValue(N, I), T: CurDAG->getTargetExtractSubreg(
1883 SRIdx: AArch64::psub0 + I, DL, VT, Operand: SuperReg));
1884
1885 CurDAG->RemoveDeadNode(N);
1886}
1887
1888void AArch64DAGToDAGISel::SelectWhilePair(SDNode *N, unsigned Opc) {
1889 SDLoc DL(N);
1890 EVT VT = N->getValueType(ResNo: 0);
1891
1892 SDValue Ops[] = {N->getOperand(Num: 1), N->getOperand(Num: 2)};
1893
1894 SDNode *WhilePair = CurDAG->getMachineNode(Opcode: Opc, dl: DL, VT: MVT::Untyped, Ops);
1895 SDValue SuperReg = SDValue(WhilePair, 0);
1896
1897 for (unsigned I = 0; I < 2; ++I)
1898 ReplaceUses(F: SDValue(N, I), T: CurDAG->getTargetExtractSubreg(
1899 SRIdx: AArch64::psub0 + I, DL, VT, Operand: SuperReg));
1900
1901 CurDAG->RemoveDeadNode(N);
1902}
1903
1904void AArch64DAGToDAGISel::SelectCVTIntrinsic(SDNode *N, unsigned NumVecs,
1905 unsigned Opcode) {
1906 EVT VT = N->getValueType(ResNo: 0);
1907 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumVecs));
1908 SDValue Ops = createZTuple(Regs);
1909 SDLoc DL(N);
1910 SDNode *Intrinsic = CurDAG->getMachineNode(Opcode, dl: DL, VT: MVT::Untyped, Op1: Ops);
1911 SDValue SuperReg = SDValue(Intrinsic, 0);
1912 for (unsigned i = 0; i < NumVecs; ++i)
1913 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
1914 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
1915
1916 CurDAG->RemoveDeadNode(N);
1917}
1918
1919void AArch64DAGToDAGISel::SelectCVTIntrinsicFP8(SDNode *N, unsigned NumVecs,
1920 unsigned Opcode) {
1921 SDLoc DL(N);
1922 EVT VT = N->getValueType(ResNo: 0);
1923 SmallVector<SDValue, 4> Ops(N->op_begin() + 2, N->op_end());
1924 Ops.push_back(/*Chain*/ Elt: N->getOperand(Num: 0));
1925
1926 SDNode *Instruction =
1927 CurDAG->getMachineNode(Opcode, dl: DL, ResultTys: {MVT::Untyped, MVT::Other}, Ops);
1928 SDValue SuperReg = SDValue(Instruction, 0);
1929
1930 for (unsigned i = 0; i < NumVecs; ++i)
1931 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
1932 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
1933
1934 // Copy chain
1935 unsigned ChainIdx = NumVecs;
1936 ReplaceUses(F: SDValue(N, ChainIdx), T: SDValue(Instruction, 1));
1937 CurDAG->RemoveDeadNode(N);
1938}
1939
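/// Select a destructive multi-vector intrinsic: the first NumVecs vector
/// operands form the Zdn tuple (both source and destination), and Zm is either
/// a single vector or a second tuple (IsZmMulti). When HasPred is set, the
/// governing predicate is passed as an extra leading operand.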
1940void AArch64DAGToDAGISel::SelectDestructiveMultiIntrinsic(SDNode *N,
1941 unsigned NumVecs,
1942 bool IsZmMulti,
1943 unsigned Opcode,
1944 bool HasPred) {
1945 assert(Opcode != 0 && "Unexpected opcode");
1946
1947 SDLoc DL(N);
1948 EVT VT = N->getValueType(ResNo: 0);
1949 unsigned FirstVecIdx = HasPred ? 2 : 1;
1950
1951 auto GetMultiVecOperand = [=](unsigned StartIdx) {
1952 SmallVector<SDValue, 4> Regs(N->ops().slice(N: StartIdx, M: NumVecs));
1953 return createZMulTuple(Regs);
1954 };
1955
1956 SDValue Zdn = GetMultiVecOperand(FirstVecIdx);
1957
1958 SDValue Zm;
1959 if (IsZmMulti)
1960 Zm = GetMultiVecOperand(NumVecs + FirstVecIdx);
1961 else
1962 Zm = N->getOperand(Num: NumVecs + FirstVecIdx);
1963
1964 SDNode *Intrinsic;
1965 if (HasPred)
1966 Intrinsic = CurDAG->getMachineNode(Opcode, dl: DL, VT: MVT::Untyped,
1967 Op1: N->getOperand(Num: 1), Op2: Zdn, Op3: Zm);
1968 else
1969 Intrinsic = CurDAG->getMachineNode(Opcode, dl: DL, VT: MVT::Untyped, Op1: Zdn, Op2: Zm);
1970 SDValue SuperReg = SDValue(Intrinsic, 0);
1971 for (unsigned i = 0; i < NumVecs; ++i)
1972 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
1973 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
1974
1975 CurDAG->RemoveDeadNode(N);
1976}
1977
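/// Select a predicated SVE structure load, choosing between the reg+imm
/// (Opc_ri) and reg+reg (Opc_rr) addressing forms via
/// findAddrModeSVELoadStore.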
1978void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
1979 unsigned Scale, unsigned Opc_ri,
1980 unsigned Opc_rr, bool IsIntr) {
1981 assert(Scale < 5 && "Invalid scaling value.");
1982 SDLoc DL(N);
1983 EVT VT = N->getValueType(ResNo: 0);
1984 SDValue Chain = N->getOperand(Num: 0);
1985
1986 // Optimize addressing mode.
1987 SDValue Base, Offset;
1988 unsigned Opc;
1989 std::tie(args&: Opc, args&: Base, args&: Offset) = findAddrModeSVELoadStore(
1990 N, Opc_rr, Opc_ri, OldBase: N->getOperand(Num: IsIntr ? 3 : 2),
1991 OldOffset: CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64), Scale);
1992
1993 SDValue Ops[] = {N->getOperand(Num: IsIntr ? 2 : 1), // Predicate
1994 Base, // Memory operand
1995 Offset, Chain};
1996
1997 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1998
1999 SDNode *Load = CurDAG->getMachineNode(Opcode: Opc, dl: DL, ResultTys: ResTys, Ops);
2000 SDValue SuperReg = SDValue(Load, 0);
2001 for (unsigned i = 0; i < NumVecs; ++i)
2002 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
2003 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
2004
2005 // Copy chain
2006 unsigned ChainIdx = NumVecs;
2007 ReplaceUses(F: SDValue(N, ChainIdx), T: SDValue(Load, 1));
2008 CurDAG->RemoveDeadNode(N);
2009}
2010
2011void AArch64DAGToDAGISel::SelectContiguousMultiVectorLoad(SDNode *N,
2012 unsigned NumVecs,
2013 unsigned Scale,
2014 unsigned Opc_ri,
2015 unsigned Opc_rr) {
2016 assert(Scale < 4 && "Invalid scaling value.");
2017 SDLoc DL(N);
2018 EVT VT = N->getValueType(ResNo: 0);
2019 SDValue Chain = N->getOperand(Num: 0);
2020
2021 SDValue PNg = N->getOperand(Num: 2);
2022 SDValue Base = N->getOperand(Num: 3);
2023 SDValue Offset = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64);
2024 unsigned Opc;
2025 std::tie(args&: Opc, args&: Base, args&: Offset) =
2026 findAddrModeSVELoadStore(N, Opc_rr, Opc_ri, OldBase: Base, OldOffset: Offset, Scale);
2027
2028 SDValue Ops[] = {PNg, // Predicate-as-counter
2029 Base, // Memory operand
2030 Offset, Chain};
2031
2032 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
2033
2034 SDNode *Load = CurDAG->getMachineNode(Opcode: Opc, dl: DL, ResultTys: ResTys, Ops);
2035 SDValue SuperReg = SDValue(Load, 0);
2036 for (unsigned i = 0; i < NumVecs; ++i)
2037 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
2038 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
2039
2040 // Copy chain
2041 unsigned ChainIdx = NumVecs;
2042 ReplaceUses(F: SDValue(N, ChainIdx), T: SDValue(Load, 1));
2043 CurDAG->RemoveDeadNode(N);
2044}
2045
2046void AArch64DAGToDAGISel::SelectFrintFromVT(SDNode *N, unsigned NumVecs,
2047 unsigned Opcode) {
2048 if (N->getValueType(ResNo: 0) != MVT::nxv4f32)
2049 return;
2050 SelectUnaryMultiIntrinsic(N, NumOutVecs: NumVecs, IsTupleInput: true, Opc: Opcode);
2051}
2052
2053void AArch64DAGToDAGISel::SelectMultiVectorLutiLane(SDNode *Node,
2054 unsigned NumOutVecs,
2055 unsigned Opc,
2056 uint32_t MaxImm) {
2057 if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Val: Node->getOperand(Num: 4)))
2058 if (Imm->getZExtValue() > MaxImm)
2059 return;
2060
2061 SDValue ZtValue;
2062 if (!ImmToReg<AArch64::ZT0, 0>(N: Node->getOperand(Num: 2), Imm&: ZtValue))
2063 return;
2064
2065 SDValue Ops[] = {ZtValue, Node->getOperand(Num: 3), Node->getOperand(Num: 4)};
2066 SDLoc DL(Node);
2067 EVT VT = Node->getValueType(ResNo: 0);
2068
2069 SDNode *Instruction =
2070 CurDAG->getMachineNode(Opcode: Opc, dl: DL, ResultTys: {MVT::Untyped, MVT::Other}, Ops);
2071 SDValue SuperReg = SDValue(Instruction, 0);
2072
2073 for (unsigned I = 0; I < NumOutVecs; ++I)
2074 ReplaceUses(F: SDValue(Node, I), T: CurDAG->getTargetExtractSubreg(
2075 SRIdx: AArch64::zsub0 + I, DL, VT, Operand: SuperReg));
2076
2077 // Copy chain
2078 unsigned ChainIdx = NumOutVecs;
2079 ReplaceUses(F: SDValue(Node, ChainIdx), T: SDValue(Instruction, 1));
2080 CurDAG->RemoveDeadNode(N: Node);
2081}
2082
2083void AArch64DAGToDAGISel::SelectMultiVectorLuti(SDNode *Node,
2084 unsigned NumOutVecs,
2085 unsigned Opc) {
2086
2087 SDValue ZtValue;
2088 SmallVector<SDValue, 4> Ops;
2089 if (!ImmToReg<AArch64::ZT0, 0>(N: Node->getOperand(Num: 2), Imm&: ZtValue))
2090 return;
2091
2092 Ops.push_back(Elt: ZtValue);
2093 Ops.push_back(Elt: createZMulTuple(Regs: {Node->getOperand(Num: 3), Node->getOperand(Num: 4)}));
2094 SDLoc DL(Node);
2095 EVT VT = Node->getValueType(ResNo: 0);
2096
2097 SDNode *Instruction =
2098 CurDAG->getMachineNode(Opcode: Opc, dl: DL, ResultTys: {MVT::Untyped, MVT::Other}, Ops);
2099 SDValue SuperReg = SDValue(Instruction, 0);
2100
2101 for (unsigned I = 0; I < NumOutVecs; ++I)
2102 ReplaceUses(F: SDValue(Node, I), T: CurDAG->getTargetExtractSubreg(
2103 SRIdx: AArch64::zsub0 + I, DL, VT, Operand: SuperReg));
2104
2105 // Copy chain
2106 unsigned ChainIdx = NumOutVecs;
2107 ReplaceUses(F: SDValue(Node, ChainIdx), T: SDValue(Instruction, 1));
2108 CurDAG->RemoveDeadNode(N: Node);
2109}
2110
2111void AArch64DAGToDAGISel::SelectClamp(SDNode *N, unsigned NumVecs,
2112 unsigned Op) {
2113 SDLoc DL(N);
2114 EVT VT = N->getValueType(ResNo: 0);
2115
2116 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumVecs));
2117 SDValue Zd = createZMulTuple(Regs);
2118 SDValue Zn = N->getOperand(Num: 1 + NumVecs);
2119 SDValue Zm = N->getOperand(Num: 2 + NumVecs);
2120
2121 SDValue Ops[] = {Zd, Zn, Zm};
2122
2123 SDNode *Intrinsic = CurDAG->getMachineNode(Opcode: Op, dl: DL, VT: MVT::Untyped, Ops);
2124 SDValue SuperReg = SDValue(Intrinsic, 0);
2125 for (unsigned i = 0; i < NumVecs; ++i)
2126 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
2127 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
2128
2129 CurDAG->RemoveDeadNode(N);
2130}
2131
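/// Map \p BaseReg and \p TileNum onto a concrete ZA tile register: ZA/ZAB0
/// only allow tile 0, ZAH0 allows tiles 0-1, ZAS0 tiles 0-3 and ZAD0 tiles
/// 0-7. On success, \p BaseReg is advanced to the requested tile.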
2132bool SelectSMETile(unsigned &BaseReg, unsigned TileNum) {
2133 switch (BaseReg) {
2134 default:
2135 return false;
2136 case AArch64::ZA:
2137 case AArch64::ZAB0:
2138 if (TileNum == 0)
2139 break;
2140 return false;
2141 case AArch64::ZAH0:
2142 if (TileNum <= 1)
2143 break;
2144 return false;
2145 case AArch64::ZAS0:
2146 if (TileNum <= 3)
2147 break;
2148 return false;
2149 case AArch64::ZAD0:
2150 if (TileNum <= 7)
2151 break;
2152 return false;
2153 }
2154
2155 BaseReg += TileNum;
2156 return true;
2157}
2158
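/// Select an SME move/read intrinsic that copies NumVecs slices of a ZA tile
/// (or of the whole ZA array when \p BaseReg is AArch64::ZA) into Z vectors.
/// The slice index is split by SelectSMETileSlice into a base register plus an
/// immediate bounded by MaxIdx and scaled by Scale.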
2159template <unsigned MaxIdx, unsigned Scale>
2160void AArch64DAGToDAGISel::SelectMultiVectorMove(SDNode *N, unsigned NumVecs,
2161 unsigned BaseReg, unsigned Op) {
2162 unsigned TileNum = 0;
2163 if (BaseReg != AArch64::ZA)
2164 TileNum = N->getConstantOperandVal(Num: 2);
2165
2166 if (!SelectSMETile(BaseReg, TileNum))
2167 return;
2168
2169 SDValue SliceBase, Base, Offset;
2170 if (BaseReg == AArch64::ZA)
2171 SliceBase = N->getOperand(Num: 2);
2172 else
2173 SliceBase = N->getOperand(Num: 3);
2174
2175 if (!SelectSMETileSlice(N: SliceBase, MaxSize: MaxIdx, Vector&: Base, Offset, Scale))
2176 return;
2177
2178 SDLoc DL(N);
2179 SDValue SubReg = CurDAG->getRegister(Reg: BaseReg, VT: MVT::Other);
2180 SDValue Ops[] = {SubReg, Base, Offset, /*Chain*/ N->getOperand(Num: 0)};
2181 SDNode *Mov = CurDAG->getMachineNode(Opcode: Op, dl: DL, ResultTys: {MVT::Untyped, MVT::Other}, Ops);
2182
2183 EVT VT = N->getValueType(ResNo: 0);
2184 for (unsigned I = 0; I < NumVecs; ++I)
2185 ReplaceUses(F: SDValue(N, I),
2186 T: CurDAG->getTargetExtractSubreg(SRIdx: AArch64::zsub0 + I, DL, VT,
2187 Operand: SDValue(Mov, 0)));
2188 // Copy chain
2189 unsigned ChainIdx = NumVecs;
2190 ReplaceUses(F: SDValue(N, ChainIdx), T: SDValue(Mov, 1));
2191 CurDAG->RemoveDeadNode(N);
2192}
2193
2194void AArch64DAGToDAGISel::SelectMultiVectorMoveZ(SDNode *N, unsigned NumVecs,
2195 unsigned Op, unsigned MaxIdx,
2196 unsigned Scale, unsigned BaseReg) {
2197  // The slice operand can be in different positions:
2198  // Array to vector: llvm.aarch64.sme.readz.<h/v>.<sz>(slice)
2199  // Tile to vector:  llvm.aarch64.sme.readz.<h/v>.<sz>(tile, slice)
2200 SDValue SliceBase = N->getOperand(Num: 2);
2201 if (BaseReg != AArch64::ZA)
2202 SliceBase = N->getOperand(Num: 3);
2203
2204 SDValue Base, Offset;
2205 if (!SelectSMETileSlice(N: SliceBase, MaxSize: MaxIdx, Vector&: Base, Offset, Scale))
2206 return;
2207  // The correct ZA tile number is computed when the machine instruction is
2208  // emitted (see EmitZAInstr); the DAG cannot select a ZA tile as an
2209  // output register with ZReg.
2210 SDLoc DL(N);
2211 SmallVector<SDValue, 6> Ops;
2212  if (BaseReg != AArch64::ZA)
2213 Ops.push_back(Elt: N->getOperand(Num: 2));
2214 Ops.push_back(Elt: Base);
2215 Ops.push_back(Elt: Offset);
2216  Ops.push_back(Elt: N->getOperand(Num: 0)); // Chain
2217 SDNode *Mov = CurDAG->getMachineNode(Opcode: Op, dl: DL, ResultTys: {MVT::Untyped, MVT::Other}, Ops);
2218
2219 EVT VT = N->getValueType(ResNo: 0);
2220 for (unsigned I = 0; I < NumVecs; ++I)
2221 ReplaceUses(F: SDValue(N, I),
2222 T: CurDAG->getTargetExtractSubreg(SRIdx: AArch64::zsub0 + I, DL, VT,
2223 Operand: SDValue(Mov, 0)));
2224
2225 // Copy chain
2226 unsigned ChainIdx = NumVecs;
2227 ReplaceUses(F: SDValue(N, ChainIdx), T: SDValue(Mov, 1));
2228 CurDAG->RemoveDeadNode(N);
2229}
2230
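/// Select an intrinsic producing NumOutVecs result vectors from either a list
/// of independent vector operands or, when IsTupleInput is set, a 2- or
/// 4-register tuple built with createZMulTuple.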
2231void AArch64DAGToDAGISel::SelectUnaryMultiIntrinsic(SDNode *N,
2232 unsigned NumOutVecs,
2233 bool IsTupleInput,
2234 unsigned Opc) {
2235 SDLoc DL(N);
2236 EVT VT = N->getValueType(ResNo: 0);
2237 unsigned NumInVecs = N->getNumOperands() - 1;
2238
2239 SmallVector<SDValue, 6> Ops;
2240 if (IsTupleInput) {
2241 assert((NumInVecs == 2 || NumInVecs == 4) &&
2242 "Don't know how to handle multi-register input!");
2243 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumInVecs));
2244 Ops.push_back(Elt: createZMulTuple(Regs));
2245 } else {
2246 // All intrinsic nodes have the ID as the first operand, hence the "1 + I".
2247 for (unsigned I = 0; I < NumInVecs; I++)
2248 Ops.push_back(Elt: N->getOperand(Num: 1 + I));
2249 }
2250
2251 SDNode *Res = CurDAG->getMachineNode(Opcode: Opc, dl: DL, VT: MVT::Untyped, Ops);
2252 SDValue SuperReg = SDValue(Res, 0);
2253
2254 for (unsigned I = 0; I < NumOutVecs; I++)
2255 ReplaceUses(F: SDValue(N, I), T: CurDAG->getTargetExtractSubreg(
2256 SRIdx: AArch64::zsub0 + I, DL, VT, Operand: SuperReg));
2257 CurDAG->RemoveDeadNode(N);
2258}
2259
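/// SelectStore - Select a multi-vector structure store: the NumVecs source
/// vectors are packed into a D- or Q-register tuple (depending on whether they
/// are 64 or 128 bits wide) that feeds the store instruction.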
2260void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
2261 unsigned Opc) {
2262 SDLoc dl(N);
2263 EVT VT = N->getOperand(Num: 2)->getValueType(ResNo: 0);
2264
2265 // Form a REG_SEQUENCE to force register allocation.
2266 bool Is128Bit = VT.getSizeInBits() == 128;
2267 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 2, M: NumVecs));
2268 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
2269
2270 SDValue Ops[] = {RegSeq, N->getOperand(Num: NumVecs + 2), N->getOperand(Num: 0)};
2271 SDNode *St = CurDAG->getMachineNode(Opcode: Opc, dl, VT: N->getValueType(ResNo: 0), Ops);
2272
2273 // Transfer memoperands.
2274 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(Val: N)->getMemOperand();
2275 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: St), NewMemRefs: {MemOp});
2276
2277 ReplaceNode(F: N, T: St);
2278}
2279
2280void AArch64DAGToDAGISel::SelectPredicatedStore(SDNode *N, unsigned NumVecs,
2281 unsigned Scale, unsigned Opc_rr,
2282 unsigned Opc_ri) {
2283 SDLoc dl(N);
2284
2285 // Form a REG_SEQUENCE to force register allocation.
2286 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 2, M: NumVecs));
2287 SDValue RegSeq = createZTuple(Regs);
2288
2289 // Optimize addressing mode.
2290 unsigned Opc;
2291 SDValue Offset, Base;
2292 std::tie(args&: Opc, args&: Base, args&: Offset) = findAddrModeSVELoadStore(
2293 N, Opc_rr, Opc_ri, OldBase: N->getOperand(Num: NumVecs + 3),
2294 OldOffset: CurDAG->getTargetConstant(Val: 0, DL: dl, VT: MVT::i64), Scale);
2295
2296 SDValue Ops[] = {RegSeq, N->getOperand(Num: NumVecs + 2), // predicate
2297 Base, // address
2298 Offset, // offset
2299 N->getOperand(Num: 0)}; // chain
2300 SDNode *St = CurDAG->getMachineNode(Opcode: Opc, dl, VT: N->getValueType(ResNo: 0), Ops);
2301
2302 ReplaceNode(F: N, T: St);
2303}
2304
2305bool AArch64DAGToDAGISel::SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base,
2306 SDValue &OffImm) {
2307 SDLoc dl(N);
2308 const DataLayout &DL = CurDAG->getDataLayout();
2309 const TargetLowering *TLI = getTargetLowering();
2310
2311 // Try to match it for the frame address
2312 if (auto FINode = dyn_cast<FrameIndexSDNode>(Val&: N)) {
2313 int FI = FINode->getIndex();
2314 Base = CurDAG->getTargetFrameIndex(FI, VT: TLI->getPointerTy(DL));
2315 OffImm = CurDAG->getTargetConstant(Val: 0, DL: dl, VT: MVT::i64);
2316 return true;
2317 }
2318
2319 return false;
2320}
2321
2322void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
2323 unsigned Opc) {
2324 SDLoc dl(N);
2325 EVT VT = N->getOperand(Num: 2)->getValueType(ResNo: 0);
2326 const EVT ResTys[] = {MVT::i64, // Type of the write back register
2327 MVT::Other}; // Type for the Chain
2328
2329 // Form a REG_SEQUENCE to force register allocation.
2330 bool Is128Bit = VT.getSizeInBits() == 128;
2331 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumVecs));
2332 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
2333
2334 SDValue Ops[] = {RegSeq,
2335 N->getOperand(Num: NumVecs + 1), // base register
2336 N->getOperand(Num: NumVecs + 2), // Incremental
2337 N->getOperand(Num: 0)}; // Chain
2338 SDNode *St = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
2339
2340 ReplaceNode(F: N, T: St);
2341}
2342
2343namespace {
2344/// WidenVector - Given a value in the V64 register class, produce the
2345/// equivalent value in the V128 register class.
2346class WidenVector {
2347 SelectionDAG &DAG;
2348
2349public:
2350 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
2351
2352 SDValue operator()(SDValue V64Reg) {
2353 EVT VT = V64Reg.getValueType();
2354 unsigned NarrowSize = VT.getVectorNumElements();
2355 MVT EltTy = VT.getVectorElementType().getSimpleVT();
2356 MVT WideTy = MVT::getVectorVT(VT: EltTy, NumElements: 2 * NarrowSize);
2357 SDLoc DL(V64Reg);
2358
2359 SDValue Undef =
2360 SDValue(DAG.getMachineNode(Opcode: TargetOpcode::IMPLICIT_DEF, dl: DL, VT: WideTy), 0);
2361 return DAG.getTargetInsertSubreg(SRIdx: AArch64::dsub, DL, VT: WideTy, Operand: Undef, Subreg: V64Reg);
2362 }
2363};
2364} // namespace
2365
2366/// NarrowVector - Given a value in the V128 register class, produce the
2367/// equivalent value in the V64 register class.
2368static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
2369 EVT VT = V128Reg.getValueType();
2370 unsigned WideSize = VT.getVectorNumElements();
2371 MVT EltTy = VT.getVectorElementType().getSimpleVT();
2372 MVT NarrowTy = MVT::getVectorVT(VT: EltTy, NumElements: WideSize / 2);
2373
2374 return DAG.getTargetExtractSubreg(SRIdx: AArch64::dsub, DL: SDLoc(V128Reg), VT: NarrowTy,
2375 Operand: V128Reg);
2376}
2377
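/// SelectLoadLane - Select a single-lane structure load. 64-bit input vectors
/// are widened to 128 bits so they can form a Q-register tuple, and the
/// extracted results are narrowed back to 64 bits afterwards.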
2378void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
2379 unsigned Opc) {
2380 SDLoc dl(N);
2381 EVT VT = N->getValueType(ResNo: 0);
2382 bool Narrow = VT.getSizeInBits() == 64;
2383
2384 // Form a REG_SEQUENCE to force register allocation.
2385 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 2, M: NumVecs));
2386
2387 if (Narrow)
2388 transform(Range&: Regs, d_first: Regs.begin(),
2389 F: WidenVector(*CurDAG));
2390
2391 SDValue RegSeq = createQTuple(Regs);
2392
2393 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
2394
2395 unsigned LaneNo = N->getConstantOperandVal(Num: NumVecs + 2);
2396
2397 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(Val: LaneNo, DL: dl, VT: MVT::i64),
2398 N->getOperand(Num: NumVecs + 3), N->getOperand(Num: 0)};
2399 SDNode *Ld = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
2400 SDValue SuperReg = SDValue(Ld, 0);
2401
2402 EVT WideVT = RegSeq.getOperand(i: 1)->getValueType(ResNo: 0);
2403 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
2404 AArch64::qsub2, AArch64::qsub3 };
2405 for (unsigned i = 0; i < NumVecs; ++i) {
2406 SDValue NV = CurDAG->getTargetExtractSubreg(SRIdx: QSubs[i], DL: dl, VT: WideVT, Operand: SuperReg);
2407 if (Narrow)
2408 NV = NarrowVector(V128Reg: NV, DAG&: *CurDAG);
2409 ReplaceUses(F: SDValue(N, i), T: NV);
2410 }
2411
2412 ReplaceUses(F: SDValue(N, NumVecs), T: SDValue(Ld, 1));
2413 CurDAG->RemoveDeadNode(N);
2414}
2415
2416void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
2417 unsigned Opc) {
2418 SDLoc dl(N);
2419 EVT VT = N->getValueType(ResNo: 0);
2420 bool Narrow = VT.getSizeInBits() == 64;
2421
2422 // Form a REG_SEQUENCE to force register allocation.
2423 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumVecs));
2424
2425 if (Narrow)
2426 transform(Range&: Regs, d_first: Regs.begin(),
2427 F: WidenVector(*CurDAG));
2428
2429 SDValue RegSeq = createQTuple(Regs);
2430
2431 const EVT ResTys[] = {MVT::i64, // Type of the write back register
2432 RegSeq->getValueType(ResNo: 0), MVT::Other};
2433
2434 unsigned LaneNo = N->getConstantOperandVal(Num: NumVecs + 1);
2435
2436 SDValue Ops[] = {RegSeq,
2437 CurDAG->getTargetConstant(Val: LaneNo, DL: dl,
2438 VT: MVT::i64), // Lane Number
2439 N->getOperand(Num: NumVecs + 2), // Base register
2440 N->getOperand(Num: NumVecs + 3), // Incremental
2441 N->getOperand(Num: 0)};
2442 SDNode *Ld = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
2443
2444 // Update uses of the write back register
2445 ReplaceUses(F: SDValue(N, NumVecs), T: SDValue(Ld, 0));
2446
2447 // Update uses of the vector list
2448 SDValue SuperReg = SDValue(Ld, 1);
2449 if (NumVecs == 1) {
2450 ReplaceUses(F: SDValue(N, 0),
2451 T: Narrow ? NarrowVector(V128Reg: SuperReg, DAG&: *CurDAG) : SuperReg);
2452 } else {
2453 EVT WideVT = RegSeq.getOperand(i: 1)->getValueType(ResNo: 0);
2454 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
2455 AArch64::qsub2, AArch64::qsub3 };
2456 for (unsigned i = 0; i < NumVecs; ++i) {
2457 SDValue NV = CurDAG->getTargetExtractSubreg(SRIdx: QSubs[i], DL: dl, VT: WideVT,
2458 Operand: SuperReg);
2459 if (Narrow)
2460 NV = NarrowVector(V128Reg: NV, DAG&: *CurDAG);
2461 ReplaceUses(F: SDValue(N, i), T: NV);
2462 }
2463 }
2464
2465 // Update the Chain
2466 ReplaceUses(F: SDValue(N, NumVecs + 1), T: SDValue(Ld, 2));
2467 CurDAG->RemoveDeadNode(N);
2468}
2469
2470void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
2471 unsigned Opc) {
2472 SDLoc dl(N);
2473 EVT VT = N->getOperand(Num: 2)->getValueType(ResNo: 0);
2474 bool Narrow = VT.getSizeInBits() == 64;
2475
2476 // Form a REG_SEQUENCE to force register allocation.
2477 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 2, M: NumVecs));
2478
2479 if (Narrow)
2480 transform(Range&: Regs, d_first: Regs.begin(),
2481 F: WidenVector(*CurDAG));
2482
2483 SDValue RegSeq = createQTuple(Regs);
2484
2485 unsigned LaneNo = N->getConstantOperandVal(Num: NumVecs + 2);
2486
2487 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(Val: LaneNo, DL: dl, VT: MVT::i64),
2488 N->getOperand(Num: NumVecs + 3), N->getOperand(Num: 0)};
2489 SDNode *St = CurDAG->getMachineNode(Opcode: Opc, dl, VT: MVT::Other, Ops);
2490
2491 // Transfer memoperands.
2492 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(Val: N)->getMemOperand();
2493 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: St), NewMemRefs: {MemOp});
2494
2495 ReplaceNode(F: N, T: St);
2496}
2497
2498void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
2499 unsigned Opc) {
2500 SDLoc dl(N);
2501 EVT VT = N->getOperand(Num: 2)->getValueType(ResNo: 0);
2502 bool Narrow = VT.getSizeInBits() == 64;
2503
2504 // Form a REG_SEQUENCE to force register allocation.
2505 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumVecs));
2506
2507 if (Narrow)
2508 transform(Range&: Regs, d_first: Regs.begin(),
2509 F: WidenVector(*CurDAG));
2510
2511 SDValue RegSeq = createQTuple(Regs);
2512
2513 const EVT ResTys[] = {MVT::i64, // Type of the write back register
2514 MVT::Other};
2515
2516 unsigned LaneNo = N->getConstantOperandVal(Num: NumVecs + 1);
2517
2518 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(Val: LaneNo, DL: dl, VT: MVT::i64),
2519 N->getOperand(Num: NumVecs + 2), // Base Register
2520 N->getOperand(Num: NumVecs + 3), // Incremental
2521 N->getOperand(Num: 0)};
2522 SDNode *St = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
2523
2524 // Transfer memoperands.
2525 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(Val: N)->getMemOperand();
2526 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: St), NewMemRefs: {MemOp});
2527
2528 ReplaceNode(F: N, T: St);
2529}
2530
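/// Match an AND (possibly of a shifted value) as an unsigned bitfield extract
/// and return the UBFM opcode, source operand and LSB/MSB immediates. For
/// example, (and (srl x, 3), 0x1f) extracts bits [7:3] of x and is selected as
/// UBFMWri x, 3, 7.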
2531static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
2532 unsigned &Opc, SDValue &Opd0,
2533 unsigned &LSB, unsigned &MSB,
2534 unsigned NumberOfIgnoredLowBits,
2535 bool BiggerPattern) {
2536  assert(N->getOpcode() == ISD::AND &&
2537         "N must be an AND operation to call this function");
2538
2539 EVT VT = N->getValueType(ResNo: 0);
2540
2541  // We could test the type of VT and return false when it does not match,
2542  // but since that check is already done before this call in the current
2543  // context, we turn it into an assert to avoid redundant code.
2544 assert((VT == MVT::i32 || VT == MVT::i64) &&
2545 "Type checking must have been done before calling this function");
2546
2547 // FIXME: simplify-demanded-bits in DAGCombine will probably have
2548 // changed the AND node to a 32-bit mask operation. We'll have to
2549 // undo that as part of the transform here if we want to catch all
2550 // the opportunities.
2551 // Currently the NumberOfIgnoredLowBits argument helps to recover
2552 // from these situations when matching bigger pattern (bitfield insert).
2553
2554 // For unsigned extracts, check for a shift right and mask
2555 uint64_t AndImm = 0;
2556 if (!isOpcWithIntImmediate(N, Opc: ISD::AND, Imm&: AndImm))
2557 return false;
2558
2559 const SDNode *Op0 = N->getOperand(Num: 0).getNode();
2560
2561 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
2562 // simplified. Try to undo that
2563 AndImm |= maskTrailingOnes<uint64_t>(N: NumberOfIgnoredLowBits);
2564
2565 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
2566 if (AndImm & (AndImm + 1))
2567 return false;
2568
2569 bool ClampMSB = false;
2570 uint64_t SrlImm = 0;
2571 // Handle the SRL + ANY_EXTEND case.
2572 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
2573 isOpcWithIntImmediate(N: Op0->getOperand(Num: 0).getNode(), Opc: ISD::SRL, Imm&: SrlImm)) {
2574 // Extend the incoming operand of the SRL to 64-bit.
2575 Opd0 = Widen(CurDAG, N: Op0->getOperand(Num: 0).getOperand(i: 0));
2576 // Make sure to clamp the MSB so that we preserve the semantics of the
2577 // original operations.
2578 ClampMSB = true;
2579 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
2580 isOpcWithIntImmediate(N: Op0->getOperand(Num: 0).getNode(), Opc: ISD::SRL,
2581 Imm&: SrlImm)) {
2582 // If the shift result was truncated, we can still combine them.
2583 Opd0 = Op0->getOperand(Num: 0).getOperand(i: 0);
2584
2585 // Use the type of SRL node.
2586 VT = Opd0->getValueType(ResNo: 0);
2587 } else if (isOpcWithIntImmediate(N: Op0, Opc: ISD::SRL, Imm&: SrlImm)) {
2588 Opd0 = Op0->getOperand(Num: 0);
2589 ClampMSB = (VT == MVT::i32);
2590 } else if (BiggerPattern) {
2591 // Let's pretend a 0 shift right has been performed.
2592 // The resulting code will be at least as good as the original one
2593 // plus it may expose more opportunities for bitfield insert pattern.
2594 // FIXME: Currently we limit this to the bigger pattern, because
2595 // some optimizations expect AND and not UBFM.
2596 Opd0 = N->getOperand(Num: 0);
2597 } else
2598 return false;
2599
2600 // Bail out on large immediates. This happens when no proper
2601 // combining/constant folding was performed.
2602 if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
2603 LLVM_DEBUG(
2604 (dbgs() << N
2605 << ": Found large shift immediate, this should not happen\n"));
2606 return false;
2607 }
2608
2609 LSB = SrlImm;
2610 MSB = SrlImm +
2611 (VT == MVT::i32 ? llvm::countr_one<uint32_t>(Value: AndImm)
2612 : llvm::countr_one<uint64_t>(Value: AndImm)) -
2613 1;
2614 if (ClampMSB)
2615 // Since we're moving the extend before the right shift operation, we need
2616 // to clamp the MSB to make sure we don't shift in undefined bits instead of
2617 // the zeros which would get shifted in with the original right shift
2618 // operation.
2619 MSB = MSB > 31 ? 31 : MSB;
2620
2621 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2622 return true;
2623}
2624
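/// Match (sign_extend_inreg (srl/sra x, ShiftImm), vt) as a signed bitfield
/// extract: SBFM x, ShiftImm, ShiftImm + width - 1, where width is the number
/// of bits being sign-extended. E.g. (sign_extend_inreg (srl x, 4), i8)
/// becomes SBFMWri x, 4, 11.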
2625static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
2626 SDValue &Opd0, unsigned &Immr,
2627 unsigned &Imms) {
2628 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
2629
2630 EVT VT = N->getValueType(ResNo: 0);
2631 unsigned BitWidth = VT.getSizeInBits();
2632 assert((VT == MVT::i32 || VT == MVT::i64) &&
2633 "Type checking must have been done before calling this function");
2634
2635 SDValue Op = N->getOperand(Num: 0);
2636 if (Op->getOpcode() == ISD::TRUNCATE) {
2637 Op = Op->getOperand(Num: 0);
2638 VT = Op->getValueType(ResNo: 0);
2639 BitWidth = VT.getSizeInBits();
2640 }
2641
2642 uint64_t ShiftImm;
2643 if (!isOpcWithIntImmediate(N: Op.getNode(), Opc: ISD::SRL, Imm&: ShiftImm) &&
2644 !isOpcWithIntImmediate(N: Op.getNode(), Opc: ISD::SRA, Imm&: ShiftImm))
2645 return false;
2646
2647 unsigned Width = cast<VTSDNode>(Val: N->getOperand(Num: 1))->getVT().getSizeInBits();
2648 if (ShiftImm + Width > BitWidth)
2649 return false;
2650
2651 Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
2652 Opd0 = Op.getOperand(i: 0);
2653 Immr = ShiftImm;
2654 Imms = ShiftImm + Width - 1;
2655 return true;
2656}
2657
2658static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
2659 SDValue &Opd0, unsigned &LSB,
2660 unsigned &MSB) {
2661  // We are looking for the following pattern, which extracts several
2662  // contiguous bits from the source value and places them starting at the
2663  // LSB of the destination value; all other destination bits are set to zero:
2664  //
2665  // Value2 = AND Value, MaskImm
2666  // SRL Value2, ShiftImm
2667  //
2668  // where MaskImm >> ShiftImm gives the width of the extracted bitfield.
2669 //
2670 // This gets selected into a single UBFM:
2671 //
2672 // UBFM Value, ShiftImm, Log2_64(MaskImm)
2673 //
2674
2675 if (N->getOpcode() != ISD::SRL)
2676 return false;
2677
2678 uint64_t AndMask = 0;
2679 if (!isOpcWithIntImmediate(N: N->getOperand(Num: 0).getNode(), Opc: ISD::AND, Imm&: AndMask))
2680 return false;
2681
2682 Opd0 = N->getOperand(Num: 0).getOperand(i: 0);
2683
2684 uint64_t SrlImm = 0;
2685 if (!isIntImmediate(N: N->getOperand(Num: 1), Imm&: SrlImm))
2686 return false;
2687
2688 // Check whether we really have several bits extract here.
2689 if (!isMask_64(Value: AndMask >> SrlImm))
2690 return false;
2691
2692 Opc = N->getValueType(ResNo: 0) == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2693 LSB = SrlImm;
2694 MSB = llvm::Log2_64(Value: AndMask);
2695 return true;
2696}
2697
2698static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
2699 unsigned &Immr, unsigned &Imms,
2700 bool BiggerPattern) {
2701 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
2702 "N must be a SHR/SRA operation to call this function");
2703
2704 EVT VT = N->getValueType(ResNo: 0);
2705
2706  // We could test the type of VT and return false when it does not match,
2707  // but since that check is already done before this call in the current
2708  // context, we turn it into an assert to avoid redundant code.
2709 assert((VT == MVT::i32 || VT == MVT::i64) &&
2710 "Type checking must have been done before calling this function");
2711
2712 // Check for AND + SRL doing several bits extract.
2713 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, LSB&: Immr, MSB&: Imms))
2714 return true;
2715
2716 // We're looking for a shift of a shift.
2717 uint64_t ShlImm = 0;
2718 uint64_t TruncBits = 0;
2719 if (isOpcWithIntImmediate(N: N->getOperand(Num: 0).getNode(), Opc: ISD::SHL, Imm&: ShlImm)) {
2720 Opd0 = N->getOperand(Num: 0).getOperand(i: 0);
2721 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
2722 N->getOperand(Num: 0).getNode()->getOpcode() == ISD::TRUNCATE) {
2723 // We are looking for a shift of truncate. Truncate from i64 to i32 could
2724 // be considered as setting high 32 bits as zero. Our strategy here is to
2725 // always generate 64bit UBFM. This consistency will help the CSE pass
2726 // later find more redundancy.
2727 Opd0 = N->getOperand(Num: 0).getOperand(i: 0);
2728 TruncBits = Opd0->getValueType(ResNo: 0).getSizeInBits() - VT.getSizeInBits();
2729 VT = Opd0.getValueType();
2730 assert(VT == MVT::i64 && "the promoted type should be i64");
2731 } else if (BiggerPattern) {
2732 // Let's pretend a 0 shift left has been performed.
2733 // FIXME: Currently we limit this to the bigger pattern case,
2734 // because some optimizations expect AND and not UBFM
2735 Opd0 = N->getOperand(Num: 0);
2736 } else
2737 return false;
2738
2739 // Missing combines/constant folding may have left us with strange
2740 // constants.
2741 if (ShlImm >= VT.getSizeInBits()) {
2742 LLVM_DEBUG(
2743 (dbgs() << N
2744 << ": Found large shift immediate, this should not happen\n"));
2745 return false;
2746 }
2747
2748 uint64_t SrlImm = 0;
2749 if (!isIntImmediate(N: N->getOperand(Num: 1), Imm&: SrlImm))
2750 return false;
2751
2752 assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
2753 "bad amount in shift node!");
2754 int immr = SrlImm - ShlImm;
2755 Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
2756 Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
2757 // SRA requires a signed extraction
2758 if (VT == MVT::i32)
2759 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
2760 else
2761 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
2762 return true;
2763}
2764
2765bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
2766 assert(N->getOpcode() == ISD::SIGN_EXTEND);
2767
2768 EVT VT = N->getValueType(ResNo: 0);
2769 EVT NarrowVT = N->getOperand(Num: 0)->getValueType(ResNo: 0);
2770 if (VT != MVT::i64 || NarrowVT != MVT::i32)
2771 return false;
2772
2773 uint64_t ShiftImm;
2774 SDValue Op = N->getOperand(Num: 0);
2775 if (!isOpcWithIntImmediate(N: Op.getNode(), Opc: ISD::SRA, Imm&: ShiftImm))
2776 return false;
2777
2778 SDLoc dl(N);
2779 // Extend the incoming operand of the shift to 64-bits.
2780 SDValue Opd0 = Widen(CurDAG, N: Op.getOperand(i: 0));
2781 unsigned Immr = ShiftImm;
2782 unsigned Imms = NarrowVT.getSizeInBits() - 1;
2783 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Val: Immr, DL: dl, VT),
2784 CurDAG->getTargetConstant(Val: Imms, DL: dl, VT)};
2785 CurDAG->SelectNodeTo(N, MachineOpc: AArch64::SBFMXri, VT, Ops);
2786 return true;
2787}
2788
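/// Dispatch helper: match \p N (an AND, SRL/SRA, SIGN_EXTEND_INREG, or an
/// already-selected SBFM/UBFM machine node) as a bitfield extract and return
/// its opcode, source operand and immr/imms immediates.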
2789static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
2790 SDValue &Opd0, unsigned &Immr, unsigned &Imms,
2791 unsigned NumberOfIgnoredLowBits = 0,
2792 bool BiggerPattern = false) {
2793 if (N->getValueType(ResNo: 0) != MVT::i32 && N->getValueType(ResNo: 0) != MVT::i64)
2794 return false;
2795
2796 switch (N->getOpcode()) {
2797 default:
2798 if (!N->isMachineOpcode())
2799 return false;
2800 break;
2801 case ISD::AND:
2802 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB&: Immr, MSB&: Imms,
2803 NumberOfIgnoredLowBits, BiggerPattern);
2804 case ISD::SRL:
2805 case ISD::SRA:
2806 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
2807
2808 case ISD::SIGN_EXTEND_INREG:
2809 return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
2810 }
2811
2812 unsigned NOpc = N->getMachineOpcode();
2813 switch (NOpc) {
2814 default:
2815 return false;
2816 case AArch64::SBFMWri:
2817 case AArch64::UBFMWri:
2818 case AArch64::SBFMXri:
2819 case AArch64::UBFMXri:
2820 Opc = NOpc;
2821 Opd0 = N->getOperand(Num: 0);
2822 Immr = N->getConstantOperandVal(Num: 1);
2823 Imms = N->getConstantOperandVal(Num: 2);
2824 return true;
2825 }
2826 // Unreachable
2827 return false;
2828}
2829
2830bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
2831 unsigned Opc, Immr, Imms;
2832 SDValue Opd0;
2833 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
2834 return false;
2835
2836 EVT VT = N->getValueType(ResNo: 0);
2837 SDLoc dl(N);
2838
2839 // If the bit extract operation is 64bit but the original type is 32bit, we
2840 // need to add one EXTRACT_SUBREG.
2841 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
2842 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Val: Immr, DL: dl, VT: MVT::i64),
2843 CurDAG->getTargetConstant(Val: Imms, DL: dl, VT: MVT::i64)};
2844
2845 SDNode *BFM = CurDAG->getMachineNode(Opcode: Opc, dl, VT: MVT::i64, Ops: Ops64);
2846 SDValue Inner = CurDAG->getTargetExtractSubreg(SRIdx: AArch64::sub_32, DL: dl,
2847 VT: MVT::i32, Operand: SDValue(BFM, 0));
2848 ReplaceNode(F: N, T: Inner.getNode());
2849 return true;
2850 }
2851
2852 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Val: Immr, DL: dl, VT),
2853 CurDAG->getTargetConstant(Val: Imms, DL: dl, VT)};
2854 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
2855 return true;
2856}
2857
2858/// Does DstMask form a complementary pair with the mask provided by
2859/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
2860/// this asks whether DstMask zeroes precisely those bits that will be set by
2861/// the other half.
2862static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
2863 unsigned NumberOfIgnoredHighBits, EVT VT) {
2864 assert((VT == MVT::i32 || VT == MVT::i64) &&
2865 "i32 or i64 mask type expected!");
2866 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
2867
2868 // Enable implicitTrunc as we're intentionally ignoring high bits.
2869 APInt SignificantDstMask =
2870 APInt(BitWidth, DstMask, /*isSigned=*/false, /*implicitTrunc=*/true);
2871 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(width: BitWidth);
2872
2873 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
2874 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnes();
2875}
2876
2877// Look for bits that will be useful for later uses.
2878// A bit is considered useless as soon as it is dropped, provided it is
2879// never used before being dropped.
2880// E.g., looking for the useful bits of x:
2881// 1. y = x & 0x7
2882// 2. z = y >> 2
2883// After #1, the useful bits of x are 0x7; these useful bits of x live
2884// through y.
2885// After #2, the useful bits of x are 0x4.
2886// However, if x is used by an unpredictable instruction, then all its bits
2887// are useful.
2888// E.g.
2889// 1. y = x & 0x7
2890// 2. z = y >> 2
2891// 3. str x, [@x]
2892static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
2893
2894static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
2895 unsigned Depth) {
2896 uint64_t Imm =
2897 cast<const ConstantSDNode>(Val: Op.getOperand(i: 1).getNode())->getZExtValue();
2898 Imm = AArch64_AM::decodeLogicalImmediate(val: Imm, regSize: UsefulBits.getBitWidth());
2899 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
2900 getUsefulBits(Op, UsefulBits, Depth: Depth + 1);
2901}
2902
2903static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
2904 uint64_t Imm, uint64_t MSB,
2905 unsigned Depth) {
2906 // inherit the bitwidth value
2907 APInt OpUsefulBits(UsefulBits);
2908 OpUsefulBits = 1;
2909
2910 if (MSB >= Imm) {
2911 OpUsefulBits <<= MSB - Imm + 1;
2912 --OpUsefulBits;
2913 // The interesting part will be in the lower part of the result
2914 getUsefulBits(Op, UsefulBits&: OpUsefulBits, Depth: Depth + 1);
2915    // The interesting part started at Imm in the argument
2916 OpUsefulBits <<= Imm;
2917 } else {
2918 OpUsefulBits <<= MSB + 1;
2919 --OpUsefulBits;
2920 // The interesting part will be shifted in the result
2921 OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
2922 getUsefulBits(Op, UsefulBits&: OpUsefulBits, Depth: Depth + 1);
2923 // The interesting part was at zero in the argument
2924 OpUsefulBits.lshrInPlace(ShiftAmt: OpUsefulBits.getBitWidth() - Imm);
2925 }
2926
2927 UsefulBits &= OpUsefulBits;
2928}
2929
2930static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
2931 unsigned Depth) {
2932 uint64_t Imm =
2933 cast<const ConstantSDNode>(Val: Op.getOperand(i: 1).getNode())->getZExtValue();
2934 uint64_t MSB =
2935 cast<const ConstantSDNode>(Val: Op.getOperand(i: 2).getNode())->getZExtValue();
2936
2937 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
2938}
2939
2940static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
2941 unsigned Depth) {
2942 uint64_t ShiftTypeAndValue =
2943 cast<const ConstantSDNode>(Val: Op.getOperand(i: 2).getNode())->getZExtValue();
2944 APInt Mask(UsefulBits);
2945 Mask.clearAllBits();
2946 Mask.flipAllBits();
2947
2948 if (AArch64_AM::getShiftType(Imm: ShiftTypeAndValue) == AArch64_AM::LSL) {
2949 // Shift Left
2950 uint64_t ShiftAmt = AArch64_AM::getShiftValue(Imm: ShiftTypeAndValue);
2951 Mask <<= ShiftAmt;
2952 getUsefulBits(Op, UsefulBits&: Mask, Depth: Depth + 1);
2953 Mask.lshrInPlace(ShiftAmt);
2954 } else if (AArch64_AM::getShiftType(Imm: ShiftTypeAndValue) == AArch64_AM::LSR) {
2955 // Shift Right
2956 // We do not handle AArch64_AM::ASR, because the sign will change the
2957 // number of useful bits
2958 uint64_t ShiftAmt = AArch64_AM::getShiftValue(Imm: ShiftTypeAndValue);
2959 Mask.lshrInPlace(ShiftAmt);
2960 getUsefulBits(Op, UsefulBits&: Mask, Depth: Depth + 1);
2961 Mask <<= ShiftAmt;
2962 } else
2963 return;
2964
2965 UsefulBits &= Mask;
2966}
2967
2968static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
2969 unsigned Depth) {
2970 uint64_t Imm =
2971 cast<const ConstantSDNode>(Val: Op.getOperand(i: 2).getNode())->getZExtValue();
2972 uint64_t MSB =
2973 cast<const ConstantSDNode>(Val: Op.getOperand(i: 3).getNode())->getZExtValue();
2974
2975 APInt OpUsefulBits(UsefulBits);
2976 OpUsefulBits = 1;
2977
2978 APInt ResultUsefulBits(UsefulBits.getBitWidth(), 0);
2979 ResultUsefulBits.flipAllBits();
2980 APInt Mask(UsefulBits.getBitWidth(), 0);
2981
2982 getUsefulBits(Op, UsefulBits&: ResultUsefulBits, Depth: Depth + 1);
2983
2984 if (MSB >= Imm) {
2985 // The instruction is a BFXIL.
2986 uint64_t Width = MSB - Imm + 1;
2987 uint64_t LSB = Imm;
2988
2989 OpUsefulBits <<= Width;
2990 --OpUsefulBits;
2991
2992 if (Op.getOperand(i: 1) == Orig) {
2993 // Copy the low bits from the result to bits starting from LSB.
2994 Mask = ResultUsefulBits & OpUsefulBits;
2995 Mask <<= LSB;
2996 }
2997
2998 if (Op.getOperand(i: 0) == Orig)
2999 // Bits starting from LSB in the input contribute to the result.
3000 Mask |= (ResultUsefulBits & ~OpUsefulBits);
3001 } else {
3002 // The instruction is a BFI.
3003 uint64_t Width = MSB + 1;
3004 uint64_t LSB = UsefulBits.getBitWidth() - Imm;
3005
3006 OpUsefulBits <<= Width;
3007 --OpUsefulBits;
3008 OpUsefulBits <<= LSB;
3009
3010 if (Op.getOperand(i: 1) == Orig) {
3011 // Copy the bits from the result to the zero bits.
3012 Mask = ResultUsefulBits & OpUsefulBits;
3013 Mask.lshrInPlace(ShiftAmt: LSB);
3014 }
3015
3016 if (Op.getOperand(i: 0) == Orig)
3017 Mask |= (ResultUsefulBits & ~OpUsefulBits);
3018 }
3019
3020 UsefulBits &= Mask;
3021}
3022
3023static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
3024 SDValue Orig, unsigned Depth) {
3025
3026 // Users of this node should have already been instruction selected
3027 // FIXME: Can we turn that into an assert?
3028 if (!UserNode->isMachineOpcode())
3029 return;
3030
3031 switch (UserNode->getMachineOpcode()) {
3032 default:
3033 return;
3034 case AArch64::ANDSWri:
3035 case AArch64::ANDSXri:
3036 case AArch64::ANDWri:
3037 case AArch64::ANDXri:
3038 // We increment Depth only when we call getUsefulBits
3039 return getUsefulBitsFromAndWithImmediate(Op: SDValue(UserNode, 0), UsefulBits,
3040 Depth);
3041 case AArch64::UBFMWri:
3042 case AArch64::UBFMXri:
3043 return getUsefulBitsFromUBFM(Op: SDValue(UserNode, 0), UsefulBits, Depth);
3044
3045 case AArch64::ORRWrs:
3046 case AArch64::ORRXrs:
3047 if (UserNode->getOperand(Num: 0) != Orig && UserNode->getOperand(Num: 1) == Orig)
3048 getUsefulBitsFromOrWithShiftedReg(Op: SDValue(UserNode, 0), UsefulBits,
3049 Depth);
3050 return;
3051 case AArch64::BFMWri:
3052 case AArch64::BFMXri:
3053 return getUsefulBitsFromBFM(Op: SDValue(UserNode, 0), Orig, UsefulBits, Depth);
3054
3055 case AArch64::STRBBui:
3056 case AArch64::STURBBi:
3057 if (UserNode->getOperand(Num: 0) != Orig)
3058 return;
3059 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
3060 return;
3061
3062 case AArch64::STRHHui:
3063 case AArch64::STURHHi:
3064 if (UserNode->getOperand(Num: 0) != Orig)
3065 return;
3066 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
3067 return;
3068 }
3069}
3070
3071static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
3072 if (Depth >= SelectionDAG::MaxRecursionDepth)
3073 return;
3074 // Initialize UsefulBits
3075 if (!Depth) {
3076 unsigned Bitwidth = Op.getScalarValueSizeInBits();
3077 // At the beginning, assume every produced bit is useful
3078 UsefulBits = APInt(Bitwidth, 0);
3079 UsefulBits.flipAllBits();
3080 }
3081 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
3082
3083 for (SDNode *Node : Op.getNode()->users()) {
3084 // A use cannot produce useful bits
3085 APInt UsefulBitsForUse = APInt(UsefulBits);
3086 getUsefulBitsForUse(UserNode: Node, UsefulBits&: UsefulBitsForUse, Orig: Op, Depth);
3087 UsersUsefulBits |= UsefulBitsForUse;
3088 }
3089 // UsefulBits contains the produced bits that are meaningful for the
3090 // current definition, thus a user cannot make a bit meaningful at
3091 // this point
3092 UsefulBits &= UsersUsefulBits;
3093}
3094
3095/// Create a machine node performing a notional SHL of Op by ShlAmount. If
3096/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
3097/// 0, return Op unchanged.
3098static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
3099 if (ShlAmount == 0)
3100 return Op;
3101
3102 EVT VT = Op.getValueType();
3103 SDLoc dl(Op);
3104 unsigned BitWidth = VT.getSizeInBits();
3105 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
3106
3107 SDNode *ShiftNode;
3108 if (ShlAmount > 0) {
3109 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
3110 ShiftNode = CurDAG->getMachineNode(
3111 Opcode: UBFMOpc, dl, VT, Op1: Op,
3112 Op2: CurDAG->getTargetConstant(Val: BitWidth - ShlAmount, DL: dl, VT),
3113 Op3: CurDAG->getTargetConstant(Val: BitWidth - 1 - ShlAmount, DL: dl, VT));
3114 } else {
3115 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
3116 assert(ShlAmount < 0 && "expected right shift");
3117 int ShrAmount = -ShlAmount;
3118 ShiftNode = CurDAG->getMachineNode(
3119 Opcode: UBFMOpc, dl, VT, Op1: Op, Op2: CurDAG->getTargetConstant(Val: ShrAmount, DL: dl, VT),
3120 Op3: CurDAG->getTargetConstant(Val: BitWidth - 1, DL: dl, VT));
3121 }
3122
3123 return SDValue(ShiftNode, 0);
3124}
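// Illustrative encodings for the aliases above (32-bit case): ShlAmount = 3
// produces UBFM Wd, Wn, #29, #28 (the LSL #3 alias), while ShlAmount = -3
// produces UBFM Wd, Wn, #3, #31 (the LSR #3 alias).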
3125
3126// For bit-field-positioning pattern "(and (shl VAL, N), ShiftedMask)".
3127static bool isBitfieldPositioningOpFromAnd(SelectionDAG *CurDAG, SDValue Op,
3128 bool BiggerPattern,
3129 const uint64_t NonZeroBits,
3130 SDValue &Src, int &DstLSB,
3131 int &Width);
3132
3133 // For bit-field-positioning pattern "(shl VAL, N)".
3134static bool isBitfieldPositioningOpFromShl(SelectionDAG *CurDAG, SDValue Op,
3135 bool BiggerPattern,
3136 const uint64_t NonZeroBits,
3137 SDValue &Src, int &DstLSB,
3138 int &Width);
3139
3140/// Does this tree qualify as an attempt to move a bitfield into position,
3141 /// essentially "(and (shl VAL, N), Mask)" or "(shl VAL, N)".
3142static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
3143 bool BiggerPattern, SDValue &Src,
3144 int &DstLSB, int &Width) {
3145 EVT VT = Op.getValueType();
3146 unsigned BitWidth = VT.getSizeInBits();
3147 (void)BitWidth;
3148 assert(BitWidth == 32 || BitWidth == 64);
3149
3150 KnownBits Known = CurDAG->computeKnownBits(Op);
3151
3152 // Non-zero in the sense that they're not provably zero, which is the key
3153 // point if we want to use this value
3154 const uint64_t NonZeroBits = (~Known.Zero).getZExtValue();
3155 if (!isShiftedMask_64(Value: NonZeroBits))
3156 return false;
3157
3158 switch (Op.getOpcode()) {
3159 default:
3160 break;
3161 case ISD::AND:
3162 return isBitfieldPositioningOpFromAnd(CurDAG, Op, BiggerPattern,
3163 NonZeroBits, Src, DstLSB, Width);
3164 case ISD::SHL:
3165 return isBitfieldPositioningOpFromShl(CurDAG, Op, BiggerPattern,
3166 NonZeroBits, Src, DstLSB, Width);
3167 }
3168
3169 return false;
3170}
3171
3172static bool isBitfieldPositioningOpFromAnd(SelectionDAG *CurDAG, SDValue Op,
3173 bool BiggerPattern,
3174 const uint64_t NonZeroBits,
3175 SDValue &Src, int &DstLSB,
3176 int &Width) {
3177 assert(isShiftedMask_64(NonZeroBits) && "Caller guaranteed");
3178
3179 EVT VT = Op.getValueType();
3180 assert((VT == MVT::i32 || VT == MVT::i64) &&
3181 "Caller guarantees VT is one of i32 or i64");
3182 (void)VT;
3183
3184 uint64_t AndImm;
3185 if (!isOpcWithIntImmediate(N: Op.getNode(), Opc: ISD::AND, Imm&: AndImm))
3186 return false;
3187
3188 // If (~AndImm & NonZeroBits) is not zero at POS, we know that
3189 // 1) ((AndImm & (1 << POS)) == 0)
3190 // 2) the result of AND is not zero at POS bit (according to NonZeroBits)
3191 //
3192 // 1) and 2) don't agree so something must be wrong (e.g., in
3193 // 'SelectionDAG::computeKnownBits')
3194 assert((~AndImm & NonZeroBits) == 0 &&
3195 "Something must be wrong (e.g., in SelectionDAG::computeKnownBits)");
3196
3197 SDValue AndOp0 = Op.getOperand(i: 0);
3198
3199 uint64_t ShlImm;
3200 SDValue ShlOp0;
3201 if (isOpcWithIntImmediate(N: AndOp0.getNode(), Opc: ISD::SHL, Imm&: ShlImm)) {
3202 // For pattern "and(shl(val, N), shifted-mask)", 'ShlOp0' is set to 'val'.
3203 ShlOp0 = AndOp0.getOperand(i: 0);
3204 } else if (VT == MVT::i64 && AndOp0.getOpcode() == ISD::ANY_EXTEND &&
3205 isOpcWithIntImmediate(N: AndOp0.getOperand(i: 0).getNode(), Opc: ISD::SHL,
3206 Imm&: ShlImm)) {
3207 // For pattern "and(any_extend(shl(val, N)), shifted-mask)"
3208
3209 // ShlVal == shl(val, N), which is a left shift on a smaller type.
3210 SDValue ShlVal = AndOp0.getOperand(i: 0);
3211
3212 // Since this is after type legalization and ShlVal is extended to MVT::i64,
3213 // expect ShlVal's value type to be MVT::i32.
3214 assert((ShlVal.getValueType() == MVT::i32) && "Expect VT to be MVT::i32.");
3215
3216 // Widens 'val' to MVT::i64 as the source of bit field positioning.
3217 ShlOp0 = Widen(CurDAG, N: ShlVal.getOperand(i: 0));
3218 } else
3219 return false;
3220
3221 // For !BiggerPattern, bail out if the AndOp0 has more than one use, since
3222 // then we'll end up generating AndOp0+UBFIZ instead of just keeping
3223 // AndOp0+AND.
3224 if (!BiggerPattern && !AndOp0.hasOneUse())
3225 return false;
3226
3227 DstLSB = llvm::countr_zero(Val: NonZeroBits);
3228 Width = llvm::countr_one(Value: NonZeroBits >> DstLSB);
3229
3230 // Bail out on large Width. This happens when no proper combining / constant
3231 // folding was performed.
3232 if (Width >= (int)VT.getSizeInBits()) {
3233 // If VT is i64, Width > 64 is not possible since NonZeroBits is uint64_t, and
3234 // Width == 64 indicates a missed dag-combine from "(and val, AllOnes)" to
3235 // "val".
3236 // If VT is i32, Width >= 32 means:
3237 // - For "(and (any_extend(shl val, N)), shifted-mask)", the `and` Op
3238 // demands at least 'Width' bits (after dag-combine). This, together with the
3239 // `any_extend` Op (undefined higher bits), indicates a missed combination
3240 // when lowering the 'and' IR instruction to a machine IR instruction.
3241 LLVM_DEBUG(
3242 dbgs()
3243 << "Found large Width in bit-field-positioning -- this indicates no "
3244 "proper combining / constant folding was performed\n");
3245 return false;
3246 }
3247
3248 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
3249 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
3250 // amount. BiggerPattern is true when this pattern is being matched for BFI,
3251 // BiggerPattern is false when this pattern is being matched for UBFIZ, in
3252 // which case it is not profitable to insert an extra shift.
3253 if (ShlImm != uint64_t(DstLSB) && !BiggerPattern)
3254 return false;
3255
3256 Src = getLeftShift(CurDAG, Op: ShlOp0, ShlAmount: ShlImm - DstLSB);
3257 return true;
3258}
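// Worked example (illustrative, assuming nothing more is known about x): for
// i32 Op = (and (shl x, 2), 0x3c), NonZeroBits = 0x3c is a shifted mask, so
// DstLSB = 2 and Width = 4; since ShlImm == DstLSB no extra shift is needed
// and Src = x, i.e. the node behaves as a UBFIZ with lsb 2 and width 4.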
3259
3260 // For node (shl (and val, mask), N), returns true if the node is equivalent to
3261// UBFIZ.
3262static bool isSeveralBitsPositioningOpFromShl(const uint64_t ShlImm, SDValue Op,
3263 SDValue &Src, int &DstLSB,
3264 int &Width) {
3265 // Caller should have verified that Op is a left shift by the constant amount
3266 // ShlImm; the asserts below check this.
3267 assert(Op.getOpcode() == ISD::SHL &&
3268 "Op.getNode() should be a SHL node to call this function");
3269 assert(isIntImmediateEq(Op.getOperand(1), ShlImm) &&
3270 "Op.getNode() should shift ShlImm to call this function");
3271
3272 uint64_t AndImm = 0;
3273 SDValue Op0 = Op.getOperand(i: 0);
3274 if (!isOpcWithIntImmediate(N: Op0.getNode(), Opc: ISD::AND, Imm&: AndImm))
3275 return false;
3276
3277 const uint64_t ShiftedAndImm = ((AndImm << ShlImm) >> ShlImm);
3278 if (isMask_64(Value: ShiftedAndImm)) {
3279 // The bits of AndImm that survive the shift (its low 64 - ShlImm bits) must
3280 // form a mask, i.e. a run of ones starting at bit 0; AndImm's top ShlImm
3281 // bits may be arbitrary, since they are shifted out anyway.
3282 //
3283 // For example, with ShlImm >= 3, an AndImm of the form xyz1...1 (where
3284 // {x,y,z} are the top three bits, each 0 or 1) is fine: the AND result bits
3285 // corresponding to x, y and z are shifted out, so they need not be extracted.
3286 Width = llvm::countr_one(Value: ShiftedAndImm);
3287 DstLSB = ShlImm;
3288 Src = Op0.getOperand(i: 0);
3289 return true;
3290 }
3291 return false;
3292}
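// Worked example (illustrative): for i64 Op = (shl (and x, 0xff), 8),
// ShiftedAndImm = ((0xff << 8) >> 8) = 0xff is a mask, so Width = 8,
// DstLSB = 8 and Src = x, i.e. a UBFIZ with lsb 8 and width 8. An AndImm such
// as 0xff000000000000ff with ShlImm = 56 is also accepted, because the high
// byte is shifted out.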
3293
3294static bool isBitfieldPositioningOpFromShl(SelectionDAG *CurDAG, SDValue Op,
3295 bool BiggerPattern,
3296 const uint64_t NonZeroBits,
3297 SDValue &Src, int &DstLSB,
3298 int &Width) {
3299 assert(isShiftedMask_64(NonZeroBits) && "Caller guaranteed");
3300
3301 EVT VT = Op.getValueType();
3302 assert((VT == MVT::i32 || VT == MVT::i64) &&
3303 "Caller guarantees that type is i32 or i64");
3304 (void)VT;
3305
3306 uint64_t ShlImm;
3307 if (!isOpcWithIntImmediate(N: Op.getNode(), Opc: ISD::SHL, Imm&: ShlImm))
3308 return false;
3309
3310 if (!BiggerPattern && !Op.hasOneUse())
3311 return false;
3312
3313 if (isSeveralBitsPositioningOpFromShl(ShlImm, Op, Src, DstLSB, Width))
3314 return true;
3315
3316 DstLSB = llvm::countr_zero(Val: NonZeroBits);
3317 Width = llvm::countr_one(Value: NonZeroBits >> DstLSB);
3318
3319 if (ShlImm != uint64_t(DstLSB) && !BiggerPattern)
3320 return false;
3321
3322 Src = getLeftShift(CurDAG, Op: Op.getOperand(i: 0), ShlAmount: ShlImm - DstLSB);
3323 return true;
3324}
3325
3326static bool isShiftedMask(uint64_t Mask, EVT VT) {
3327 assert(VT == MVT::i32 || VT == MVT::i64);
3328 if (VT == MVT::i32)
3329 return isShiftedMask_32(Value: Mask);
3330 return isShiftedMask_64(Value: Mask);
3331}
3332
3333// Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
3334// inserted only sets known zero bits.
3335static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
3336 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
3337
3338 EVT VT = N->getValueType(ResNo: 0);
3339 if (VT != MVT::i32 && VT != MVT::i64)
3340 return false;
3341
3342 unsigned BitWidth = VT.getSizeInBits();
3343
3344 uint64_t OrImm;
3345 if (!isOpcWithIntImmediate(N, Opc: ISD::OR, Imm&: OrImm))
3346 return false;
3347
3348 // Skip this transformation if the OR immediate can already be encoded as an
3349 // ORR logical immediate. Otherwise, we'd trade an AND+ORR for ORR+BFI/BFXIL,
3350 // which is most likely performance neutral.
3351 if (AArch64_AM::isLogicalImmediate(imm: OrImm, regSize: BitWidth))
3352 return false;
3353
3354 uint64_t MaskImm;
3355 SDValue And = N->getOperand(Num: 0);
3356 // Must be a single use AND with an immediate operand.
3357 if (!And.hasOneUse() ||
3358 !isOpcWithIntImmediate(N: And.getNode(), Opc: ISD::AND, Imm&: MaskImm))
3359 return false;
3360
3361 // Compute the Known Zero for the AND as this allows us to catch more general
3362 // cases than just looking for AND with imm.
3363 KnownBits Known = CurDAG->computeKnownBits(Op: And);
3364
3365 // Non-zero in the sense that they're not provably zero, which is the key
3366 // point if we want to use this value.
3367 uint64_t NotKnownZero = (~Known.Zero).getZExtValue();
3368
3369 // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
3370 if (!isShiftedMask(Mask: Known.Zero.getZExtValue(), VT))
3371 return false;
3372
3373 // The bits being inserted must only set those bits that are known to be zero.
3374 if ((OrImm & NotKnownZero) != 0) {
3375 // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
3376 // currently handle this case.
3377 return false;
3378 }
3379
3380 // BFI/BFXIL dst, src, #lsb, #width.
3381 int LSB = llvm::countr_one(Value: NotKnownZero);
3382 int Width = BitWidth - APInt(BitWidth, NotKnownZero).popcount();
3383
3384 // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
3385 unsigned ImmR = (BitWidth - LSB) % BitWidth;
3386 unsigned ImmS = Width - 1;
3387
3388 // If we're creating a BFI instruction avoid cases where we need more
3389 // instructions to materialize the BFI constant as compared to the original
3390 // ORR. A BFXIL will use the same constant as the original ORR, so the code
3391 // should be no worse in this case.
3392 bool IsBFI = LSB != 0;
3393 uint64_t BFIImm = OrImm >> LSB;
3394 if (IsBFI && !AArch64_AM::isLogicalImmediate(imm: BFIImm, regSize: BitWidth)) {
3395 // We have a BFI instruction and we know the constant can't be materialized
3396 // with an ORR-immediate using the zero register.
3397 unsigned OrChunks = 0, BFIChunks = 0;
3398 for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
3399 if (((OrImm >> Shift) & 0xFFFF) != 0)
3400 ++OrChunks;
3401 if (((BFIImm >> Shift) & 0xFFFF) != 0)
3402 ++BFIChunks;
3403 }
3404 if (BFIChunks > OrChunks)
3405 return false;
3406 }
3407
3408 // Materialize the constant to be inserted.
3409 SDLoc DL(N);
3410 unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
3411 SDNode *MOVI = CurDAG->getMachineNode(
3412 Opcode: MOVIOpc, dl: DL, VT, Op1: CurDAG->getTargetConstant(Val: BFIImm, DL, VT));
3413
3414 // Create the BFI/BFXIL instruction.
3415 SDValue Ops[] = {And.getOperand(i: 0), SDValue(MOVI, 0),
3416 CurDAG->getTargetConstant(Val: ImmR, DL, VT),
3417 CurDAG->getTargetConstant(Val: ImmS, DL, VT)};
3418 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
3419 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
3420 return true;
3421}
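// Worked example (illustrative values, register numbers hypothetical): on i32,
//   or (and x, 0xfffff00f), 0xa50
// the AND has Known.Zero = 0xff0 (a shifted mask), so LSB = 4, Width = 8,
// BFIImm = 0xa5, ImmR = 28 and ImmS = 7; selection roughly yields
//   mov w8, #0xa5
//   bfi w0, w8, #4, #8   (the BFM W0, W8, #28, #7 alias)
// where the destination register is initialized with x.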
3422
3423static bool isWorthFoldingIntoOrrWithShift(SDValue Dst, SelectionDAG *CurDAG,
3424 SDValue &ShiftedOperand,
3425 uint64_t &EncodedShiftImm) {
3426 // Avoid folding Dst into ORR-with-shift if Dst has other uses than ORR.
3427 if (!Dst.hasOneUse())
3428 return false;
3429
3430 EVT VT = Dst.getValueType();
3431 assert((VT == MVT::i32 || VT == MVT::i64) &&
3432 "Caller should guarantee that VT is one of i32 or i64");
3433 const unsigned SizeInBits = VT.getSizeInBits();
3434
3435 SDLoc DL(Dst.getNode());
3436 uint64_t AndImm, ShlImm;
3437 if (isOpcWithIntImmediate(N: Dst.getNode(), Opc: ISD::AND, Imm&: AndImm) &&
3438 isShiftedMask_64(Value: AndImm)) {
3439 // Avoid transforming 'DstOp0' if it has other uses than the AND node.
3440 SDValue DstOp0 = Dst.getOperand(i: 0);
3441 if (!DstOp0.hasOneUse())
3442 return false;
3443
3444 // An example to illustrate the transformation
3445 // From:
3446 // lsr x8, x1, #1
3447 // and x8, x8, #0x3f80
3448 // bfxil x8, x1, #0, #7
3449 // To:
3450 // and x8, x1, #0x7f
3451 // ubfx x9, x1, #8, #7
3452 // orr x8, x8, x9, lsl #7
3453 //
3454 // The number of instructions remains the same, but ORR is faster than BFXIL
3455 // on many AArch64 processors (or as good as BFXIL if not faster). Besides,
3456 // the dependency chain is improved after the transformation.
3457 uint64_t SrlImm;
3458 if (isOpcWithIntImmediate(N: DstOp0.getNode(), Opc: ISD::SRL, Imm&: SrlImm)) {
3459 uint64_t NumTrailingZeroInShiftedMask = llvm::countr_zero(Val: AndImm);
3460 if ((SrlImm + NumTrailingZeroInShiftedMask) < SizeInBits) {
3461 unsigned MaskWidth =
3462 llvm::countr_one(Value: AndImm >> NumTrailingZeroInShiftedMask);
3463 unsigned UBFMOpc =
3464 (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
3465 SDNode *UBFMNode = CurDAG->getMachineNode(
3466 Opcode: UBFMOpc, dl: DL, VT, Op1: DstOp0.getOperand(i: 0),
3467 Op2: CurDAG->getTargetConstant(Val: SrlImm + NumTrailingZeroInShiftedMask, DL,
3468 VT),
3469 Op3: CurDAG->getTargetConstant(
3470 Val: SrlImm + NumTrailingZeroInShiftedMask + MaskWidth - 1, DL, VT));
3471 ShiftedOperand = SDValue(UBFMNode, 0);
3472 EncodedShiftImm = AArch64_AM::getShifterImm(
3473 ST: AArch64_AM::LSL, Imm: NumTrailingZeroInShiftedMask);
3474 return true;
3475 }
3476 }
3477 return false;
3478 }
3479
3480 if (isOpcWithIntImmediate(N: Dst.getNode(), Opc: ISD::SHL, Imm&: ShlImm)) {
3481 ShiftedOperand = Dst.getOperand(i: 0);
3482 EncodedShiftImm = AArch64_AM::getShifterImm(ST: AArch64_AM::LSL, Imm: ShlImm);
3483 return true;
3484 }
3485
3486 uint64_t SrlImm;
3487 if (isOpcWithIntImmediate(N: Dst.getNode(), Opc: ISD::SRL, Imm&: SrlImm)) {
3488 ShiftedOperand = Dst.getOperand(i: 0);
3489 EncodedShiftImm = AArch64_AM::getShifterImm(ST: AArch64_AM::LSR, Imm: SrlImm);
3490 return true;
3491 }
3492 return false;
3493}
3494
3495// Given an 'ISD::OR' node that is going to be selected as BFM, analyze
3496// the operands and select it to AArch64::ORR with shifted registers if
3497// that's more efficient. Returns true iff selection to AArch64::ORR happens.
3498static bool tryOrrWithShift(SDNode *N, SDValue OrOpd0, SDValue OrOpd1,
3499 SDValue Src, SDValue Dst, SelectionDAG *CurDAG,
3500 const bool BiggerPattern) {
3501 EVT VT = N->getValueType(ResNo: 0);
3502 assert(N->getOpcode() == ISD::OR && "Expect N to be an OR node");
3503 assert(((N->getOperand(0) == OrOpd0 && N->getOperand(1) == OrOpd1) ||
3504 (N->getOperand(1) == OrOpd0 && N->getOperand(0) == OrOpd1)) &&
3505 "Expect OrOpd0 and OrOpd1 to be operands of ISD::OR");
3506 assert((VT == MVT::i32 || VT == MVT::i64) &&
3507 "Expect result type to be i32 or i64 since N is combinable to BFM");
3508 SDLoc DL(N);
3509
3510 // Bail out if BFM simplifies away one node in BFM Dst.
3511 if (OrOpd1 != Dst)
3512 return false;
3513
3514 const unsigned OrrOpc = (VT == MVT::i32) ? AArch64::ORRWrs : AArch64::ORRXrs;
3515 // For "BFM Rd, Rn, #immr, #imms", it's known that BFM simplifies away fewer
3516 // nodes from Rn (or inserts additional shift node) if BiggerPattern is true.
3517 if (BiggerPattern) {
3518 uint64_t SrcAndImm;
3519 if (isOpcWithIntImmediate(N: OrOpd0.getNode(), Opc: ISD::AND, Imm&: SrcAndImm) &&
3520 isMask_64(Value: SrcAndImm) && OrOpd0.getOperand(i: 0) == Src) {
3521 // OrOpd0 = AND Src, #Mask
3522 // So BFM simplifies away one AND node from Src and doesn't simplify away
3523 // nodes from Dst. If ORR with left-shifted operand also simplifies away
3524 // one node (from Rd), ORR is better since it has higher throughput and
3525 // smaller latency than BFM on many AArch64 processors (and for the rest
3526 // ORR is at least as good as BFM).
3527 SDValue ShiftedOperand;
3528 uint64_t EncodedShiftImm;
3529 if (isWorthFoldingIntoOrrWithShift(Dst, CurDAG, ShiftedOperand,
3530 EncodedShiftImm)) {
3531 SDValue Ops[] = {OrOpd0, ShiftedOperand,
3532 CurDAG->getTargetConstant(Val: EncodedShiftImm, DL, VT)};
3533 CurDAG->SelectNodeTo(N, MachineOpc: OrrOpc, VT, Ops);
3534 return true;
3535 }
3536 }
3537 return false;
3538 }
3539
3540 assert((!BiggerPattern) && "BiggerPattern should be handled above");
3541
3542 uint64_t ShlImm;
3543 if (isOpcWithIntImmediate(N: OrOpd0.getNode(), Opc: ISD::SHL, Imm&: ShlImm)) {
3544 if (OrOpd0.getOperand(i: 0) == Src && OrOpd0.hasOneUse()) {
3545 SDValue Ops[] = {
3546 Dst, Src,
3547 CurDAG->getTargetConstant(
3548 Val: AArch64_AM::getShifterImm(ST: AArch64_AM::LSL, Imm: ShlImm), DL, VT)};
3549 CurDAG->SelectNodeTo(N, MachineOpc: OrrOpc, VT, Ops);
3550 return true;
3551 }
3552
3553 // Select the following pattern to left-shifted operand rather than BFI.
3554 // %val1 = op ..
3555 // %val2 = shl %val1, #imm
3556 // %res = or %val1, %val2
3557 //
3558 // If N is selected to be BFI, we know that
3559 // 1) OrOpd0 would be the operand from which bits are extracted (i.e., folded
3560 // into BFI), and 2) OrOpd1 would be the destination operand (i.e., preserved)
3561 //
3562 // Instead of selecting N to BFI, fold OrOpd0 as a left shift directly.
3563 if (OrOpd0.getOperand(i: 0) == OrOpd1) {
3564 SDValue Ops[] = {
3565 OrOpd1, OrOpd1,
3566 CurDAG->getTargetConstant(
3567 Val: AArch64_AM::getShifterImm(ST: AArch64_AM::LSL, Imm: ShlImm), DL, VT)};
3568 CurDAG->SelectNodeTo(N, MachineOpc: OrrOpc, VT, Ops);
3569 return true;
3570 }
3571 }
3572
3573 uint64_t SrlImm;
3574 if (isOpcWithIntImmediate(N: OrOpd0.getNode(), Opc: ISD::SRL, Imm&: SrlImm)) {
3575 // Select the following pattern to right-shifted operand rather than BFXIL.
3576 // %val1 = op ..
3577 // %val2 = lshr %val1, #imm
3578 // %res = or %val1, %val2
3579 //
3580 // If N is selected to be BFXIL, we know that
3581 // 1) OrOpd0 would be the operand from which bits are extracted (i.e., folded
3582 // into BFXIL), and 2) OrOpd1 would be the destination operand (i.e., preserved)
3583 //
3584 // Instead of selecting N to BFXIL, fold OrOpd0 as a right shift directly.
3585 if (OrOpd0.getOperand(i: 0) == OrOpd1) {
3586 SDValue Ops[] = {
3587 OrOpd1, OrOpd1,
3588 CurDAG->getTargetConstant(
3589 Val: AArch64_AM::getShifterImm(ST: AArch64_AM::LSR, Imm: SrlImm), DL, VT)};
3590 CurDAG->SelectNodeTo(N, MachineOpc: OrrOpc, VT, Ops);
3591 return true;
3592 }
3593 }
3594
3595 return false;
3596}
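// Illustrative instance of the left-shift fold described above (register
// names hypothetical): with #imm = 3, the pattern
//   %val2 = shl %val1, 3
//   %res  = or %val1, %val2
// is selected as orr xd, x1, x1, lsl #3 rather than a BFI.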
3597
3598static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
3599 SelectionDAG *CurDAG) {
3600 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
3601
3602 EVT VT = N->getValueType(ResNo: 0);
3603 if (VT != MVT::i32 && VT != MVT::i64)
3604 return false;
3605
3606 unsigned BitWidth = VT.getSizeInBits();
3607
3608 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
3609 // have the expected shape. Try to undo that.
3610
3611 unsigned NumberOfIgnoredLowBits = UsefulBits.countr_zero();
3612 unsigned NumberOfIgnoredHighBits = UsefulBits.countl_zero();
3613
3614 // Given an OR operation, check if we have the following pattern
3615 // ubfm c, b, imm, imm2 (or something that does the same job, see
3616 // isBitfieldExtractOp)
3617 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
3618 // countTrailingZeros(mask2) == imm2 - imm + 1
3619 // f = d | c
3620 // if yes, replace the OR instruction with:
3621 // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2
3622
3623 // OR is commutative, check all combinations of operand order and values of
3624 // BiggerPattern, i.e.
3625 // Opd0, Opd1, BiggerPattern=false
3626 // Opd1, Opd0, BiggerPattern=false
3627 // Opd0, Opd1, BiggerPattern=true
3628 // Opd1, Opd0, BiggerPattern=true
3629 // Several of these combinations may match, so check with BiggerPattern=false
3630 // first since that will produce better results by matching more instructions
3631 // and/or inserting fewer extra instructions.
3632 for (int I = 0; I < 4; ++I) {
3633
3634 SDValue Dst, Src;
3635 unsigned ImmR, ImmS;
3636 bool BiggerPattern = I / 2;
3637 SDValue OrOpd0Val = N->getOperand(Num: I % 2);
3638 SDNode *OrOpd0 = OrOpd0Val.getNode();
3639 SDValue OrOpd1Val = N->getOperand(Num: (I + 1) % 2);
3640 SDNode *OrOpd1 = OrOpd1Val.getNode();
3641
3642 unsigned BFXOpc;
3643 int DstLSB, Width;
3644 if (isBitfieldExtractOp(CurDAG, N: OrOpd0, Opc&: BFXOpc, Opd0&: Src, Immr&: ImmR, Imms&: ImmS,
3645 NumberOfIgnoredLowBits, BiggerPattern)) {
3646 // Check that the returned opcode is compatible with the pattern,
3647 // i.e., same type and zero extended (U and not S)
3648 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
3649 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
3650 continue;
3651
3652 // Compute the width of the bitfield insertion
3653 DstLSB = 0;
3654 Width = ImmS - ImmR + 1;
3655 // FIXME: This constraint is to catch bitfield insertion; we may
3656 // want to widen the pattern to grab the general bitfield
3657 // move case.
3658 if (Width <= 0)
3659 continue;
3660
3661 // If the mask on the insertee is correct, we have a BFXIL operation. We
3662 // can share the ImmR and ImmS values from the already-computed UBFM.
3663 } else if (isBitfieldPositioningOp(CurDAG, Op: OrOpd0Val,
3664 BiggerPattern,
3665 Src, DstLSB, Width)) {
3666 ImmR = (BitWidth - DstLSB) % BitWidth;
3667 ImmS = Width - 1;
3668 } else
3669 continue;
3670
3671 // Check the second part of the pattern
3672 EVT VT = OrOpd1Val.getValueType();
3673 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
3674
3675 // Compute the Known Zero for the candidate of the first operand.
3676 // This allows us to catch more general cases than just looking for
3677 // AND with imm. Indeed, simplify-demanded-bits may have removed
3678 // the AND instruction because it proves it was useless.
3679 KnownBits Known = CurDAG->computeKnownBits(Op: OrOpd1Val);
3680
3681 // Check if there is enough room for the second operand to appear
3682 // in the first one
3683 APInt BitsToBeInserted =
3684 APInt::getBitsSet(numBits: Known.getBitWidth(), loBit: DstLSB, hiBit: DstLSB + Width);
3685
3686 if ((BitsToBeInserted & ~Known.Zero) != 0)
3687 continue;
3688
3689 // Set the first operand
3690 uint64_t Imm;
3691 if (isOpcWithIntImmediate(N: OrOpd1, Opc: ISD::AND, Imm) &&
3692 isBitfieldDstMask(DstMask: Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
3693 // In that case, we can eliminate the AND
3694 Dst = OrOpd1->getOperand(Num: 0);
3695 else
3696 // Maybe the AND has been removed by simplify-demanded-bits
3697 // or is useful because it discards more bits
3698 Dst = OrOpd1Val;
3699
3700 // Before selecting ISD::OR node to AArch64::BFM, see if an AArch64::ORR
3701 // with shifted operand is more efficient.
3702 if (tryOrrWithShift(N, OrOpd0: OrOpd0Val, OrOpd1: OrOpd1Val, Src, Dst, CurDAG,
3703 BiggerPattern))
3704 return true;
3705
3706 // both parts match
3707 SDLoc DL(N);
3708 SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(Val: ImmR, DL, VT),
3709 CurDAG->getTargetConstant(Val: ImmS, DL, VT)};
3710 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
3711 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
3712 return true;
3713 }
3714
3715 // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
3716 // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
3717 // mask (e.g., 0x000ffff0).
3718 uint64_t Mask0Imm, Mask1Imm;
3719 SDValue And0 = N->getOperand(Num: 0);
3720 SDValue And1 = N->getOperand(Num: 1);
3721 if (And0.hasOneUse() && And1.hasOneUse() &&
3722 isOpcWithIntImmediate(N: And0.getNode(), Opc: ISD::AND, Imm&: Mask0Imm) &&
3723 isOpcWithIntImmediate(N: And1.getNode(), Opc: ISD::AND, Imm&: Mask1Imm) &&
3724 APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
3725 (isShiftedMask(Mask: Mask0Imm, VT) || isShiftedMask(Mask: Mask1Imm, VT))) {
3726
3727 // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
3728 // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
3729 // bits to be inserted.
3730 if (isShiftedMask(Mask: Mask0Imm, VT)) {
3731 std::swap(a&: And0, b&: And1);
3732 std::swap(a&: Mask0Imm, b&: Mask1Imm);
3733 }
3734
3735 SDValue Src = And1->getOperand(Num: 0);
3736 SDValue Dst = And0->getOperand(Num: 0);
3737 unsigned LSB = llvm::countr_zero(Val: Mask1Imm);
3738 int Width = BitWidth - APInt(BitWidth, Mask0Imm).popcount();
3739
3740 // The BFXIL inserts the low-order bits from a source register, so right
3741 // shift the needed bits into place.
3742 SDLoc DL(N);
3743 unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
3744 uint64_t LsrImm = LSB;
3745 if (Src->hasOneUse() &&
3746 isOpcWithIntImmediate(N: Src.getNode(), Opc: ISD::SRL, Imm&: LsrImm) &&
3747 (LsrImm + LSB) < BitWidth) {
3748 Src = Src->getOperand(Num: 0);
3749 LsrImm += LSB;
3750 }
3751
3752 SDNode *LSR = CurDAG->getMachineNode(
3753 Opcode: ShiftOpc, dl: DL, VT, Op1: Src, Op2: CurDAG->getTargetConstant(Val: LsrImm, DL, VT),
3754 Op3: CurDAG->getTargetConstant(Val: BitWidth - 1, DL, VT));
3755
3756 // BFXIL is an alias of BFM, so translate to BFM operands.
3757 unsigned ImmR = (BitWidth - LSB) % BitWidth;
3758 unsigned ImmS = Width - 1;
3759
3760 // Create the BFXIL instruction.
3761 SDValue Ops[] = {Dst, SDValue(LSR, 0),
3762 CurDAG->getTargetConstant(Val: ImmR, DL, VT),
3763 CurDAG->getTargetConstant(Val: ImmS, DL, VT)};
3764 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
3765 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
3766 return true;
3767 }
3768
3769 return false;
3770}
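// Worked example for the two-AND pattern above (illustrative masks, assuming
// each AND has a single use): on i32,
//   or (and x, 0xff0000ff), (and y, 0x00ffff00)
// has Mask1Imm = 0x00ffff00 as the shifted mask, so LSB = 8 and Width = 16;
// y is first shifted right by 8 and then inserted into x with
// BFM Wd, Wn, #24, #15 (ImmR = 24, ImmS = 15).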
3771
3772bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
3773 if (N->getOpcode() != ISD::OR)
3774 return false;
3775
3776 APInt NUsefulBits;
3777 getUsefulBits(Op: SDValue(N, 0), UsefulBits&: NUsefulBits);
3778
3779 // If all bits are not useful, just return UNDEF.
3780 if (!NUsefulBits) {
3781 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::IMPLICIT_DEF, VT: N->getValueType(ResNo: 0));
3782 return true;
3783 }
3784
3785 if (tryBitfieldInsertOpFromOr(N, UsefulBits: NUsefulBits, CurDAG))
3786 return true;
3787
3788 return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
3789}
3790
3791 /// tryBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
3792/// equivalent of a left shift by a constant amount followed by an and masking
3793/// out a contiguous set of bits.
3794bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
3795 if (N->getOpcode() != ISD::AND)
3796 return false;
3797
3798 EVT VT = N->getValueType(ResNo: 0);
3799 if (VT != MVT::i32 && VT != MVT::i64)
3800 return false;
3801
3802 SDValue Op0;
3803 int DstLSB, Width;
3804 if (!isBitfieldPositioningOp(CurDAG, Op: SDValue(N, 0), /*BiggerPattern=*/false,
3805 Src&: Op0, DstLSB, Width))
3806 return false;
3807
3808 // ImmR is the rotate right amount.
3809 unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
3810 // ImmS is the most significant bit of the source to be moved.
3811 unsigned ImmS = Width - 1;
3812
3813 SDLoc DL(N);
3814 SDValue Ops[] = {Op0, CurDAG->getTargetConstant(Val: ImmR, DL, VT),
3815 CurDAG->getTargetConstant(Val: ImmS, DL, VT)};
3816 unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
3817 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
3818 return true;
3819}
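// Example of the operand translation above (illustrative): on i32, a
// positioning op with DstLSB = 4 and Width = 8 gives ImmR = (32 - 4) % 32 = 28
// and ImmS = 7, i.e. the UBFIZ Wd, Wn, #4, #8 alias of UBFM.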
3820
3821/// tryShiftAmountMod - Take advantage of built-in mod of shift amount in
3822/// variable shift/rotate instructions.
3823bool AArch64DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
3824 EVT VT = N->getValueType(ResNo: 0);
3825
3826 unsigned Opc;
3827 switch (N->getOpcode()) {
3828 case ISD::ROTR:
3829 Opc = (VT == MVT::i32) ? AArch64::RORVWr : AArch64::RORVXr;
3830 break;
3831 case ISD::SHL:
3832 Opc = (VT == MVT::i32) ? AArch64::LSLVWr : AArch64::LSLVXr;
3833 break;
3834 case ISD::SRL:
3835 Opc = (VT == MVT::i32) ? AArch64::LSRVWr : AArch64::LSRVXr;
3836 break;
3837 case ISD::SRA:
3838 Opc = (VT == MVT::i32) ? AArch64::ASRVWr : AArch64::ASRVXr;
3839 break;
3840 default:
3841 return false;
3842 }
3843
3844 uint64_t Size;
3845 uint64_t Bits;
3846 if (VT == MVT::i32) {
3847 Bits = 5;
3848 Size = 32;
3849 } else if (VT == MVT::i64) {
3850 Bits = 6;
3851 Size = 64;
3852 } else
3853 return false;
3854
3855 SDValue ShiftAmt = N->getOperand(Num: 1);
3856 SDLoc DL(N);
3857 SDValue NewShiftAmt;
3858
3859 // Skip over an extend of the shift amount.
3860 if (ShiftAmt->getOpcode() == ISD::ZERO_EXTEND ||
3861 ShiftAmt->getOpcode() == ISD::ANY_EXTEND)
3862 ShiftAmt = ShiftAmt->getOperand(Num: 0);
3863
3864 if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
3865 SDValue Add0 = ShiftAmt->getOperand(Num: 0);
3866 SDValue Add1 = ShiftAmt->getOperand(Num: 1);
3867 uint64_t Add0Imm;
3868 uint64_t Add1Imm;
3869 if (isIntImmediate(N: Add1, Imm&: Add1Imm) && (Add1Imm % Size == 0)) {
3870 // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
3871 // to avoid the ADD/SUB.
3872 NewShiftAmt = Add0;
3873 } else if (ShiftAmt->getOpcode() == ISD::SUB &&
3874 isIntImmediate(N: Add0, Imm&: Add0Imm) && Add0Imm != 0 &&
3875 (Add0Imm % Size == 0)) {
3876 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
3877 // to generate a NEG instead of a SUB from a constant.
3878 unsigned NegOpc;
3879 unsigned ZeroReg;
3880 EVT SubVT = ShiftAmt->getValueType(ResNo: 0);
3881 if (SubVT == MVT::i32) {
3882 NegOpc = AArch64::SUBWrr;
3883 ZeroReg = AArch64::WZR;
3884 } else {
3885 assert(SubVT == MVT::i64);
3886 NegOpc = AArch64::SUBXrr;
3887 ZeroReg = AArch64::XZR;
3888 }
3889 SDValue Zero =
3890 CurDAG->getCopyFromReg(Chain: CurDAG->getEntryNode(), dl: DL, Reg: ZeroReg, VT: SubVT);
3891 MachineSDNode *Neg =
3892 CurDAG->getMachineNode(Opcode: NegOpc, dl: DL, VT: SubVT, Op1: Zero, Op2: Add1);
3893 NewShiftAmt = SDValue(Neg, 0);
3894 } else if (ShiftAmt->getOpcode() == ISD::SUB &&
3895 isIntImmediate(N: Add0, Imm&: Add0Imm) && (Add0Imm % Size == Size - 1)) {
3896 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
3897 // to generate a NOT instead of a SUB from a constant.
3898 unsigned NotOpc;
3899 unsigned ZeroReg;
3900 EVT SubVT = ShiftAmt->getValueType(ResNo: 0);
3901 if (SubVT == MVT::i32) {
3902 NotOpc = AArch64::ORNWrr;
3903 ZeroReg = AArch64::WZR;
3904 } else {
3905 assert(SubVT == MVT::i64);
3906 NotOpc = AArch64::ORNXrr;
3907 ZeroReg = AArch64::XZR;
3908 }
3909 SDValue Zero =
3910 CurDAG->getCopyFromReg(Chain: CurDAG->getEntryNode(), dl: DL, Reg: ZeroReg, VT: SubVT);
3911 MachineSDNode *Not =
3912 CurDAG->getMachineNode(Opcode: NotOpc, dl: DL, VT: SubVT, Op1: Zero, Op2: Add1);
3913 NewShiftAmt = SDValue(Not, 0);
3914 } else
3915 return false;
3916 } else {
3917 // If the shift amount is masked with an AND, check that the mask covers the
3918 // bits that are implicitly ANDed off by the above opcodes and if so, skip
3919 // the AND.
3920 uint64_t MaskImm;
3921 if (!isOpcWithIntImmediate(N: ShiftAmt.getNode(), Opc: ISD::AND, Imm&: MaskImm) &&
3922 !isOpcWithIntImmediate(N: ShiftAmt.getNode(), Opc: AArch64ISD::ANDS, Imm&: MaskImm))
3923 return false;
3924
3925 if ((unsigned)llvm::countr_one(Value: MaskImm) < Bits)
3926 return false;
3927
3928 NewShiftAmt = ShiftAmt->getOperand(Num: 0);
3929 }
3930
3931 // Narrow/widen the shift amount to match the size of the shift operation.
3932 if (VT == MVT::i32)
3933 NewShiftAmt = narrowIfNeeded(CurDAG, N: NewShiftAmt);
3934 else if (VT == MVT::i64 && NewShiftAmt->getValueType(ResNo: 0) == MVT::i32) {
3935 SDValue SubReg = CurDAG->getTargetConstant(Val: AArch64::sub_32, DL, VT: MVT::i32);
3936 MachineSDNode *Ext = CurDAG->getMachineNode(
3937 Opcode: AArch64::SUBREG_TO_REG, dl: DL, VT,
3938 Op1: CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64), Op2: NewShiftAmt, Op3: SubReg);
3939 NewShiftAmt = SDValue(Ext, 0);
3940 }
3941
3942 SDValue Ops[] = {N->getOperand(Num: 0), NewShiftAmt};
3943 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
3944 return true;
3945}
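// Illustrative cases for the shift-amount rewrites above (register names
// hypothetical): for an i64 shift,
//   lsr x0, x1, (and x2, 63)
// drops the AND because LSRV already uses only the low 6 bits, and for i32
//   lsl w0, w1, (sub 32, w2)
// becomes neg w8, w2 followed by lslv w0, w1, w8.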
3946
3947static bool checkCVTFixedPointOperandWithFBits(SelectionDAG *CurDAG, SDValue N,
3948 SDValue &FixedPos,
3949 unsigned RegWidth,
3950 bool isReciprocal) {
3951 APFloat FVal(0.0);
3952 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(Val&: N))
3953 FVal = CN->getValueAPF();
3954 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(Val&: N)) {
3955 // Some otherwise illegal constants are allowed in this case.
3956 if (LN->getOperand(Num: 1).getOpcode() != AArch64ISD::ADDlow ||
3957 !isa<ConstantPoolSDNode>(Val: LN->getOperand(Num: 1)->getOperand(Num: 1)))
3958 return false;
3959
3960 ConstantPoolSDNode *CN =
3961 dyn_cast<ConstantPoolSDNode>(Val: LN->getOperand(Num: 1)->getOperand(Num: 1));
3962 FVal = cast<ConstantFP>(Val: CN->getConstVal())->getValueAPF();
3963 } else
3964 return false;
3965
3966 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
3967 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
3968 // x-register.
3969 //
3970 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
3971 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
3972 // integers.
3973 bool IsExact;
3974
3975 if (isReciprocal)
3976 if (!FVal.getExactInverse(inv: &FVal))
3977 return false;
3978
3979 // fbits is between 1 and 64 in the worst-case, which means the fmul
3980 // could have 2^64 as an actual operand. Need 65 bits of precision.
3981 APSInt IntVal(65, true);
3982 FVal.convertToInteger(Result&: IntVal, RM: APFloat::rmTowardZero, IsExact: &IsExact);
3983
3984 // N.b. isPowerOf2 also checks for > 0.
3985 if (!IsExact || !IntVal.isPowerOf2())
3986 return false;
3987 unsigned FBits = IntVal.logBase2();
3988
3989 // Checks above should have guaranteed that we haven't lost information in
3990 // finding FBits, but it must still be in range.
3991 if (FBits == 0 || FBits > RegWidth) return false;
3992
3993 FixedPos = CurDAG->getTargetConstant(Val: FBits, DL: SDLoc(N), VT: MVT::i32);
3994 return true;
3995}
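// Illustrative use (values chosen for exposition): (fp_to_sint (fmul x,
// 65536.0)) with a 32-bit destination gives FBits = 16, enabling the
// fixed-point form fcvtzs wd, sn, #16; the reciprocal variant accepts the
// matching 2^-fbits constants (e.g. 1.0 / 65536.0) for the scvtf forms.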
3996
3997bool AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
3998 unsigned RegWidth) {
3999 return checkCVTFixedPointOperandWithFBits(CurDAG, N, FixedPos, RegWidth,
4000 isReciprocal: false);
4001}
4002
4003bool AArch64DAGToDAGISel::SelectCVTFixedPosRecipOperand(SDValue N,
4004 SDValue &FixedPos,
4005 unsigned RegWidth) {
4006 return checkCVTFixedPointOperandWithFBits(CurDAG, N, FixedPos, RegWidth,
4007 isReciprocal: true);
4008}
4009
4010 // Inspects a register string of the form o0:op1:CRn:CRm:op2, gets the fields
4011 // of the string, obtains the integer values from them, and combines these
4012 // into a single value to be used in the MRS/MSR instruction.
4013static int getIntOperandFromRegisterString(StringRef RegString) {
4014 SmallVector<StringRef, 5> Fields;
4015 RegString.split(A&: Fields, Separator: ':');
4016
4017 if (Fields.size() == 1)
4018 return -1;
4019
4020 assert(Fields.size() == 5
4021 && "Invalid number of fields in read register string");
4022
4023 SmallVector<int, 5> Ops;
4024 bool AllIntFields = true;
4025
4026 for (StringRef Field : Fields) {
4027 unsigned IntField;
4028 AllIntFields &= !Field.getAsInteger(Radix: 10, Result&: IntField);
4029 Ops.push_back(Elt: IntField);
4030 }
4031
4032 assert(AllIntFields &&
4033 "Unexpected non-integer value in special register string.");
4034 (void)AllIntFields;
4035
4036 // Need to combine the integer fields of the string into a single value
4037 // based on the bit encoding of MRS/MSR instruction.
4038 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
4039 (Ops[3] << 3) | (Ops[4]);
4040}
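// For example, the (hypothetical) string "3:3:4:4:1" yields
// (3 << 14) | (3 << 11) | (4 << 7) | (4 << 3) | 1 = 0xda21.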
4041
4042// Lower the read_register intrinsic to an MRS instruction node if the special
4043 // register string argument is either of the form detailed in the ACLE (the
4044 // form described in getIntOperandFromRegisterString) or is a named register
4045// known by the MRS SysReg mapper.
4046bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
4047 const auto *MD = cast<MDNodeSDNode>(Val: N->getOperand(Num: 1));
4048 const auto *RegString = cast<MDString>(Val: MD->getMD()->getOperand(I: 0));
4049 SDLoc DL(N);
4050
4051 bool ReadIs128Bit = N->getOpcode() == AArch64ISD::MRRS;
4052
4053 unsigned Opcode64Bit = AArch64::MRS;
4054 int Imm = getIntOperandFromRegisterString(RegString: RegString->getString());
4055 if (Imm == -1) {
4056 // No match. Use the sysreg mapper to map the remaining possible strings to
4057 // the value for the register to be used for the instruction operand.
4058 const auto *TheReg =
4059 AArch64SysReg::lookupSysRegByName(Name: RegString->getString());
4060 if (TheReg && TheReg->Readable &&
4061 TheReg->haveFeatures(ActiveFeatures: Subtarget->getFeatureBits()))
4062 Imm = TheReg->Encoding;
4063 else
4064 Imm = AArch64SysReg::parseGenericRegister(Name: RegString->getString());
4065
4066 if (Imm == -1) {
4067 // Still no match, see if this is "pc" or give up.
4068 if (!ReadIs128Bit && RegString->getString() == "pc") {
4069 Opcode64Bit = AArch64::ADR;
4070 Imm = 0;
4071 } else {
4072 return false;
4073 }
4074 }
4075 }
4076
4077 SDValue InChain = N->getOperand(Num: 0);
4078 SDValue SysRegImm = CurDAG->getTargetConstant(Val: Imm, DL, VT: MVT::i32);
4079 if (!ReadIs128Bit) {
4080 CurDAG->SelectNodeTo(N, MachineOpc: Opcode64Bit, VT1: MVT::i64, VT2: MVT::Other /* Chain */,
4081 Ops: {SysRegImm, InChain});
4082 } else {
4083 SDNode *MRRS = CurDAG->getMachineNode(
4084 Opcode: AArch64::MRRS, dl: DL,
4085 ResultTys: {MVT::Untyped /* XSeqPair */, MVT::Other /* Chain */},
4086 Ops: {SysRegImm, InChain});
4087
4088 // Sysregs are not endian. The even register always contains the low half
4089 // of the register.
4090 SDValue Lo = CurDAG->getTargetExtractSubreg(SRIdx: AArch64::sube64, DL, VT: MVT::i64,
4091 Operand: SDValue(MRRS, 0));
4092 SDValue Hi = CurDAG->getTargetExtractSubreg(SRIdx: AArch64::subo64, DL, VT: MVT::i64,
4093 Operand: SDValue(MRRS, 0));
4094 SDValue OutChain = SDValue(MRRS, 1);
4095
4096 ReplaceUses(F: SDValue(N, 0), T: Lo);
4097 ReplaceUses(F: SDValue(N, 1), T: Hi);
4098 ReplaceUses(F: SDValue(N, 2), T: OutChain);
4099 }
4100 return true;
4101}
4102
4103// Lower the write_register intrinsic to an MSR instruction node if the special
4104 // register string argument is either of the form detailed in the ACLE (the
4105 // form described in getIntOperandFromRegisterString) or is a named register
4106// known by the MSR SysReg mapper.
4107bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
4108 const auto *MD = cast<MDNodeSDNode>(Val: N->getOperand(Num: 1));
4109 const auto *RegString = cast<MDString>(Val: MD->getMD()->getOperand(I: 0));
4110 SDLoc DL(N);
4111
4112 bool WriteIs128Bit = N->getOpcode() == AArch64ISD::MSRR;
4113
4114 if (!WriteIs128Bit) {
4115 // Check if the register was one of those allowed as the pstatefield value
4116 // in the MSR (immediate) instruction. To accept the values allowed in the
4117 // pstatefield for the MSR (immediate) instruction, we also require that an
4118 // immediate value has been provided as an argument; we know this is the
4119 // case, as it has been ensured by semantic checking.
4120 auto trySelectPState = [&](auto PMapper, unsigned State) {
4121 if (PMapper) {
4122 assert(isa<ConstantSDNode>(N->getOperand(2)) &&
4123 "Expected a constant integer expression.");
4124 unsigned Reg = PMapper->Encoding;
4125 uint64_t Immed = N->getConstantOperandVal(Num: 2);
4126 CurDAG->SelectNodeTo(
4127 N, MachineOpc: State, VT: MVT::Other, Op1: CurDAG->getTargetConstant(Val: Reg, DL, VT: MVT::i32),
4128 Op2: CurDAG->getTargetConstant(Val: Immed, DL, VT: MVT::i16), Op3: N->getOperand(Num: 0));
4129 return true;
4130 }
4131 return false;
4132 };
4133
4134 if (trySelectPState(
4135 AArch64PState::lookupPStateImm0_15ByName(Name: RegString->getString()),
4136 AArch64::MSRpstateImm4))
4137 return true;
4138 if (trySelectPState(
4139 AArch64PState::lookupPStateImm0_1ByName(Name: RegString->getString()),
4140 AArch64::MSRpstateImm1))
4141 return true;
4142 }
4143
4144 int Imm = getIntOperandFromRegisterString(RegString: RegString->getString());
4145 if (Imm == -1) {
4146 // Use the sysreg mapper to attempt to map the remaining possible strings
4147 // to the value for the register to be used for the MSR (register)
4148 // instruction operand.
4149 auto TheReg = AArch64SysReg::lookupSysRegByName(Name: RegString->getString());
4150 if (TheReg && TheReg->Writeable &&
4151 TheReg->haveFeatures(ActiveFeatures: Subtarget->getFeatureBits()))
4152 Imm = TheReg->Encoding;
4153 else
4154 Imm = AArch64SysReg::parseGenericRegister(Name: RegString->getString());
4155
4156 if (Imm == -1)
4157 return false;
4158 }
4159
4160 SDValue InChain = N->getOperand(Num: 0);
4161 if (!WriteIs128Bit) {
4162 CurDAG->SelectNodeTo(N, MachineOpc: AArch64::MSR, VT: MVT::Other,
4163 Op1: CurDAG->getTargetConstant(Val: Imm, DL, VT: MVT::i32),
4164 Op2: N->getOperand(Num: 2), Op3: InChain);
4165 } else {
4166 // No endian swap. The lower half always goes into the even subreg, and the
4167 // higher half always into the odd subreg.
4168 SDNode *Pair = CurDAG->getMachineNode(
4169 Opcode: TargetOpcode::REG_SEQUENCE, dl: DL, VT: MVT::Untyped /* XSeqPair */,
4170 Ops: {CurDAG->getTargetConstant(Val: AArch64::XSeqPairsClassRegClass.getID(), DL,
4171 VT: MVT::i32),
4172 N->getOperand(Num: 2),
4173 CurDAG->getTargetConstant(Val: AArch64::sube64, DL, VT: MVT::i32),
4174 N->getOperand(Num: 3),
4175 CurDAG->getTargetConstant(Val: AArch64::subo64, DL, VT: MVT::i32)});
4176
4177 CurDAG->SelectNodeTo(N, MachineOpc: AArch64::MSRR, VT: MVT::Other,
4178 Op1: CurDAG->getTargetConstant(Val: Imm, DL, VT: MVT::i32),
4179 Op2: SDValue(Pair, 0), Op3: InChain);
4180 }
4181
4182 return true;
4183}
4184
4185/// We've got special pseudo-instructions for these
4186bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
4187 unsigned Opcode;
4188 EVT MemTy = cast<MemSDNode>(Val: N)->getMemoryVT();
4189
4190 // Leave IR for LSE if subtarget supports it.
4191 if (Subtarget->hasLSE()) return false;
4192
4193 if (MemTy == MVT::i8)
4194 Opcode = AArch64::CMP_SWAP_8;
4195 else if (MemTy == MVT::i16)
4196 Opcode = AArch64::CMP_SWAP_16;
4197 else if (MemTy == MVT::i32)
4198 Opcode = AArch64::CMP_SWAP_32;
4199 else if (MemTy == MVT::i64)
4200 Opcode = AArch64::CMP_SWAP_64;
4201 else
4202 llvm_unreachable("Unknown AtomicCmpSwap type");
4203
4204 MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
4205 SDValue Ops[] = {N->getOperand(Num: 1), N->getOperand(Num: 2), N->getOperand(Num: 3),
4206 N->getOperand(Num: 0)};
4207 SDNode *CmpSwap = CurDAG->getMachineNode(
4208 Opcode, dl: SDLoc(N),
4209 VTs: CurDAG->getVTList(VT1: RegTy, VT2: MVT::i32, VT3: MVT::Other), Ops);
4210
4211 MachineMemOperand *MemOp = cast<MemSDNode>(Val: N)->getMemOperand();
4212 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: CmpSwap), NewMemRefs: {MemOp});
4213
4214 ReplaceUses(F: SDValue(N, 0), T: SDValue(CmpSwap, 0));
4215 ReplaceUses(F: SDValue(N, 1), T: SDValue(CmpSwap, 2));
4216 CurDAG->RemoveDeadNode(N);
4217
4218 return true;
4219}
4220
4221bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm,
4222 SDValue &Shift) {
4223 if (!isa<ConstantSDNode>(Val: N))
4224 return false;
4225
4226 SDLoc DL(N);
4227 uint64_t Val = cast<ConstantSDNode>(Val&: N)
4228 ->getAPIntValue()
4229 .trunc(width: VT.getFixedSizeInBits())
4230 .getZExtValue();
4231
4232 switch (VT.SimpleTy) {
4233 case MVT::i8:
4234 // All immediates are supported.
4235 Shift = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32);
4236 Imm = CurDAG->getTargetConstant(Val, DL, VT: MVT::i32);
4237 return true;
4238 case MVT::i16:
4239 case MVT::i32:
4240 case MVT::i64:
4241 // Support 8bit unsigned immediates.
4242 if (Val <= 255) {
4243 Shift = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32);
4244 Imm = CurDAG->getTargetConstant(Val, DL, VT: MVT::i32);
4245 return true;
4246 }
4247 // Support 16bit unsigned immediates that are a multiple of 256.
4248 if (Val <= 65280 && Val % 256 == 0) {
4249 Shift = CurDAG->getTargetConstant(Val: 8, DL, VT: MVT::i32);
4250 Imm = CurDAG->getTargetConstant(Val: Val >> 8, DL, VT: MVT::i32);
4251 return true;
4252 }
4253 break;
4254 default:
4255 break;
4256 }
4257
4258 return false;
4259}
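// For example, for an i16/i32/i64 element, 4352 (0x1100) is accepted as
// Imm = 0x11 with Shift = 8 (a multiple of 256 that fits in 16 bits), whereas
// 4353 is rejected because it is neither <= 255 nor a multiple of 256.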
4260
4261bool AArch64DAGToDAGISel::SelectSVEAddSubSSatImm(SDValue N, MVT VT,
4262 SDValue &Imm, SDValue &Shift,
4263 bool Negate) {
4264 if (!isa<ConstantSDNode>(Val: N))
4265 return false;
4266
4267 SDLoc DL(N);
4268 int64_t Val = cast<ConstantSDNode>(Val&: N)
4269 ->getAPIntValue()
4270 .trunc(width: VT.getFixedSizeInBits())
4271 .getSExtValue();
4272
4273 if (Negate)
4274 Val = -Val;
4275
4276 // Signed saturating instructions treat their immediate operand as unsigned,
4277 // whereas the related intrinsics define their operands to be signed. This
4278 // means we can only use the immediate form when the operand is non-negative.
4279 if (Val < 0)
4280 return false;
4281
4282 switch (VT.SimpleTy) {
4283 case MVT::i8:
4284 // All positive immediates are supported.
4285 Shift = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32);
4286 Imm = CurDAG->getTargetConstant(Val, DL, VT: MVT::i32);
4287 return true;
4288 case MVT::i16:
4289 case MVT::i32:
4290 case MVT::i64:
4291 // Support 8bit positive immediates.
4292 if (Val <= 255) {
4293 Shift = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32);
4294 Imm = CurDAG->getTargetConstant(Val, DL, VT: MVT::i32);
4295 return true;
4296 }
4297 // Support 16bit positive immediates that are a multiple of 256.
4298 if (Val <= 65280 && Val % 256 == 0) {
4299 Shift = CurDAG->getTargetConstant(Val: 8, DL, VT: MVT::i32);
4300 Imm = CurDAG->getTargetConstant(Val: Val >> 8, DL, VT: MVT::i32);
4301 return true;
4302 }
4303 break;
4304 default:
4305 break;
4306 }
4307
4308 return false;
4309}
4310
4311bool AArch64DAGToDAGISel::SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm,
4312 SDValue &Shift) {
4313 if (!isa<ConstantSDNode>(Val: N))
4314 return false;
4315
4316 SDLoc DL(N);
4317 int64_t Val = cast<ConstantSDNode>(Val&: N)
4318 ->getAPIntValue()
4319 .trunc(width: VT.getFixedSizeInBits())
4320 .getSExtValue();
4321
4322 switch (VT.SimpleTy) {
4323 case MVT::i8:
4324 // All immediates are supported.
4325 Shift = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32);
4326 Imm = CurDAG->getTargetConstant(Val: Val & 0xFF, DL, VT: MVT::i32);
4327 return true;
4328 case MVT::i16:
4329 case MVT::i32:
4330 case MVT::i64:
4331 // Support 8bit signed immediates.
4332 if (Val >= -128 && Val <= 127) {
4333 Shift = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32);
4334 Imm = CurDAG->getTargetConstant(Val: Val & 0xFF, DL, VT: MVT::i32);
4335 return true;
4336 }
4337 // Support 16bit signed immediates that are a multiple of 256.
4338 if (Val >= -32768 && Val <= 32512 && Val % 256 == 0) {
4339 Shift = CurDAG->getTargetConstant(Val: 8, DL, VT: MVT::i32);
4340 Imm = CurDAG->getTargetConstant(Val: (Val >> 8) & 0xFF, DL, VT: MVT::i32);
4341 return true;
4342 }
4343 break;
4344 default:
4345 break;
4346 }
4347
4348 return false;
4349}
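// For example, for an i16 element the value -512 is encoded as Imm = 0xfe
// with Shift = 8 (-512 is a multiple of 256 within [-32768, 32512]), while
// -513 is rejected.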
4350
4351bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
4352 if (auto CNode = dyn_cast<ConstantSDNode>(Val&: N)) {
4353 int64_t ImmVal = CNode->getSExtValue();
4354 SDLoc DL(N);
4355 if (ImmVal >= -128 && ImmVal < 128) {
4356 Imm = CurDAG->getSignedTargetConstant(Val: ImmVal, DL, VT: MVT::i32);
4357 return true;
4358 }
4359 }
4360 return false;
4361}
4362
4363bool AArch64DAGToDAGISel::SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm) {
4364 if (auto CNode = dyn_cast<ConstantSDNode>(Val&: N)) {
4365 uint64_t ImmVal = CNode->getZExtValue();
4366
4367 switch (VT.SimpleTy) {
4368 case MVT::i8:
4369 ImmVal &= 0xFF;
4370 break;
4371 case MVT::i16:
4372 ImmVal &= 0xFFFF;
4373 break;
4374 case MVT::i32:
4375 ImmVal &= 0xFFFFFFFF;
4376 break;
4377 case MVT::i64:
4378 break;
4379 default:
4380 llvm_unreachable("Unexpected type");
4381 }
4382
4383 if (ImmVal < 256) {
4384 Imm = CurDAG->getTargetConstant(Val: ImmVal, DL: SDLoc(N), VT: MVT::i32);
4385 return true;
4386 }
4387 }
4388 return false;
4389}
4390
4391bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm,
4392 bool Invert) {
4393 if (auto CNode = dyn_cast<ConstantSDNode>(Val&: N)) {
4394 uint64_t ImmVal = CNode->getZExtValue();
4395 SDLoc DL(N);
4396
4397 if (Invert)
4398 ImmVal = ~ImmVal;
4399
4400 // Mask and replicate the immediate across 64 bits according to the element size.
4401 switch (VT.SimpleTy) {
4402 case MVT::i8:
4403 ImmVal &= 0xFF;
4404 ImmVal |= ImmVal << 8;
4405 ImmVal |= ImmVal << 16;
4406 ImmVal |= ImmVal << 32;
4407 break;
4408 case MVT::i16:
4409 ImmVal &= 0xFFFF;
4410 ImmVal |= ImmVal << 16;
4411 ImmVal |= ImmVal << 32;
4412 break;
4413 case MVT::i32:
4414 ImmVal &= 0xFFFFFFFF;
4415 ImmVal |= ImmVal << 32;
4416 break;
4417 case MVT::i64:
4418 break;
4419 default:
4420 llvm_unreachable("Unexpected type");
4421 }
4422
4423 uint64_t encoding;
4424 if (AArch64_AM::processLogicalImmediate(Imm: ImmVal, RegSize: 64, Encoding&: encoding)) {
4425 Imm = CurDAG->getTargetConstant(Val: encoding, DL, VT: MVT::i64);
4426 return true;
4427 }
4428 }
4429 return false;
4430}
4431
4432// SVE shift intrinsics allow shift amounts larger than the element's bitwidth.
4433// Rather than attempt to normalise everything we can sometimes saturate the
4434// shift amount during selection. This function also allows for consistent
4435// isel patterns by ensuring the resulting "Imm" node is of the i32 type
4436// required by the instructions.
4437bool AArch64DAGToDAGISel::SelectSVEShiftImm(SDValue N, uint64_t Low,
4438 uint64_t High, bool AllowSaturation,
4439 SDValue &Imm) {
4440 if (auto *CN = dyn_cast<ConstantSDNode>(Val&: N)) {
4441 uint64_t ImmVal = CN->getZExtValue();
4442
4443 // Reject shift amounts that are too small.
4444 if (ImmVal < Low)
4445 return false;
4446
4447 // Reject or saturate shift amounts that are too big.
4448 if (ImmVal > High) {
4449 if (!AllowSaturation)
4450 return false;
4451 ImmVal = High;
4452 }
4453
4454 Imm = CurDAG->getTargetConstant(Val: ImmVal, DL: SDLoc(N), VT: MVT::i32);
4455 return true;
4456 }
4457
4458 return false;
4459}
4460
4461bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
4462 // tagp(FrameIndex, IRGstack, tag_offset):
4463 // since the offset between FrameIndex and IRGstack is a compile-time
4464 // constant, this can be lowered to a single ADDG instruction.
4465 if (!(isa<FrameIndexSDNode>(Val: N->getOperand(Num: 1)))) {
4466 return false;
4467 }
4468
4469 SDValue IRG_SP = N->getOperand(Num: 2);
4470 if (IRG_SP->getOpcode() != ISD::INTRINSIC_W_CHAIN ||
4471 IRG_SP->getConstantOperandVal(Num: 1) != Intrinsic::aarch64_irg_sp) {
4472 return false;
4473 }
4474
4475 const TargetLowering *TLI = getTargetLowering();
4476 SDLoc DL(N);
4477 int FI = cast<FrameIndexSDNode>(Val: N->getOperand(Num: 1))->getIndex();
4478 SDValue FiOp = CurDAG->getTargetFrameIndex(
4479 FI, VT: TLI->getPointerTy(DL: CurDAG->getDataLayout()));
4480 int TagOffset = N->getConstantOperandVal(Num: 3);
4481
4482 SDNode *Out = CurDAG->getMachineNode(
4483 Opcode: AArch64::TAGPstack, dl: DL, VT: MVT::i64,
4484 Ops: {FiOp, CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64), N->getOperand(Num: 2),
4485 CurDAG->getTargetConstant(Val: TagOffset, DL, VT: MVT::i64)});
4486 ReplaceNode(F: N, T: Out);
4487 return true;
4488}
4489
4490void AArch64DAGToDAGISel::SelectTagP(SDNode *N) {
4491 assert(isa<ConstantSDNode>(N->getOperand(3)) &&
4492 "llvm.aarch64.tagp third argument must be an immediate");
4493 if (trySelectStackSlotTagP(N))
4494 return;
4495 // FIXME: above applies in any case when offset between Op1 and Op2 is a
4496 // compile-time constant, not just for stack allocations.
4497
4498 // General case for unrelated pointers in Op1 and Op2.
4499 SDLoc DL(N);
4500 int TagOffset = N->getConstantOperandVal(Num: 3);
4501 SDNode *N1 = CurDAG->getMachineNode(Opcode: AArch64::SUBP, dl: DL, VT: MVT::i64,
4502 Ops: {N->getOperand(Num: 1), N->getOperand(Num: 2)});
4503 SDNode *N2 = CurDAG->getMachineNode(Opcode: AArch64::ADDXrr, dl: DL, VT: MVT::i64,
4504 Ops: {SDValue(N1, 0), N->getOperand(Num: 2)});
4505 SDNode *N3 = CurDAG->getMachineNode(
4506 Opcode: AArch64::ADDG, dl: DL, VT: MVT::i64,
4507 Ops: {SDValue(N2, 0), CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64),
4508 CurDAG->getTargetConstant(Val: TagOffset, DL, VT: MVT::i64)});
4509 ReplaceNode(F: N, T: N3);
4510}
4511
4512bool AArch64DAGToDAGISel::trySelectCastFixedLengthToScalableVector(SDNode *N) {
4513 assert(N->getOpcode() == ISD::INSERT_SUBVECTOR && "Invalid Node!");
4514
4515 // Bail when this is not a "cast"-like insert_subvector.
4516 if (N->getConstantOperandVal(Num: 2) != 0)
4517 return false;
4518 if (!N->getOperand(Num: 0).isUndef())
4519 return false;
4520
4521 // Bail when normal isel should do the job.
4522 EVT VT = N->getValueType(ResNo: 0);
4523 EVT InVT = N->getOperand(Num: 1).getValueType();
4524 if (VT.isFixedLengthVector() || InVT.isScalableVector())
4525 return false;
4526 if (InVT.getSizeInBits() <= 128)
4527 return false;
4528
4529 // NOTE: We can only get here when doing fixed length SVE code generation.
4530 // We do manual selection because the types involved are not linked to real
4531 // registers (despite being legal) and must be coerced into SVE registers.
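// For example (illustrative), when generating fixed-length SVE code with a
// 256-bit vector length, inserting a 256-bit fixed-length vector into an
// undef nxv4f32 at index 0 is selected as a single COPY_TO_REGCLASS into
// the ZPR register class.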
4532
4533 assert(VT.getSizeInBits().getKnownMinValue() == AArch64::SVEBitsPerBlock &&
4534 "Expected to insert into a packed scalable vector!");
4535
4536 SDLoc DL(N);
4537 auto RC = CurDAG->getTargetConstant(Val: AArch64::ZPRRegClassID, DL, VT: MVT::i64);
4538 ReplaceNode(F: N, T: CurDAG->getMachineNode(Opcode: TargetOpcode::COPY_TO_REGCLASS, dl: DL, VT,
4539 Op1: N->getOperand(Num: 1), Op2: RC));
4540 return true;
4541}
4542
4543bool AArch64DAGToDAGISel::trySelectCastScalableToFixedLengthVector(SDNode *N) {
4544 assert(N->getOpcode() == ISD::EXTRACT_SUBVECTOR && "Invalid Node!");
4545
4546 // Bail when this is not a "cast"-like extract_subvector.
4547 if (N->getConstantOperandVal(Num: 1) != 0)
4548 return false;
4549
4550 // Bail when normal isel can do the job.
4551 EVT VT = N->getValueType(ResNo: 0);
4552 EVT InVT = N->getOperand(Num: 0).getValueType();
4553 if (VT.isScalableVector() || InVT.isFixedLengthVector())
4554 return false;
4555 if (VT.getSizeInBits() <= 128)
4556 return false;
4557
4558 // NOTE: We can only get here when doing fixed length SVE code generation.
4559 // We do manual selection because the types involved are not linked to real
4560 // registers (despite being legal) and must be coerced into SVE registers.
4561
4562 assert(InVT.getSizeInBits().getKnownMinValue() == AArch64::SVEBitsPerBlock &&
4563 "Expected to extract from a packed scalable vector!");
4564
4565 SDLoc DL(N);
4566 auto RC = CurDAG->getTargetConstant(Val: AArch64::ZPRRegClassID, DL, VT: MVT::i64);
4567 ReplaceNode(F: N, T: CurDAG->getMachineNode(Opcode: TargetOpcode::COPY_TO_REGCLASS, dl: DL, VT,
4568 Op1: N->getOperand(Num: 0), Op2: RC));
4569 return true;
4570}
4571
4572bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {
4573 assert(N->getOpcode() == ISD::OR && "Expected OR instruction");
4574
4575 SDValue N0 = N->getOperand(Num: 0);
4576 SDValue N1 = N->getOperand(Num: 1);
4577
4578 EVT VT = N->getValueType(ResNo: 0);
4579 SDLoc DL(N);
4580
4581 // Essentially: rotr (xor(x, y), imm) -> xar (x, y, imm)
4582 // A rotate by a constant is a funnel shift in IR, which is expanded to
4583 // an OR with shifted operands.
4584 // We do the following transform:
4585 // OR N0, N1 -> xar (x, y, imm)
4586 // Where:
4587 // N1 = SRL_PRED true, V, splat(imm) --> rotr amount
4588 // N0 = SHL_PRED true, V, splat(bits-imm)
4589 // V = (xor x, y)
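// For example (illustrative), rotr(xor(x, y), 8) on nxv8i16 is selected as
// "XAR_ZZZI_H x, y, #8".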
4590 if (VT.isScalableVector() &&
4591 (Subtarget->hasSVE2() ||
4592 (Subtarget->hasSME() && Subtarget->isStreaming()))) {
4593 if (N0.getOpcode() != AArch64ISD::SHL_PRED ||
4594 N1.getOpcode() != AArch64ISD::SRL_PRED)
4595 std::swap(a&: N0, b&: N1);
4596 if (N0.getOpcode() != AArch64ISD::SHL_PRED ||
4597 N1.getOpcode() != AArch64ISD::SRL_PRED)
4598 return false;
4599
4600 auto *TLI = static_cast<const AArch64TargetLowering *>(getTargetLowering());
4601 if (!TLI->isAllActivePredicate(DAG&: *CurDAG, N: N0.getOperand(i: 0)) ||
4602 !TLI->isAllActivePredicate(DAG&: *CurDAG, N: N1.getOperand(i: 0)))
4603 return false;
4604
4605 if (N0.getOperand(i: 1) != N1.getOperand(i: 1))
4606 return false;
4607
4608 SDValue R1, R2;
4609 bool IsXOROperand = true;
4610 if (N0.getOperand(i: 1).getOpcode() != ISD::XOR) {
4611 IsXOROperand = false;
4612 } else {
4613 R1 = N0.getOperand(i: 1).getOperand(i: 0);
4614 R2 = N1.getOperand(i: 1).getOperand(i: 1);
4615 }
4616
4617 APInt ShlAmt, ShrAmt;
4618 if (!ISD::isConstantSplatVector(N: N0.getOperand(i: 2).getNode(), SplatValue&: ShlAmt) ||
4619 !ISD::isConstantSplatVector(N: N1.getOperand(i: 2).getNode(), SplatValue&: ShrAmt))
4620 return false;
4621
4622 if (ShlAmt + ShrAmt != VT.getScalarSizeInBits())
4623 return false;
4624
4625 if (!IsXOROperand) {
4626 SDValue Zero = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64);
4627 SDNode *MOV = CurDAG->getMachineNode(Opcode: AArch64::MOVIv2d_ns, dl: DL, VT, Op1: Zero);
4628 SDValue MOVIV = SDValue(MOV, 0);
4629
4630 SDValue ZSub = CurDAG->getTargetConstant(Val: AArch64::zsub, DL, VT: MVT::i32);
4631 SDNode *SubRegToReg = CurDAG->getMachineNode(Opcode: AArch64::SUBREG_TO_REG, dl: DL,
4632 VT, Op1: Zero, Op2: MOVIV, Op3: ZSub);
4633
4634 R1 = N1->getOperand(Num: 1);
4635 R2 = SDValue(SubRegToReg, 0);
4636 }
4637
4638 SDValue Imm =
4639 CurDAG->getTargetConstant(Val: ShrAmt.getZExtValue(), DL, VT: MVT::i32);
4640
4641 SDValue Ops[] = {R1, R2, Imm};
4642 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::Int>(
4643 VT, Opcodes: {AArch64::XAR_ZZZI_B, AArch64::XAR_ZZZI_H, AArch64::XAR_ZZZI_S,
4644 AArch64::XAR_ZZZI_D})) {
4645 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
4646 return true;
4647 }
4648 return false;
4649 }
4650
4651 // We have the Neon SHA3 XAR operation for v2i64, but for the types
4652 // v4i32, v8i16 and v16i8 we can use the SVE2 XAR operation when SVE2
4653 // is available.
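// For example (illustrative), a v4i32 rotate of an XOR is handled by
// inserting the 128-bit operands into Z registers via INSERT_SUBREG (zsub),
// issuing XAR_ZZZI_S, and extracting the fixed-length result back out with
// EXTRACT_SUBREG.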
4654 EVT SVT;
4655 switch (VT.getSimpleVT().SimpleTy) {
4656 case MVT::v4i32:
4657 case MVT::v2i32:
4658 SVT = MVT::nxv4i32;
4659 break;
4660 case MVT::v8i16:
4661 case MVT::v4i16:
4662 SVT = MVT::nxv8i16;
4663 break;
4664 case MVT::v16i8:
4665 case MVT::v8i8:
4666 SVT = MVT::nxv16i8;
4667 break;
4668 case MVT::v2i64:
4669 case MVT::v1i64:
4670 SVT = Subtarget->hasSHA3() ? MVT::v2i64 : MVT::nxv2i64;
4671 break;
4672 default:
4673 return false;
4674 }
4675
4676 if ((!SVT.isScalableVector() && !Subtarget->hasSHA3()) ||
4677 (SVT.isScalableVector() && !Subtarget->hasSVE2()))
4678 return false;
4679
4680 if (N0->getOpcode() != AArch64ISD::VSHL ||
4681 N1->getOpcode() != AArch64ISD::VLSHR)
4682 return false;
4683
4684 if (N0->getOperand(Num: 0) != N1->getOperand(Num: 0))
4685 return false;
4686
4687 SDValue R1, R2;
4688 bool IsXOROperand = true;
4689 if (N1->getOperand(Num: 0)->getOpcode() != ISD::XOR) {
4690 IsXOROperand = false;
4691 } else {
4692 SDValue XOR = N0.getOperand(i: 0);
4693 R1 = XOR.getOperand(i: 0);
4694 R2 = XOR.getOperand(i: 1);
4695 }
4696
4697 unsigned HsAmt = N0.getConstantOperandVal(i: 1);
4698 unsigned ShAmt = N1.getConstantOperandVal(i: 1);
4699
4700 SDValue Imm = CurDAG->getTargetConstant(
4701 Val: ShAmt, DL, VT: N0.getOperand(i: 1).getValueType(), isOpaque: false);
4702
4703 unsigned VTSizeInBits = VT.getScalarSizeInBits();
4704 if (ShAmt + HsAmt != VTSizeInBits)
4705 return false;
4706
4707 if (!IsXOROperand) {
4708 SDValue Zero = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64);
4709 SDNode *MOV =
4710 CurDAG->getMachineNode(Opcode: AArch64::MOVIv2d_ns, dl: DL, VT: MVT::v2i64, Op1: Zero);
4711 SDValue MOVIV = SDValue(MOV, 0);
4712
4713 R1 = N1->getOperand(Num: 0);
4714 R2 = MOVIV;
4715 }
4716
4717 if (SVT != VT) {
4718 SDValue Undef =
4719 SDValue(CurDAG->getMachineNode(Opcode: TargetOpcode::IMPLICIT_DEF, dl: DL, VT: SVT), 0);
4720
4721 if (SVT.isScalableVector() && VT.is64BitVector()) {
4722 EVT QVT = VT.getDoubleNumVectorElementsVT(Context&: *CurDAG->getContext());
4723
4724 SDValue UndefQ = SDValue(
4725 CurDAG->getMachineNode(Opcode: TargetOpcode::IMPLICIT_DEF, dl: DL, VT: QVT), 0);
4726 SDValue DSub = CurDAG->getTargetConstant(Val: AArch64::dsub, DL, VT: MVT::i32);
4727
4728 R1 = SDValue(CurDAG->getMachineNode(Opcode: AArch64::INSERT_SUBREG, dl: DL, VT: QVT,
4729 Op1: UndefQ, Op2: R1, Op3: DSub),
4730 0);
4731 if (R2.getValueType() == VT)
4732 R2 = SDValue(CurDAG->getMachineNode(Opcode: AArch64::INSERT_SUBREG, dl: DL, VT: QVT,
4733 Op1: UndefQ, Op2: R2, Op3: DSub),
4734 0);
4735 }
4736
4737 SDValue SubReg = CurDAG->getTargetConstant(
4738 Val: (SVT.isScalableVector() ? AArch64::zsub : AArch64::dsub), DL, VT: MVT::i32);
4739
4740 R1 = SDValue(CurDAG->getMachineNode(Opcode: AArch64::INSERT_SUBREG, dl: DL, VT: SVT, Op1: Undef,
4741 Op2: R1, Op3: SubReg),
4742 0);
4743
4744 if (SVT.isScalableVector() || R2.getValueType() != SVT)
4745 R2 = SDValue(CurDAG->getMachineNode(Opcode: AArch64::INSERT_SUBREG, dl: DL, VT: SVT,
4746 Op1: Undef, Op2: R2, Op3: SubReg),
4747 0);
4748 }
4749
4750 SDValue Ops[] = {R1, R2, Imm};
4751 SDNode *XAR = nullptr;
4752
4753 if (SVT.isScalableVector()) {
4754 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::Int>(
4755 VT: SVT, Opcodes: {AArch64::XAR_ZZZI_B, AArch64::XAR_ZZZI_H, AArch64::XAR_ZZZI_S,
4756 AArch64::XAR_ZZZI_D}))
4757 XAR = CurDAG->getMachineNode(Opcode: Opc, dl: DL, VT: SVT, Ops);
4758 } else {
4759 XAR = CurDAG->getMachineNode(Opcode: AArch64::XAR, dl: DL, VT: SVT, Ops);
4760 }
4761
4762 assert(XAR && "Unexpected NULL value for XAR instruction in DAG");
4763
4764 if (SVT != VT) {
4765 if (VT.is64BitVector() && SVT.isScalableVector()) {
4766 EVT QVT = VT.getDoubleNumVectorElementsVT(Context&: *CurDAG->getContext());
4767
4768 SDValue ZSub = CurDAG->getTargetConstant(Val: AArch64::zsub, DL, VT: MVT::i32);
4769 SDNode *Q = CurDAG->getMachineNode(Opcode: AArch64::EXTRACT_SUBREG, dl: DL, VT: QVT,
4770 Op1: SDValue(XAR, 0), Op2: ZSub);
4771
4772 SDValue DSub = CurDAG->getTargetConstant(Val: AArch64::dsub, DL, VT: MVT::i32);
4773 XAR = CurDAG->getMachineNode(Opcode: AArch64::EXTRACT_SUBREG, dl: DL, VT,
4774 Op1: SDValue(Q, 0), Op2: DSub);
4775 } else {
4776 SDValue SubReg = CurDAG->getTargetConstant(
4777 Val: (SVT.isScalableVector() ? AArch64::zsub : AArch64::dsub), DL,
4778 VT: MVT::i32);
4779 XAR = CurDAG->getMachineNode(Opcode: AArch64::EXTRACT_SUBREG, dl: DL, VT,
4780 Op1: SDValue(XAR, 0), Op2: SubReg);
4781 }
4782 }
4783 ReplaceNode(F: N, T: XAR);
4784 return true;
4785}
4786
4787void AArch64DAGToDAGISel::Select(SDNode *Node) {
4788 // If we have a custom node, we already have selected!
4789 if (Node->isMachineOpcode()) {
4790 LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
4791 Node->setNodeId(-1);
4792 return;
4793 }
4794
4795 // A few cases need custom selection.
4796 EVT VT = Node->getValueType(ResNo: 0);
4797
4798 switch (Node->getOpcode()) {
4799 default:
4800 break;
4801
4802 case ISD::ATOMIC_CMP_SWAP:
4803 if (SelectCMP_SWAP(N: Node))
4804 return;
4805 break;
4806
4807 case ISD::READ_REGISTER:
4808 case AArch64ISD::MRRS:
4809 if (tryReadRegister(N: Node))
4810 return;
4811 break;
4812
4813 case ISD::WRITE_REGISTER:
4814 case AArch64ISD::MSRR:
4815 if (tryWriteRegister(N: Node))
4816 return;
4817 break;
4818
4819 case ISD::LOAD: {
4820 // Try to select as an indexed load. Fall through to normal processing
4821 // if we can't.
4822 if (tryIndexedLoad(N: Node))
4823 return;
4824 break;
4825 }
4826
4827 case ISD::SRL:
4828 case ISD::AND:
4829 case ISD::SRA:
4830 case ISD::SIGN_EXTEND_INREG:
4831 if (tryBitfieldExtractOp(N: Node))
4832 return;
4833 if (tryBitfieldInsertInZeroOp(N: Node))
4834 return;
4835 [[fallthrough]];
4836 case ISD::ROTR:
4837 case ISD::SHL:
4838 if (tryShiftAmountMod(N: Node))
4839 return;
4840 break;
4841
4842 case ISD::SIGN_EXTEND:
4843 if (tryBitfieldExtractOpFromSExt(N: Node))
4844 return;
4845 break;
4846
4847 case ISD::OR:
4848 if (tryBitfieldInsertOp(N: Node))
4849 return;
4850 if (trySelectXAR(N: Node))
4851 return;
4852 break;
4853
4854 case ISD::EXTRACT_SUBVECTOR: {
4855 if (trySelectCastScalableToFixedLengthVector(N: Node))
4856 return;
4857 break;
4858 }
4859
4860 case ISD::INSERT_SUBVECTOR: {
4861 if (trySelectCastFixedLengthToScalableVector(N: Node))
4862 return;
4863 break;
4864 }
4865
4866 case ISD::Constant: {
4867 // Materialize zero constants as copies from WZR/XZR. This allows
4868 // the coalescer to propagate these into other instructions.
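// For example, a store of zero can then use WZR directly ("str wzr, [x0]")
// rather than first materialising 0 into a general-purpose register.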
4869 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Val: Node);
4870 if (ConstNode->isZero()) {
4871 if (VT == MVT::i32) {
4872 SDValue New = CurDAG->getCopyFromReg(
4873 Chain: CurDAG->getEntryNode(), dl: SDLoc(Node), Reg: AArch64::WZR, VT: MVT::i32);
4874 ReplaceNode(F: Node, T: New.getNode());
4875 return;
4876 } else if (VT == MVT::i64) {
4877 SDValue New = CurDAG->getCopyFromReg(
4878 Chain: CurDAG->getEntryNode(), dl: SDLoc(Node), Reg: AArch64::XZR, VT: MVT::i64);
4879 ReplaceNode(F: Node, T: New.getNode());
4880 return;
4881 }
4882 }
4883 break;
4884 }
4885
4886 case ISD::FrameIndex: {
4887 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
4888 int FI = cast<FrameIndexSDNode>(Val: Node)->getIndex();
4889 unsigned Shifter = AArch64_AM::getShifterImm(ST: AArch64_AM::LSL, Imm: 0);
4890 const TargetLowering *TLI = getTargetLowering();
4891 SDValue TFI = CurDAG->getTargetFrameIndex(
4892 FI, VT: TLI->getPointerTy(DL: CurDAG->getDataLayout()));
4893 SDLoc DL(Node);
4894 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32),
4895 CurDAG->getTargetConstant(Val: Shifter, DL, VT: MVT::i32) };
4896 CurDAG->SelectNodeTo(N: Node, MachineOpc: AArch64::ADDXri, VT: MVT::i64, Ops);
4897 return;
4898 }
4899 case ISD::INTRINSIC_W_CHAIN: {
4900 unsigned IntNo = Node->getConstantOperandVal(Num: 1);
4901 switch (IntNo) {
4902 default:
4903 break;
4904 case Intrinsic::aarch64_gcsss: {
4905 SDLoc DL(Node);
4906 SDValue Chain = Node->getOperand(Num: 0);
4907 SDValue Val = Node->getOperand(Num: 2);
4908 SDValue Zero = CurDAG->getCopyFromReg(Chain, dl: DL, Reg: AArch64::XZR, VT: MVT::i64);
4909 SDNode *SS1 =
4910 CurDAG->getMachineNode(Opcode: AArch64::GCSSS1, dl: DL, VT: MVT::Other, Op1: Val, Op2: Chain);
4911 SDNode *SS2 = CurDAG->getMachineNode(Opcode: AArch64::GCSSS2, dl: DL, VT1: MVT::i64,
4912 VT2: MVT::Other, Op1: Zero, Op2: SDValue(SS1, 0));
4913 ReplaceNode(F: Node, T: SS2);
4914 return;
4915 }
4916 case Intrinsic::aarch64_ldaxp:
4917 case Intrinsic::aarch64_ldxp: {
4918 unsigned Op =
4919 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
4920 SDValue MemAddr = Node->getOperand(Num: 2);
4921 SDLoc DL(Node);
4922 SDValue Chain = Node->getOperand(Num: 0);
4923
4924 SDNode *Ld = CurDAG->getMachineNode(Opcode: Op, dl: DL, VT1: MVT::i64, VT2: MVT::i64,
4925 VT3: MVT::Other, Op1: MemAddr, Op2: Chain);
4926
4927 // Transfer memoperands.
4928 MachineMemOperand *MemOp =
4929 cast<MemIntrinsicSDNode>(Val: Node)->getMemOperand();
4930 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: Ld), NewMemRefs: {MemOp});
4931 ReplaceNode(F: Node, T: Ld);
4932 return;
4933 }
4934 case Intrinsic::aarch64_stlxp:
4935 case Intrinsic::aarch64_stxp: {
4936 unsigned Op =
4937 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
4938 SDLoc DL(Node);
4939 SDValue Chain = Node->getOperand(Num: 0);
4940 SDValue ValLo = Node->getOperand(Num: 2);
4941 SDValue ValHi = Node->getOperand(Num: 3);
4942 SDValue MemAddr = Node->getOperand(Num: 4);
4943
4944 // Place arguments in the right order.
4945 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
4946
4947 SDNode *St = CurDAG->getMachineNode(Opcode: Op, dl: DL, VT1: MVT::i32, VT2: MVT::Other, Ops);
4948 // Transfer memoperands.
4949 MachineMemOperand *MemOp =
4950 cast<MemIntrinsicSDNode>(Val: Node)->getMemOperand();
4951 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: St), NewMemRefs: {MemOp});
4952
4953 ReplaceNode(F: Node, T: St);
4954 return;
4955 }
4956 case Intrinsic::aarch64_neon_ld1x2:
4957 if (VT == MVT::v8i8) {
4958 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov8b, SubRegIdx: AArch64::dsub0);
4959 return;
4960 } else if (VT == MVT::v16i8) {
4961 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov16b, SubRegIdx: AArch64::qsub0);
4962 return;
4963 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4964 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov4h, SubRegIdx: AArch64::dsub0);
4965 return;
4966 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4967 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov8h, SubRegIdx: AArch64::qsub0);
4968 return;
4969 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4970 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov2s, SubRegIdx: AArch64::dsub0);
4971 return;
4972 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4973 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov4s, SubRegIdx: AArch64::qsub0);
4974 return;
4975 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4976 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov1d, SubRegIdx: AArch64::dsub0);
4977 return;
4978 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4979 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov2d, SubRegIdx: AArch64::qsub0);
4980 return;
4981 }
4982 break;
4983 case Intrinsic::aarch64_neon_ld1x3:
4984 if (VT == MVT::v8i8) {
4985 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev8b, SubRegIdx: AArch64::dsub0);
4986 return;
4987 } else if (VT == MVT::v16i8) {
4988 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev16b, SubRegIdx: AArch64::qsub0);
4989 return;
4990 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4991 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev4h, SubRegIdx: AArch64::dsub0);
4992 return;
4993 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4994 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev8h, SubRegIdx: AArch64::qsub0);
4995 return;
4996 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4997 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev2s, SubRegIdx: AArch64::dsub0);
4998 return;
4999 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5000 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev4s, SubRegIdx: AArch64::qsub0);
5001 return;
5002 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5003 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev1d, SubRegIdx: AArch64::dsub0);
5004 return;
5005 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5006 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev2d, SubRegIdx: AArch64::qsub0);
5007 return;
5008 }
5009 break;
5010 case Intrinsic::aarch64_neon_ld1x4:
5011 if (VT == MVT::v8i8) {
5012 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv8b, SubRegIdx: AArch64::dsub0);
5013 return;
5014 } else if (VT == MVT::v16i8) {
5015 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv16b, SubRegIdx: AArch64::qsub0);
5016 return;
5017 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5018 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv4h, SubRegIdx: AArch64::dsub0);
5019 return;
5020 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5021 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv8h, SubRegIdx: AArch64::qsub0);
5022 return;
5023 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5024 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv2s, SubRegIdx: AArch64::dsub0);
5025 return;
5026 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5027 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv4s, SubRegIdx: AArch64::qsub0);
5028 return;
5029 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5030 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv1d, SubRegIdx: AArch64::dsub0);
5031 return;
5032 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5033 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv2d, SubRegIdx: AArch64::qsub0);
5034 return;
5035 }
5036 break;
5037 case Intrinsic::aarch64_neon_ld2:
5038 if (VT == MVT::v8i8) {
5039 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov8b, SubRegIdx: AArch64::dsub0);
5040 return;
5041 } else if (VT == MVT::v16i8) {
5042 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov16b, SubRegIdx: AArch64::qsub0);
5043 return;
5044 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5045 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov4h, SubRegIdx: AArch64::dsub0);
5046 return;
5047 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5048 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov8h, SubRegIdx: AArch64::qsub0);
5049 return;
5050 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5051 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov2s, SubRegIdx: AArch64::dsub0);
5052 return;
5053 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5054 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov4s, SubRegIdx: AArch64::qsub0);
5055 return;
5056 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5057 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov1d, SubRegIdx: AArch64::dsub0);
5058 return;
5059 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5060 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov2d, SubRegIdx: AArch64::qsub0);
5061 return;
5062 }
5063 break;
5064 case Intrinsic::aarch64_neon_ld3:
5065 if (VT == MVT::v8i8) {
5066 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev8b, SubRegIdx: AArch64::dsub0);
5067 return;
5068 } else if (VT == MVT::v16i8) {
5069 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev16b, SubRegIdx: AArch64::qsub0);
5070 return;
5071 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5072 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev4h, SubRegIdx: AArch64::dsub0);
5073 return;
5074 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5075 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev8h, SubRegIdx: AArch64::qsub0);
5076 return;
5077 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5078 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev2s, SubRegIdx: AArch64::dsub0);
5079 return;
5080 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5081 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev4s, SubRegIdx: AArch64::qsub0);
5082 return;
5083 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5084 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev1d, SubRegIdx: AArch64::dsub0);
5085 return;
5086 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5087 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev2d, SubRegIdx: AArch64::qsub0);
5088 return;
5089 }
5090 break;
5091 case Intrinsic::aarch64_neon_ld4:
5092 if (VT == MVT::v8i8) {
5093 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv8b, SubRegIdx: AArch64::dsub0);
5094 return;
5095 } else if (VT == MVT::v16i8) {
5096 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv16b, SubRegIdx: AArch64::qsub0);
5097 return;
5098 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5099 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv4h, SubRegIdx: AArch64::dsub0);
5100 return;
5101 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5102 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv8h, SubRegIdx: AArch64::qsub0);
5103 return;
5104 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5105 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv2s, SubRegIdx: AArch64::dsub0);
5106 return;
5107 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5108 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv4s, SubRegIdx: AArch64::qsub0);
5109 return;
5110 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5111 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv1d, SubRegIdx: AArch64::dsub0);
5112 return;
5113 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5114 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv2d, SubRegIdx: AArch64::qsub0);
5115 return;
5116 }
5117 break;
5118 case Intrinsic::aarch64_neon_ld2r:
5119 if (VT == MVT::v8i8) {
5120 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv8b, SubRegIdx: AArch64::dsub0);
5121 return;
5122 } else if (VT == MVT::v16i8) {
5123 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv16b, SubRegIdx: AArch64::qsub0);
5124 return;
5125 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5126 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv4h, SubRegIdx: AArch64::dsub0);
5127 return;
5128 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5129 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv8h, SubRegIdx: AArch64::qsub0);
5130 return;
5131 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5132 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv2s, SubRegIdx: AArch64::dsub0);
5133 return;
5134 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5135 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv4s, SubRegIdx: AArch64::qsub0);
5136 return;
5137 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5138 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv1d, SubRegIdx: AArch64::dsub0);
5139 return;
5140 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5141 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv2d, SubRegIdx: AArch64::qsub0);
5142 return;
5143 }
5144 break;
5145 case Intrinsic::aarch64_neon_ld3r:
5146 if (VT == MVT::v8i8) {
5147 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv8b, SubRegIdx: AArch64::dsub0);
5148 return;
5149 } else if (VT == MVT::v16i8) {
5150 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv16b, SubRegIdx: AArch64::qsub0);
5151 return;
5152 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5153 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv4h, SubRegIdx: AArch64::dsub0);
5154 return;
5155 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5156 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv8h, SubRegIdx: AArch64::qsub0);
5157 return;
5158 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5159 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv2s, SubRegIdx: AArch64::dsub0);
5160 return;
5161 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5162 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv4s, SubRegIdx: AArch64::qsub0);
5163 return;
5164 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5165 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv1d, SubRegIdx: AArch64::dsub0);
5166 return;
5167 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5168 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv2d, SubRegIdx: AArch64::qsub0);
5169 return;
5170 }
5171 break;
5172 case Intrinsic::aarch64_neon_ld4r:
5173 if (VT == MVT::v8i8) {
5174 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv8b, SubRegIdx: AArch64::dsub0);
5175 return;
5176 } else if (VT == MVT::v16i8) {
5177 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv16b, SubRegIdx: AArch64::qsub0);
5178 return;
5179 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5180 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv4h, SubRegIdx: AArch64::dsub0);
5181 return;
5182 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5183 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv8h, SubRegIdx: AArch64::qsub0);
5184 return;
5185 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5186 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv2s, SubRegIdx: AArch64::dsub0);
5187 return;
5188 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5189 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv4s, SubRegIdx: AArch64::qsub0);
5190 return;
5191 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5192 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv1d, SubRegIdx: AArch64::dsub0);
5193 return;
5194 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5195 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv2d, SubRegIdx: AArch64::qsub0);
5196 return;
5197 }
5198 break;
5199 case Intrinsic::aarch64_neon_ld2lane:
5200 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
5201 SelectLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i8);
5202 return;
5203 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
5204 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
5205 SelectLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i16);
5206 return;
5207 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
5208 VT == MVT::v2f32) {
5209 SelectLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i32);
5210 return;
5211 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
5212 VT == MVT::v1f64) {
5213 SelectLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i64);
5214 return;
5215 }
5216 break;
5217 case Intrinsic::aarch64_neon_ld3lane:
5218 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
5219 SelectLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i8);
5220 return;
5221 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
5222 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
5223 SelectLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i16);
5224 return;
5225 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
5226 VT == MVT::v2f32) {
5227 SelectLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i32);
5228 return;
5229 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
5230 VT == MVT::v1f64) {
5231 SelectLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i64);
5232 return;
5233 }
5234 break;
5235 case Intrinsic::aarch64_neon_ld4lane:
5236 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
5237 SelectLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i8);
5238 return;
5239 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
5240 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
5241 SelectLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i16);
5242 return;
5243 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
5244 VT == MVT::v2f32) {
5245 SelectLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i32);
5246 return;
5247 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
5248 VT == MVT::v1f64) {
5249 SelectLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i64);
5250 return;
5251 }
5252 break;
5253 case Intrinsic::aarch64_ld64b:
5254 SelectLoad(N: Node, NumVecs: 8, Opc: AArch64::LD64B, SubRegIdx: AArch64::x8sub_0);
5255 return;
5256 case Intrinsic::aarch64_sve_ld2q_sret: {
5257 SelectPredicatedLoad(N: Node, NumVecs: 2, Scale: 4, Opc_ri: AArch64::LD2Q_IMM, Opc_rr: AArch64::LD2Q, IsIntr: true);
5258 return;
5259 }
5260 case Intrinsic::aarch64_sve_ld3q_sret: {
5261 SelectPredicatedLoad(N: Node, NumVecs: 3, Scale: 4, Opc_ri: AArch64::LD3Q_IMM, Opc_rr: AArch64::LD3Q, IsIntr: true);
5262 return;
5263 }
5264 case Intrinsic::aarch64_sve_ld4q_sret: {
5265 SelectPredicatedLoad(N: Node, NumVecs: 4, Scale: 4, Opc_ri: AArch64::LD4Q_IMM, Opc_rr: AArch64::LD4Q, IsIntr: true);
5266 return;
5267 }
5268 case Intrinsic::aarch64_sve_ld2_sret: {
5269 if (VT == MVT::nxv16i8) {
5270 SelectPredicatedLoad(N: Node, NumVecs: 2, Scale: 0, Opc_ri: AArch64::LD2B_IMM, Opc_rr: AArch64::LD2B,
5271 IsIntr: true);
5272 return;
5273 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5274 VT == MVT::nxv8bf16) {
5275 SelectPredicatedLoad(N: Node, NumVecs: 2, Scale: 1, Opc_ri: AArch64::LD2H_IMM, Opc_rr: AArch64::LD2H,
5276 IsIntr: true);
5277 return;
5278 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5279 SelectPredicatedLoad(N: Node, NumVecs: 2, Scale: 2, Opc_ri: AArch64::LD2W_IMM, Opc_rr: AArch64::LD2W,
5280 IsIntr: true);
5281 return;
5282 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5283 SelectPredicatedLoad(N: Node, NumVecs: 2, Scale: 3, Opc_ri: AArch64::LD2D_IMM, Opc_rr: AArch64::LD2D,
5284 IsIntr: true);
5285 return;
5286 }
5287 break;
5288 }
5289 case Intrinsic::aarch64_sve_ld1_pn_x2: {
5290 if (VT == MVT::nxv16i8) {
5291 if (Subtarget->hasSME2())
5292 SelectContiguousMultiVectorLoad(
5293 N: Node, NumVecs: 2, Scale: 0, Opc_ri: AArch64::LD1B_2Z_IMM_PSEUDO, Opc_rr: AArch64::LD1B_2Z_PSEUDO);
5294 else if (Subtarget->hasSVE2p1())
5295 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 0, Opc_ri: AArch64::LD1B_2Z_IMM,
5296 Opc_rr: AArch64::LD1B_2Z);
5297 else
5298 break;
5299 return;
5300 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5301 VT == MVT::nxv8bf16) {
5302 if (Subtarget->hasSME2())
5303 SelectContiguousMultiVectorLoad(
5304 N: Node, NumVecs: 2, Scale: 1, Opc_ri: AArch64::LD1H_2Z_IMM_PSEUDO, Opc_rr: AArch64::LD1H_2Z_PSEUDO);
5305 else if (Subtarget->hasSVE2p1())
5306 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 1, Opc_ri: AArch64::LD1H_2Z_IMM,
5307 Opc_rr: AArch64::LD1H_2Z);
5308 else
5309 break;
5310 return;
5311 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5312 if (Subtarget->hasSME2())
5313 SelectContiguousMultiVectorLoad(
5314 N: Node, NumVecs: 2, Scale: 2, Opc_ri: AArch64::LD1W_2Z_IMM_PSEUDO, Opc_rr: AArch64::LD1W_2Z_PSEUDO);
5315 else if (Subtarget->hasSVE2p1())
5316 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 2, Opc_ri: AArch64::LD1W_2Z_IMM,
5317 Opc_rr: AArch64::LD1W_2Z);
5318 else
5319 break;
5320 return;
5321 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5322 if (Subtarget->hasSME2())
5323 SelectContiguousMultiVectorLoad(
5324 N: Node, NumVecs: 2, Scale: 3, Opc_ri: AArch64::LD1D_2Z_IMM_PSEUDO, Opc_rr: AArch64::LD1D_2Z_PSEUDO);
5325 else if (Subtarget->hasSVE2p1())
5326 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 3, Opc_ri: AArch64::LD1D_2Z_IMM,
5327 Opc_rr: AArch64::LD1D_2Z);
5328 else
5329 break;
5330 return;
5331 }
5332 break;
5333 }
5334 case Intrinsic::aarch64_sve_ld1_pn_x4: {
5335 if (VT == MVT::nxv16i8) {
5336 if (Subtarget->hasSME2())
5337 SelectContiguousMultiVectorLoad(
5338 N: Node, NumVecs: 4, Scale: 0, Opc_ri: AArch64::LD1B_4Z_IMM_PSEUDO, Opc_rr: AArch64::LD1B_4Z_PSEUDO);
5339 else if (Subtarget->hasSVE2p1())
5340 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 0, Opc_ri: AArch64::LD1B_4Z_IMM,
5341 Opc_rr: AArch64::LD1B_4Z);
5342 else
5343 break;
5344 return;
5345 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5346 VT == MVT::nxv8bf16) {
5347 if (Subtarget->hasSME2())
5348 SelectContiguousMultiVectorLoad(
5349 N: Node, NumVecs: 4, Scale: 1, Opc_ri: AArch64::LD1H_4Z_IMM_PSEUDO, Opc_rr: AArch64::LD1H_4Z_PSEUDO);
5350 else if (Subtarget->hasSVE2p1())
5351 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 1, Opc_ri: AArch64::LD1H_4Z_IMM,
5352 Opc_rr: AArch64::LD1H_4Z);
5353 else
5354 break;
5355 return;
5356 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5357 if (Subtarget->hasSME2())
5358 SelectContiguousMultiVectorLoad(
5359 N: Node, NumVecs: 4, Scale: 2, Opc_ri: AArch64::LD1W_4Z_IMM_PSEUDO, Opc_rr: AArch64::LD1W_4Z_PSEUDO);
5360 else if (Subtarget->hasSVE2p1())
5361 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 2, Opc_ri: AArch64::LD1W_4Z_IMM,
5362 Opc_rr: AArch64::LD1W_4Z);
5363 else
5364 break;
5365 return;
5366 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5367 if (Subtarget->hasSME2())
5368 SelectContiguousMultiVectorLoad(
5369 N: Node, NumVecs: 4, Scale: 3, Opc_ri: AArch64::LD1D_4Z_IMM_PSEUDO, Opc_rr: AArch64::LD1D_4Z_PSEUDO);
5370 else if (Subtarget->hasSVE2p1())
5371 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 3, Opc_ri: AArch64::LD1D_4Z_IMM,
5372 Opc_rr: AArch64::LD1D_4Z);
5373 else
5374 break;
5375 return;
5376 }
5377 break;
5378 }
5379 case Intrinsic::aarch64_sve_ldnt1_pn_x2: {
5380 if (VT == MVT::nxv16i8) {
5381 if (Subtarget->hasSME2())
5382 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 0,
5383 Opc_ri: AArch64::LDNT1B_2Z_IMM_PSEUDO,
5384 Opc_rr: AArch64::LDNT1B_2Z_PSEUDO);
5385 else if (Subtarget->hasSVE2p1())
5386 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 0, Opc_ri: AArch64::LDNT1B_2Z_IMM,
5387 Opc_rr: AArch64::LDNT1B_2Z);
5388 else
5389 break;
5390 return;
5391 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5392 VT == MVT::nxv8bf16) {
5393 if (Subtarget->hasSME2())
5394 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 1,
5395 Opc_ri: AArch64::LDNT1H_2Z_IMM_PSEUDO,
5396 Opc_rr: AArch64::LDNT1H_2Z_PSEUDO);
5397 else if (Subtarget->hasSVE2p1())
5398 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 1, Opc_ri: AArch64::LDNT1H_2Z_IMM,
5399 Opc_rr: AArch64::LDNT1H_2Z);
5400 else
5401 break;
5402 return;
5403 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5404 if (Subtarget->hasSME2())
5405 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 2,
5406 Opc_ri: AArch64::LDNT1W_2Z_IMM_PSEUDO,
5407 Opc_rr: AArch64::LDNT1W_2Z_PSEUDO);
5408 else if (Subtarget->hasSVE2p1())
5409 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 2, Opc_ri: AArch64::LDNT1W_2Z_IMM,
5410 Opc_rr: AArch64::LDNT1W_2Z);
5411 else
5412 break;
5413 return;
5414 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5415 if (Subtarget->hasSME2())
5416 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 3,
5417 Opc_ri: AArch64::LDNT1D_2Z_IMM_PSEUDO,
5418 Opc_rr: AArch64::LDNT1D_2Z_PSEUDO);
5419 else if (Subtarget->hasSVE2p1())
5420 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 3, Opc_ri: AArch64::LDNT1D_2Z_IMM,
5421 Opc_rr: AArch64::LDNT1D_2Z);
5422 else
5423 break;
5424 return;
5425 }
5426 break;
5427 }
5428 case Intrinsic::aarch64_sve_ldnt1_pn_x4: {
5429 if (VT == MVT::nxv16i8) {
5430 if (Subtarget->hasSME2())
5431 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 0,
5432 Opc_ri: AArch64::LDNT1B_4Z_IMM_PSEUDO,
5433 Opc_rr: AArch64::LDNT1B_4Z_PSEUDO);
5434 else if (Subtarget->hasSVE2p1())
5435 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 0, Opc_ri: AArch64::LDNT1B_4Z_IMM,
5436 Opc_rr: AArch64::LDNT1B_4Z);
5437 else
5438 break;
5439 return;
5440 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5441 VT == MVT::nxv8bf16) {
5442 if (Subtarget->hasSME2())
5443 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 1,
5444 Opc_ri: AArch64::LDNT1H_4Z_IMM_PSEUDO,
5445 Opc_rr: AArch64::LDNT1H_4Z_PSEUDO);
5446 else if (Subtarget->hasSVE2p1())
5447 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 1, Opc_ri: AArch64::LDNT1H_4Z_IMM,
5448 Opc_rr: AArch64::LDNT1H_4Z);
5449 else
5450 break;
5451 return;
5452 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5453 if (Subtarget->hasSME2())
5454 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 2,
5455 Opc_ri: AArch64::LDNT1W_4Z_IMM_PSEUDO,
5456 Opc_rr: AArch64::LDNT1W_4Z_PSEUDO);
5457 else if (Subtarget->hasSVE2p1())
5458 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 2, Opc_ri: AArch64::LDNT1W_4Z_IMM,
5459 Opc_rr: AArch64::LDNT1W_4Z);
5460 else
5461 break;
5462 return;
5463 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5464 if (Subtarget->hasSME2())
5465 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 3,
5466 Opc_ri: AArch64::LDNT1D_4Z_IMM_PSEUDO,
5467 Opc_rr: AArch64::LDNT1D_4Z_PSEUDO);
5468 else if (Subtarget->hasSVE2p1())
5469 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 3, Opc_ri: AArch64::LDNT1D_4Z_IMM,
5470 Opc_rr: AArch64::LDNT1D_4Z);
5471 else
5472 break;
5473 return;
5474 }
5475 break;
5476 }
5477 case Intrinsic::aarch64_sve_ld3_sret: {
5478 if (VT == MVT::nxv16i8) {
5479 SelectPredicatedLoad(N: Node, NumVecs: 3, Scale: 0, Opc_ri: AArch64::LD3B_IMM, Opc_rr: AArch64::LD3B,
5480 IsIntr: true);
5481 return;
5482 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5483 VT == MVT::nxv8bf16) {
5484 SelectPredicatedLoad(N: Node, NumVecs: 3, Scale: 1, Opc_ri: AArch64::LD3H_IMM, Opc_rr: AArch64::LD3H,
5485 IsIntr: true);
5486 return;
5487 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5488 SelectPredicatedLoad(N: Node, NumVecs: 3, Scale: 2, Opc_ri: AArch64::LD3W_IMM, Opc_rr: AArch64::LD3W,
5489 IsIntr: true);
5490 return;
5491 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5492 SelectPredicatedLoad(N: Node, NumVecs: 3, Scale: 3, Opc_ri: AArch64::LD3D_IMM, Opc_rr: AArch64::LD3D,
5493 IsIntr: true);
5494 return;
5495 }
5496 break;
5497 }
5498 case Intrinsic::aarch64_sve_ld4_sret: {
5499 if (VT == MVT::nxv16i8) {
5500 SelectPredicatedLoad(N: Node, NumVecs: 4, Scale: 0, Opc_ri: AArch64::LD4B_IMM, Opc_rr: AArch64::LD4B,
5501 IsIntr: true);
5502 return;
5503 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5504 VT == MVT::nxv8bf16) {
5505 SelectPredicatedLoad(N: Node, NumVecs: 4, Scale: 1, Opc_ri: AArch64::LD4H_IMM, Opc_rr: AArch64::LD4H,
5506 IsIntr: true);
5507 return;
5508 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5509 SelectPredicatedLoad(N: Node, NumVecs: 4, Scale: 2, Opc_ri: AArch64::LD4W_IMM, Opc_rr: AArch64::LD4W,
5510 IsIntr: true);
5511 return;
5512 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5513 SelectPredicatedLoad(N: Node, NumVecs: 4, Scale: 3, Opc_ri: AArch64::LD4D_IMM, Opc_rr: AArch64::LD4D,
5514 IsIntr: true);
5515 return;
5516 }
5517 break;
5518 }
5519 case Intrinsic::aarch64_sme_read_hor_vg2: {
5520 if (VT == MVT::nxv16i8) {
5521 SelectMultiVectorMove<14, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAB0,
5522 Op: AArch64::MOVA_2ZMXI_H_B);
5523 return;
5524 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5525 VT == MVT::nxv8bf16) {
5526 SelectMultiVectorMove<6, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAH0,
5527 Op: AArch64::MOVA_2ZMXI_H_H);
5528 return;
5529 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5530 SelectMultiVectorMove<2, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAS0,
5531 Op: AArch64::MOVA_2ZMXI_H_S);
5532 return;
5533 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5534 SelectMultiVectorMove<0, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAD0,
5535 Op: AArch64::MOVA_2ZMXI_H_D);
5536 return;
5537 }
5538 break;
5539 }
5540 case Intrinsic::aarch64_sme_read_ver_vg2: {
5541 if (VT == MVT::nxv16i8) {
5542 SelectMultiVectorMove<14, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAB0,
5543 Op: AArch64::MOVA_2ZMXI_V_B);
5544 return;
5545 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5546 VT == MVT::nxv8bf16) {
5547 SelectMultiVectorMove<6, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAH0,
5548 Op: AArch64::MOVA_2ZMXI_V_H);
5549 return;
5550 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5551 SelectMultiVectorMove<2, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAS0,
5552 Op: AArch64::MOVA_2ZMXI_V_S);
5553 return;
5554 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5555 SelectMultiVectorMove<0, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAD0,
5556 Op: AArch64::MOVA_2ZMXI_V_D);
5557 return;
5558 }
5559 break;
5560 }
5561 case Intrinsic::aarch64_sme_read_hor_vg4: {
5562 if (VT == MVT::nxv16i8) {
5563 SelectMultiVectorMove<12, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAB0,
5564 Op: AArch64::MOVA_4ZMXI_H_B);
5565 return;
5566 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5567 VT == MVT::nxv8bf16) {
5568 SelectMultiVectorMove<4, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAH0,
5569 Op: AArch64::MOVA_4ZMXI_H_H);
5570 return;
5571 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5572 SelectMultiVectorMove<0, 2>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAS0,
5573 Op: AArch64::MOVA_4ZMXI_H_S);
5574 return;
5575 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5576 SelectMultiVectorMove<0, 2>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAD0,
5577 Op: AArch64::MOVA_4ZMXI_H_D);
5578 return;
5579 }
5580 break;
5581 }
5582 case Intrinsic::aarch64_sme_read_ver_vg4: {
5583 if (VT == MVT::nxv16i8) {
5584 SelectMultiVectorMove<12, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAB0,
5585 Op: AArch64::MOVA_4ZMXI_V_B);
5586 return;
5587 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5588 VT == MVT::nxv8bf16) {
5589 SelectMultiVectorMove<4, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAH0,
5590 Op: AArch64::MOVA_4ZMXI_V_H);
5591 return;
5592 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5593 SelectMultiVectorMove<0, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAS0,
5594 Op: AArch64::MOVA_4ZMXI_V_S);
5595 return;
5596 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5597 SelectMultiVectorMove<0, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAD0,
5598 Op: AArch64::MOVA_4ZMXI_V_D);
5599 return;
5600 }
5601 break;
5602 }
5603 case Intrinsic::aarch64_sme_read_vg1x2: {
5604 SelectMultiVectorMove<7, 1>(N: Node, NumVecs: 2, BaseReg: AArch64::ZA,
5605 Op: AArch64::MOVA_VG2_2ZMXI);
5606 return;
5607 }
5608 case Intrinsic::aarch64_sme_read_vg1x4: {
5609 SelectMultiVectorMove<7, 1>(N: Node, NumVecs: 4, BaseReg: AArch64::ZA,
5610 Op: AArch64::MOVA_VG4_4ZMXI);
5611 return;
5612 }
5613 case Intrinsic::aarch64_sme_readz_horiz_x2: {
5614 if (VT == MVT::nxv16i8) {
5615 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_H_B_PSEUDO, MaxIdx: 14, Scale: 2);
5616 return;
5617 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5618 VT == MVT::nxv8bf16) {
5619 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_H_H_PSEUDO, MaxIdx: 6, Scale: 2);
5620 return;
5621 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5622 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_H_S_PSEUDO, MaxIdx: 2, Scale: 2);
5623 return;
5624 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5625 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_H_D_PSEUDO, MaxIdx: 0, Scale: 2);
5626 return;
5627 }
5628 break;
5629 }
5630 case Intrinsic::aarch64_sme_readz_vert_x2: {
5631 if (VT == MVT::nxv16i8) {
5632 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_V_B_PSEUDO, MaxIdx: 14, Scale: 2);
5633 return;
5634 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5635 VT == MVT::nxv8bf16) {
5636 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_V_H_PSEUDO, MaxIdx: 6, Scale: 2);
5637 return;
5638 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5639 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_V_S_PSEUDO, MaxIdx: 2, Scale: 2);
5640 return;
5641 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5642 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_V_D_PSEUDO, MaxIdx: 0, Scale: 2);
5643 return;
5644 }
5645 break;
5646 }
5647 case Intrinsic::aarch64_sme_readz_horiz_x4: {
5648 if (VT == MVT::nxv16i8) {
5649 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_H_B_PSEUDO, MaxIdx: 12, Scale: 4);
5650 return;
5651 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5652 VT == MVT::nxv8bf16) {
5653 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_H_H_PSEUDO, MaxIdx: 4, Scale: 4);
5654 return;
5655 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5656 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_H_S_PSEUDO, MaxIdx: 0, Scale: 4);
5657 return;
5658 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5659 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_H_D_PSEUDO, MaxIdx: 0, Scale: 4);
5660 return;
5661 }
5662 break;
5663 }
5664 case Intrinsic::aarch64_sme_readz_vert_x4: {
5665 if (VT == MVT::nxv16i8) {
5666 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_V_B_PSEUDO, MaxIdx: 12, Scale: 4);
5667 return;
5668 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5669 VT == MVT::nxv8bf16) {
5670 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_V_H_PSEUDO, MaxIdx: 4, Scale: 4);
5671 return;
5672 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5673 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_V_S_PSEUDO, MaxIdx: 0, Scale: 4);
5674 return;
5675 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5676 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_V_D_PSEUDO, MaxIdx: 0, Scale: 4);
5677 return;
5678 }
5679 break;
5680 }
5681 case Intrinsic::aarch64_sme_readz_x2: {
5682 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_VG2_2ZMXI_PSEUDO, MaxIdx: 7, Scale: 1,
5683 BaseReg: AArch64::ZA);
5684 return;
5685 }
5686 case Intrinsic::aarch64_sme_readz_x4: {
5687 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_VG4_4ZMXI_PSEUDO, MaxIdx: 7, Scale: 1,
5688 BaseReg: AArch64::ZA);
5689 return;
5690 }
5691 case Intrinsic::swift_async_context_addr: {
5692 SDLoc DL(Node);
5693 SDValue Chain = Node->getOperand(Num: 0);
5694 SDValue CopyFP = CurDAG->getCopyFromReg(Chain, dl: DL, Reg: AArch64::FP, VT: MVT::i64);
5695 SDValue Res = SDValue(
5696 CurDAG->getMachineNode(Opcode: AArch64::SUBXri, dl: DL, VT: MVT::i64, Op1: CopyFP,
5697 Op2: CurDAG->getTargetConstant(Val: 8, DL, VT: MVT::i32),
5698 Op3: CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32)),
5699 0);
5700 ReplaceUses(F: SDValue(Node, 0), T: Res);
5701 ReplaceUses(F: SDValue(Node, 1), T: CopyFP.getValue(R: 1));
5702 CurDAG->RemoveDeadNode(N: Node);
5703
5704 auto &MF = CurDAG->getMachineFunction();
5705 MF.getFrameInfo().setFrameAddressIsTaken(true);
5706 MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
5707 return;
5708 }
5709 case Intrinsic::aarch64_sme_luti2_lane_zt_x4: {
5710 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
5711 VT: Node->getValueType(ResNo: 0),
5712 Opcodes: {AArch64::LUTI2_4ZTZI_B, AArch64::LUTI2_4ZTZI_H,
5713 AArch64::LUTI2_4ZTZI_S}))
5714 // Second Immediate must be <= 3:
5715 SelectMultiVectorLutiLane(Node, NumOutVecs: 4, Opc, MaxImm: 3);
5716 return;
5717 }
5718 case Intrinsic::aarch64_sme_luti4_lane_zt_x4: {
5719 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
5720 VT: Node->getValueType(ResNo: 0),
5721 Opcodes: {0, AArch64::LUTI4_4ZTZI_H, AArch64::LUTI4_4ZTZI_S}))
5722 // Second Immediate must be <= 1:
5723 SelectMultiVectorLutiLane(Node, NumOutVecs: 4, Opc, MaxImm: 1);
5724 return;
5725 }
5726 case Intrinsic::aarch64_sme_luti2_lane_zt_x2: {
5727 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
5728 VT: Node->getValueType(ResNo: 0),
5729 Opcodes: {AArch64::LUTI2_2ZTZI_B, AArch64::LUTI2_2ZTZI_H,
5730 AArch64::LUTI2_2ZTZI_S}))
5731 // Second Immediate must be <= 7:
5732 SelectMultiVectorLutiLane(Node, NumOutVecs: 2, Opc, MaxImm: 7);
5733 return;
5734 }
5735 case Intrinsic::aarch64_sme_luti4_lane_zt_x2: {
5736 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
5737 VT: Node->getValueType(ResNo: 0),
5738 Opcodes: {AArch64::LUTI4_2ZTZI_B, AArch64::LUTI4_2ZTZI_H,
5739 AArch64::LUTI4_2ZTZI_S}))
5740 // Second Immediate must be <= 3:
5741 SelectMultiVectorLutiLane(Node, NumOutVecs: 2, Opc, MaxImm: 3);
5742 return;
5743 }
5744 case Intrinsic::aarch64_sme_luti4_zt_x4: {
5745 SelectMultiVectorLuti(Node, NumOutVecs: 4, Opc: AArch64::LUTI4_4ZZT2Z);
5746 return;
5747 }
5748 case Intrinsic::aarch64_sve_fp8_cvtl1_x2:
5749 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::FP>(
5750 VT: Node->getValueType(ResNo: 0),
5751 Opcodes: {AArch64::BF1CVTL_2ZZ_BtoH, AArch64::F1CVTL_2ZZ_BtoH}))
5752 SelectCVTIntrinsicFP8(N: Node, NumVecs: 2, Opcode: Opc);
5753 return;
5754 case Intrinsic::aarch64_sve_fp8_cvtl2_x2:
5755 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::FP>(
5756 VT: Node->getValueType(ResNo: 0),
5757 Opcodes: {AArch64::BF2CVTL_2ZZ_BtoH, AArch64::F2CVTL_2ZZ_BtoH}))
5758 SelectCVTIntrinsicFP8(N: Node, NumVecs: 2, Opcode: Opc);
5759 return;
5760 case Intrinsic::aarch64_sve_fp8_cvt1_x2:
5761 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::FP>(
5762 VT: Node->getValueType(ResNo: 0),
5763 Opcodes: {AArch64::BF1CVT_2ZZ_BtoH, AArch64::F1CVT_2ZZ_BtoH}))
5764 SelectCVTIntrinsicFP8(N: Node, NumVecs: 2, Opcode: Opc);
5765 return;
5766 case Intrinsic::aarch64_sve_fp8_cvt2_x2:
5767 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::FP>(
5768 VT: Node->getValueType(ResNo: 0),
5769 Opcodes: {AArch64::BF2CVT_2ZZ_BtoH, AArch64::F2CVT_2ZZ_BtoH}))
5770 SelectCVTIntrinsicFP8(N: Node, NumVecs: 2, Opcode: Opc);
5771 return;
5772 }
5773 } break;
5774 case ISD::INTRINSIC_WO_CHAIN: {
5775 unsigned IntNo = Node->getConstantOperandVal(Num: 0);
5776 switch (IntNo) {
5777 default:
5778 break;
5779 case Intrinsic::aarch64_tagp:
5780 SelectTagP(N: Node);
5781 return;
5782
5783 case Intrinsic::ptrauth_auth:
5784 SelectPtrauthAuth(N: Node);
5785 return;
5786
5787 case Intrinsic::ptrauth_resign:
5788 SelectPtrauthResign(N: Node);
5789 return;
5790
5791 case Intrinsic::aarch64_neon_tbl2:
5792 SelectTable(N: Node, NumVecs: 2,
5793 Opc: VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
5794 isExt: false);
5795 return;
5796 case Intrinsic::aarch64_neon_tbl3:
5797 SelectTable(N: Node, NumVecs: 3, Opc: VT == MVT::v8i8 ? AArch64::TBLv8i8Three
5798 : AArch64::TBLv16i8Three,
5799 isExt: false);
5800 return;
5801 case Intrinsic::aarch64_neon_tbl4:
5802 SelectTable(N: Node, NumVecs: 4, Opc: VT == MVT::v8i8 ? AArch64::TBLv8i8Four
5803 : AArch64::TBLv16i8Four,
5804 isExt: false);
5805 return;
5806 case Intrinsic::aarch64_neon_tbx2:
5807 SelectTable(N: Node, NumVecs: 2,
5808 Opc: VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
5809 isExt: true);
5810 return;
5811 case Intrinsic::aarch64_neon_tbx3:
5812 SelectTable(N: Node, NumVecs: 3, Opc: VT == MVT::v8i8 ? AArch64::TBXv8i8Three
5813 : AArch64::TBXv16i8Three,
5814 isExt: true);
5815 return;
5816 case Intrinsic::aarch64_neon_tbx4:
5817 SelectTable(N: Node, NumVecs: 4, Opc: VT == MVT::v8i8 ? AArch64::TBXv8i8Four
5818 : AArch64::TBXv16i8Four,
5819 isExt: true);
5820 return;
5821 case Intrinsic::aarch64_sve_srshl_single_x2:
5822 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5823 VT: Node->getValueType(ResNo: 0),
5824 Opcodes: {AArch64::SRSHL_VG2_2ZZ_B, AArch64::SRSHL_VG2_2ZZ_H,
5825 AArch64::SRSHL_VG2_2ZZ_S, AArch64::SRSHL_VG2_2ZZ_D}))
5826 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
5827 return;
5828 case Intrinsic::aarch64_sve_srshl_single_x4:
5829 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5830 VT: Node->getValueType(ResNo: 0),
5831 Opcodes: {AArch64::SRSHL_VG4_4ZZ_B, AArch64::SRSHL_VG4_4ZZ_H,
5832 AArch64::SRSHL_VG4_4ZZ_S, AArch64::SRSHL_VG4_4ZZ_D}))
5833 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
5834 return;
5835 case Intrinsic::aarch64_sve_urshl_single_x2:
5836 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5837 VT: Node->getValueType(ResNo: 0),
5838 Opcodes: {AArch64::URSHL_VG2_2ZZ_B, AArch64::URSHL_VG2_2ZZ_H,
5839 AArch64::URSHL_VG2_2ZZ_S, AArch64::URSHL_VG2_2ZZ_D}))
5840 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
5841 return;
5842 case Intrinsic::aarch64_sve_urshl_single_x4:
5843 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5844 VT: Node->getValueType(ResNo: 0),
5845 Opcodes: {AArch64::URSHL_VG4_4ZZ_B, AArch64::URSHL_VG4_4ZZ_H,
5846 AArch64::URSHL_VG4_4ZZ_S, AArch64::URSHL_VG4_4ZZ_D}))
5847 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
5848 return;
5849 case Intrinsic::aarch64_sve_srshl_x2:
5850 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5851 VT: Node->getValueType(ResNo: 0),
5852 Opcodes: {AArch64::SRSHL_VG2_2Z2Z_B, AArch64::SRSHL_VG2_2Z2Z_H,
5853 AArch64::SRSHL_VG2_2Z2Z_S, AArch64::SRSHL_VG2_2Z2Z_D}))
5854 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
5855 return;
5856 case Intrinsic::aarch64_sve_srshl_x4:
5857 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5858 VT: Node->getValueType(ResNo: 0),
5859 Opcodes: {AArch64::SRSHL_VG4_4Z4Z_B, AArch64::SRSHL_VG4_4Z4Z_H,
5860 AArch64::SRSHL_VG4_4Z4Z_S, AArch64::SRSHL_VG4_4Z4Z_D}))
5861 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
5862 return;
5863 case Intrinsic::aarch64_sve_urshl_x2:
5864 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5865 VT: Node->getValueType(ResNo: 0),
5866 Opcodes: {AArch64::URSHL_VG2_2Z2Z_B, AArch64::URSHL_VG2_2Z2Z_H,
5867 AArch64::URSHL_VG2_2Z2Z_S, AArch64::URSHL_VG2_2Z2Z_D}))
5868 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
5869 return;
5870 case Intrinsic::aarch64_sve_urshl_x4:
5871 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5872 VT: Node->getValueType(ResNo: 0),
5873 Opcodes: {AArch64::URSHL_VG4_4Z4Z_B, AArch64::URSHL_VG4_4Z4Z_H,
5874 AArch64::URSHL_VG4_4Z4Z_S, AArch64::URSHL_VG4_4Z4Z_D}))
5875 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
5876 return;
5877 case Intrinsic::aarch64_sve_sqdmulh_single_vgx2:
5878 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5879 VT: Node->getValueType(ResNo: 0),
5880 Opcodes: {AArch64::SQDMULH_VG2_2ZZ_B, AArch64::SQDMULH_VG2_2ZZ_H,
5881 AArch64::SQDMULH_VG2_2ZZ_S, AArch64::SQDMULH_VG2_2ZZ_D}))
5882 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
5883 return;
5884 case Intrinsic::aarch64_sve_sqdmulh_single_vgx4:
5885 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5886 VT: Node->getValueType(ResNo: 0),
5887 Opcodes: {AArch64::SQDMULH_VG4_4ZZ_B, AArch64::SQDMULH_VG4_4ZZ_H,
5888 AArch64::SQDMULH_VG4_4ZZ_S, AArch64::SQDMULH_VG4_4ZZ_D}))
5889 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
5890 return;
5891 case Intrinsic::aarch64_sve_sqdmulh_vgx2:
5892 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5893 VT: Node->getValueType(ResNo: 0),
5894 Opcodes: {AArch64::SQDMULH_VG2_2Z2Z_B, AArch64::SQDMULH_VG2_2Z2Z_H,
5895 AArch64::SQDMULH_VG2_2Z2Z_S, AArch64::SQDMULH_VG2_2Z2Z_D}))
5896 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
5897 return;
5898 case Intrinsic::aarch64_sve_sqdmulh_vgx4:
5899 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5900 VT: Node->getValueType(ResNo: 0),
5901 Opcodes: {AArch64::SQDMULH_VG4_4Z4Z_B, AArch64::SQDMULH_VG4_4Z4Z_H,
5902 AArch64::SQDMULH_VG4_4Z4Z_S, AArch64::SQDMULH_VG4_4Z4Z_D}))
5903 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
5904 return;
5905 case Intrinsic::aarch64_sme_fp8_scale_single_x2:
5906 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
5907 VT: Node->getValueType(ResNo: 0),
5908 Opcodes: {0, AArch64::FSCALE_2ZZ_H, AArch64::FSCALE_2ZZ_S,
5909 AArch64::FSCALE_2ZZ_D}))
5910 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
5911 return;
5912 case Intrinsic::aarch64_sme_fp8_scale_single_x4:
5913 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
5914 VT: Node->getValueType(ResNo: 0),
5915 Opcodes: {0, AArch64::FSCALE_4ZZ_H, AArch64::FSCALE_4ZZ_S,
5916 AArch64::FSCALE_4ZZ_D}))
5917 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
5918 return;
5919 case Intrinsic::aarch64_sme_fp8_scale_x2:
5920 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
5921 VT: Node->getValueType(ResNo: 0),
5922 Opcodes: {0, AArch64::FSCALE_2Z2Z_H, AArch64::FSCALE_2Z2Z_S,
5923 AArch64::FSCALE_2Z2Z_D}))
5924 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
5925 return;
5926 case Intrinsic::aarch64_sme_fp8_scale_x4:
5927 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
5928 VT: Node->getValueType(ResNo: 0),
5929 Opcodes: {0, AArch64::FSCALE_4Z4Z_H, AArch64::FSCALE_4Z4Z_S,
5930 AArch64::FSCALE_4Z4Z_D}))
5931 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
5932 return;
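    // Predicate-pair WHILE* intrinsics: pick the 2PXX opcode for the element
    // width and expand through SelectWhilePair.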
5933 case Intrinsic::aarch64_sve_whilege_x2:
5934 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
5935 VT: Node->getValueType(ResNo: 0),
5936 Opcodes: {AArch64::WHILEGE_2PXX_B, AArch64::WHILEGE_2PXX_H,
5937 AArch64::WHILEGE_2PXX_S, AArch64::WHILEGE_2PXX_D}))
5938 SelectWhilePair(N: Node, Opc: Op);
5939 return;
5940 case Intrinsic::aarch64_sve_whilegt_x2:
5941 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
5942 VT: Node->getValueType(ResNo: 0),
5943 Opcodes: {AArch64::WHILEGT_2PXX_B, AArch64::WHILEGT_2PXX_H,
5944 AArch64::WHILEGT_2PXX_S, AArch64::WHILEGT_2PXX_D}))
5945 SelectWhilePair(N: Node, Opc: Op);
5946 return;
5947 case Intrinsic::aarch64_sve_whilehi_x2:
5948 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
5949 VT: Node->getValueType(ResNo: 0),
5950 Opcodes: {AArch64::WHILEHI_2PXX_B, AArch64::WHILEHI_2PXX_H,
5951 AArch64::WHILEHI_2PXX_S, AArch64::WHILEHI_2PXX_D}))
5952 SelectWhilePair(N: Node, Opc: Op);
5953 return;
5954 case Intrinsic::aarch64_sve_whilehs_x2:
5955 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
5956 VT: Node->getValueType(ResNo: 0),
5957 Opcodes: {AArch64::WHILEHS_2PXX_B, AArch64::WHILEHS_2PXX_H,
5958 AArch64::WHILEHS_2PXX_S, AArch64::WHILEHS_2PXX_D}))
5959 SelectWhilePair(N: Node, Opc: Op);
5960 return;
5961 case Intrinsic::aarch64_sve_whilele_x2:
5962 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
5963 VT: Node->getValueType(ResNo: 0),
5964 Opcodes: {AArch64::WHILELE_2PXX_B, AArch64::WHILELE_2PXX_H,
5965 AArch64::WHILELE_2PXX_S, AArch64::WHILELE_2PXX_D}))
5966 SelectWhilePair(N: Node, Opc: Op);
5967 return;
5968 case Intrinsic::aarch64_sve_whilelo_x2:
5969 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
5970 VT: Node->getValueType(ResNo: 0),
5971 Opcodes: {AArch64::WHILELO_2PXX_B, AArch64::WHILELO_2PXX_H,
5972 AArch64::WHILELO_2PXX_S, AArch64::WHILELO_2PXX_D}))
5973 SelectWhilePair(N: Node, Opc: Op);
5974 return;
5975 case Intrinsic::aarch64_sve_whilels_x2:
5976 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
5977 VT: Node->getValueType(ResNo: 0),
5978 Opcodes: {AArch64::WHILELS_2PXX_B, AArch64::WHILELS_2PXX_H,
5979 AArch64::WHILELS_2PXX_S, AArch64::WHILELS_2PXX_D}))
5980 SelectWhilePair(N: Node, Opc: Op);
5981 return;
5982 case Intrinsic::aarch64_sve_whilelt_x2:
5983 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
5984 VT: Node->getValueType(ResNo: 0),
5985 Opcodes: {AArch64::WHILELT_2PXX_B, AArch64::WHILELT_2PXX_H,
5986 AArch64::WHILELT_2PXX_S, AArch64::WHILELT_2PXX_D}))
5987 SelectWhilePair(N: Node, Opc: Op);
5988 return;
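    // Multi-vector min/max. For the floating-point tables the first slot is
    // the bf16 opcode (BFMAX/BFMIN...), followed by the f16, f32 and f64
    // forms.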
5989 case Intrinsic::aarch64_sve_smax_single_x2:
5990 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5991 VT: Node->getValueType(ResNo: 0),
5992 Opcodes: {AArch64::SMAX_VG2_2ZZ_B, AArch64::SMAX_VG2_2ZZ_H,
5993 AArch64::SMAX_VG2_2ZZ_S, AArch64::SMAX_VG2_2ZZ_D}))
5994 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
5995 return;
5996 case Intrinsic::aarch64_sve_umax_single_x2:
5997 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5998 VT: Node->getValueType(ResNo: 0),
5999 Opcodes: {AArch64::UMAX_VG2_2ZZ_B, AArch64::UMAX_VG2_2ZZ_H,
6000 AArch64::UMAX_VG2_2ZZ_S, AArch64::UMAX_VG2_2ZZ_D}))
6001 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6002 return;
6003 case Intrinsic::aarch64_sve_fmax_single_x2:
6004 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6005 VT: Node->getValueType(ResNo: 0),
6006 Opcodes: {AArch64::BFMAX_VG2_2ZZ_H, AArch64::FMAX_VG2_2ZZ_H,
6007 AArch64::FMAX_VG2_2ZZ_S, AArch64::FMAX_VG2_2ZZ_D}))
6008 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6009 return;
6010 case Intrinsic::aarch64_sve_smax_single_x4:
6011 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6012 VT: Node->getValueType(ResNo: 0),
6013 Opcodes: {AArch64::SMAX_VG4_4ZZ_B, AArch64::SMAX_VG4_4ZZ_H,
6014 AArch64::SMAX_VG4_4ZZ_S, AArch64::SMAX_VG4_4ZZ_D}))
6015 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6016 return;
6017 case Intrinsic::aarch64_sve_umax_single_x4:
6018 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6019 VT: Node->getValueType(ResNo: 0),
6020 Opcodes: {AArch64::UMAX_VG4_4ZZ_B, AArch64::UMAX_VG4_4ZZ_H,
6021 AArch64::UMAX_VG4_4ZZ_S, AArch64::UMAX_VG4_4ZZ_D}))
6022 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6023 return;
6024 case Intrinsic::aarch64_sve_fmax_single_x4:
6025 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6026 VT: Node->getValueType(ResNo: 0),
6027 Opcodes: {AArch64::BFMAX_VG4_4ZZ_H, AArch64::FMAX_VG4_4ZZ_H,
6028 AArch64::FMAX_VG4_4ZZ_S, AArch64::FMAX_VG4_4ZZ_D}))
6029 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6030 return;
6031 case Intrinsic::aarch64_sve_smin_single_x2:
6032 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6033 VT: Node->getValueType(ResNo: 0),
6034 Opcodes: {AArch64::SMIN_VG2_2ZZ_B, AArch64::SMIN_VG2_2ZZ_H,
6035 AArch64::SMIN_VG2_2ZZ_S, AArch64::SMIN_VG2_2ZZ_D}))
6036 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6037 return;
6038 case Intrinsic::aarch64_sve_umin_single_x2:
6039 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6040 VT: Node->getValueType(ResNo: 0),
6041 Opcodes: {AArch64::UMIN_VG2_2ZZ_B, AArch64::UMIN_VG2_2ZZ_H,
6042 AArch64::UMIN_VG2_2ZZ_S, AArch64::UMIN_VG2_2ZZ_D}))
6043 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6044 return;
6045 case Intrinsic::aarch64_sve_fmin_single_x2:
6046 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6047 VT: Node->getValueType(ResNo: 0),
6048 Opcodes: {AArch64::BFMIN_VG2_2ZZ_H, AArch64::FMIN_VG2_2ZZ_H,
6049 AArch64::FMIN_VG2_2ZZ_S, AArch64::FMIN_VG2_2ZZ_D}))
6050 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6051 return;
6052 case Intrinsic::aarch64_sve_smin_single_x4:
6053 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6054 VT: Node->getValueType(ResNo: 0),
6055 Opcodes: {AArch64::SMIN_VG4_4ZZ_B, AArch64::SMIN_VG4_4ZZ_H,
6056 AArch64::SMIN_VG4_4ZZ_S, AArch64::SMIN_VG4_4ZZ_D}))
6057 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6058 return;
6059 case Intrinsic::aarch64_sve_umin_single_x4:
6060 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6061 VT: Node->getValueType(ResNo: 0),
6062 Opcodes: {AArch64::UMIN_VG4_4ZZ_B, AArch64::UMIN_VG4_4ZZ_H,
6063 AArch64::UMIN_VG4_4ZZ_S, AArch64::UMIN_VG4_4ZZ_D}))
6064 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6065 return;
6066 case Intrinsic::aarch64_sve_fmin_single_x4:
6067 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6068 VT: Node->getValueType(ResNo: 0),
6069 Opcodes: {AArch64::BFMIN_VG4_4ZZ_H, AArch64::FMIN_VG4_4ZZ_H,
6070 AArch64::FMIN_VG4_4ZZ_S, AArch64::FMIN_VG4_4ZZ_D}))
6071 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6072 return;
6073 case Intrinsic::aarch64_sve_smax_x2:
6074 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6075 VT: Node->getValueType(ResNo: 0),
6076 Opcodes: {AArch64::SMAX_VG2_2Z2Z_B, AArch64::SMAX_VG2_2Z2Z_H,
6077 AArch64::SMAX_VG2_2Z2Z_S, AArch64::SMAX_VG2_2Z2Z_D}))
6078 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6079 return;
6080 case Intrinsic::aarch64_sve_umax_x2:
6081 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6082 VT: Node->getValueType(ResNo: 0),
6083 Opcodes: {AArch64::UMAX_VG2_2Z2Z_B, AArch64::UMAX_VG2_2Z2Z_H,
6084 AArch64::UMAX_VG2_2Z2Z_S, AArch64::UMAX_VG2_2Z2Z_D}))
6085 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6086 return;
6087 case Intrinsic::aarch64_sve_fmax_x2:
6088 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6089 VT: Node->getValueType(ResNo: 0),
6090 Opcodes: {AArch64::BFMAX_VG2_2Z2Z_H, AArch64::FMAX_VG2_2Z2Z_H,
6091 AArch64::FMAX_VG2_2Z2Z_S, AArch64::FMAX_VG2_2Z2Z_D}))
6092 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6093 return;
6094 case Intrinsic::aarch64_sve_smax_x4:
6095 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6096 VT: Node->getValueType(ResNo: 0),
6097 Opcodes: {AArch64::SMAX_VG4_4Z4Z_B, AArch64::SMAX_VG4_4Z4Z_H,
6098 AArch64::SMAX_VG4_4Z4Z_S, AArch64::SMAX_VG4_4Z4Z_D}))
6099 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6100 return;
6101 case Intrinsic::aarch64_sve_umax_x4:
6102 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6103 VT: Node->getValueType(ResNo: 0),
6104 Opcodes: {AArch64::UMAX_VG4_4Z4Z_B, AArch64::UMAX_VG4_4Z4Z_H,
6105 AArch64::UMAX_VG4_4Z4Z_S, AArch64::UMAX_VG4_4Z4Z_D}))
6106 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6107 return;
6108 case Intrinsic::aarch64_sve_fmax_x4:
6109 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6110 VT: Node->getValueType(ResNo: 0),
6111 Opcodes: {AArch64::BFMAX_VG4_4Z2Z_H, AArch64::FMAX_VG4_4Z4Z_H,
6112 AArch64::FMAX_VG4_4Z4Z_S, AArch64::FMAX_VG4_4Z4Z_D}))
6113 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6114 return;
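    // Floating-point absolute max/min (FAMAX/FAMIN); there is no bf16 form,
    // hence the leading zero slot in the opcode table.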
6115 case Intrinsic::aarch64_sme_famax_x2:
6116 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6117 VT: Node->getValueType(ResNo: 0),
6118 Opcodes: {0, AArch64::FAMAX_2Z2Z_H, AArch64::FAMAX_2Z2Z_S,
6119 AArch64::FAMAX_2Z2Z_D}))
6120 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6121 return;
6122 case Intrinsic::aarch64_sme_famax_x4:
6123 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6124 VT: Node->getValueType(ResNo: 0),
6125 Opcodes: {0, AArch64::FAMAX_4Z4Z_H, AArch64::FAMAX_4Z4Z_S,
6126 AArch64::FAMAX_4Z4Z_D}))
6127 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6128 return;
6129 case Intrinsic::aarch64_sme_famin_x2:
6130 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6131 VT: Node->getValueType(ResNo: 0),
6132 Opcodes: {0, AArch64::FAMIN_2Z2Z_H, AArch64::FAMIN_2Z2Z_S,
6133 AArch64::FAMIN_2Z2Z_D}))
6134 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6135 return;
6136 case Intrinsic::aarch64_sme_famin_x4:
6137 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6138 VT: Node->getValueType(ResNo: 0),
6139 Opcodes: {0, AArch64::FAMIN_4Z4Z_H, AArch64::FAMIN_4Z4Z_S,
6140 AArch64::FAMIN_4Z4Z_D}))
6141 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6142 return;
6143 case Intrinsic::aarch64_sve_smin_x2:
6144 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6145 VT: Node->getValueType(ResNo: 0),
6146 Opcodes: {AArch64::SMIN_VG2_2Z2Z_B, AArch64::SMIN_VG2_2Z2Z_H,
6147 AArch64::SMIN_VG2_2Z2Z_S, AArch64::SMIN_VG2_2Z2Z_D}))
6148 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6149 return;
6150 case Intrinsic::aarch64_sve_umin_x2:
6151 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6152 VT: Node->getValueType(ResNo: 0),
6153 Opcodes: {AArch64::UMIN_VG2_2Z2Z_B, AArch64::UMIN_VG2_2Z2Z_H,
6154 AArch64::UMIN_VG2_2Z2Z_S, AArch64::UMIN_VG2_2Z2Z_D}))
6155 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6156 return;
6157 case Intrinsic::aarch64_sve_fmin_x2:
6158 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6159 VT: Node->getValueType(ResNo: 0),
6160 Opcodes: {AArch64::BFMIN_VG2_2Z2Z_H, AArch64::FMIN_VG2_2Z2Z_H,
6161 AArch64::FMIN_VG2_2Z2Z_S, AArch64::FMIN_VG2_2Z2Z_D}))
6162 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6163 return;
6164 case Intrinsic::aarch64_sve_smin_x4:
6165 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6166 VT: Node->getValueType(ResNo: 0),
6167 Opcodes: {AArch64::SMIN_VG4_4Z4Z_B, AArch64::SMIN_VG4_4Z4Z_H,
6168 AArch64::SMIN_VG4_4Z4Z_S, AArch64::SMIN_VG4_4Z4Z_D}))
6169 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6170 return;
6171 case Intrinsic::aarch64_sve_umin_x4:
6172 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6173 VT: Node->getValueType(ResNo: 0),
6174 Opcodes: {AArch64::UMIN_VG4_4Z4Z_B, AArch64::UMIN_VG4_4Z4Z_H,
6175 AArch64::UMIN_VG4_4Z4Z_S, AArch64::UMIN_VG4_4Z4Z_D}))
6176 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6177 return;
6178 case Intrinsic::aarch64_sve_fmin_x4:
6179 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6180 VT: Node->getValueType(ResNo: 0),
6181 Opcodes: {AArch64::BFMIN_VG4_4Z2Z_H, AArch64::FMIN_VG4_4Z4Z_H,
6182 AArch64::FMIN_VG4_4Z4Z_S, AArch64::FMIN_VG4_4Z4Z_D}))
6183 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6184 return;
6185 case Intrinsic::aarch64_sve_fmaxnm_single_x2:
6186 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6187 VT: Node->getValueType(ResNo: 0),
6188 Opcodes: {AArch64::BFMAXNM_VG2_2ZZ_H, AArch64::FMAXNM_VG2_2ZZ_H,
6189 AArch64::FMAXNM_VG2_2ZZ_S, AArch64::FMAXNM_VG2_2ZZ_D}))
6190 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6191 return;
6192 case Intrinsic::aarch64_sve_fmaxnm_single_x4:
6193 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6194 VT: Node->getValueType(ResNo: 0),
6195 Opcodes: {AArch64::BFMAXNM_VG4_4ZZ_H, AArch64::FMAXNM_VG4_4ZZ_H,
6196 AArch64::FMAXNM_VG4_4ZZ_S, AArch64::FMAXNM_VG4_4ZZ_D}))
6197 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6198 return;
6199 case Intrinsic::aarch64_sve_fminnm_single_x2:
6200 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6201 VT: Node->getValueType(ResNo: 0),
6202 Opcodes: {AArch64::BFMINNM_VG2_2ZZ_H, AArch64::FMINNM_VG2_2ZZ_H,
6203 AArch64::FMINNM_VG2_2ZZ_S, AArch64::FMINNM_VG2_2ZZ_D}))
6204 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6205 return;
6206 case Intrinsic::aarch64_sve_fminnm_single_x4:
6207 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6208 VT: Node->getValueType(ResNo: 0),
6209 Opcodes: {AArch64::BFMINNM_VG4_4ZZ_H, AArch64::FMINNM_VG4_4ZZ_H,
6210 AArch64::FMINNM_VG4_4ZZ_S, AArch64::FMINNM_VG4_4ZZ_D}))
6211 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6212 return;
6213 case Intrinsic::aarch64_sve_fmaxnm_x2:
6214 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6215 VT: Node->getValueType(ResNo: 0),
6216 Opcodes: {AArch64::BFMAXNM_VG2_2Z2Z_H, AArch64::FMAXNM_VG2_2Z2Z_H,
6217 AArch64::FMAXNM_VG2_2Z2Z_S, AArch64::FMAXNM_VG2_2Z2Z_D}))
6218 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6219 return;
6220 case Intrinsic::aarch64_sve_fmaxnm_x4:
6221 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6222 VT: Node->getValueType(ResNo: 0),
6223 Opcodes: {AArch64::BFMAXNM_VG4_4Z2Z_H, AArch64::FMAXNM_VG4_4Z4Z_H,
6224 AArch64::FMAXNM_VG4_4Z4Z_S, AArch64::FMAXNM_VG4_4Z4Z_D}))
6225 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6226 return;
6227 case Intrinsic::aarch64_sve_fminnm_x2:
6228 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6229 VT: Node->getValueType(ResNo: 0),
6230 Opcodes: {AArch64::BFMINNM_VG2_2Z2Z_H, AArch64::FMINNM_VG2_2Z2Z_H,
6231 AArch64::FMINNM_VG2_2Z2Z_S, AArch64::FMINNM_VG2_2Z2Z_D}))
6232 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6233 return;
6234 case Intrinsic::aarch64_sve_fminnm_x4:
6235 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6236 VT: Node->getValueType(ResNo: 0),
6237 Opcodes: {AArch64::BFMINNM_VG4_4Z2Z_H, AArch64::FMINNM_VG4_4Z4Z_H,
6238 AArch64::FMINNM_VG4_4Z4Z_S, AArch64::FMINNM_VG4_4Z4Z_D}))
6239 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6240 return;
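    // Multi-vector conversions. These intrinsics map to fixed opcodes
    // (single-precision for the int<->FP cases, f16 to f32 for the widening
    // FCVT/FCVTL), so no per-type opcode table is needed.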
6241 case Intrinsic::aarch64_sve_fcvtzs_x2:
6242 SelectCVTIntrinsic(N: Node, NumVecs: 2, Opcode: AArch64::FCVTZS_2Z2Z_StoS);
6243 return;
6244 case Intrinsic::aarch64_sve_scvtf_x2:
6245 SelectCVTIntrinsic(N: Node, NumVecs: 2, Opcode: AArch64::SCVTF_2Z2Z_StoS);
6246 return;
6247 case Intrinsic::aarch64_sve_fcvtzu_x2:
6248 SelectCVTIntrinsic(N: Node, NumVecs: 2, Opcode: AArch64::FCVTZU_2Z2Z_StoS);
6249 return;
6250 case Intrinsic::aarch64_sve_ucvtf_x2:
6251 SelectCVTIntrinsic(N: Node, NumVecs: 2, Opcode: AArch64::UCVTF_2Z2Z_StoS);
6252 return;
6253 case Intrinsic::aarch64_sve_fcvtzs_x4:
6254 SelectCVTIntrinsic(N: Node, NumVecs: 4, Opcode: AArch64::FCVTZS_4Z4Z_StoS);
6255 return;
6256 case Intrinsic::aarch64_sve_scvtf_x4:
6257 SelectCVTIntrinsic(N: Node, NumVecs: 4, Opcode: AArch64::SCVTF_4Z4Z_StoS);
6258 return;
6259 case Intrinsic::aarch64_sve_fcvtzu_x4:
6260 SelectCVTIntrinsic(N: Node, NumVecs: 4, Opcode: AArch64::FCVTZU_4Z4Z_StoS);
6261 return;
6262 case Intrinsic::aarch64_sve_ucvtf_x4:
6263 SelectCVTIntrinsic(N: Node, NumVecs: 4, Opcode: AArch64::UCVTF_4Z4Z_StoS);
6264 return;
6265 case Intrinsic::aarch64_sve_fcvt_widen_x2:
6266 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, IsTupleInput: false, Opc: AArch64::FCVT_2ZZ_H_S);
6267 return;
6268 case Intrinsic::aarch64_sve_fcvtl_widen_x2:
6269 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, IsTupleInput: false, Opc: AArch64::FCVTL_2ZZ_H_S);
6270 return;
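    // Multi-vector clamps (SCLAMP/UCLAMP/FCLAMP). The bf16 slot in the
    // FCLAMP tables is zero because bf16 clamps arrive via the separate
    // bfclamp intrinsics handled below.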
6271 case Intrinsic::aarch64_sve_sclamp_single_x2:
6272 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6273 VT: Node->getValueType(ResNo: 0),
6274 Opcodes: {AArch64::SCLAMP_VG2_2Z2Z_B, AArch64::SCLAMP_VG2_2Z2Z_H,
6275 AArch64::SCLAMP_VG2_2Z2Z_S, AArch64::SCLAMP_VG2_2Z2Z_D}))
6276 SelectClamp(N: Node, NumVecs: 2, Op);
6277 return;
6278 case Intrinsic::aarch64_sve_uclamp_single_x2:
6279 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6280 VT: Node->getValueType(ResNo: 0),
6281 Opcodes: {AArch64::UCLAMP_VG2_2Z2Z_B, AArch64::UCLAMP_VG2_2Z2Z_H,
6282 AArch64::UCLAMP_VG2_2Z2Z_S, AArch64::UCLAMP_VG2_2Z2Z_D}))
6283 SelectClamp(N: Node, NumVecs: 2, Op);
6284 return;
6285 case Intrinsic::aarch64_sve_fclamp_single_x2:
6286 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6287 VT: Node->getValueType(ResNo: 0),
6288 Opcodes: {0, AArch64::FCLAMP_VG2_2Z2Z_H, AArch64::FCLAMP_VG2_2Z2Z_S,
6289 AArch64::FCLAMP_VG2_2Z2Z_D}))
6290 SelectClamp(N: Node, NumVecs: 2, Op);
6291 return;
6292 case Intrinsic::aarch64_sve_bfclamp_single_x2:
6293 SelectClamp(N: Node, NumVecs: 2, Op: AArch64::BFCLAMP_VG2_2ZZZ_H);
6294 return;
6295 case Intrinsic::aarch64_sve_sclamp_single_x4:
6296 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6297 VT: Node->getValueType(ResNo: 0),
6298 Opcodes: {AArch64::SCLAMP_VG4_4Z4Z_B, AArch64::SCLAMP_VG4_4Z4Z_H,
6299 AArch64::SCLAMP_VG4_4Z4Z_S, AArch64::SCLAMP_VG4_4Z4Z_D}))
6300 SelectClamp(N: Node, NumVecs: 4, Op);
6301 return;
6302 case Intrinsic::aarch64_sve_uclamp_single_x4:
6303 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6304 VT: Node->getValueType(ResNo: 0),
6305 Opcodes: {AArch64::UCLAMP_VG4_4Z4Z_B, AArch64::UCLAMP_VG4_4Z4Z_H,
6306 AArch64::UCLAMP_VG4_4Z4Z_S, AArch64::UCLAMP_VG4_4Z4Z_D}))
6307 SelectClamp(N: Node, NumVecs: 4, Op);
6308 return;
6309 case Intrinsic::aarch64_sve_fclamp_single_x4:
6310 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6311 VT: Node->getValueType(ResNo: 0),
6312 Opcodes: {0, AArch64::FCLAMP_VG4_4Z4Z_H, AArch64::FCLAMP_VG4_4Z4Z_S,
6313 AArch64::FCLAMP_VG4_4Z4Z_D}))
6314 SelectClamp(N: Node, NumVecs: 4, Op);
6315 return;
6316 case Intrinsic::aarch64_sve_bfclamp_single_x4:
6317 SelectClamp(N: Node, NumVecs: 4, Op: AArch64::BFCLAMP_VG4_4ZZZ_H);
6318 return;
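    // Multi-vector ADD with a single shared Z operand (ZZ forms).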
6319 case Intrinsic::aarch64_sve_add_single_x2:
6320 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6321 VT: Node->getValueType(ResNo: 0),
6322 Opcodes: {AArch64::ADD_VG2_2ZZ_B, AArch64::ADD_VG2_2ZZ_H,
6323 AArch64::ADD_VG2_2ZZ_S, AArch64::ADD_VG2_2ZZ_D}))
6324 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6325 return;
6326 case Intrinsic::aarch64_sve_add_single_x4:
6327 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6328 VT: Node->getValueType(ResNo: 0),
6329 Opcodes: {AArch64::ADD_VG4_4ZZ_B, AArch64::ADD_VG4_4ZZ_H,
6330 AArch64::ADD_VG4_4ZZ_S, AArch64::ADD_VG4_4ZZ_D}))
6331 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6332 return;
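    // Multi-vector permutes: element-sized ZIP/UZP plus the 128-bit
    // ZIPQ/UZPQ forms. The x4 variants consume their inputs as a register
    // tuple (IsTupleInput=true).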
6333 case Intrinsic::aarch64_sve_zip_x2:
6334 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6335 VT: Node->getValueType(ResNo: 0),
6336 Opcodes: {AArch64::ZIP_VG2_2ZZZ_B, AArch64::ZIP_VG2_2ZZZ_H,
6337 AArch64::ZIP_VG2_2ZZZ_S, AArch64::ZIP_VG2_2ZZZ_D}))
6338 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false, Opc: Op);
6339 return;
6340 case Intrinsic::aarch64_sve_zipq_x2:
6341 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false,
6342 Opc: AArch64::ZIP_VG2_2ZZZ_Q);
6343 return;
6344 case Intrinsic::aarch64_sve_zip_x4:
6345 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6346 VT: Node->getValueType(ResNo: 0),
6347 Opcodes: {AArch64::ZIP_VG4_4Z4Z_B, AArch64::ZIP_VG4_4Z4Z_H,
6348 AArch64::ZIP_VG4_4Z4Z_S, AArch64::ZIP_VG4_4Z4Z_D}))
6349 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true, Opc: Op);
6350 return;
6351 case Intrinsic::aarch64_sve_zipq_x4:
6352 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true,
6353 Opc: AArch64::ZIP_VG4_4Z4Z_Q);
6354 return;
6355 case Intrinsic::aarch64_sve_uzp_x2:
6356 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6357 VT: Node->getValueType(ResNo: 0),
6358 Opcodes: {AArch64::UZP_VG2_2ZZZ_B, AArch64::UZP_VG2_2ZZZ_H,
6359 AArch64::UZP_VG2_2ZZZ_S, AArch64::UZP_VG2_2ZZZ_D}))
6360 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false, Opc: Op);
6361 return;
6362 case Intrinsic::aarch64_sve_uzpq_x2:
6363 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false,
6364 Opc: AArch64::UZP_VG2_2ZZZ_Q);
6365 return;
6366 case Intrinsic::aarch64_sve_uzp_x4:
6367 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6368 VT: Node->getValueType(ResNo: 0),
6369 Opcodes: {AArch64::UZP_VG4_4Z4Z_B, AArch64::UZP_VG4_4Z4Z_H,
6370 AArch64::UZP_VG4_4Z4Z_S, AArch64::UZP_VG4_4Z4Z_D}))
6371 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true, Opc: Op);
6372 return;
6373 case Intrinsic::aarch64_sve_uzpq_x4:
6374 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true,
6375 Opc: AArch64::UZP_VG4_4Z4Z_Q);
6376 return;
6377 case Intrinsic::aarch64_sve_sel_x2:
6378 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6379 VT: Node->getValueType(ResNo: 0),
6380 Opcodes: {AArch64::SEL_VG2_2ZC2Z2Z_B, AArch64::SEL_VG2_2ZC2Z2Z_H,
6381 AArch64::SEL_VG2_2ZC2Z2Z_S, AArch64::SEL_VG2_2ZC2Z2Z_D}))
6382 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op, /*HasPred=*/true);
6383 return;
6384 case Intrinsic::aarch64_sve_sel_x4:
6385 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6386 VT: Node->getValueType(ResNo: 0),
6387 Opcodes: {AArch64::SEL_VG4_4ZC4Z4Z_B, AArch64::SEL_VG4_4ZC4Z4Z_H,
6388 AArch64::SEL_VG4_4ZC4Z4Z_S, AArch64::SEL_VG4_4ZC4Z4Z_D}))
6389 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op, /*HasPred=*/true);
6390 return;
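    // Multi-vector round-to-integral; SelectFrintFromVT selects the
    // single-precision FRINT* group opcodes.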
6391 case Intrinsic::aarch64_sve_frinta_x2:
6392 SelectFrintFromVT(N: Node, NumVecs: 2, Opcode: AArch64::FRINTA_2Z2Z_S);
6393 return;
6394 case Intrinsic::aarch64_sve_frinta_x4:
6395 SelectFrintFromVT(N: Node, NumVecs: 4, Opcode: AArch64::FRINTA_4Z4Z_S);
6396 return;
6397 case Intrinsic::aarch64_sve_frintm_x2:
6398 SelectFrintFromVT(N: Node, NumVecs: 2, Opcode: AArch64::FRINTM_2Z2Z_S);
6399 return;
6400 case Intrinsic::aarch64_sve_frintm_x4:
6401 SelectFrintFromVT(N: Node, NumVecs: 4, Opcode: AArch64::FRINTM_4Z4Z_S);
6402 return;
6403 case Intrinsic::aarch64_sve_frintn_x2:
6404 SelectFrintFromVT(N: Node, NumVecs: 2, Opcode: AArch64::FRINTN_2Z2Z_S);
6405 return;
6406 case Intrinsic::aarch64_sve_frintn_x4:
6407 SelectFrintFromVT(N: Node, NumVecs: 4, Opcode: AArch64::FRINTN_4Z4Z_S);
6408 return;
6409 case Intrinsic::aarch64_sve_frintp_x2:
6410 SelectFrintFromVT(N: Node, NumVecs: 2, Opcode: AArch64::FRINTP_2Z2Z_S);
6411 return;
6412 case Intrinsic::aarch64_sve_frintp_x4:
6413 SelectFrintFromVT(N: Node, NumVecs: 4, Opcode: AArch64::FRINTP_4Z4Z_S);
6414 return;
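    // Widening unpacks: x2 widens one vector into a pair (2ZZ), x4 widens a
    // pair into four vectors (4Z2Z, tuple input). There is no byte-element
    // result, hence the leading zero slot.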
6415 case Intrinsic::aarch64_sve_sunpk_x2:
6416 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6417 VT: Node->getValueType(ResNo: 0),
6418 Opcodes: {0, AArch64::SUNPK_VG2_2ZZ_H, AArch64::SUNPK_VG2_2ZZ_S,
6419 AArch64::SUNPK_VG2_2ZZ_D}))
6420 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false, Opc: Op);
6421 return;
6422 case Intrinsic::aarch64_sve_uunpk_x2:
6423 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6424 VT: Node->getValueType(ResNo: 0),
6425 Opcodes: {0, AArch64::UUNPK_VG2_2ZZ_H, AArch64::UUNPK_VG2_2ZZ_S,
6426 AArch64::UUNPK_VG2_2ZZ_D}))
6427 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false, Opc: Op);
6428 return;
6429 case Intrinsic::aarch64_sve_sunpk_x4:
6430 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6431 VT: Node->getValueType(ResNo: 0),
6432 Opcodes: {0, AArch64::SUNPK_VG4_4Z2Z_H, AArch64::SUNPK_VG4_4Z2Z_S,
6433 AArch64::SUNPK_VG4_4Z2Z_D}))
6434 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true, Opc: Op);
6435 return;
6436 case Intrinsic::aarch64_sve_uunpk_x4:
6437 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6438 VT: Node->getValueType(ResNo: 0),
6439 Opcodes: {0, AArch64::UUNPK_VG4_4Z2Z_H, AArch64::UUNPK_VG4_4Z2Z_S,
6440 AArch64::UUNPK_VG4_4Z2Z_D}))
6441 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true, Opc: Op);
6442 return;
6443 case Intrinsic::aarch64_sve_pext_x2: {
6444 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6445 VT: Node->getValueType(ResNo: 0),
6446 Opcodes: {AArch64::PEXT_2PCI_B, AArch64::PEXT_2PCI_H, AArch64::PEXT_2PCI_S,
6447 AArch64::PEXT_2PCI_D}))
6448 SelectPExtPair(N: Node, Opc: Op);
6449 return;
6450 }
6451 }
6452 break;
6453 }
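  // Side-effecting intrinsics: NEON structured stores (st1x2..st1x4, st2..st4
  // and their lane forms) and SVE predicated stores. The memory VT is taken
  // from operand 2 when present.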
6454 case ISD::INTRINSIC_VOID: {
6455 unsigned IntNo = Node->getConstantOperandVal(Num: 1);
6456 if (Node->getNumOperands() >= 3)
6457 VT = Node->getOperand(Num: 2)->getValueType(ResNo: 0);
6458 switch (IntNo) {
6459 default:
6460 break;
6461 case Intrinsic::aarch64_neon_st1x2: {
6462 if (VT == MVT::v8i8) {
6463 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov8b);
6464 return;
6465 } else if (VT == MVT::v16i8) {
6466 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov16b);
6467 return;
6468 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6469 VT == MVT::v4bf16) {
6470 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov4h);
6471 return;
6472 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6473 VT == MVT::v8bf16) {
6474 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov8h);
6475 return;
6476 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6477 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov2s);
6478 return;
6479 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6480 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov4s);
6481 return;
6482 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6483 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov2d);
6484 return;
6485 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6486 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov1d);
6487 return;
6488 }
6489 break;
6490 }
6491 case Intrinsic::aarch64_neon_st1x3: {
6492 if (VT == MVT::v8i8) {
6493 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev8b);
6494 return;
6495 } else if (VT == MVT::v16i8) {
6496 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev16b);
6497 return;
6498 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6499 VT == MVT::v4bf16) {
6500 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev4h);
6501 return;
6502 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6503 VT == MVT::v8bf16) {
6504 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev8h);
6505 return;
6506 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6507 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev2s);
6508 return;
6509 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6510 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev4s);
6511 return;
6512 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6513 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev2d);
6514 return;
6515 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6516 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev1d);
6517 return;
6518 }
6519 break;
6520 }
6521 case Intrinsic::aarch64_neon_st1x4: {
6522 if (VT == MVT::v8i8) {
6523 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv8b);
6524 return;
6525 } else if (VT == MVT::v16i8) {
6526 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv16b);
6527 return;
6528 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6529 VT == MVT::v4bf16) {
6530 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv4h);
6531 return;
6532 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6533 VT == MVT::v8bf16) {
6534 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv8h);
6535 return;
6536 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6537 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv2s);
6538 return;
6539 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6540 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv4s);
6541 return;
6542 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6543 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv2d);
6544 return;
6545 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6546 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv1d);
6547 return;
6548 }
6549 break;
6550 }
6551 case Intrinsic::aarch64_neon_st2: {
6552 if (VT == MVT::v8i8) {
6553 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov8b);
6554 return;
6555 } else if (VT == MVT::v16i8) {
6556 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov16b);
6557 return;
6558 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6559 VT == MVT::v4bf16) {
6560 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov4h);
6561 return;
6562 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6563 VT == MVT::v8bf16) {
6564 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov8h);
6565 return;
6566 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6567 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov2s);
6568 return;
6569 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6570 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov4s);
6571 return;
6572 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6573 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov2d);
6574 return;
6575 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6576 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov1d);
6577 return;
6578 }
6579 break;
6580 }
6581 case Intrinsic::aarch64_neon_st3: {
6582 if (VT == MVT::v8i8) {
6583 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev8b);
6584 return;
6585 } else if (VT == MVT::v16i8) {
6586 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev16b);
6587 return;
6588 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6589 VT == MVT::v4bf16) {
6590 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev4h);
6591 return;
6592 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6593 VT == MVT::v8bf16) {
6594 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev8h);
6595 return;
6596 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6597 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev2s);
6598 return;
6599 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6600 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev4s);
6601 return;
6602 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6603 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev2d);
6604 return;
6605 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6606 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev1d);
6607 return;
6608 }
6609 break;
6610 }
6611 case Intrinsic::aarch64_neon_st4: {
6612 if (VT == MVT::v8i8) {
6613 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv8b);
6614 return;
6615 } else if (VT == MVT::v16i8) {
6616 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv16b);
6617 return;
6618 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6619 VT == MVT::v4bf16) {
6620 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv4h);
6621 return;
6622 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6623 VT == MVT::v8bf16) {
6624 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv8h);
6625 return;
6626 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6627 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv2s);
6628 return;
6629 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6630 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv4s);
6631 return;
6632 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6633 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv2d);
6634 return;
6635 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6636 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv1d);
6637 return;
6638 }
6639 break;
6640 }
6641 case Intrinsic::aarch64_neon_st2lane: {
6642 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
6643 SelectStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i8);
6644 return;
6645 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
6646 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
6647 SelectStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i16);
6648 return;
6649 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
6650 VT == MVT::v2f32) {
6651 SelectStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i32);
6652 return;
6653 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
6654 VT == MVT::v1f64) {
6655 SelectStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i64);
6656 return;
6657 }
6658 break;
6659 }
6660 case Intrinsic::aarch64_neon_st3lane: {
6661 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
6662 SelectStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i8);
6663 return;
6664 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
6665 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
6666 SelectStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i16);
6667 return;
6668 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
6669 VT == MVT::v2f32) {
6670 SelectStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i32);
6671 return;
6672 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
6673 VT == MVT::v1f64) {
6674 SelectStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i64);
6675 return;
6676 }
6677 break;
6678 }
6679 case Intrinsic::aarch64_neon_st4lane: {
6680 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
6681 SelectStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i8);
6682 return;
6683 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
6684 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
6685 SelectStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i16);
6686 return;
6687 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
6688 VT == MVT::v2f32) {
6689 SelectStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i32);
6690 return;
6691 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
6692 VT == MVT::v1f64) {
6693 SelectStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i64);
6694 return;
6695 }
6696 break;
6697 }
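    // SVE predicated structured stores. Both a reg+reg (Opc_rr) and a reg+imm
    // (Opc_ri) opcode are provided; Scale is the log2 element size in bytes
    // and is used when forming the immediate-offset variant.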
6698 case Intrinsic::aarch64_sve_st2q: {
6699 SelectPredicatedStore(N: Node, NumVecs: 2, Scale: 4, Opc_rr: AArch64::ST2Q, Opc_ri: AArch64::ST2Q_IMM);
6700 return;
6701 }
6702 case Intrinsic::aarch64_sve_st3q: {
6703 SelectPredicatedStore(N: Node, NumVecs: 3, Scale: 4, Opc_rr: AArch64::ST3Q, Opc_ri: AArch64::ST3Q_IMM);
6704 return;
6705 }
6706 case Intrinsic::aarch64_sve_st4q: {
6707 SelectPredicatedStore(N: Node, NumVecs: 4, Scale: 4, Opc_rr: AArch64::ST4Q, Opc_ri: AArch64::ST4Q_IMM);
6708 return;
6709 }
6710 case Intrinsic::aarch64_sve_st2: {
6711 if (VT == MVT::nxv16i8) {
6712 SelectPredicatedStore(N: Node, NumVecs: 2, Scale: 0, Opc_rr: AArch64::ST2B, Opc_ri: AArch64::ST2B_IMM);
6713 return;
6714 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
6715 VT == MVT::nxv8bf16) {
6716 SelectPredicatedStore(N: Node, NumVecs: 2, Scale: 1, Opc_rr: AArch64::ST2H, Opc_ri: AArch64::ST2H_IMM);
6717 return;
6718 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
6719 SelectPredicatedStore(N: Node, NumVecs: 2, Scale: 2, Opc_rr: AArch64::ST2W, Opc_ri: AArch64::ST2W_IMM);
6720 return;
6721 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
6722 SelectPredicatedStore(N: Node, NumVecs: 2, Scale: 3, Opc_rr: AArch64::ST2D, Opc_ri: AArch64::ST2D_IMM);
6723 return;
6724 }
6725 break;
6726 }
6727 case Intrinsic::aarch64_sve_st3: {
6728 if (VT == MVT::nxv16i8) {
6729 SelectPredicatedStore(N: Node, NumVecs: 3, Scale: 0, Opc_rr: AArch64::ST3B, Opc_ri: AArch64::ST3B_IMM);
6730 return;
6731 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
6732 VT == MVT::nxv8bf16) {
6733 SelectPredicatedStore(N: Node, NumVecs: 3, Scale: 1, Opc_rr: AArch64::ST3H, Opc_ri: AArch64::ST3H_IMM);
6734 return;
6735 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
6736 SelectPredicatedStore(N: Node, NumVecs: 3, Scale: 2, Opc_rr: AArch64::ST3W, Opc_ri: AArch64::ST3W_IMM);
6737 return;
6738 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
6739 SelectPredicatedStore(N: Node, NumVecs: 3, Scale: 3, Opc_rr: AArch64::ST3D, Opc_ri: AArch64::ST3D_IMM);
6740 return;
6741 }
6742 break;
6743 }
6744 case Intrinsic::aarch64_sve_st4: {
6745 if (VT == MVT::nxv16i8) {
6746 SelectPredicatedStore(N: Node, NumVecs: 4, Scale: 0, Opc_rr: AArch64::ST4B, Opc_ri: AArch64::ST4B_IMM);
6747 return;
6748 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
6749 VT == MVT::nxv8bf16) {
6750 SelectPredicatedStore(N: Node, NumVecs: 4, Scale: 1, Opc_rr: AArch64::ST4H, Opc_ri: AArch64::ST4H_IMM);
6751 return;
6752 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
6753 SelectPredicatedStore(N: Node, NumVecs: 4, Scale: 2, Opc_rr: AArch64::ST4W, Opc_ri: AArch64::ST4W_IMM);
6754 return;
6755 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
6756 SelectPredicatedStore(N: Node, NumVecs: 4, Scale: 3, Opc_rr: AArch64::ST4D, Opc_ri: AArch64::ST4D_IMM);
6757 return;
6758 }
6759 break;
6760 }
6761 }
6762 break;
6763 }
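  // Post-incrementing NEON structured loads. The sub-register index passed to
  // SelectPostLoad (dsub0 vs. qsub0) reflects whether the result vectors live
  // in 64-bit D or 128-bit Q registers.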
6764 case AArch64ISD::LD2post: {
6765 if (VT == MVT::v8i8) {
6766 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov8b_POST, SubRegIdx: AArch64::dsub0);
6767 return;
6768 } else if (VT == MVT::v16i8) {
6769 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov16b_POST, SubRegIdx: AArch64::qsub0);
6770 return;
6771 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6772 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov4h_POST, SubRegIdx: AArch64::dsub0);
6773 return;
6774 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6775 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov8h_POST, SubRegIdx: AArch64::qsub0);
6776 return;
6777 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6778 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov2s_POST, SubRegIdx: AArch64::dsub0);
6779 return;
6780 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6781 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov4s_POST, SubRegIdx: AArch64::qsub0);
6782 return;
6783 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6784 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov1d_POST, SubRegIdx: AArch64::dsub0);
6785 return;
6786 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6787 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov2d_POST, SubRegIdx: AArch64::qsub0);
6788 return;
6789 }
6790 break;
6791 }
6792 case AArch64ISD::LD3post: {
6793 if (VT == MVT::v8i8) {
6794 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev8b_POST, SubRegIdx: AArch64::dsub0);
6795 return;
6796 } else if (VT == MVT::v16i8) {
6797 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev16b_POST, SubRegIdx: AArch64::qsub0);
6798 return;
6799 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6800 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev4h_POST, SubRegIdx: AArch64::dsub0);
6801 return;
6802 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6803 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev8h_POST, SubRegIdx: AArch64::qsub0);
6804 return;
6805 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6806 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev2s_POST, SubRegIdx: AArch64::dsub0);
6807 return;
6808 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6809 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev4s_POST, SubRegIdx: AArch64::qsub0);
6810 return;
6811 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6812 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev1d_POST, SubRegIdx: AArch64::dsub0);
6813 return;
6814 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6815 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev2d_POST, SubRegIdx: AArch64::qsub0);
6816 return;
6817 }
6818 break;
6819 }
6820 case AArch64ISD::LD4post: {
6821 if (VT == MVT::v8i8) {
6822 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv8b_POST, SubRegIdx: AArch64::dsub0);
6823 return;
6824 } else if (VT == MVT::v16i8) {
6825 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv16b_POST, SubRegIdx: AArch64::qsub0);
6826 return;
6827 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6828 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv4h_POST, SubRegIdx: AArch64::dsub0);
6829 return;
6830 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6831 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv8h_POST, SubRegIdx: AArch64::qsub0);
6832 return;
6833 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6834 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv2s_POST, SubRegIdx: AArch64::dsub0);
6835 return;
6836 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6837 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv4s_POST, SubRegIdx: AArch64::qsub0);
6838 return;
6839 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6840 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv1d_POST, SubRegIdx: AArch64::dsub0);
6841 return;
6842 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6843 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv2d_POST, SubRegIdx: AArch64::qsub0);
6844 return;
6845 }
6846 break;
6847 }
6848 case AArch64ISD::LD1x2post: {
6849 if (VT == MVT::v8i8) {
6850 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov8b_POST, SubRegIdx: AArch64::dsub0);
6851 return;
6852 } else if (VT == MVT::v16i8) {
6853 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov16b_POST, SubRegIdx: AArch64::qsub0);
6854 return;
6855 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6856 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov4h_POST, SubRegIdx: AArch64::dsub0);
6857 return;
6858 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6859 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov8h_POST, SubRegIdx: AArch64::qsub0);
6860 return;
6861 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6862 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov2s_POST, SubRegIdx: AArch64::dsub0);
6863 return;
6864 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6865 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov4s_POST, SubRegIdx: AArch64::qsub0);
6866 return;
6867 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6868 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov1d_POST, SubRegIdx: AArch64::dsub0);
6869 return;
6870 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6871 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov2d_POST, SubRegIdx: AArch64::qsub0);
6872 return;
6873 }
6874 break;
6875 }
6876 case AArch64ISD::LD1x3post: {
6877 if (VT == MVT::v8i8) {
6878 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev8b_POST, SubRegIdx: AArch64::dsub0);
6879 return;
6880 } else if (VT == MVT::v16i8) {
6881 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev16b_POST, SubRegIdx: AArch64::qsub0);
6882 return;
6883 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6884 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev4h_POST, SubRegIdx: AArch64::dsub0);
6885 return;
6886 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6887 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev8h_POST, SubRegIdx: AArch64::qsub0);
6888 return;
6889 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6890 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev2s_POST, SubRegIdx: AArch64::dsub0);
6891 return;
6892 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6893 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev4s_POST, SubRegIdx: AArch64::qsub0);
6894 return;
6895 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6896 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev1d_POST, SubRegIdx: AArch64::dsub0);
6897 return;
6898 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6899 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev2d_POST, SubRegIdx: AArch64::qsub0);
6900 return;
6901 }
6902 break;
6903 }
6904 case AArch64ISD::LD1x4post: {
6905 if (VT == MVT::v8i8) {
6906 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv8b_POST, SubRegIdx: AArch64::dsub0);
6907 return;
6908 } else if (VT == MVT::v16i8) {
6909 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv16b_POST, SubRegIdx: AArch64::qsub0);
6910 return;
6911 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6912 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv4h_POST, SubRegIdx: AArch64::dsub0);
6913 return;
6914 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6915 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv8h_POST, SubRegIdx: AArch64::qsub0);
6916 return;
6917 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6918 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv2s_POST, SubRegIdx: AArch64::dsub0);
6919 return;
6920 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6921 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv4s_POST, SubRegIdx: AArch64::qsub0);
6922 return;
6923 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6924 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv1d_POST, SubRegIdx: AArch64::dsub0);
6925 return;
6926 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6927 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv2d_POST, SubRegIdx: AArch64::qsub0);
6928 return;
6929 }
6930 break;
6931 }
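  // Post-incrementing load-and-replicate (LD1R..LD4R) forms.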
6932 case AArch64ISD::LD1DUPpost: {
6933 if (VT == MVT::v8i8) {
6934 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv8b_POST, SubRegIdx: AArch64::dsub0);
6935 return;
6936 } else if (VT == MVT::v16i8) {
6937 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv16b_POST, SubRegIdx: AArch64::qsub0);
6938 return;
6939 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6940 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv4h_POST, SubRegIdx: AArch64::dsub0);
6941 return;
6942 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6943 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv8h_POST, SubRegIdx: AArch64::qsub0);
6944 return;
6945 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6946 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv2s_POST, SubRegIdx: AArch64::dsub0);
6947 return;
6948 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6949 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv4s_POST, SubRegIdx: AArch64::qsub0);
6950 return;
6951 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6952 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv1d_POST, SubRegIdx: AArch64::dsub0);
6953 return;
6954 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6955 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv2d_POST, SubRegIdx: AArch64::qsub0);
6956 return;
6957 }
6958 break;
6959 }
6960 case AArch64ISD::LD2DUPpost: {
6961 if (VT == MVT::v8i8) {
6962 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv8b_POST, SubRegIdx: AArch64::dsub0);
6963 return;
6964 } else if (VT == MVT::v16i8) {
6965 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv16b_POST, SubRegIdx: AArch64::qsub0);
6966 return;
6967 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6968 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv4h_POST, SubRegIdx: AArch64::dsub0);
6969 return;
6970 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6971 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv8h_POST, SubRegIdx: AArch64::qsub0);
6972 return;
6973 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6974 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv2s_POST, SubRegIdx: AArch64::dsub0);
6975 return;
6976 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6977 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv4s_POST, SubRegIdx: AArch64::qsub0);
6978 return;
6979 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6980 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv1d_POST, SubRegIdx: AArch64::dsub0);
6981 return;
6982 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6983 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv2d_POST, SubRegIdx: AArch64::qsub0);
6984 return;
6985 }
6986 break;
6987 }
6988 case AArch64ISD::LD3DUPpost: {
6989 if (VT == MVT::v8i8) {
6990 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv8b_POST, SubRegIdx: AArch64::dsub0);
6991 return;
6992 } else if (VT == MVT::v16i8) {
6993 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv16b_POST, SubRegIdx: AArch64::qsub0);
6994 return;
6995 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6996 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv4h_POST, SubRegIdx: AArch64::dsub0);
6997 return;
6998 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6999 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv8h_POST, SubRegIdx: AArch64::qsub0);
7000 return;
7001 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7002 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv2s_POST, SubRegIdx: AArch64::dsub0);
7003 return;
7004 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7005 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv4s_POST, SubRegIdx: AArch64::qsub0);
7006 return;
7007 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7008 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv1d_POST, SubRegIdx: AArch64::dsub0);
7009 return;
7010 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7011 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv2d_POST, SubRegIdx: AArch64::qsub0);
7012 return;
7013 }
7014 break;
7015 }
7016 case AArch64ISD::LD4DUPpost: {
7017 if (VT == MVT::v8i8) {
7018 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv8b_POST, SubRegIdx: AArch64::dsub0);
7019 return;
7020 } else if (VT == MVT::v16i8) {
7021 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv16b_POST, SubRegIdx: AArch64::qsub0);
7022 return;
7023 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7024 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv4h_POST, SubRegIdx: AArch64::dsub0);
7025 return;
7026 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7027 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv8h_POST, SubRegIdx: AArch64::qsub0);
7028 return;
7029 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7030 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv2s_POST, SubRegIdx: AArch64::dsub0);
7031 return;
7032 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7033 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv4s_POST, SubRegIdx: AArch64::qsub0);
7034 return;
7035 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7036 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv1d_POST, SubRegIdx: AArch64::dsub0);
7037 return;
7038 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7039 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv2d_POST, SubRegIdx: AArch64::qsub0);
7040 return;
7041 }
7042 break;
7043 }
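  // Post-incrementing single-lane loads; only the element size matters, so
  // 64-bit and 128-bit vector types share an opcode.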
  case AArch64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
      return;
    }
    break;
  }
  }

  // Select the default instruction
  SelectCode(Node);
}

/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOptLevel OptLevel) {
  return new AArch64DAGToDAGISelLegacy(TM, OptLevel);
}

/// When \p PredVT is a scalable vector predicate of the form
/// MVT::nx<M>xi1, build the corresponding scalable vector of
/// integers MVT::nx<M>xi<bits> such that M x bits = 128. When
/// targeting structured vectors (NumVec > 1), the output data type is
/// MVT::nx<M*NumVec>xi<bits>, still with M x bits = 128. If the input
/// PredVT is not of the form MVT::nx<M>xi1, return an invalid EVT.
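/// For example (illustrative): nxv4i1 yields nxv4i32 (4 x 32 = 128), and with
/// NumVec = 2 the result is nxv8i32.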
static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
                                                unsigned NumVec) {
  assert(NumVec > 0 && NumVec < 5 && "Invalid number of vectors.");
  if (!PredVT.isScalableVector() || PredVT.getVectorElementType() != MVT::i1)
    return EVT();

  if (PredVT != MVT::nxv16i1 && PredVT != MVT::nxv8i1 &&
      PredVT != MVT::nxv4i1 && PredVT != MVT::nxv2i1)
    return EVT();

  ElementCount EC = PredVT.getVectorElementCount();
  EVT ScalarVT =
      EVT::getIntegerVT(Ctx, AArch64::SVEBitsPerBlock / EC.getKnownMinValue());
  EVT MemVT = EVT::getVectorVT(Ctx, ScalarVT, EC * NumVec);

  return MemVT;
}

/// Return the EVT of the data associated with a memory operation in \p
/// Root. If such an EVT cannot be retrieved, return an invalid EVT.
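/// For example (illustrative): for an extending masked load whose result type
/// is nxv4i32 but whose memory VT is nxv4i16, this returns nxv4i16, i.e. the
/// result type rebuilt with the in-memory element type.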
static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
  if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(Root))
    return MemIntr->getMemoryVT();

  if (isa<MemSDNode>(Root)) {
    EVT MemVT = cast<MemSDNode>(Root)->getMemoryVT();

    EVT DataVT;
    if (auto *Load = dyn_cast<LoadSDNode>(Root))
      DataVT = Load->getValueType(0);
    else if (auto *Load = dyn_cast<MaskedLoadSDNode>(Root))
      DataVT = Load->getValueType(0);
    else if (auto *Store = dyn_cast<StoreSDNode>(Root))
      DataVT = Store->getValue().getValueType();
    else if (auto *Store = dyn_cast<MaskedStoreSDNode>(Root))
      DataVT = Store->getValue().getValueType();
    else
      llvm_unreachable("Unexpected MemSDNode!");

    return DataVT.changeVectorElementType(MemVT.getVectorElementType());
  }

  const unsigned Opcode = Root->getOpcode();
  // For custom ISD nodes, we have to look at them individually to extract the
  // type of the data moved to/from memory.
  switch (Opcode) {
  case AArch64ISD::LD1_MERGE_ZERO:
  case AArch64ISD::LD1S_MERGE_ZERO:
  case AArch64ISD::LDNF1_MERGE_ZERO:
  case AArch64ISD::LDNF1S_MERGE_ZERO:
    return cast<VTSDNode>(Root->getOperand(3))->getVT();
  case AArch64ISD::ST1_PRED:
    return cast<VTSDNode>(Root->getOperand(4))->getVT();
  default:
    break;
  }

  if (Opcode != ISD::INTRINSIC_VOID && Opcode != ISD::INTRINSIC_W_CHAIN)
    return EVT();

  switch (Root->getConstantOperandVal(1)) {
  default:
    return EVT();
  case Intrinsic::aarch64_sme_ldr:
  case Intrinsic::aarch64_sme_str:
    return MVT::nxv16i8;
  case Intrinsic::aarch64_sve_prf:
    // We are using an SVE prefetch intrinsic. Type must be inferred from the
    // width of the predicate.
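    // For example (illustrative): an nxv8i1 governing predicate implies
    // nxv8i16 data (8 x 16 = 128).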
    return getPackedVectorTypeFromPredicateType(
        Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/1);
  case Intrinsic::aarch64_sve_ld2_sret:
  case Intrinsic::aarch64_sve_ld2q_sret:
    return getPackedVectorTypeFromPredicateType(
        Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/2);
  case Intrinsic::aarch64_sve_st2q:
    return getPackedVectorTypeFromPredicateType(
        Ctx, Root->getOperand(4)->getValueType(0), /*NumVec=*/2);
  case Intrinsic::aarch64_sve_ld3_sret:
  case Intrinsic::aarch64_sve_ld3q_sret:
    return getPackedVectorTypeFromPredicateType(
        Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/3);
  case Intrinsic::aarch64_sve_st3q:
    return getPackedVectorTypeFromPredicateType(
        Ctx, Root->getOperand(5)->getValueType(0), /*NumVec=*/3);
  case Intrinsic::aarch64_sve_ld4_sret:
  case Intrinsic::aarch64_sve_ld4q_sret:
    return getPackedVectorTypeFromPredicateType(
        Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/4);
  case Intrinsic::aarch64_sve_st4q:
    return getPackedVectorTypeFromPredicateType(
        Ctx, Root->getOperand(6)->getValueType(0), /*NumVec=*/4);
  case Intrinsic::aarch64_sve_ld1udq:
  case Intrinsic::aarch64_sve_st1dq:
    return EVT(MVT::nxv1i64);
  case Intrinsic::aarch64_sve_ld1uwq:
  case Intrinsic::aarch64_sve_st1wq:
    return EVT(MVT::nxv1i32);
  }
}

/// SelectAddrModeIndexedSVE - Attempt selection of the addressing mode:
/// Base + OffImm * sizeof(MemVT), with Min <= OffImm <= Max,
/// where Root is the memory access using N for its address.
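/// For example (illustrative): with MemVT = nxv4i32 (16 bytes per vscale
/// granule), the address (add Base, (vscale * 32)) is selected as Base with
/// OffImm = 2, provided 2 lies within [Min, Max].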
template <int64_t Min, int64_t Max>
bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
                                                   SDValue &Base,
                                                   SDValue &OffImm) {
  const EVT MemVT = getMemVTFromNode(*(CurDAG->getContext()), Root);
  const DataLayout &DL = CurDAG->getDataLayout();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    // We can only encode VL scaled offsets, so only fold in frame indexes
    // referencing SVE objects.
    if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
      Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
      OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
      return true;
    }

    return false;
  }

  if (MemVT == EVT())
    return false;

  if (N.getOpcode() != ISD::ADD)
    return false;

  SDValue VScale = N.getOperand(1);
  int64_t MulImm = std::numeric_limits<int64_t>::max();
  if (VScale.getOpcode() == ISD::VSCALE) {
    MulImm = cast<ConstantSDNode>(VScale.getOperand(0))->getSExtValue();
  } else if (auto C = dyn_cast<ConstantSDNode>(VScale)) {
    int64_t ByteOffset = C->getSExtValue();
    const auto KnownVScale =
        Subtarget->getSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;

    if (!KnownVScale || ByteOffset % KnownVScale != 0)
      return false;

    MulImm = ByteOffset / KnownVScale;
  } else
    return false;

  TypeSize TS = MemVT.getSizeInBits();
  int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinValue()) / 8;

  if ((MulImm % MemWidthBytes) != 0)
    return false;

  int64_t Offset = MulImm / MemWidthBytes;
  if (Offset < Min || Offset > Max)
    return false;

  Base = N.getOperand(0);
  if (Base.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(Base)->getIndex();
    // We can only encode VL scaled offsets, so only fold in frame indexes
    // referencing SVE objects.
    if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
      Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
  }

  OffImm = CurDAG->getTargetConstant(Offset, SDLoc(N), MVT::i64);
  return true;
}

/// Select register plus register addressing mode for SVE, with scaled
/// offset.
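/// For example (illustrative): with Scale = 2 (word elements), the address
/// (add %base, (shl %idx, 2)) is selected as Base = %base, Offset = %idx.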
bool AArch64DAGToDAGISel::SelectSVERegRegAddrMode(SDValue N, unsigned Scale,
                                                  SDValue &Base,
                                                  SDValue &Offset) {
  if (N.getOpcode() != ISD::ADD)
    return false;

  // Process an ADD node.
  const SDValue LHS = N.getOperand(0);
  const SDValue RHS = N.getOperand(1);

  // 8 bit data does not come with the SHL node, so it is treated
  // separately.
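  // For example (illustrative): with Scale == 0, (add %base, %idx) is simply
  // selected as Base = %base, Offset = %idx, with no scaling of the index.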
  if (Scale == 0) {
    Base = LHS;
    Offset = RHS;
    return true;
  }

  if (auto C = dyn_cast<ConstantSDNode>(RHS)) {
    int64_t ImmOff = C->getSExtValue();
    unsigned Size = 1 << Scale;

    // To use the reg+reg addressing mode, the immediate must be a multiple of
    // the vector element's byte size.
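    // For example (illustrative): with Scale == 3 (doubleword elements), an
    // immediate of 24 is accepted and materialized as an index of 3 via
    // MOVi64imm.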
    if (ImmOff % Size)
      return false;

    SDLoc DL(N);
    Base = LHS;
    Offset = CurDAG->getTargetConstant(ImmOff >> Scale, DL, MVT::i64);
    SDValue Ops[] = {Offset};
    SDNode *MI = CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    Offset = SDValue(MI, 0);
    return true;
  }

  // Check if the RHS is a shift node with a constant.
  if (RHS.getOpcode() != ISD::SHL)
    return false;

  const SDValue ShiftRHS = RHS.getOperand(1);
  if (auto *C = dyn_cast<ConstantSDNode>(ShiftRHS))
    if (C->getZExtValue() == Scale) {
      Base = LHS;
      Offset = RHS.getOperand(0);
      return true;
    }

  return false;
}

bool AArch64DAGToDAGISel::SelectAllActivePredicate(SDValue N) {
  const AArch64TargetLowering *TLI =
      static_cast<const AArch64TargetLowering *>(getTargetLowering());

  return TLI->isAllActivePredicate(*CurDAG, N);
}

bool AArch64DAGToDAGISel::SelectAnyPredicate(SDValue N) {
  EVT VT = N.getValueType();
  return VT.isScalableVector() && VT.getVectorElementType() == MVT::i1;
}

bool AArch64DAGToDAGISel::SelectSMETileSlice(SDValue N, unsigned MaxSize,
                                             SDValue &Base, SDValue &Offset,
                                             unsigned Scale) {
  // Try to untangle an ADD node into a 'reg + offset'.
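  // For example (illustrative): (add %base, 6) with Scale == 2 and
  // MaxSize == 14 selects Base = %base and an immediate offset of 3.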
  if (CurDAG->isBaseWithConstantOffset(N))
    if (auto C = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t ImmOff = C->getSExtValue();
      if ((ImmOff > 0 && ImmOff <= MaxSize && (ImmOff % Scale == 0))) {
        Base = N.getOperand(0);
        Offset = CurDAG->getTargetConstant(ImmOff / Scale, SDLoc(N), MVT::i64);
        return true;
      }
    }

  // By default, just match reg + 0.
  Base = N;
  Offset = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
  return true;
}

bool AArch64DAGToDAGISel::SelectCmpBranchUImm6Operand(SDNode *P, SDValue N,
                                                      SDValue &Imm) {
  AArch64CC::CondCode CC =
      static_cast<AArch64CC::CondCode>(P->getConstantOperandVal(1));
  if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
    // Check conservatively if the immediate fits the valid range [0, 64).
    // Immediate variants for GE and HS definitely need to be decremented
    // when lowering the pseudos later, so an immediate of 1 would become 0.
    // For the inverse conditions LT and LO we don't know for sure if they
    // will need a decrement, but should the decision be made to reverse the
    // branch condition, we again end up with the need to decrement.
    // The same argument holds for LE, LS, GT and HI and possibly
    // incremented immediates. This can lead to slightly less optimal
    // codegen, e.g. we never codegen the legal case
    //   cblt w0, #63, A
    // because we could end up with the illegal case
    //   cbge w0, #64, B
    // should the decision to reverse the branch direction be made. For the
    // lower-bound cases this is no problem, since we can express comparisons
    // against 0 with either tbz/tbnz or by using wzr/xzr.
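    // In other words (illustrative): for GE/HS/LT/LO the accepted immediates
    // are [1, 64); for LE/LS/GT/HI they are [0, 63).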
    uint64_t LowerBound = 0, UpperBound = 64;
    switch (CC) {
    case AArch64CC::GE:
    case AArch64CC::HS:
    case AArch64CC::LT:
    case AArch64CC::LO:
      LowerBound = 1;
      break;
    case AArch64CC::LE:
    case AArch64CC::LS:
    case AArch64CC::GT:
    case AArch64CC::HI:
      UpperBound = 63;
      break;
    default:
      break;
    }

    if (CN->getAPIntValue().uge(LowerBound) &&
        CN->getAPIntValue().ult(UpperBound)) {
      SDLoc DL(N);
      Imm = CurDAG->getTargetConstant(CN->getZExtValue(), DL, N.getValueType());
      return true;
    }
  }

  return false;
}
