1//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines an instruction selector for the AArch64 target.
10//
11//===----------------------------------------------------------------------===//
12
13#include "AArch64MachineFunctionInfo.h"
14#include "AArch64TargetMachine.h"
15#include "MCTargetDesc/AArch64AddressingModes.h"
16#include "llvm/ADT/APSInt.h"
17#include "llvm/CodeGen/ISDOpcodes.h"
18#include "llvm/CodeGen/SelectionDAGISel.h"
19#include "llvm/IR/Function.h" // To access function attributes.
20#include "llvm/IR/GlobalValue.h"
21#include "llvm/IR/Intrinsics.h"
22#include "llvm/IR/IntrinsicsAArch64.h"
23#include "llvm/Support/Debug.h"
24#include "llvm/Support/ErrorHandling.h"
25#include "llvm/Support/KnownBits.h"
26#include "llvm/Support/MathExtras.h"
27#include "llvm/Support/raw_ostream.h"
28
29using namespace llvm;
30
31#define DEBUG_TYPE "aarch64-isel"
32#define PASS_NAME "AArch64 Instruction Selection"
33
34// https://github.com/llvm/llvm-project/issues/114425
35#if defined(_MSC_VER) && !defined(__clang__) && !defined(NDEBUG)
36#pragma inline_depth(0)
37#endif
38
39//===--------------------------------------------------------------------===//
40/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
41/// instructions for SelectionDAG operations.
42///
43namespace {
44
45class AArch64DAGToDAGISel : public SelectionDAGISel {
46
47 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
48 /// make the right decision when generating code for different targets.
49 const AArch64Subtarget *Subtarget;
50
51public:
52 AArch64DAGToDAGISel() = delete;
53
54 explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
55 CodeGenOptLevel OptLevel)
56 : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr) {}
57
58 bool runOnMachineFunction(MachineFunction &MF) override {
59 Subtarget = &MF.getSubtarget<AArch64Subtarget>();
60 return SelectionDAGISel::runOnMachineFunction(mf&: MF);
61 }
62
63 void Select(SDNode *Node) override;
64 void PreprocessISelDAG() override;
65
66 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
67 /// inline asm expressions.
68 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
69 InlineAsm::ConstraintCode ConstraintID,
70 std::vector<SDValue> &OutOps) override;
71
72 template <signed Low, signed High, signed Scale>
73 bool SelectRDVLImm(SDValue N, SDValue &Imm);
74
75 template <signed Low, signed High>
76 bool SelectRDSVLShiftImm(SDValue N, SDValue &Imm);
77
78 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
79 bool SelectArithUXTXRegister(SDValue N, SDValue &Reg, SDValue &Shift);
80 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
81 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
82 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
83 return SelectShiftedRegister(N, AllowROR: false, Reg, Shift);
84 }
85 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
86 return SelectShiftedRegister(N, AllowROR: true, Reg, Shift);
87 }
88 bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
89 return SelectAddrModeIndexed7S(N, Size: 1, Base, OffImm);
90 }
91 bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
92 return SelectAddrModeIndexed7S(N, Size: 2, Base, OffImm);
93 }
94 bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
95 return SelectAddrModeIndexed7S(N, Size: 4, Base, OffImm);
96 }
97 bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
98 return SelectAddrModeIndexed7S(N, Size: 8, Base, OffImm);
99 }
100 bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
101 return SelectAddrModeIndexed7S(N, Size: 16, Base, OffImm);
102 }
103 bool SelectAddrModeIndexedS9S128(SDValue N, SDValue &Base, SDValue &OffImm) {
104 return SelectAddrModeIndexedBitWidth(N, IsSignedImm: true, BW: 9, Size: 16, Base, OffImm);
105 }
106 bool SelectAddrModeIndexedU6S128(SDValue N, SDValue &Base, SDValue &OffImm) {
107 return SelectAddrModeIndexedBitWidth(N, IsSignedImm: false, BW: 6, Size: 16, Base, OffImm);
108 }
109 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
110 return SelectAddrModeIndexed(N, Size: 1, Base, OffImm);
111 }
112 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
113 return SelectAddrModeIndexed(N, Size: 2, Base, OffImm);
114 }
115 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
116 return SelectAddrModeIndexed(N, Size: 4, Base, OffImm);
117 }
118 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
119 return SelectAddrModeIndexed(N, Size: 8, Base, OffImm);
120 }
121 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
122 return SelectAddrModeIndexed(N, Size: 16, Base, OffImm);
123 }
124 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
125 return SelectAddrModeUnscaled(N, Size: 1, Base, OffImm);
126 }
127 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
128 return SelectAddrModeUnscaled(N, Size: 2, Base, OffImm);
129 }
130 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
131 return SelectAddrModeUnscaled(N, Size: 4, Base, OffImm);
132 }
133 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
134 return SelectAddrModeUnscaled(N, Size: 8, Base, OffImm);
135 }
136 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
137 return SelectAddrModeUnscaled(N, Size: 16, Base, OffImm);
138 }
139 template <unsigned Size, unsigned Max>
140 bool SelectAddrModeIndexedUImm(SDValue N, SDValue &Base, SDValue &OffImm) {
141 // Test if there is an appropriate addressing mode and check if the
142 // immediate fits.
143 bool Found = SelectAddrModeIndexed(N, Size, Base, OffImm);
144 if (Found) {
145 if (auto *CI = dyn_cast<ConstantSDNode>(Val&: OffImm)) {
146 int64_t C = CI->getSExtValue();
147 if (C <= Max)
148 return true;
149 }
150 }
151
152 // Otherwise, base only, materialize address in register.
153 Base = N;
154 OffImm = CurDAG->getTargetConstant(Val: 0, DL: SDLoc(N), VT: MVT::i64);
155 return true;
156 }
157
158 template<int Width>
159 bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
160 SDValue &SignExtend, SDValue &DoShift) {
161 return SelectAddrModeWRO(N, Size: Width / 8, Base, Offset, SignExtend, DoShift);
162 }
163
164 template<int Width>
165 bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
166 SDValue &SignExtend, SDValue &DoShift) {
167 return SelectAddrModeXRO(N, Size: Width / 8, Base, Offset, SignExtend, DoShift);
168 }
169
170 bool SelectExtractHigh(SDValue N, SDValue &Res) {
171 if (Subtarget->isLittleEndian() && N->getOpcode() == ISD::BITCAST)
172 N = N->getOperand(Num: 0);
173 if (N->getOpcode() != ISD::EXTRACT_SUBVECTOR ||
174 !isa<ConstantSDNode>(Val: N->getOperand(Num: 1)))
175 return false;
176 EVT VT = N->getValueType(ResNo: 0);
177 EVT LVT = N->getOperand(Num: 0).getValueType();
178 unsigned Index = N->getConstantOperandVal(Num: 1);
179 if (!VT.is64BitVector() || !LVT.is128BitVector() ||
180 Index != VT.getVectorNumElements())
181 return false;
182 Res = N->getOperand(Num: 0);
183 return true;
184 }
185
186 bool SelectRoundingVLShr(SDValue N, SDValue &Res1, SDValue &Res2) {
187 if (N.getOpcode() != AArch64ISD::VLSHR)
188 return false;
189 SDValue Op = N->getOperand(Num: 0);
190 EVT VT = Op.getValueType();
191 unsigned ShtAmt = N->getConstantOperandVal(Num: 1);
192 if (ShtAmt > VT.getScalarSizeInBits() / 2 || Op.getOpcode() != ISD::ADD)
193 return false;
194
195 APInt Imm;
196 if (Op.getOperand(i: 1).getOpcode() == AArch64ISD::MOVIshift)
197 Imm = APInt(VT.getScalarSizeInBits(),
198 Op.getOperand(i: 1).getConstantOperandVal(i: 0)
199 << Op.getOperand(i: 1).getConstantOperandVal(i: 1));
200 else if (Op.getOperand(i: 1).getOpcode() == AArch64ISD::DUP &&
201 isa<ConstantSDNode>(Val: Op.getOperand(i: 1).getOperand(i: 0)))
202 Imm = APInt(VT.getScalarSizeInBits(),
203 Op.getOperand(i: 1).getConstantOperandVal(i: 0));
204 else
205 return false;
206
207 if (Imm != 1ULL << (ShtAmt - 1))
208 return false;
209
210 Res1 = Op.getOperand(i: 0);
211 Res2 = CurDAG->getTargetConstant(Val: ShtAmt, DL: SDLoc(N), VT: MVT::i32);
212 return true;
213 }
214
215 bool SelectDupZeroOrUndef(SDValue N) {
216 switch(N->getOpcode()) {
217 case ISD::UNDEF:
218 return true;
219 case AArch64ISD::DUP:
220 case ISD::SPLAT_VECTOR: {
221 auto Opnd0 = N->getOperand(Num: 0);
222 if (isNullConstant(V: Opnd0))
223 return true;
224 if (isNullFPConstant(V: Opnd0))
225 return true;
226 break;
227 }
228 default:
229 break;
230 }
231
232 return false;
233 }
234
235 bool SelectAny(SDValue) { return true; }
236
237 bool SelectDupZero(SDValue N) {
238 switch(N->getOpcode()) {
239 case AArch64ISD::DUP:
240 case ISD::SPLAT_VECTOR: {
241 auto Opnd0 = N->getOperand(Num: 0);
242 if (isNullConstant(V: Opnd0))
243 return true;
244 if (isNullFPConstant(V: Opnd0))
245 return true;
246 break;
247 }
248 }
249
250 return false;
251 }
252
253 template <MVT::SimpleValueType VT, bool Negate>
254 bool SelectSVEAddSubImm(SDValue N, SDValue &Imm, SDValue &Shift) {
255 return SelectSVEAddSubImm(N, VT, Imm, Shift, Negate);
256 }
257
258 template <MVT::SimpleValueType VT, bool Negate>
259 bool SelectSVEAddSubSSatImm(SDValue N, SDValue &Imm, SDValue &Shift) {
260 return SelectSVEAddSubSSatImm(N, VT, Imm, Shift, Negate);
261 }
262
263 template <MVT::SimpleValueType VT>
264 bool SelectSVECpyDupImm(SDValue N, SDValue &Imm, SDValue &Shift) {
265 return SelectSVECpyDupImm(N, VT, Imm, Shift);
266 }
267
268 template <MVT::SimpleValueType VT, bool Invert = false>
269 bool SelectSVELogicalImm(SDValue N, SDValue &Imm) {
270 return SelectSVELogicalImm(N, VT, Imm, Invert);
271 }
272
273 template <MVT::SimpleValueType VT>
274 bool SelectSVEArithImm(SDValue N, SDValue &Imm) {
275 return SelectSVEArithImm(N, VT, Imm);
276 }
277
278 template <unsigned Low, unsigned High, bool AllowSaturation = false>
279 bool SelectSVEShiftImm(SDValue N, SDValue &Imm) {
280 return SelectSVEShiftImm(N, Low, High, AllowSaturation, Imm);
281 }
282
283 bool SelectSVEShiftSplatImmR(SDValue N, SDValue &Imm) {
284 if (N->getOpcode() != ISD::SPLAT_VECTOR)
285 return false;
286
287 EVT EltVT = N->getValueType(ResNo: 0).getVectorElementType();
288 return SelectSVEShiftImm(N: N->getOperand(Num: 0), /* Low */ 1,
289 /* High */ EltVT.getFixedSizeInBits(),
290 /* AllowSaturation */ true, Imm);
291 }
292
293 // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
294 template<signed Min, signed Max, signed Scale, bool Shift>
295 bool SelectCntImm(SDValue N, SDValue &Imm) {
296 if (!isa<ConstantSDNode>(Val: N))
297 return false;
298
299 int64_t MulImm = cast<ConstantSDNode>(Val&: N)->getSExtValue();
300 if (Shift)
301 MulImm = 1LL << MulImm;
302
303 if ((MulImm % std::abs(x: Scale)) != 0)
304 return false;
305
306 MulImm /= Scale;
307 if ((MulImm >= Min) && (MulImm <= Max)) {
308 Imm = CurDAG->getTargetConstant(Val: MulImm, DL: SDLoc(N), VT: MVT::i32);
309 return true;
310 }
311
312 return false;
313 }
314
315 template <signed Max, signed Scale>
316 bool SelectEXTImm(SDValue N, SDValue &Imm) {
317 if (!isa<ConstantSDNode>(Val: N))
318 return false;
319
320 int64_t MulImm = cast<ConstantSDNode>(Val&: N)->getSExtValue();
321
322 if (MulImm >= 0 && MulImm <= Max) {
323 MulImm *= Scale;
324 Imm = CurDAG->getTargetConstant(Val: MulImm, DL: SDLoc(N), VT: MVT::i32);
325 return true;
326 }
327
328 return false;
329 }
330
331 template <unsigned BaseReg, unsigned Max>
332 bool ImmToReg(SDValue N, SDValue &Imm) {
333 if (auto *CI = dyn_cast<ConstantSDNode>(Val&: N)) {
334 uint64_t C = CI->getZExtValue();
335
336 if (C > Max)
337 return false;
338
339 Imm = CurDAG->getRegister(Reg: BaseReg + C, VT: MVT::Other);
340 return true;
341 }
342 return false;
343 }
344
345 /// Form sequences of consecutive 64/128-bit registers for use in NEON
346 /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
347 /// between 1 and 4 elements. If it contains a single element that is returned
348 /// unchanged; otherwise a REG_SEQUENCE value is returned.
349 SDValue createDTuple(ArrayRef<SDValue> Vecs);
350 SDValue createQTuple(ArrayRef<SDValue> Vecs);
351 // Form a sequence of SVE registers for instructions using list of vectors,
352 // e.g. structured loads and stores (ldN, stN).
353 SDValue createZTuple(ArrayRef<SDValue> Vecs);
354
355 // Similar to above, except the register must start at a multiple of the
356 // tuple, e.g. z2 for a 2-tuple, or z8 for a 4-tuple.
357 SDValue createZMulTuple(ArrayRef<SDValue> Regs);
358
359 /// Generic helper for the createDTuple/createQTuple
360 /// functions. Those should almost always be called instead.
361 SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
362 const unsigned SubRegs[]);
363
364 void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
365
366 bool tryIndexedLoad(SDNode *N);
367
368 void SelectPtrauthAuth(SDNode *N);
369 void SelectPtrauthResign(SDNode *N);
370
371 bool trySelectStackSlotTagP(SDNode *N);
372 void SelectTagP(SDNode *N);
373
374 void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
375 unsigned SubRegIdx);
376 void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
377 unsigned SubRegIdx);
378 void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
379 void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
380 void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, unsigned Scale,
381 unsigned Opc_rr, unsigned Opc_ri,
382 bool IsIntr = false);
383 void SelectContiguousMultiVectorLoad(SDNode *N, unsigned NumVecs,
384 unsigned Scale, unsigned Opc_ri,
385 unsigned Opc_rr);
386 void SelectDestructiveMultiIntrinsic(SDNode *N, unsigned NumVecs,
387 bool IsZmMulti, unsigned Opcode,
388 bool HasPred = false);
389 void SelectPExtPair(SDNode *N, unsigned Opc);
390 void SelectWhilePair(SDNode *N, unsigned Opc);
391 void SelectCVTIntrinsic(SDNode *N, unsigned NumVecs, unsigned Opcode);
392 void SelectCVTIntrinsicFP8(SDNode *N, unsigned NumVecs, unsigned Opcode);
393 void SelectClamp(SDNode *N, unsigned NumVecs, unsigned Opcode);
394 void SelectUnaryMultiIntrinsic(SDNode *N, unsigned NumOutVecs,
395 bool IsTupleInput, unsigned Opc);
396 void SelectFrintFromVT(SDNode *N, unsigned NumVecs, unsigned Opcode);
397
398 template <unsigned MaxIdx, unsigned Scale>
399 void SelectMultiVectorMove(SDNode *N, unsigned NumVecs, unsigned BaseReg,
400 unsigned Op);
401 void SelectMultiVectorMoveZ(SDNode *N, unsigned NumVecs,
402 unsigned Op, unsigned MaxIdx, unsigned Scale,
403 unsigned BaseReg = 0);
404 bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
405 /// SVE Reg+Imm addressing mode.
406 template <int64_t Min, int64_t Max>
407 bool SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, SDValue &Base,
408 SDValue &OffImm);
409 /// SVE Reg+Reg address mode.
410 template <unsigned Scale>
411 bool SelectSVERegRegAddrMode(SDValue N, SDValue &Base, SDValue &Offset) {
412 return SelectSVERegRegAddrMode(N, Scale, Base, Offset);
413 }
414
415 void SelectMultiVectorLutiLane(SDNode *Node, unsigned NumOutVecs,
416 unsigned Opc, uint32_t MaxImm);
417
418 void SelectMultiVectorLuti(SDNode *Node, unsigned NumOutVecs, unsigned Opc);
419
420 template <unsigned MaxIdx, unsigned Scale>
421 bool SelectSMETileSlice(SDValue N, SDValue &Vector, SDValue &Offset) {
422 return SelectSMETileSlice(N, MaxSize: MaxIdx, Vector, Offset, Scale);
423 }
424
425 void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
426 void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
427 void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
428 void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
429 void SelectPredicatedStore(SDNode *N, unsigned NumVecs, unsigned Scale,
430 unsigned Opc_rr, unsigned Opc_ri);
431 std::tuple<unsigned, SDValue, SDValue>
432 findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr, unsigned Opc_ri,
433 const SDValue &OldBase, const SDValue &OldOffset,
434 unsigned Scale);
435
436 bool tryBitfieldExtractOp(SDNode *N);
437 bool tryBitfieldExtractOpFromSExt(SDNode *N);
438 bool tryBitfieldInsertOp(SDNode *N);
439 bool tryBitfieldInsertInZeroOp(SDNode *N);
440 bool tryShiftAmountMod(SDNode *N);
441
442 bool tryReadRegister(SDNode *N);
443 bool tryWriteRegister(SDNode *N);
444
445 bool trySelectCastFixedLengthToScalableVector(SDNode *N);
446 bool trySelectCastScalableToFixedLengthVector(SDNode *N);
447
448 bool trySelectXAR(SDNode *N);
449
450// Include the pieces autogenerated from the target description.
451#include "AArch64GenDAGISel.inc"
452
453private:
454 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
455 SDValue &Shift);
456 bool SelectShiftedRegisterFromAnd(SDValue N, SDValue &Reg, SDValue &Shift);
457 bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
458 SDValue &OffImm) {
459 return SelectAddrModeIndexedBitWidth(N, IsSignedImm: true, BW: 7, Size, Base, OffImm);
460 }
461 bool SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm, unsigned BW,
462 unsigned Size, SDValue &Base,
463 SDValue &OffImm);
464 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
465 SDValue &OffImm);
466 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
467 SDValue &OffImm);
468 bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
469 SDValue &Offset, SDValue &SignExtend,
470 SDValue &DoShift);
471 bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
472 SDValue &Offset, SDValue &SignExtend,
473 SDValue &DoShift);
474 bool isWorthFoldingALU(SDValue V, bool LSL = false) const;
475 bool isWorthFoldingAddr(SDValue V, unsigned Size) const;
476 bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
477 SDValue &Offset, SDValue &SignExtend);
478
479 template<unsigned RegWidth>
480 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
481 return SelectCVTFixedPosOperand(N, FixedPos, Width: RegWidth);
482 }
483 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
484
485 template <unsigned RegWidth>
486 bool SelectCVTFixedPointVec(SDValue N, SDValue &FixedPos) {
487 return SelectCVTFixedPointVec(N, FixedPos, Width: RegWidth);
488 }
489 bool SelectCVTFixedPointVec(SDValue N, SDValue &FixedPos, unsigned Width);
490
491 template<unsigned RegWidth>
492 bool SelectCVTFixedPosRecipOperand(SDValue N, SDValue &FixedPos) {
493 return SelectCVTFixedPosRecipOperand(N, FixedPos, Width: RegWidth);
494 }
495
496 bool SelectCVTFixedPosRecipOperand(SDValue N, SDValue &FixedPos,
497 unsigned Width);
498
499 bool SelectCMP_SWAP(SDNode *N);
500
501 bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift,
502 bool Negate);
503 bool SelectSVEAddSubSSatImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift,
504 bool Negate);
505 bool SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
506 bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm, bool Invert);
507
508 bool SelectSVESignedArithImm(SDValue N, SDValue &Imm);
509 bool SelectSVEShiftImm(SDValue N, uint64_t Low, uint64_t High,
510 bool AllowSaturation, SDValue &Imm);
511
512 bool SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm);
513 bool SelectSVERegRegAddrMode(SDValue N, unsigned Scale, SDValue &Base,
514 SDValue &Offset);
515 bool SelectSMETileSlice(SDValue N, unsigned MaxSize, SDValue &Vector,
516 SDValue &Offset, unsigned Scale = 1);
517
518 bool SelectAllActivePredicate(SDValue N);
519 bool SelectAnyPredicate(SDValue N);
520
521 bool SelectCmpBranchUImm6Operand(SDNode *P, SDValue N, SDValue &Imm);
522
523 template <bool MatchCBB>
524 bool SelectCmpBranchExtOperand(SDValue N, SDValue &Reg, SDValue &ExtType);
525};
526
527class AArch64DAGToDAGISelLegacy : public SelectionDAGISelLegacy {
528public:
529 static char ID;
530 explicit AArch64DAGToDAGISelLegacy(AArch64TargetMachine &tm,
531 CodeGenOptLevel OptLevel)
532 : SelectionDAGISelLegacy(
533 ID, std::make_unique<AArch64DAGToDAGISel>(args&: tm, args&: OptLevel)) {}
534};
535} // end anonymous namespace
536
537char AArch64DAGToDAGISelLegacy::ID = 0;
538
539INITIALIZE_PASS(AArch64DAGToDAGISelLegacy, DEBUG_TYPE, PASS_NAME, false, false)
540
541/// addBitcastHints - This method adds bitcast hints to the operands of a node
542/// to help instruction selector determine which operands are in Neon registers.
543static SDValue addBitcastHints(SelectionDAG &DAG, SDNode &N) {
544 SDLoc DL(&N);
545 auto getFloatVT = [&](EVT VT) {
546 EVT ScalarVT = VT.getScalarType();
547 assert((ScalarVT == MVT::i32 || ScalarVT == MVT::i64) && "Unexpected VT");
548 return VT.changeElementType(Context&: *(DAG.getContext()),
549 EltVT: ScalarVT == MVT::i32 ? MVT::f32 : MVT::f64);
550 };
551 auto bitcastToFloat = [&](SDValue Val) {
552 return DAG.getBitcast(VT: getFloatVT(Val.getValueType()), V: Val);
553 };
554 SmallVector<SDValue, 2> NewOps;
555 NewOps.reserve(N: N.getNumOperands());
556
557 for (unsigned I = 0, E = N.getNumOperands(); I < E; ++I)
558 NewOps.push_back(Elt: bitcastToFloat(N.getOperand(Num: I)));
559 EVT OrigVT = N.getValueType(ResNo: 0);
560 SDValue OpNode = DAG.getNode(Opcode: N.getOpcode(), DL, VT: getFloatVT(OrigVT), Ops: NewOps);
561 return DAG.getBitcast(VT: OrigVT, V: OpNode);
562}
563
564/// isIntImmediate - This method tests to see if the node is a constant
565/// operand. If so Imm will receive the 32-bit value.
566static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
567 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(Val: N)) {
568 Imm = C->getZExtValue();
569 return true;
570 }
571 return false;
572}
573
574// isIntImmediate - This method tests to see if a constant operand.
575// If so Imm will receive the value.
576static bool isIntImmediate(SDValue N, uint64_t &Imm) {
577 return isIntImmediate(N: N.getNode(), Imm);
578}
579
580// isOpcWithIntImmediate - This method tests to see if the node is a specific
581// opcode and that it has a immediate integer right operand.
582// If so Imm will receive the 32 bit value.
583static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
584 uint64_t &Imm) {
585 return N->getOpcode() == Opc &&
586 isIntImmediate(N: N->getOperand(Num: 1).getNode(), Imm);
587}
588
589// isIntImmediateEq - This method tests to see if N is a constant operand that
590// is equivalent to 'ImmExpected'.
591#ifndef NDEBUG
592static bool isIntImmediateEq(SDValue N, const uint64_t ImmExpected) {
593 uint64_t Imm;
594 if (!isIntImmediate(N.getNode(), Imm))
595 return false;
596 return Imm == ImmExpected;
597}
598#endif
599
600bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
601 const SDValue &Op, const InlineAsm::ConstraintCode ConstraintID,
602 std::vector<SDValue> &OutOps) {
603 switch(ConstraintID) {
604 default:
605 llvm_unreachable("Unexpected asm memory constraint");
606 case InlineAsm::ConstraintCode::m:
607 case InlineAsm::ConstraintCode::o:
608 case InlineAsm::ConstraintCode::Q:
609 // We need to make sure that this one operand does not end up in XZR, thus
610 // require the address to be in a PointerRegClass register.
611 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
612 const TargetRegisterClass *TRC = TRI->getPointerRegClass();
613 SDLoc dl(Op);
614 SDValue RC = CurDAG->getTargetConstant(Val: TRC->getID(), DL: dl, VT: MVT::i64);
615 SDValue NewOp =
616 SDValue(CurDAG->getMachineNode(Opcode: TargetOpcode::COPY_TO_REGCLASS,
617 dl, VT: Op.getValueType(),
618 Op1: Op, Op2: RC), 0);
619 OutOps.push_back(x: NewOp);
620 return false;
621 }
622 return true;
623}
624
625/// SelectArithImmed - Select an immediate value that can be represented as
626/// a 12-bit value shifted left by either 0 or 12. If so, return true with
627/// Val set to the 12-bit value and Shift set to the shifter operand.
628bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
629 SDValue &Shift) {
630 // This function is called from the addsub_shifted_imm ComplexPattern,
631 // which lists [imm] as the list of opcode it's interested in, however
632 // we still need to check whether the operand is actually an immediate
633 // here because the ComplexPattern opcode list is only used in
634 // root-level opcode matching.
635 if (!isa<ConstantSDNode>(Val: N.getNode()))
636 return false;
637
638 uint64_t Immed = N.getNode()->getAsZExtVal();
639 unsigned ShiftAmt;
640
641 if (Immed >> 12 == 0) {
642 ShiftAmt = 0;
643 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
644 ShiftAmt = 12;
645 Immed = Immed >> 12;
646 } else
647 return false;
648
649 unsigned ShVal = AArch64_AM::getShifterImm(ST: AArch64_AM::LSL, Imm: ShiftAmt);
650 SDLoc dl(N);
651 Val = CurDAG->getTargetConstant(Val: Immed, DL: dl, VT: MVT::i32);
652 Shift = CurDAG->getTargetConstant(Val: ShVal, DL: dl, VT: MVT::i32);
653 return true;
654}
655
656/// SelectNegArithImmed - As above, but negates the value before trying to
657/// select it.
658bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
659 SDValue &Shift) {
660 // This function is called from the addsub_shifted_imm ComplexPattern,
661 // which lists [imm] as the list of opcode it's interested in, however
662 // we still need to check whether the operand is actually an immediate
663 // here because the ComplexPattern opcode list is only used in
664 // root-level opcode matching.
665 if (!isa<ConstantSDNode>(Val: N.getNode()))
666 return false;
667
668 // The immediate operand must be a 24-bit zero-extended immediate.
669 uint64_t Immed = N.getNode()->getAsZExtVal();
670
671 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
672 // have the opposite effect on the C flag, so this pattern mustn't match under
673 // those circumstances.
674 if (Immed == 0)
675 return false;
676
677 if (N.getValueType() == MVT::i32)
678 Immed = ~((uint32_t)Immed) + 1;
679 else
680 Immed = ~Immed + 1ULL;
681 if (Immed & 0xFFFFFFFFFF000000ULL)
682 return false;
683
684 Immed &= 0xFFFFFFULL;
685 return SelectArithImmed(N: CurDAG->getConstant(Val: Immed, DL: SDLoc(N), VT: MVT::i32), Val,
686 Shift);
687}
688
689/// getShiftTypeForNode - Translate a shift node to the corresponding
690/// ShiftType value.
691static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
692 switch (N.getOpcode()) {
693 default:
694 return AArch64_AM::InvalidShiftExtend;
695 case ISD::SHL:
696 return AArch64_AM::LSL;
697 case ISD::SRL:
698 return AArch64_AM::LSR;
699 case ISD::SRA:
700 return AArch64_AM::ASR;
701 case ISD::ROTR:
702 return AArch64_AM::ROR;
703 }
704}
705
706static bool isMemOpOrPrefetch(SDNode *N) {
707 return isa<MemSDNode>(Val: *N) || N->getOpcode() == AArch64ISD::PREFETCH;
708}
709
710/// Determine whether it is worth it to fold SHL into the addressing
711/// mode.
712static bool isWorthFoldingSHL(SDValue V) {
713 assert(V.getOpcode() == ISD::SHL && "invalid opcode");
714 // It is worth folding logical shift of up to three places.
715 auto *CSD = dyn_cast<ConstantSDNode>(Val: V.getOperand(i: 1));
716 if (!CSD)
717 return false;
718 unsigned ShiftVal = CSD->getZExtValue();
719 if (ShiftVal > 3)
720 return false;
721
722 // Check if this particular node is reused in any non-memory related
723 // operation. If yes, do not try to fold this node into the address
724 // computation, since the computation will be kept.
725 const SDNode *Node = V.getNode();
726 for (SDNode *UI : Node->users())
727 if (!isMemOpOrPrefetch(N: UI))
728 for (SDNode *UII : UI->users())
729 if (!isMemOpOrPrefetch(N: UII))
730 return false;
731 return true;
732}
733
734/// Determine whether it is worth to fold V into an extended register addressing
735/// mode.
736bool AArch64DAGToDAGISel::isWorthFoldingAddr(SDValue V, unsigned Size) const {
737 // Trivial if we are optimizing for code size or if there is only
738 // one use of the value.
739 if (CurDAG->shouldOptForSize() || V.hasOneUse())
740 return true;
741
742 // If a subtarget has a slow shift, folding a shift into multiple loads
743 // costs additional micro-ops.
744 if (Subtarget->hasAddrLSLSlow14() && (Size == 2 || Size == 16))
745 return false;
746
747 // Check whether we're going to emit the address arithmetic anyway because
748 // it's used by a non-address operation.
749 if (V.getOpcode() == ISD::SHL && isWorthFoldingSHL(V))
750 return true;
751 if (V.getOpcode() == ISD::ADD) {
752 const SDValue LHS = V.getOperand(i: 0);
753 const SDValue RHS = V.getOperand(i: 1);
754 if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(V: LHS))
755 return true;
756 if (RHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(V: RHS))
757 return true;
758 }
759
760 // It hurts otherwise, since the value will be reused.
761 return false;
762}
763
764/// and (shl/srl/sra, x, c), mask --> shl (srl/sra, x, c1), c2
765/// to select more shifted register
766bool AArch64DAGToDAGISel::SelectShiftedRegisterFromAnd(SDValue N, SDValue &Reg,
767 SDValue &Shift) {
768 EVT VT = N.getValueType();
769 if (VT != MVT::i32 && VT != MVT::i64)
770 return false;
771
772 if (N->getOpcode() != ISD::AND || !N->hasOneUse())
773 return false;
774 SDValue LHS = N.getOperand(i: 0);
775 if (!LHS->hasOneUse())
776 return false;
777
778 unsigned LHSOpcode = LHS->getOpcode();
779 if (LHSOpcode != ISD::SHL && LHSOpcode != ISD::SRL && LHSOpcode != ISD::SRA)
780 return false;
781
782 ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(Val: LHS.getOperand(i: 1));
783 if (!ShiftAmtNode)
784 return false;
785
786 uint64_t ShiftAmtC = ShiftAmtNode->getZExtValue();
787 ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1));
788 if (!RHSC)
789 return false;
790
791 APInt AndMask = RHSC->getAPIntValue();
792 unsigned LowZBits, MaskLen;
793 if (!AndMask.isShiftedMask(MaskIdx&: LowZBits, MaskLen))
794 return false;
795
796 unsigned BitWidth = N.getValueSizeInBits();
797 SDLoc DL(LHS);
798 uint64_t NewShiftC;
799 unsigned NewShiftOp;
800 if (LHSOpcode == ISD::SHL) {
801 // LowZBits <= ShiftAmtC will fall into isBitfieldPositioningOp
802 // BitWidth != LowZBits + MaskLen doesn't match the pattern
803 if (LowZBits <= ShiftAmtC || (BitWidth != LowZBits + MaskLen))
804 return false;
805
806 NewShiftC = LowZBits - ShiftAmtC;
807 NewShiftOp = VT == MVT::i64 ? AArch64::UBFMXri : AArch64::UBFMWri;
808 } else {
809 if (LowZBits == 0)
810 return false;
811
812 // NewShiftC >= BitWidth will fall into isBitfieldExtractOp
813 NewShiftC = LowZBits + ShiftAmtC;
814 if (NewShiftC >= BitWidth)
815 return false;
816
817 // SRA need all high bits
818 if (LHSOpcode == ISD::SRA && (BitWidth != (LowZBits + MaskLen)))
819 return false;
820
821 // SRL high bits can be 0 or 1
822 if (LHSOpcode == ISD::SRL && (BitWidth > (NewShiftC + MaskLen)))
823 return false;
824
825 if (LHSOpcode == ISD::SRL)
826 NewShiftOp = VT == MVT::i64 ? AArch64::UBFMXri : AArch64::UBFMWri;
827 else
828 NewShiftOp = VT == MVT::i64 ? AArch64::SBFMXri : AArch64::SBFMWri;
829 }
830
831 assert(NewShiftC < BitWidth && "Invalid shift amount");
832 SDValue NewShiftAmt = CurDAG->getTargetConstant(Val: NewShiftC, DL, VT);
833 SDValue BitWidthMinus1 = CurDAG->getTargetConstant(Val: BitWidth - 1, DL, VT);
834 Reg = SDValue(CurDAG->getMachineNode(Opcode: NewShiftOp, dl: DL, VT, Op1: LHS->getOperand(Num: 0),
835 Op2: NewShiftAmt, Op3: BitWidthMinus1),
836 0);
837 unsigned ShVal = AArch64_AM::getShifterImm(ST: AArch64_AM::LSL, Imm: LowZBits);
838 Shift = CurDAG->getTargetConstant(Val: ShVal, DL, VT: MVT::i32);
839 return true;
840}
841
842/// getExtendTypeForNode - Translate an extend node to the corresponding
843/// ExtendType value.
844static AArch64_AM::ShiftExtendType
845getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
846 if (N.getOpcode() == ISD::SIGN_EXTEND ||
847 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
848 EVT SrcVT;
849 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
850 SrcVT = cast<VTSDNode>(Val: N.getOperand(i: 1))->getVT();
851 else
852 SrcVT = N.getOperand(i: 0).getValueType();
853
854 if (!IsLoadStore && SrcVT == MVT::i8)
855 return AArch64_AM::SXTB;
856 else if (!IsLoadStore && SrcVT == MVT::i16)
857 return AArch64_AM::SXTH;
858 else if (SrcVT == MVT::i32)
859 return AArch64_AM::SXTW;
860 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
861
862 return AArch64_AM::InvalidShiftExtend;
863 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
864 N.getOpcode() == ISD::ANY_EXTEND) {
865 EVT SrcVT = N.getOperand(i: 0).getValueType();
866 if (!IsLoadStore && SrcVT == MVT::i8)
867 return AArch64_AM::UXTB;
868 else if (!IsLoadStore && SrcVT == MVT::i16)
869 return AArch64_AM::UXTH;
870 else if (SrcVT == MVT::i32)
871 return AArch64_AM::UXTW;
872 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
873
874 return AArch64_AM::InvalidShiftExtend;
875 } else if (N.getOpcode() == ISD::AND) {
876 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1));
877 if (!CSD)
878 return AArch64_AM::InvalidShiftExtend;
879 uint64_t AndMask = CSD->getZExtValue();
880
881 switch (AndMask) {
882 default:
883 return AArch64_AM::InvalidShiftExtend;
884 case 0xFF:
885 return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
886 case 0xFFFF:
887 return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
888 case 0xFFFFFFFF:
889 return AArch64_AM::UXTW;
890 }
891 }
892
893 return AArch64_AM::InvalidShiftExtend;
894}
895
896/// Determine whether it is worth to fold V into an extended register of an
897/// Add/Sub. LSL means we are folding into an `add w0, w1, w2, lsl #N`
898/// instruction, and the shift should be treated as worth folding even if has
899/// multiple uses.
900bool AArch64DAGToDAGISel::isWorthFoldingALU(SDValue V, bool LSL) const {
901 // Trivial if we are optimizing for code size or if there is only
902 // one use of the value.
903 if (CurDAG->shouldOptForSize() || V.hasOneUse())
904 return true;
905
906 // If a subtarget has a fastpath LSL we can fold a logical shift into
907 // the add/sub and save a cycle.
908 if (LSL && Subtarget->hasALULSLFast() && V.getOpcode() == ISD::SHL &&
909 V.getConstantOperandVal(i: 1) <= 4 &&
910 getExtendTypeForNode(N: V.getOperand(i: 0)) == AArch64_AM::InvalidShiftExtend)
911 return true;
912
913 // It hurts otherwise, since the value will be reused.
914 return false;
915}
916
917/// SelectShiftedRegister - Select a "shifted register" operand. If the value
918/// is not shifted, set the Shift operand to default of "LSL 0". The logical
919/// instructions allow the shifted register to be rotated, but the arithmetic
920/// instructions do not. The AllowROR parameter specifies whether ROR is
921/// supported.
922bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
923 SDValue &Reg, SDValue &Shift) {
924 if (SelectShiftedRegisterFromAnd(N, Reg, Shift))
925 return true;
926
927 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
928 if (ShType == AArch64_AM::InvalidShiftExtend)
929 return false;
930 if (!AllowROR && ShType == AArch64_AM::ROR)
931 return false;
932
933 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1))) {
934 unsigned BitSize = N.getValueSizeInBits();
935 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
936 unsigned ShVal = AArch64_AM::getShifterImm(ST: ShType, Imm: Val);
937
938 Reg = N.getOperand(i: 0);
939 Shift = CurDAG->getTargetConstant(Val: ShVal, DL: SDLoc(N), VT: MVT::i32);
940 return isWorthFoldingALU(V: N, LSL: true);
941 }
942
943 return false;
944}
945
946/// Instructions that accept extend modifiers like UXTW expect the register
947/// being extended to be a GPR32, but the incoming DAG might be acting on a
948/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
949/// this is the case.
950static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
951 if (N.getValueType() == MVT::i32)
952 return N;
953
954 SDLoc dl(N);
955 return CurDAG->getTargetExtractSubreg(SRIdx: AArch64::sub_32, DL: dl, VT: MVT::i32, Operand: N);
956}
957
958// Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
959template<signed Low, signed High, signed Scale>
960bool AArch64DAGToDAGISel::SelectRDVLImm(SDValue N, SDValue &Imm) {
961 if (!isa<ConstantSDNode>(Val: N))
962 return false;
963
964 int64_t MulImm = cast<ConstantSDNode>(Val&: N)->getSExtValue();
965 if ((MulImm % std::abs(x: Scale)) == 0) {
966 int64_t RDVLImm = MulImm / Scale;
967 if ((RDVLImm >= Low) && (RDVLImm <= High)) {
968 Imm = CurDAG->getSignedTargetConstant(Val: RDVLImm, DL: SDLoc(N), VT: MVT::i32);
969 return true;
970 }
971 }
972
973 return false;
974}
975
976// Returns a suitable RDSVL multiplier from a left shift.
977template <signed Low, signed High>
978bool AArch64DAGToDAGISel::SelectRDSVLShiftImm(SDValue N, SDValue &Imm) {
979 if (!isa<ConstantSDNode>(Val: N))
980 return false;
981
982 int64_t MulImm = 1LL << cast<ConstantSDNode>(Val&: N)->getSExtValue();
983 if (MulImm >= Low && MulImm <= High) {
984 Imm = CurDAG->getSignedTargetConstant(Val: MulImm, DL: SDLoc(N), VT: MVT::i32);
985 return true;
986 }
987
988 return false;
989}
990
991/// SelectArithExtendedRegister - Select a "extended register" operand. This
992/// operand folds in an extend followed by an optional left shift.
993bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
994 SDValue &Shift) {
995 unsigned ShiftVal = 0;
996 AArch64_AM::ShiftExtendType Ext;
997
998 if (N.getOpcode() == ISD::SHL) {
999 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1));
1000 if (!CSD)
1001 return false;
1002 ShiftVal = CSD->getZExtValue();
1003 if (ShiftVal > 4)
1004 return false;
1005
1006 Ext = getExtendTypeForNode(N: N.getOperand(i: 0));
1007 if (Ext == AArch64_AM::InvalidShiftExtend)
1008 return false;
1009
1010 Reg = N.getOperand(i: 0).getOperand(i: 0);
1011 } else {
1012 Ext = getExtendTypeForNode(N);
1013 if (Ext == AArch64_AM::InvalidShiftExtend)
1014 return false;
1015
1016 Reg = N.getOperand(i: 0);
1017
1018 // Don't match if free 32-bit -> 64-bit zext can be used instead. Use the
1019 // isDef32 as a heuristic for when the operand is likely to be a 32bit def.
1020 auto isDef32 = [](SDValue N) {
1021 unsigned Opc = N.getOpcode();
1022 return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
1023 Opc != ISD::CopyFromReg && Opc != ISD::AssertSext &&
1024 Opc != ISD::AssertZext && Opc != ISD::AssertAlign &&
1025 Opc != ISD::FREEZE;
1026 };
1027 if (Ext == AArch64_AM::UXTW && Reg->getValueType(ResNo: 0).getSizeInBits() == 32 &&
1028 isDef32(Reg))
1029 return false;
1030 }
1031
1032 // AArch64 mandates that the RHS of the operation must use the smallest
1033 // register class that could contain the size being extended from. Thus,
1034 // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
1035 // there might not be an actual 32-bit value in the program. We can
1036 // (harmlessly) synthesize one by injected an EXTRACT_SUBREG here.
1037 assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
1038 Reg = narrowIfNeeded(CurDAG, N: Reg);
1039 Shift = CurDAG->getTargetConstant(Val: getArithExtendImm(ET: Ext, Imm: ShiftVal), DL: SDLoc(N),
1040 VT: MVT::i32);
1041 return isWorthFoldingALU(V: N);
1042}
1043
1044/// SelectArithUXTXRegister - Select a "UXTX register" operand. This
1045/// operand is referred by the instructions have SP operand
1046bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg,
1047 SDValue &Shift) {
1048 unsigned ShiftVal = 0;
1049 AArch64_AM::ShiftExtendType Ext;
1050
1051 if (N.getOpcode() != ISD::SHL)
1052 return false;
1053
1054 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1));
1055 if (!CSD)
1056 return false;
1057 ShiftVal = CSD->getZExtValue();
1058 if (ShiftVal > 4)
1059 return false;
1060
1061 Ext = AArch64_AM::UXTX;
1062 Reg = N.getOperand(i: 0);
1063 Shift = CurDAG->getTargetConstant(Val: getArithExtendImm(ET: Ext, Imm: ShiftVal), DL: SDLoc(N),
1064 VT: MVT::i32);
1065 return isWorthFoldingALU(V: N);
1066}
1067
1068/// If there's a use of this ADDlow that's not itself a load/store then we'll
1069/// need to create a real ADD instruction from it anyway and there's no point in
1070/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
1071/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
1072/// leads to duplicated ADRP instructions.
1073static bool isWorthFoldingADDlow(SDValue N) {
1074 for (auto *User : N->users()) {
1075 if (User->getOpcode() != ISD::LOAD && User->getOpcode() != ISD::STORE &&
1076 User->getOpcode() != ISD::ATOMIC_LOAD &&
1077 User->getOpcode() != ISD::ATOMIC_STORE)
1078 return false;
1079
1080 // ldar and stlr have much more restrictive addressing modes (just a
1081 // register).
1082 if (isStrongerThanMonotonic(AO: cast<MemSDNode>(Val: User)->getSuccessOrdering()))
1083 return false;
1084 }
1085
1086 return true;
1087}
1088
1089/// Check if the immediate offset is valid as a scaled immediate.
1090static bool isValidAsScaledImmediate(int64_t Offset, unsigned Range,
1091 unsigned Size) {
1092 if ((Offset & (Size - 1)) == 0 && Offset >= 0 &&
1093 Offset < (Range << Log2_32(Value: Size)))
1094 return true;
1095 return false;
1096}
1097
1098/// SelectAddrModeIndexedBitWidth - Select a "register plus scaled (un)signed BW-bit
1099/// immediate" address. The "Size" argument is the size in bytes of the memory
1100/// reference, which determines the scale.
1101bool AArch64DAGToDAGISel::SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm,
1102 unsigned BW, unsigned Size,
1103 SDValue &Base,
1104 SDValue &OffImm) {
1105 SDLoc dl(N);
1106 const DataLayout &DL = CurDAG->getDataLayout();
1107 const TargetLowering *TLI = getTargetLowering();
1108 if (N.getOpcode() == ISD::FrameIndex) {
1109 int FI = cast<FrameIndexSDNode>(Val&: N)->getIndex();
1110 Base = CurDAG->getTargetFrameIndex(FI, VT: TLI->getPointerTy(DL));
1111 OffImm = CurDAG->getTargetConstant(Val: 0, DL: dl, VT: MVT::i64);
1112 return true;
1113 }
1114
1115 // As opposed to the (12-bit) Indexed addressing mode below, the 7/9-bit signed
1116 // selected here doesn't support labels/immediates, only base+offset.
1117 if (CurDAG->isBaseWithConstantOffset(Op: N)) {
1118 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1))) {
1119 if (IsSignedImm) {
1120 int64_t RHSC = RHS->getSExtValue();
1121 unsigned Scale = Log2_32(Value: Size);
1122 int64_t Range = 0x1LL << (BW - 1);
1123
1124 if ((RHSC & (Size - 1)) == 0 && RHSC >= -(Range << Scale) &&
1125 RHSC < (Range << Scale)) {
1126 Base = N.getOperand(i: 0);
1127 if (Base.getOpcode() == ISD::FrameIndex) {
1128 int FI = cast<FrameIndexSDNode>(Val&: Base)->getIndex();
1129 Base = CurDAG->getTargetFrameIndex(FI, VT: TLI->getPointerTy(DL));
1130 }
1131 OffImm = CurDAG->getTargetConstant(Val: RHSC >> Scale, DL: dl, VT: MVT::i64);
1132 return true;
1133 }
1134 } else {
1135 // unsigned Immediate
1136 uint64_t RHSC = RHS->getZExtValue();
1137 unsigned Scale = Log2_32(Value: Size);
1138 uint64_t Range = 0x1ULL << BW;
1139
1140 if ((RHSC & (Size - 1)) == 0 && RHSC < (Range << Scale)) {
1141 Base = N.getOperand(i: 0);
1142 if (Base.getOpcode() == ISD::FrameIndex) {
1143 int FI = cast<FrameIndexSDNode>(Val&: Base)->getIndex();
1144 Base = CurDAG->getTargetFrameIndex(FI, VT: TLI->getPointerTy(DL));
1145 }
1146 OffImm = CurDAG->getTargetConstant(Val: RHSC >> Scale, DL: dl, VT: MVT::i64);
1147 return true;
1148 }
1149 }
1150 }
1151 }
1152 // Base only. The address will be materialized into a register before
1153 // the memory is accessed.
1154 // add x0, Xbase, #offset
1155 // stp x1, x2, [x0]
1156 Base = N;
1157 OffImm = CurDAG->getTargetConstant(Val: 0, DL: dl, VT: MVT::i64);
1158 return true;
1159}
1160
1161/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
1162/// immediate" address. The "Size" argument is the size in bytes of the memory
1163/// reference, which determines the scale.
1164bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
1165 SDValue &Base, SDValue &OffImm) {
1166 SDLoc dl(N);
1167 const DataLayout &DL = CurDAG->getDataLayout();
1168 const TargetLowering *TLI = getTargetLowering();
1169 if (N.getOpcode() == ISD::FrameIndex) {
1170 int FI = cast<FrameIndexSDNode>(Val&: N)->getIndex();
1171 Base = CurDAG->getTargetFrameIndex(FI, VT: TLI->getPointerTy(DL));
1172 OffImm = CurDAG->getTargetConstant(Val: 0, DL: dl, VT: MVT::i64);
1173 return true;
1174 }
1175
1176 if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
1177 GlobalAddressSDNode *GAN =
1178 dyn_cast<GlobalAddressSDNode>(Val: N.getOperand(i: 1).getNode());
1179 Base = N.getOperand(i: 0);
1180 OffImm = N.getOperand(i: 1);
1181 if (!GAN)
1182 return true;
1183
1184 if (GAN->getOffset() % Size == 0 &&
1185 GAN->getGlobal()->getPointerAlignment(DL) >= Size)
1186 return true;
1187 }
1188
1189 if (CurDAG->isBaseWithConstantOffset(Op: N)) {
1190 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1))) {
1191 int64_t RHSC = (int64_t)RHS->getZExtValue();
1192 unsigned Scale = Log2_32(Value: Size);
1193 if (isValidAsScaledImmediate(Offset: RHSC, Range: 0x1000, Size)) {
1194 Base = N.getOperand(i: 0);
1195 if (Base.getOpcode() == ISD::FrameIndex) {
1196 int FI = cast<FrameIndexSDNode>(Val&: Base)->getIndex();
1197 Base = CurDAG->getTargetFrameIndex(FI, VT: TLI->getPointerTy(DL));
1198 }
1199 OffImm = CurDAG->getTargetConstant(Val: RHSC >> Scale, DL: dl, VT: MVT::i64);
1200 return true;
1201 }
1202 }
1203 }
1204
1205 // Before falling back to our general case, check if the unscaled
1206 // instructions can handle this. If so, that's preferable.
1207 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
1208 return false;
1209
1210 // Base only. The address will be materialized into a register before
1211 // the memory is accessed.
1212 // add x0, Xbase, #offset
1213 // ldr x0, [x0]
1214 Base = N;
1215 OffImm = CurDAG->getTargetConstant(Val: 0, DL: dl, VT: MVT::i64);
1216 return true;
1217}
1218
1219/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
1220/// immediate" address. This should only match when there is an offset that
1221/// is not valid for a scaled immediate addressing mode. The "Size" argument
1222/// is the size in bytes of the memory reference, which is needed here to know
1223/// what is valid for a scaled immediate.
1224bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
1225 SDValue &Base,
1226 SDValue &OffImm) {
1227 if (!CurDAG->isBaseWithConstantOffset(Op: N))
1228 return false;
1229 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1))) {
1230 int64_t RHSC = RHS->getSExtValue();
1231 if (RHSC >= -256 && RHSC < 256) {
1232 Base = N.getOperand(i: 0);
1233 if (Base.getOpcode() == ISD::FrameIndex) {
1234 int FI = cast<FrameIndexSDNode>(Val&: Base)->getIndex();
1235 const TargetLowering *TLI = getTargetLowering();
1236 Base = CurDAG->getTargetFrameIndex(
1237 FI, VT: TLI->getPointerTy(DL: CurDAG->getDataLayout()));
1238 }
1239 OffImm = CurDAG->getTargetConstant(Val: RHSC, DL: SDLoc(N), VT: MVT::i64);
1240 return true;
1241 }
1242 }
1243 return false;
1244}
1245
1246static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
1247 SDLoc dl(N);
1248 SDValue ImpDef = SDValue(
1249 CurDAG->getMachineNode(Opcode: TargetOpcode::IMPLICIT_DEF, dl, VT: MVT::i64), 0);
1250 return CurDAG->getTargetInsertSubreg(SRIdx: AArch64::sub_32, DL: dl, VT: MVT::i64, Operand: ImpDef,
1251 Subreg: N);
1252}
1253
1254/// Check if the given SHL node (\p N), can be used to form an
1255/// extended register for an addressing mode.
1256bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
1257 bool WantExtend, SDValue &Offset,
1258 SDValue &SignExtend) {
1259 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
1260 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(Val: N.getOperand(i: 1));
1261 if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
1262 return false;
1263
1264 SDLoc dl(N);
1265 if (WantExtend) {
1266 AArch64_AM::ShiftExtendType Ext =
1267 getExtendTypeForNode(N: N.getOperand(i: 0), IsLoadStore: true);
1268 if (Ext == AArch64_AM::InvalidShiftExtend)
1269 return false;
1270
1271 Offset = narrowIfNeeded(CurDAG, N: N.getOperand(i: 0).getOperand(i: 0));
1272 SignExtend = CurDAG->getTargetConstant(Val: Ext == AArch64_AM::SXTW, DL: dl,
1273 VT: MVT::i32);
1274 } else {
1275 Offset = N.getOperand(i: 0);
1276 SignExtend = CurDAG->getTargetConstant(Val: 0, DL: dl, VT: MVT::i32);
1277 }
1278
1279 unsigned LegalShiftVal = Log2_32(Value: Size);
1280 unsigned ShiftVal = CSD->getZExtValue();
1281
1282 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
1283 return false;
1284
1285 return isWorthFoldingAddr(V: N, Size);
1286}
1287
1288bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
1289 SDValue &Base, SDValue &Offset,
1290 SDValue &SignExtend,
1291 SDValue &DoShift) {
1292 if (N.getOpcode() != ISD::ADD)
1293 return false;
1294 SDValue LHS = N.getOperand(i: 0);
1295 SDValue RHS = N.getOperand(i: 1);
1296 SDLoc dl(N);
1297
1298 // We don't want to match immediate adds here, because they are better lowered
1299 // to the register-immediate addressing modes.
1300 if (isa<ConstantSDNode>(Val: LHS) || isa<ConstantSDNode>(Val: RHS))
1301 return false;
1302
1303 // Check if this particular node is reused in any non-memory related
1304 // operation. If yes, do not try to fold this node into the address
1305 // computation, since the computation will be kept.
1306 const SDNode *Node = N.getNode();
1307 for (SDNode *UI : Node->users()) {
1308 if (!isMemOpOrPrefetch(N: UI))
1309 return false;
1310 }
1311
1312 // Remember if it is worth folding N when it produces extended register.
1313 bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(V: N, Size);
1314
1315 // Try to match a shifted extend on the RHS.
1316 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
1317 SelectExtendedSHL(N: RHS, Size, WantExtend: true, Offset, SignExtend)) {
1318 Base = LHS;
1319 DoShift = CurDAG->getTargetConstant(Val: true, DL: dl, VT: MVT::i32);
1320 return true;
1321 }
1322
1323 // Try to match a shifted extend on the LHS.
1324 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
1325 SelectExtendedSHL(N: LHS, Size, WantExtend: true, Offset, SignExtend)) {
1326 Base = RHS;
1327 DoShift = CurDAG->getTargetConstant(Val: true, DL: dl, VT: MVT::i32);
1328 return true;
1329 }
1330
1331 // There was no shift, whatever else we find.
1332 DoShift = CurDAG->getTargetConstant(Val: false, DL: dl, VT: MVT::i32);
1333
1334 AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
1335 // Try to match an unshifted extend on the LHS.
1336 if (IsExtendedRegisterWorthFolding &&
1337 (Ext = getExtendTypeForNode(N: LHS, IsLoadStore: true)) !=
1338 AArch64_AM::InvalidShiftExtend) {
1339 Base = RHS;
1340 Offset = narrowIfNeeded(CurDAG, N: LHS.getOperand(i: 0));
1341 SignExtend = CurDAG->getTargetConstant(Val: Ext == AArch64_AM::SXTW, DL: dl,
1342 VT: MVT::i32);
1343 if (isWorthFoldingAddr(V: LHS, Size))
1344 return true;
1345 }
1346
1347 // Try to match an unshifted extend on the RHS.
1348 if (IsExtendedRegisterWorthFolding &&
1349 (Ext = getExtendTypeForNode(N: RHS, IsLoadStore: true)) !=
1350 AArch64_AM::InvalidShiftExtend) {
1351 Base = LHS;
1352 Offset = narrowIfNeeded(CurDAG, N: RHS.getOperand(i: 0));
1353 SignExtend = CurDAG->getTargetConstant(Val: Ext == AArch64_AM::SXTW, DL: dl,
1354 VT: MVT::i32);
1355 if (isWorthFoldingAddr(V: RHS, Size))
1356 return true;
1357 }
1358
1359 return false;
1360}
1361
1362// Check if the given immediate is preferred by ADD. If an immediate can be
1363// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and can not be
1364// encoded by one MOVZ, return true.
1365static bool isPreferredADD(int64_t ImmOff) {
1366 // Constant in [0x0, 0xfff] can be encoded in ADD.
1367 if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
1368 return true;
1369 // Check if it can be encoded in an "ADD LSL #12".
1370 if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
1371 // As a single MOVZ is faster than a "ADD of LSL #12", ignore such constant.
1372 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
1373 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
1374 return false;
1375}
1376
1377bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
1378 SDValue &Base, SDValue &Offset,
1379 SDValue &SignExtend,
1380 SDValue &DoShift) {
1381 if (N.getOpcode() != ISD::ADD)
1382 return false;
1383 SDValue LHS = N.getOperand(i: 0);
1384 SDValue RHS = N.getOperand(i: 1);
1385 SDLoc DL(N);
1386
1387 // Check if this particular node is reused in any non-memory related
1388 // operation. If yes, do not try to fold this node into the address
1389 // computation, since the computation will be kept.
1390 const SDNode *Node = N.getNode();
1391 for (SDNode *UI : Node->users()) {
1392 if (!isMemOpOrPrefetch(N: UI))
1393 return false;
1394 }
1395
1396 // Watch out if RHS is a wide immediate, it can not be selected into
1397 // [BaseReg+Imm] addressing mode. Also it may not be able to be encoded into
1398 // ADD/SUB. Instead it will use [BaseReg + 0] address mode and generate
1399 // instructions like:
1400 // MOV X0, WideImmediate
1401 // ADD X1, BaseReg, X0
1402 // LDR X2, [X1, 0]
1403 // For such situation, using [BaseReg, XReg] addressing mode can save one
1404 // ADD/SUB:
1405 // MOV X0, WideImmediate
1406 // LDR X2, [BaseReg, X0]
1407 if (isa<ConstantSDNode>(Val: RHS)) {
1408 int64_t ImmOff = (int64_t)RHS->getAsZExtVal();
1409 // Skip the immediate can be selected by load/store addressing mode.
1410 // Also skip the immediate can be encoded by a single ADD (SUB is also
1411 // checked by using -ImmOff).
1412 if (isValidAsScaledImmediate(Offset: ImmOff, Range: 0x1000, Size) ||
1413 isPreferredADD(ImmOff) || isPreferredADD(ImmOff: -ImmOff))
1414 return false;
1415
1416 SDValue Ops[] = { RHS };
1417 SDNode *MOVI =
1418 CurDAG->getMachineNode(Opcode: AArch64::MOVi64imm, dl: DL, VT: MVT::i64, Ops);
1419 SDValue MOVIV = SDValue(MOVI, 0);
1420 // This ADD of two X register will be selected into [Reg+Reg] mode.
1421 N = CurDAG->getNode(Opcode: ISD::ADD, DL, VT: MVT::i64, N1: LHS, N2: MOVIV);
1422 }
1423
1424 // Remember if it is worth folding N when it produces extended register.
1425 bool IsExtendedRegisterWorthFolding = isWorthFoldingAddr(V: N, Size);
1426
1427 // Try to match a shifted extend on the RHS.
1428 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
1429 SelectExtendedSHL(N: RHS, Size, WantExtend: false, Offset, SignExtend)) {
1430 Base = LHS;
1431 DoShift = CurDAG->getTargetConstant(Val: true, DL, VT: MVT::i32);
1432 return true;
1433 }
1434
1435 // Try to match a shifted extend on the LHS.
1436 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
1437 SelectExtendedSHL(N: LHS, Size, WantExtend: false, Offset, SignExtend)) {
1438 Base = RHS;
1439 DoShift = CurDAG->getTargetConstant(Val: true, DL, VT: MVT::i32);
1440 return true;
1441 }
1442
1443 // Match any non-shifted, non-extend, non-immediate add expression.
1444 Base = LHS;
1445 Offset = RHS;
1446 SignExtend = CurDAG->getTargetConstant(Val: false, DL, VT: MVT::i32);
1447 DoShift = CurDAG->getTargetConstant(Val: false, DL, VT: MVT::i32);
1448 // Reg1 + Reg2 is free: no check needed.
1449 return true;
1450}
1451
1452SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
1453 static const unsigned RegClassIDs[] = {
1454 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
1455 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
1456 AArch64::dsub2, AArch64::dsub3};
1457
1458 return createTuple(Vecs: Regs, RegClassIDs, SubRegs);
1459}
1460
1461SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
1462 static const unsigned RegClassIDs[] = {
1463 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
1464 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
1465 AArch64::qsub2, AArch64::qsub3};
1466
1467 return createTuple(Vecs: Regs, RegClassIDs, SubRegs);
1468}
1469
1470SDValue AArch64DAGToDAGISel::createZTuple(ArrayRef<SDValue> Regs) {
1471 static const unsigned RegClassIDs[] = {AArch64::ZPR2RegClassID,
1472 AArch64::ZPR3RegClassID,
1473 AArch64::ZPR4RegClassID};
1474 static const unsigned SubRegs[] = {AArch64::zsub0, AArch64::zsub1,
1475 AArch64::zsub2, AArch64::zsub3};
1476
1477 return createTuple(Vecs: Regs, RegClassIDs, SubRegs);
1478}
1479
1480SDValue AArch64DAGToDAGISel::createZMulTuple(ArrayRef<SDValue> Regs) {
1481 assert(Regs.size() == 2 || Regs.size() == 4);
1482
1483 // The createTuple interface requires 3 RegClassIDs for each possible
1484 // tuple type even though we only have them for ZPR2 and ZPR4.
1485 static const unsigned RegClassIDs[] = {AArch64::ZPR2Mul2RegClassID, 0,
1486 AArch64::ZPR4Mul4RegClassID};
1487 static const unsigned SubRegs[] = {AArch64::zsub0, AArch64::zsub1,
1488 AArch64::zsub2, AArch64::zsub3};
1489 return createTuple(Vecs: Regs, RegClassIDs, SubRegs);
1490}
1491
1492SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
1493 const unsigned RegClassIDs[],
1494 const unsigned SubRegs[]) {
1495 // There's no special register-class for a vector-list of 1 element: it's just
1496 // a vector.
1497 if (Regs.size() == 1)
1498 return Regs[0];
1499
1500 assert(Regs.size() >= 2 && Regs.size() <= 4);
1501
1502 SDLoc DL(Regs[0]);
1503
1504 SmallVector<SDValue, 4> Ops;
1505
1506 // First operand of REG_SEQUENCE is the desired RegClass.
1507 Ops.push_back(
1508 Elt: CurDAG->getTargetConstant(Val: RegClassIDs[Regs.size() - 2], DL, VT: MVT::i32));
1509
1510 // Then we get pairs of source & subregister-position for the components.
1511 for (unsigned i = 0; i < Regs.size(); ++i) {
1512 Ops.push_back(Elt: Regs[i]);
1513 Ops.push_back(Elt: CurDAG->getTargetConstant(Val: SubRegs[i], DL, VT: MVT::i32));
1514 }
1515
1516 SDNode *N =
1517 CurDAG->getMachineNode(Opcode: TargetOpcode::REG_SEQUENCE, dl: DL, VT: MVT::Untyped, Ops);
1518 return SDValue(N, 0);
1519}
1520
1521void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
1522 bool isExt) {
1523 SDLoc dl(N);
1524 EVT VT = N->getValueType(ResNo: 0);
1525
1526 unsigned ExtOff = isExt;
1527
1528 // Form a REG_SEQUENCE to force register allocation.
1529 unsigned Vec0Off = ExtOff + 1;
1530 SmallVector<SDValue, 4> Regs(N->ops().slice(N: Vec0Off, M: NumVecs));
1531 SDValue RegSeq = createQTuple(Regs);
1532
1533 SmallVector<SDValue, 6> Ops;
1534 if (isExt)
1535 Ops.push_back(Elt: N->getOperand(Num: 1));
1536 Ops.push_back(Elt: RegSeq);
1537 Ops.push_back(Elt: N->getOperand(Num: NumVecs + ExtOff + 1));
1538 ReplaceNode(F: N, T: CurDAG->getMachineNode(Opcode: Opc, dl, VT, Ops));
1539}
1540
1541static std::tuple<SDValue, SDValue>
1542extractPtrauthBlendDiscriminators(SDValue Disc, SelectionDAG *DAG) {
1543 SDLoc DL(Disc);
1544 SDValue AddrDisc;
1545 SDValue ConstDisc;
1546
1547 // If this is a blend, remember the constant and address discriminators.
1548 // Otherwise, it's either a constant discriminator, or a non-blended
1549 // address discriminator.
1550 if (Disc->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
1551 Disc->getConstantOperandVal(Num: 0) == Intrinsic::ptrauth_blend) {
1552 AddrDisc = Disc->getOperand(Num: 1);
1553 ConstDisc = Disc->getOperand(Num: 2);
1554 } else {
1555 ConstDisc = Disc;
1556 }
1557
1558 // If the constant discriminator (either the blend RHS, or the entire
1559 // discriminator value) isn't a 16-bit constant, bail out, and let the
1560 // discriminator be computed separately.
1561 auto *ConstDiscN = dyn_cast<ConstantSDNode>(Val&: ConstDisc);
1562 if (!ConstDiscN || !isUInt<16>(x: ConstDiscN->getZExtValue()))
1563 return std::make_tuple(args: DAG->getTargetConstant(Val: 0, DL, VT: MVT::i64), args&: Disc);
1564
1565 // If there's no address discriminator, use XZR directly.
1566 if (!AddrDisc)
1567 AddrDisc = DAG->getRegister(Reg: AArch64::XZR, VT: MVT::i64);
1568
1569 return std::make_tuple(
1570 args: DAG->getTargetConstant(Val: ConstDiscN->getZExtValue(), DL, VT: MVT::i64),
1571 args&: AddrDisc);
1572}
1573
1574void AArch64DAGToDAGISel::SelectPtrauthAuth(SDNode *N) {
1575 SDLoc DL(N);
1576 // IntrinsicID is operand #0
1577 SDValue Val = N->getOperand(Num: 1);
1578 SDValue AUTKey = N->getOperand(Num: 2);
1579 SDValue AUTDisc = N->getOperand(Num: 3);
1580
1581 unsigned AUTKeyC = cast<ConstantSDNode>(Val&: AUTKey)->getZExtValue();
1582 AUTKey = CurDAG->getTargetConstant(Val: AUTKeyC, DL, VT: MVT::i64);
1583
1584 SDValue AUTAddrDisc, AUTConstDisc;
1585 std::tie(args&: AUTConstDisc, args&: AUTAddrDisc) =
1586 extractPtrauthBlendDiscriminators(Disc: AUTDisc, DAG: CurDAG);
1587
1588 if (!Subtarget->isX16X17Safer()) {
1589 std::vector<SDValue> Ops = {Val, AUTKey, AUTConstDisc, AUTAddrDisc};
1590 // Copy deactivation symbol if present.
1591 if (N->getNumOperands() > 4)
1592 Ops.push_back(x: N->getOperand(Num: 4));
1593
1594 SDNode *AUT =
1595 CurDAG->getMachineNode(Opcode: AArch64::AUTxMxN, dl: DL, VT1: MVT::i64, VT2: MVT::i64, Ops);
1596 ReplaceNode(F: N, T: AUT);
1597 } else {
1598 SDValue X16Copy = CurDAG->getCopyToReg(Chain: CurDAG->getEntryNode(), dl: DL,
1599 Reg: AArch64::X16, N: Val, Glue: SDValue());
1600 SDValue Ops[] = {AUTKey, AUTConstDisc, AUTAddrDisc, X16Copy.getValue(R: 1)};
1601
1602 SDNode *AUT = CurDAG->getMachineNode(Opcode: AArch64::AUTx16x17, dl: DL, VT: MVT::i64, Ops);
1603 ReplaceNode(F: N, T: AUT);
1604 }
1605}
1606
1607void AArch64DAGToDAGISel::SelectPtrauthResign(SDNode *N) {
1608 SDLoc DL(N);
1609 // IntrinsicID is operand #0, if W_CHAIN it is #1
1610 int OffsetBase = N->getOpcode() == ISD::INTRINSIC_W_CHAIN ? 1 : 0;
1611 SDValue Val = N->getOperand(Num: OffsetBase + 1);
1612 SDValue AUTKey = N->getOperand(Num: OffsetBase + 2);
1613 SDValue AUTDisc = N->getOperand(Num: OffsetBase + 3);
1614 SDValue PACKey = N->getOperand(Num: OffsetBase + 4);
1615 SDValue PACDisc = N->getOperand(Num: OffsetBase + 5);
1616 uint32_t IntNum = N->getConstantOperandVal(Num: OffsetBase + 0);
1617 bool HasLoad = IntNum == Intrinsic::ptrauth_resign_load_relative;
1618
1619 unsigned AUTKeyC = cast<ConstantSDNode>(Val&: AUTKey)->getZExtValue();
1620 unsigned PACKeyC = cast<ConstantSDNode>(Val&: PACKey)->getZExtValue();
1621
1622 AUTKey = CurDAG->getTargetConstant(Val: AUTKeyC, DL, VT: MVT::i64);
1623 PACKey = CurDAG->getTargetConstant(Val: PACKeyC, DL, VT: MVT::i64);
1624
1625 SDValue AUTAddrDisc, AUTConstDisc;
1626 std::tie(args&: AUTConstDisc, args&: AUTAddrDisc) =
1627 extractPtrauthBlendDiscriminators(Disc: AUTDisc, DAG: CurDAG);
1628
1629 SDValue PACAddrDisc, PACConstDisc;
1630 std::tie(args&: PACConstDisc, args&: PACAddrDisc) =
1631 extractPtrauthBlendDiscriminators(Disc: PACDisc, DAG: CurDAG);
1632
1633 SDValue X16Copy = CurDAG->getCopyToReg(Chain: CurDAG->getEntryNode(), dl: DL,
1634 Reg: AArch64::X16, N: Val, Glue: SDValue());
1635
1636 if (HasLoad) {
1637 SDValue Addend = N->getOperand(Num: OffsetBase + 6);
1638 SDValue IncomingChain = N->getOperand(Num: 0);
1639 SDValue Ops[] = {AUTKey, AUTConstDisc, AUTAddrDisc,
1640 PACKey, PACConstDisc, PACAddrDisc,
1641 Addend, IncomingChain, X16Copy.getValue(R: 1)};
1642
1643 SDNode *AUTRELLOADPAC = CurDAG->getMachineNode(Opcode: AArch64::AUTRELLOADPAC, dl: DL,
1644 VT1: MVT::i64, VT2: MVT::Other, Ops);
1645 ReplaceNode(F: N, T: AUTRELLOADPAC);
1646 } else {
1647 SDValue Ops[] = {AUTKey, AUTConstDisc, AUTAddrDisc, PACKey,
1648 PACConstDisc, PACAddrDisc, X16Copy.getValue(R: 1)};
1649
1650 SDNode *AUTPAC = CurDAG->getMachineNode(Opcode: AArch64::AUTPAC, dl: DL, VT: MVT::i64, Ops);
1651 ReplaceNode(F: N, T: AUTPAC);
1652 }
1653}
1654
1655bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
1656 LoadSDNode *LD = cast<LoadSDNode>(Val: N);
1657 if (LD->isUnindexed())
1658 return false;
1659 EVT VT = LD->getMemoryVT();
1660 EVT DstVT = N->getValueType(ResNo: 0);
1661 ISD::MemIndexedMode AM = LD->getAddressingMode();
1662 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
1663 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(Val: LD->getOffset());
1664 int OffsetVal = (int)OffsetOp->getZExtValue();
1665
1666 // We're not doing validity checking here. That was done when checking
1667 // if we should mark the load as indexed or not. We're just selecting
1668 // the right instruction.
1669 unsigned Opcode = 0;
1670
1671 ISD::LoadExtType ExtType = LD->getExtensionType();
1672 bool InsertTo64 = false;
1673 if (VT == MVT::i64)
1674 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
1675 else if (VT == MVT::i32) {
1676 if (ExtType == ISD::NON_EXTLOAD)
1677 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1678 else if (ExtType == ISD::SEXTLOAD)
1679 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
1680 else {
1681 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1682 InsertTo64 = true;
1683 // The result of the load is only i32. It's the subreg_to_reg that makes
1684 // it into an i64.
1685 DstVT = MVT::i32;
1686 }
1687 } else if (VT == MVT::i16) {
1688 if (ExtType == ISD::SEXTLOAD) {
1689 if (DstVT == MVT::i64)
1690 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
1691 else
1692 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
1693 } else {
1694 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
1695 InsertTo64 = DstVT == MVT::i64;
1696 // The result of the load is only i32. It's the subreg_to_reg that makes
1697 // it into an i64.
1698 DstVT = MVT::i32;
1699 }
1700 } else if (VT == MVT::i8) {
1701 if (ExtType == ISD::SEXTLOAD) {
1702 if (DstVT == MVT::i64)
1703 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1704 else
1705 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1706 } else {
1707 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1708 InsertTo64 = DstVT == MVT::i64;
1709 // The result of the load is only i32. It's the subreg_to_reg that makes
1710 // it into an i64.
1711 DstVT = MVT::i32;
1712 }
1713 } else if (VT == MVT::f16) {
1714 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1715 } else if (VT == MVT::bf16) {
1716 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1717 } else if (VT == MVT::f32) {
1718 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1719 } else if (VT == MVT::f64 ||
1720 (VT.is64BitVector() && Subtarget->isLittleEndian())) {
1721 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1722 } else if (VT.is128BitVector() && Subtarget->isLittleEndian()) {
1723 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1724 } else if (VT.is64BitVector()) {
1725 if (IsPre || OffsetVal != 8)
1726 return false;
1727 switch (VT.getScalarSizeInBits()) {
1728 case 8:
1729 Opcode = AArch64::LD1Onev8b_POST;
1730 break;
1731 case 16:
1732 Opcode = AArch64::LD1Onev4h_POST;
1733 break;
1734 case 32:
1735 Opcode = AArch64::LD1Onev2s_POST;
1736 break;
1737 case 64:
1738 Opcode = AArch64::LD1Onev1d_POST;
1739 break;
1740 default:
1741 llvm_unreachable("Expected vector element to be a power of 2");
1742 }
1743 } else if (VT.is128BitVector()) {
1744 if (IsPre || OffsetVal != 16)
1745 return false;
1746 switch (VT.getScalarSizeInBits()) {
1747 case 8:
1748 Opcode = AArch64::LD1Onev16b_POST;
1749 break;
1750 case 16:
1751 Opcode = AArch64::LD1Onev8h_POST;
1752 break;
1753 case 32:
1754 Opcode = AArch64::LD1Onev4s_POST;
1755 break;
1756 case 64:
1757 Opcode = AArch64::LD1Onev2d_POST;
1758 break;
1759 default:
1760 llvm_unreachable("Expected vector element to be a power of 2");
1761 }
1762 } else
1763 return false;
1764 SDValue Chain = LD->getChain();
1765 SDValue Base = LD->getBasePtr();
1766 SDLoc dl(N);
1767 // LD1 encodes an immediate offset by using XZR as the offset register.
1768 SDValue Offset = (VT.isVector() && !Subtarget->isLittleEndian())
1769 ? CurDAG->getRegister(Reg: AArch64::XZR, VT: MVT::i64)
1770 : CurDAG->getTargetConstant(Val: OffsetVal, DL: dl, VT: MVT::i64);
1771 SDValue Ops[] = { Base, Offset, Chain };
1772 SDNode *Res = CurDAG->getMachineNode(Opcode, dl, VT1: MVT::i64, VT2: DstVT,
1773 VT3: MVT::Other, Ops);
1774
1775 // Transfer memoperands.
1776 MachineMemOperand *MemOp = cast<MemSDNode>(Val: N)->getMemOperand();
1777 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: Res), NewMemRefs: {MemOp});
1778
1779 // Either way, we're replacing the node, so tell the caller that.
1780 SDValue LoadedVal = SDValue(Res, 1);
1781 if (InsertTo64) {
1782 SDValue SubReg = CurDAG->getTargetConstant(Val: AArch64::sub_32, DL: dl, VT: MVT::i32);
1783 LoadedVal = SDValue(CurDAG->getMachineNode(Opcode: AArch64::SUBREG_TO_REG, dl,
1784 VT: MVT::i64, Op1: LoadedVal, Op2: SubReg),
1785 0);
1786 }
1787
1788 ReplaceUses(F: SDValue(N, 0), T: LoadedVal);
1789 ReplaceUses(F: SDValue(N, 1), T: SDValue(Res, 0));
1790 ReplaceUses(F: SDValue(N, 2), T: SDValue(Res, 2));
1791 CurDAG->RemoveDeadNode(N);
1792 return true;
1793}
1794
1795void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
1796 unsigned SubRegIdx) {
1797 SDLoc dl(N);
1798 EVT VT = N->getValueType(ResNo: 0);
1799 SDValue Chain = N->getOperand(Num: 0);
1800
1801 SDValue Ops[] = {N->getOperand(Num: 2), // Mem operand;
1802 Chain};
1803
1804 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1805
1806 SDNode *Ld = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
1807 SDValue SuperReg = SDValue(Ld, 0);
1808 for (unsigned i = 0; i < NumVecs; ++i)
1809 ReplaceUses(F: SDValue(N, i),
1810 T: CurDAG->getTargetExtractSubreg(SRIdx: SubRegIdx + i, DL: dl, VT, Operand: SuperReg));
1811
1812 ReplaceUses(F: SDValue(N, NumVecs), T: SDValue(Ld, 1));
1813
1814 // Transfer memoperands. In the case of AArch64::LD64B, there won't be one,
1815 // because it's too simple to have needed special treatment during lowering.
1816 if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(Val: N)) {
1817 MachineMemOperand *MemOp = MemIntr->getMemOperand();
1818 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: Ld), NewMemRefs: {MemOp});
1819 }
1820
1821 CurDAG->RemoveDeadNode(N);
1822}
1823
1824void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1825 unsigned Opc, unsigned SubRegIdx) {
1826 SDLoc dl(N);
1827 EVT VT = N->getValueType(ResNo: 0);
1828 SDValue Chain = N->getOperand(Num: 0);
1829
1830 SDValue Ops[] = {N->getOperand(Num: 1), // Mem operand
1831 N->getOperand(Num: 2), // Incremental
1832 Chain};
1833
1834 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1835 MVT::Untyped, MVT::Other};
1836
1837 SDNode *Ld = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
1838
1839 // Update uses of write back register
1840 ReplaceUses(F: SDValue(N, NumVecs), T: SDValue(Ld, 0));
1841
1842 // Update uses of vector list
1843 SDValue SuperReg = SDValue(Ld, 1);
1844 if (NumVecs == 1)
1845 ReplaceUses(F: SDValue(N, 0), T: SuperReg);
1846 else
1847 for (unsigned i = 0; i < NumVecs; ++i)
1848 ReplaceUses(F: SDValue(N, i),
1849 T: CurDAG->getTargetExtractSubreg(SRIdx: SubRegIdx + i, DL: dl, VT, Operand: SuperReg));
1850
1851 // Update the chain
1852 ReplaceUses(F: SDValue(N, NumVecs + 1), T: SDValue(Ld, 2));
1853 CurDAG->RemoveDeadNode(N);
1854}
1855
1856/// Optimize \param OldBase and \param OldOffset selecting the best addressing
1857/// mode. Returns a tuple consisting of an Opcode, an SDValue representing the
1858/// new Base and an SDValue representing the new offset.
1859std::tuple<unsigned, SDValue, SDValue>
1860AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr,
1861 unsigned Opc_ri,
1862 const SDValue &OldBase,
1863 const SDValue &OldOffset,
1864 unsigned Scale) {
1865 SDValue NewBase = OldBase;
1866 SDValue NewOffset = OldOffset;
1867 // Detect a possible Reg+Imm addressing mode.
1868 const bool IsRegImm = SelectAddrModeIndexedSVE</*Min=*/-8, /*Max=*/7>(
1869 Root: N, N: OldBase, Base&: NewBase, OffImm&: NewOffset);
1870
1871 // Detect a possible reg+reg addressing mode, but only if we haven't already
1872 // detected a Reg+Imm one.
1873 const bool IsRegReg =
1874 !IsRegImm && SelectSVERegRegAddrMode(N: OldBase, Scale, Base&: NewBase, Offset&: NewOffset);
1875
1876 // Select the instruction.
1877 return std::make_tuple(args&: IsRegReg ? Opc_rr : Opc_ri, args&: NewBase, args&: NewOffset);
1878}
1879
1880enum class SelectTypeKind {
1881 Int1 = 0,
1882 Int = 1,
1883 FP = 2,
1884 AnyType = 3,
1885};
1886
1887/// This function selects an opcode from a list of opcodes, which is
1888/// expected to be the opcode for { 8-bit, 16-bit, 32-bit, 64-bit }
1889/// element types, in this order.
1890template <SelectTypeKind Kind>
1891static unsigned SelectOpcodeFromVT(EVT VT, ArrayRef<unsigned> Opcodes) {
1892 // Only match scalable vector VTs
1893 if (!VT.isScalableVector())
1894 return 0;
1895
1896 EVT EltVT = VT.getVectorElementType();
1897 unsigned Key = VT.getVectorMinNumElements();
1898 switch (Kind) {
1899 case SelectTypeKind::AnyType:
1900 break;
1901 case SelectTypeKind::Int:
1902 if (EltVT != MVT::i8 && EltVT != MVT::i16 && EltVT != MVT::i32 &&
1903 EltVT != MVT::i64)
1904 return 0;
1905 break;
1906 case SelectTypeKind::Int1:
1907 if (EltVT != MVT::i1)
1908 return 0;
1909 break;
1910 case SelectTypeKind::FP:
1911 if (EltVT == MVT::bf16)
1912 Key = 16;
1913 else if (EltVT != MVT::bf16 && EltVT != MVT::f16 && EltVT != MVT::f32 &&
1914 EltVT != MVT::f64)
1915 return 0;
1916 break;
1917 }
1918
1919 unsigned Offset;
1920 switch (Key) {
1921 case 16: // 8-bit or bf16
1922 Offset = 0;
1923 break;
1924 case 8: // 16-bit
1925 Offset = 1;
1926 break;
1927 case 4: // 32-bit
1928 Offset = 2;
1929 break;
1930 case 2: // 64-bit
1931 Offset = 3;
1932 break;
1933 default:
1934 return 0;
1935 }
1936
1937 return (Opcodes.size() <= Offset) ? 0 : Opcodes[Offset];
1938}
1939
1940// This function is almost identical to SelectWhilePair, but has an
1941// extra check on the range of the immediate operand.
1942// TODO: Merge these two functions together at some point?
1943void AArch64DAGToDAGISel::SelectPExtPair(SDNode *N, unsigned Opc) {
1944 // Immediate can be either 0 or 1.
1945 if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 2)))
1946 if (Imm->getZExtValue() > 1)
1947 return;
1948
1949 SDLoc DL(N);
1950 EVT VT = N->getValueType(ResNo: 0);
1951 SDValue Ops[] = {N->getOperand(Num: 1), N->getOperand(Num: 2)};
1952 SDNode *WhilePair = CurDAG->getMachineNode(Opcode: Opc, dl: DL, VT: MVT::Untyped, Ops);
1953 SDValue SuperReg = SDValue(WhilePair, 0);
1954
1955 for (unsigned I = 0; I < 2; ++I)
1956 ReplaceUses(F: SDValue(N, I), T: CurDAG->getTargetExtractSubreg(
1957 SRIdx: AArch64::psub0 + I, DL, VT, Operand: SuperReg));
1958
1959 CurDAG->RemoveDeadNode(N);
1960}
1961
1962void AArch64DAGToDAGISel::SelectWhilePair(SDNode *N, unsigned Opc) {
1963 SDLoc DL(N);
1964 EVT VT = N->getValueType(ResNo: 0);
1965
1966 SDValue Ops[] = {N->getOperand(Num: 1), N->getOperand(Num: 2)};
1967
1968 SDNode *WhilePair = CurDAG->getMachineNode(Opcode: Opc, dl: DL, VT: MVT::Untyped, Ops);
1969 SDValue SuperReg = SDValue(WhilePair, 0);
1970
1971 for (unsigned I = 0; I < 2; ++I)
1972 ReplaceUses(F: SDValue(N, I), T: CurDAG->getTargetExtractSubreg(
1973 SRIdx: AArch64::psub0 + I, DL, VT, Operand: SuperReg));
1974
1975 CurDAG->RemoveDeadNode(N);
1976}
1977
1978void AArch64DAGToDAGISel::SelectCVTIntrinsic(SDNode *N, unsigned NumVecs,
1979 unsigned Opcode) {
1980 EVT VT = N->getValueType(ResNo: 0);
1981 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumVecs));
1982 SDValue Ops = createZTuple(Regs);
1983 SDLoc DL(N);
1984 SDNode *Intrinsic = CurDAG->getMachineNode(Opcode, dl: DL, VT: MVT::Untyped, Op1: Ops);
1985 SDValue SuperReg = SDValue(Intrinsic, 0);
1986 for (unsigned i = 0; i < NumVecs; ++i)
1987 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
1988 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
1989
1990 CurDAG->RemoveDeadNode(N);
1991}
1992
1993void AArch64DAGToDAGISel::SelectCVTIntrinsicFP8(SDNode *N, unsigned NumVecs,
1994 unsigned Opcode) {
1995 SDLoc DL(N);
1996 EVT VT = N->getValueType(ResNo: 0);
1997 SmallVector<SDValue, 4> Ops(N->op_begin() + 2, N->op_end());
1998 Ops.push_back(/*Chain*/ Elt: N->getOperand(Num: 0));
1999
2000 SDNode *Instruction =
2001 CurDAG->getMachineNode(Opcode, dl: DL, ResultTys: {MVT::Untyped, MVT::Other}, Ops);
2002 SDValue SuperReg = SDValue(Instruction, 0);
2003
2004 for (unsigned i = 0; i < NumVecs; ++i)
2005 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
2006 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
2007
2008 // Copy chain
2009 unsigned ChainIdx = NumVecs;
2010 ReplaceUses(F: SDValue(N, ChainIdx), T: SDValue(Instruction, 1));
2011 CurDAG->RemoveDeadNode(N);
2012}
2013
2014void AArch64DAGToDAGISel::SelectDestructiveMultiIntrinsic(SDNode *N,
2015 unsigned NumVecs,
2016 bool IsZmMulti,
2017 unsigned Opcode,
2018 bool HasPred) {
2019 assert(Opcode != 0 && "Unexpected opcode");
2020
2021 SDLoc DL(N);
2022 EVT VT = N->getValueType(ResNo: 0);
2023 SDUse *OpsIter = N->op_begin() + 1; // Skip intrinsic ID
2024 SmallVector<SDValue, 4> Ops;
2025
2026 auto GetMultiVecOperand = [&]() {
2027 SmallVector<SDValue, 4> Regs(OpsIter, OpsIter + NumVecs);
2028 OpsIter += NumVecs;
2029 return createZMulTuple(Regs);
2030 };
2031
2032 if (HasPred)
2033 Ops.push_back(Elt: *OpsIter++);
2034
2035 Ops.push_back(Elt: GetMultiVecOperand());
2036 if (IsZmMulti)
2037 Ops.push_back(Elt: GetMultiVecOperand());
2038 else
2039 Ops.push_back(Elt: *OpsIter++);
2040
2041 // Append any remaining operands.
2042 Ops.append(in_start: OpsIter, in_end: N->op_end());
2043 SDNode *Intrinsic;
2044 Intrinsic = CurDAG->getMachineNode(Opcode, dl: DL, VT: MVT::Untyped, Ops);
2045 SDValue SuperReg = SDValue(Intrinsic, 0);
2046 for (unsigned i = 0; i < NumVecs; ++i)
2047 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
2048 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
2049
2050 CurDAG->RemoveDeadNode(N);
2051}
2052
2053void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
2054 unsigned Scale, unsigned Opc_ri,
2055 unsigned Opc_rr, bool IsIntr) {
2056 assert(Scale < 5 && "Invalid scaling value.");
2057 SDLoc DL(N);
2058 EVT VT = N->getValueType(ResNo: 0);
2059 SDValue Chain = N->getOperand(Num: 0);
2060
2061 // Optimize addressing mode.
2062 SDValue Base, Offset;
2063 unsigned Opc;
2064 std::tie(args&: Opc, args&: Base, args&: Offset) = findAddrModeSVELoadStore(
2065 N, Opc_rr, Opc_ri, OldBase: N->getOperand(Num: IsIntr ? 3 : 2),
2066 OldOffset: CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64), Scale);
2067
2068 SDValue Ops[] = {N->getOperand(Num: IsIntr ? 2 : 1), // Predicate
2069 Base, // Memory operand
2070 Offset, Chain};
2071
2072 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
2073
2074 SDNode *Load = CurDAG->getMachineNode(Opcode: Opc, dl: DL, ResultTys: ResTys, Ops);
2075 SDValue SuperReg = SDValue(Load, 0);
2076 for (unsigned i = 0; i < NumVecs; ++i)
2077 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
2078 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
2079
2080 // Copy chain
2081 unsigned ChainIdx = NumVecs;
2082 ReplaceUses(F: SDValue(N, ChainIdx), T: SDValue(Load, 1));
2083 CurDAG->RemoveDeadNode(N);
2084}
2085
2086void AArch64DAGToDAGISel::SelectContiguousMultiVectorLoad(SDNode *N,
2087 unsigned NumVecs,
2088 unsigned Scale,
2089 unsigned Opc_ri,
2090 unsigned Opc_rr) {
2091 assert(Scale < 4 && "Invalid scaling value.");
2092 SDLoc DL(N);
2093 EVT VT = N->getValueType(ResNo: 0);
2094 SDValue Chain = N->getOperand(Num: 0);
2095
2096 SDValue PNg = N->getOperand(Num: 2);
2097 SDValue Base = N->getOperand(Num: 3);
2098 SDValue Offset = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64);
2099 unsigned Opc;
2100 std::tie(args&: Opc, args&: Base, args&: Offset) =
2101 findAddrModeSVELoadStore(N, Opc_rr, Opc_ri, OldBase: Base, OldOffset: Offset, Scale);
2102
2103 SDValue Ops[] = {PNg, // Predicate-as-counter
2104 Base, // Memory operand
2105 Offset, Chain};
2106
2107 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
2108
2109 SDNode *Load = CurDAG->getMachineNode(Opcode: Opc, dl: DL, ResultTys: ResTys, Ops);
2110 SDValue SuperReg = SDValue(Load, 0);
2111 for (unsigned i = 0; i < NumVecs; ++i)
2112 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
2113 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
2114
2115 // Copy chain
2116 unsigned ChainIdx = NumVecs;
2117 ReplaceUses(F: SDValue(N, ChainIdx), T: SDValue(Load, 1));
2118 CurDAG->RemoveDeadNode(N);
2119}
2120
2121void AArch64DAGToDAGISel::SelectFrintFromVT(SDNode *N, unsigned NumVecs,
2122 unsigned Opcode) {
2123 if (N->getValueType(ResNo: 0) != MVT::nxv4f32)
2124 return;
2125 SelectUnaryMultiIntrinsic(N, NumOutVecs: NumVecs, IsTupleInput: true, Opc: Opcode);
2126}
2127
2128void AArch64DAGToDAGISel::SelectMultiVectorLutiLane(SDNode *Node,
2129 unsigned NumOutVecs,
2130 unsigned Opc,
2131 uint32_t MaxImm) {
2132 if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Val: Node->getOperand(Num: 4)))
2133 if (Imm->getZExtValue() > MaxImm)
2134 return;
2135
2136 SDValue ZtValue;
2137 if (!ImmToReg<AArch64::ZT0, 0>(N: Node->getOperand(Num: 2), Imm&: ZtValue))
2138 return;
2139
2140 SDValue Chain = Node->getOperand(Num: 0);
2141 SDValue Ops[] = {ZtValue, Node->getOperand(Num: 3), Node->getOperand(Num: 4), Chain};
2142 SDLoc DL(Node);
2143 EVT VT = Node->getValueType(ResNo: 0);
2144
2145 SDNode *Instruction =
2146 CurDAG->getMachineNode(Opcode: Opc, dl: DL, ResultTys: {MVT::Untyped, MVT::Other}, Ops);
2147 SDValue SuperReg = SDValue(Instruction, 0);
2148
2149 for (unsigned I = 0; I < NumOutVecs; ++I)
2150 ReplaceUses(F: SDValue(Node, I), T: CurDAG->getTargetExtractSubreg(
2151 SRIdx: AArch64::zsub0 + I, DL, VT, Operand: SuperReg));
2152
2153 // Copy chain
2154 unsigned ChainIdx = NumOutVecs;
2155 ReplaceUses(F: SDValue(Node, ChainIdx), T: SDValue(Instruction, 1));
2156 CurDAG->RemoveDeadNode(N: Node);
2157}
2158
2159void AArch64DAGToDAGISel::SelectMultiVectorLuti(SDNode *Node,
2160 unsigned NumOutVecs,
2161 unsigned Opc) {
2162 SDValue ZtValue;
2163 if (!ImmToReg<AArch64::ZT0, 0>(N: Node->getOperand(Num: 2), Imm&: ZtValue))
2164 return;
2165
2166 SDValue Chain = Node->getOperand(Num: 0);
2167 SDValue Ops[] = {ZtValue,
2168 createZMulTuple(Regs: {Node->getOperand(Num: 3), Node->getOperand(Num: 4)}),
2169 Chain};
2170
2171 SDLoc DL(Node);
2172 EVT VT = Node->getValueType(ResNo: 0);
2173
2174 SDNode *Instruction =
2175 CurDAG->getMachineNode(Opcode: Opc, dl: DL, ResultTys: {MVT::Untyped, MVT::Other}, Ops);
2176 SDValue SuperReg = SDValue(Instruction, 0);
2177
2178 for (unsigned I = 0; I < NumOutVecs; ++I)
2179 ReplaceUses(F: SDValue(Node, I), T: CurDAG->getTargetExtractSubreg(
2180 SRIdx: AArch64::zsub0 + I, DL, VT, Operand: SuperReg));
2181
2182 // Copy chain
2183 unsigned ChainIdx = NumOutVecs;
2184 ReplaceUses(F: SDValue(Node, ChainIdx), T: SDValue(Instruction, 1));
2185 CurDAG->RemoveDeadNode(N: Node);
2186}
2187
2188void AArch64DAGToDAGISel::SelectClamp(SDNode *N, unsigned NumVecs,
2189 unsigned Op) {
2190 SDLoc DL(N);
2191 EVT VT = N->getValueType(ResNo: 0);
2192
2193 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumVecs));
2194 SDValue Zd = createZMulTuple(Regs);
2195 SDValue Zn = N->getOperand(Num: 1 + NumVecs);
2196 SDValue Zm = N->getOperand(Num: 2 + NumVecs);
2197
2198 SDValue Ops[] = {Zd, Zn, Zm};
2199
2200 SDNode *Intrinsic = CurDAG->getMachineNode(Opcode: Op, dl: DL, VT: MVT::Untyped, Ops);
2201 SDValue SuperReg = SDValue(Intrinsic, 0);
2202 for (unsigned i = 0; i < NumVecs; ++i)
2203 ReplaceUses(F: SDValue(N, i), T: CurDAG->getTargetExtractSubreg(
2204 SRIdx: AArch64::zsub0 + i, DL, VT, Operand: SuperReg));
2205
2206 CurDAG->RemoveDeadNode(N);
2207}
2208
2209bool SelectSMETile(unsigned &BaseReg, unsigned TileNum) {
2210 switch (BaseReg) {
2211 default:
2212 return false;
2213 case AArch64::ZA:
2214 case AArch64::ZAB0:
2215 if (TileNum == 0)
2216 break;
2217 return false;
2218 case AArch64::ZAH0:
2219 if (TileNum <= 1)
2220 break;
2221 return false;
2222 case AArch64::ZAS0:
2223 if (TileNum <= 3)
2224 break;
2225 return false;
2226 case AArch64::ZAD0:
2227 if (TileNum <= 7)
2228 break;
2229 return false;
2230 }
2231
2232 BaseReg += TileNum;
2233 return true;
2234}
2235
2236template <unsigned MaxIdx, unsigned Scale>
2237void AArch64DAGToDAGISel::SelectMultiVectorMove(SDNode *N, unsigned NumVecs,
2238 unsigned BaseReg, unsigned Op) {
2239 unsigned TileNum = 0;
2240 if (BaseReg != AArch64::ZA)
2241 TileNum = N->getConstantOperandVal(Num: 2);
2242
2243 if (!SelectSMETile(BaseReg, TileNum))
2244 return;
2245
2246 SDValue SliceBase, Base, Offset;
2247 if (BaseReg == AArch64::ZA)
2248 SliceBase = N->getOperand(Num: 2);
2249 else
2250 SliceBase = N->getOperand(Num: 3);
2251
2252 if (!SelectSMETileSlice(N: SliceBase, MaxSize: MaxIdx, Vector&: Base, Offset, Scale))
2253 return;
2254
2255 SDLoc DL(N);
2256 SDValue SubReg = CurDAG->getRegister(Reg: BaseReg, VT: MVT::Other);
2257 SDValue Ops[] = {SubReg, Base, Offset, /*Chain*/ N->getOperand(Num: 0)};
2258 SDNode *Mov = CurDAG->getMachineNode(Opcode: Op, dl: DL, ResultTys: {MVT::Untyped, MVT::Other}, Ops);
2259
2260 EVT VT = N->getValueType(ResNo: 0);
2261 for (unsigned I = 0; I < NumVecs; ++I)
2262 ReplaceUses(F: SDValue(N, I),
2263 T: CurDAG->getTargetExtractSubreg(SRIdx: AArch64::zsub0 + I, DL, VT,
2264 Operand: SDValue(Mov, 0)));
2265 // Copy chain
2266 unsigned ChainIdx = NumVecs;
2267 ReplaceUses(F: SDValue(N, ChainIdx), T: SDValue(Mov, 1));
2268 CurDAG->RemoveDeadNode(N);
2269}
2270
2271void AArch64DAGToDAGISel::SelectMultiVectorMoveZ(SDNode *N, unsigned NumVecs,
2272 unsigned Op, unsigned MaxIdx,
2273 unsigned Scale, unsigned BaseReg) {
2274 // Slice can be in different positions
2275 // The array to vector: llvm.aarch64.sme.readz.<h/v>.<sz>(slice)
2276 // The tile to vector: llvm.aarch64.sme.readz.<h/v>.<sz>(tile, slice)
2277 SDValue SliceBase = N->getOperand(Num: 2);
2278 if (BaseReg != AArch64::ZA)
2279 SliceBase = N->getOperand(Num: 3);
2280
2281 SDValue Base, Offset;
2282 if (!SelectSMETileSlice(N: SliceBase, MaxSize: MaxIdx, Vector&: Base, Offset, Scale))
2283 return;
2284 // The correct Za tile number is computed in Machine Instruction
2285 // See EmitZAInstr
2286 // DAG cannot select Za tile as an output register with ZReg
2287 SDLoc DL(N);
2288 SmallVector<SDValue, 6> Ops;
2289 if (BaseReg != AArch64::ZA )
2290 Ops.push_back(Elt: N->getOperand(Num: 2));
2291 Ops.push_back(Elt: Base);
2292 Ops.push_back(Elt: Offset);
2293 Ops.push_back(Elt: N->getOperand(Num: 0)); //Chain
2294 SDNode *Mov = CurDAG->getMachineNode(Opcode: Op, dl: DL, ResultTys: {MVT::Untyped, MVT::Other}, Ops);
2295
2296 EVT VT = N->getValueType(ResNo: 0);
2297 for (unsigned I = 0; I < NumVecs; ++I)
2298 ReplaceUses(F: SDValue(N, I),
2299 T: CurDAG->getTargetExtractSubreg(SRIdx: AArch64::zsub0 + I, DL, VT,
2300 Operand: SDValue(Mov, 0)));
2301
2302 // Copy chain
2303 unsigned ChainIdx = NumVecs;
2304 ReplaceUses(F: SDValue(N, ChainIdx), T: SDValue(Mov, 1));
2305 CurDAG->RemoveDeadNode(N);
2306}
2307
2308void AArch64DAGToDAGISel::SelectUnaryMultiIntrinsic(SDNode *N,
2309 unsigned NumOutVecs,
2310 bool IsTupleInput,
2311 unsigned Opc) {
2312 SDLoc DL(N);
2313 EVT VT = N->getValueType(ResNo: 0);
2314 unsigned NumInVecs = N->getNumOperands() - 1;
2315
2316 SmallVector<SDValue, 6> Ops;
2317 if (IsTupleInput) {
2318 assert((NumInVecs == 2 || NumInVecs == 4) &&
2319 "Don't know how to handle multi-register input!");
2320 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumInVecs));
2321 Ops.push_back(Elt: createZMulTuple(Regs));
2322 } else {
2323 // All intrinsic nodes have the ID as the first operand, hence the "1 + I".
2324 for (unsigned I = 0; I < NumInVecs; I++)
2325 Ops.push_back(Elt: N->getOperand(Num: 1 + I));
2326 }
2327
2328 SDNode *Res = CurDAG->getMachineNode(Opcode: Opc, dl: DL, VT: MVT::Untyped, Ops);
2329 SDValue SuperReg = SDValue(Res, 0);
2330
2331 for (unsigned I = 0; I < NumOutVecs; I++)
2332 ReplaceUses(F: SDValue(N, I), T: CurDAG->getTargetExtractSubreg(
2333 SRIdx: AArch64::zsub0 + I, DL, VT, Operand: SuperReg));
2334 CurDAG->RemoveDeadNode(N);
2335}
2336
2337void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
2338 unsigned Opc) {
2339 SDLoc dl(N);
2340 EVT VT = N->getOperand(Num: 2)->getValueType(ResNo: 0);
2341
2342 // Form a REG_SEQUENCE to force register allocation.
2343 bool Is128Bit = VT.getSizeInBits() == 128;
2344 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 2, M: NumVecs));
2345 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
2346
2347 SDValue Ops[] = {RegSeq, N->getOperand(Num: NumVecs + 2), N->getOperand(Num: 0)};
2348 SDNode *St = CurDAG->getMachineNode(Opcode: Opc, dl, VT: N->getValueType(ResNo: 0), Ops);
2349
2350 // Transfer memoperands.
2351 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(Val: N)->getMemOperand();
2352 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: St), NewMemRefs: {MemOp});
2353
2354 ReplaceNode(F: N, T: St);
2355}
2356
2357void AArch64DAGToDAGISel::SelectPredicatedStore(SDNode *N, unsigned NumVecs,
2358 unsigned Scale, unsigned Opc_rr,
2359 unsigned Opc_ri) {
2360 SDLoc dl(N);
2361
2362 // Form a REG_SEQUENCE to force register allocation.
2363 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 2, M: NumVecs));
2364 SDValue RegSeq = createZTuple(Regs);
2365
2366 // Optimize addressing mode.
2367 unsigned Opc;
2368 SDValue Offset, Base;
2369 std::tie(args&: Opc, args&: Base, args&: Offset) = findAddrModeSVELoadStore(
2370 N, Opc_rr, Opc_ri, OldBase: N->getOperand(Num: NumVecs + 3),
2371 OldOffset: CurDAG->getTargetConstant(Val: 0, DL: dl, VT: MVT::i64), Scale);
2372
2373 SDValue Ops[] = {RegSeq, N->getOperand(Num: NumVecs + 2), // predicate
2374 Base, // address
2375 Offset, // offset
2376 N->getOperand(Num: 0)}; // chain
2377 SDNode *St = CurDAG->getMachineNode(Opcode: Opc, dl, VT: N->getValueType(ResNo: 0), Ops);
2378
2379 ReplaceNode(F: N, T: St);
2380}
2381
2382bool AArch64DAGToDAGISel::SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base,
2383 SDValue &OffImm) {
2384 SDLoc dl(N);
2385 const DataLayout &DL = CurDAG->getDataLayout();
2386 const TargetLowering *TLI = getTargetLowering();
2387
2388 // Try to match it for the frame address
2389 if (auto FINode = dyn_cast<FrameIndexSDNode>(Val&: N)) {
2390 int FI = FINode->getIndex();
2391 Base = CurDAG->getTargetFrameIndex(FI, VT: TLI->getPointerTy(DL));
2392 OffImm = CurDAG->getTargetConstant(Val: 0, DL: dl, VT: MVT::i64);
2393 return true;
2394 }
2395
2396 return false;
2397}
2398
2399void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
2400 unsigned Opc) {
2401 SDLoc dl(N);
2402 EVT VT = N->getOperand(Num: 2)->getValueType(ResNo: 0);
2403 const EVT ResTys[] = {MVT::i64, // Type of the write back register
2404 MVT::Other}; // Type for the Chain
2405
2406 // Form a REG_SEQUENCE to force register allocation.
2407 bool Is128Bit = VT.getSizeInBits() == 128;
2408 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumVecs));
2409 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
2410
2411 SDValue Ops[] = {RegSeq,
2412 N->getOperand(Num: NumVecs + 1), // base register
2413 N->getOperand(Num: NumVecs + 2), // Incremental
2414 N->getOperand(Num: 0)}; // Chain
2415 SDNode *St = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
2416
2417 ReplaceNode(F: N, T: St);
2418}
2419
2420namespace {
2421/// WidenVector - Given a value in the V64 register class, produce the
2422/// equivalent value in the V128 register class.
2423class WidenVector {
2424 SelectionDAG &DAG;
2425
2426public:
2427 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
2428
2429 SDValue operator()(SDValue V64Reg) {
2430 EVT VT = V64Reg.getValueType();
2431 unsigned NarrowSize = VT.getVectorNumElements();
2432 MVT EltTy = VT.getVectorElementType().getSimpleVT();
2433 MVT WideTy = MVT::getVectorVT(VT: EltTy, NumElements: 2 * NarrowSize);
2434 SDLoc DL(V64Reg);
2435
2436 SDValue Undef =
2437 SDValue(DAG.getMachineNode(Opcode: TargetOpcode::IMPLICIT_DEF, dl: DL, VT: WideTy), 0);
2438 return DAG.getTargetInsertSubreg(SRIdx: AArch64::dsub, DL, VT: WideTy, Operand: Undef, Subreg: V64Reg);
2439 }
2440};
2441} // namespace
2442
2443/// NarrowVector - Given a value in the V128 register class, produce the
2444/// equivalent value in the V64 register class.
2445static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
2446 EVT VT = V128Reg.getValueType();
2447 unsigned WideSize = VT.getVectorNumElements();
2448 MVT EltTy = VT.getVectorElementType().getSimpleVT();
2449 MVT NarrowTy = MVT::getVectorVT(VT: EltTy, NumElements: WideSize / 2);
2450
2451 return DAG.getTargetExtractSubreg(SRIdx: AArch64::dsub, DL: SDLoc(V128Reg), VT: NarrowTy,
2452 Operand: V128Reg);
2453}
2454
2455void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
2456 unsigned Opc) {
2457 SDLoc dl(N);
2458 EVT VT = N->getValueType(ResNo: 0);
2459 bool Narrow = VT.getSizeInBits() == 64;
2460
2461 // Form a REG_SEQUENCE to force register allocation.
2462 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 2, M: NumVecs));
2463
2464 if (Narrow)
2465 transform(Range&: Regs, d_first: Regs.begin(),
2466 F: WidenVector(*CurDAG));
2467
2468 SDValue RegSeq = createQTuple(Regs);
2469
2470 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
2471
2472 unsigned LaneNo = N->getConstantOperandVal(Num: NumVecs + 2);
2473
2474 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(Val: LaneNo, DL: dl, VT: MVT::i64),
2475 N->getOperand(Num: NumVecs + 3), N->getOperand(Num: 0)};
2476 SDNode *Ld = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
2477 SDValue SuperReg = SDValue(Ld, 0);
2478
2479 EVT WideVT = RegSeq.getOperand(i: 1)->getValueType(ResNo: 0);
2480 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
2481 AArch64::qsub2, AArch64::qsub3 };
2482 for (unsigned i = 0; i < NumVecs; ++i) {
2483 SDValue NV = CurDAG->getTargetExtractSubreg(SRIdx: QSubs[i], DL: dl, VT: WideVT, Operand: SuperReg);
2484 if (Narrow)
2485 NV = NarrowVector(V128Reg: NV, DAG&: *CurDAG);
2486 ReplaceUses(F: SDValue(N, i), T: NV);
2487 }
2488
2489 ReplaceUses(F: SDValue(N, NumVecs), T: SDValue(Ld, 1));
2490 CurDAG->RemoveDeadNode(N);
2491}
2492
2493void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
2494 unsigned Opc) {
2495 SDLoc dl(N);
2496 EVT VT = N->getValueType(ResNo: 0);
2497 bool Narrow = VT.getSizeInBits() == 64;
2498
2499 // Form a REG_SEQUENCE to force register allocation.
2500 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumVecs));
2501
2502 if (Narrow)
2503 transform(Range&: Regs, d_first: Regs.begin(),
2504 F: WidenVector(*CurDAG));
2505
2506 SDValue RegSeq = createQTuple(Regs);
2507
2508 const EVT ResTys[] = {MVT::i64, // Type of the write back register
2509 RegSeq->getValueType(ResNo: 0), MVT::Other};
2510
2511 unsigned LaneNo = N->getConstantOperandVal(Num: NumVecs + 1);
2512
2513 SDValue Ops[] = {RegSeq,
2514 CurDAG->getTargetConstant(Val: LaneNo, DL: dl,
2515 VT: MVT::i64), // Lane Number
2516 N->getOperand(Num: NumVecs + 2), // Base register
2517 N->getOperand(Num: NumVecs + 3), // Incremental
2518 N->getOperand(Num: 0)};
2519 SDNode *Ld = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
2520
2521 // Update uses of the write back register
2522 ReplaceUses(F: SDValue(N, NumVecs), T: SDValue(Ld, 0));
2523
2524 // Update uses of the vector list
2525 SDValue SuperReg = SDValue(Ld, 1);
2526 if (NumVecs == 1) {
2527 ReplaceUses(F: SDValue(N, 0),
2528 T: Narrow ? NarrowVector(V128Reg: SuperReg, DAG&: *CurDAG) : SuperReg);
2529 } else {
2530 EVT WideVT = RegSeq.getOperand(i: 1)->getValueType(ResNo: 0);
2531 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
2532 AArch64::qsub2, AArch64::qsub3 };
2533 for (unsigned i = 0; i < NumVecs; ++i) {
2534 SDValue NV = CurDAG->getTargetExtractSubreg(SRIdx: QSubs[i], DL: dl, VT: WideVT,
2535 Operand: SuperReg);
2536 if (Narrow)
2537 NV = NarrowVector(V128Reg: NV, DAG&: *CurDAG);
2538 ReplaceUses(F: SDValue(N, i), T: NV);
2539 }
2540 }
2541
2542 // Update the Chain
2543 ReplaceUses(F: SDValue(N, NumVecs + 1), T: SDValue(Ld, 2));
2544 CurDAG->RemoveDeadNode(N);
2545}
2546
2547void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
2548 unsigned Opc) {
2549 SDLoc dl(N);
2550 EVT VT = N->getOperand(Num: 2)->getValueType(ResNo: 0);
2551 bool Narrow = VT.getSizeInBits() == 64;
2552
2553 // Form a REG_SEQUENCE to force register allocation.
2554 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 2, M: NumVecs));
2555
2556 if (Narrow)
2557 transform(Range&: Regs, d_first: Regs.begin(),
2558 F: WidenVector(*CurDAG));
2559
2560 SDValue RegSeq = createQTuple(Regs);
2561
2562 unsigned LaneNo = N->getConstantOperandVal(Num: NumVecs + 2);
2563
2564 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(Val: LaneNo, DL: dl, VT: MVT::i64),
2565 N->getOperand(Num: NumVecs + 3), N->getOperand(Num: 0)};
2566 SDNode *St = CurDAG->getMachineNode(Opcode: Opc, dl, VT: MVT::Other, Ops);
2567
2568 // Transfer memoperands.
2569 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(Val: N)->getMemOperand();
2570 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: St), NewMemRefs: {MemOp});
2571
2572 ReplaceNode(F: N, T: St);
2573}
2574
2575void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
2576 unsigned Opc) {
2577 SDLoc dl(N);
2578 EVT VT = N->getOperand(Num: 2)->getValueType(ResNo: 0);
2579 bool Narrow = VT.getSizeInBits() == 64;
2580
2581 // Form a REG_SEQUENCE to force register allocation.
2582 SmallVector<SDValue, 4> Regs(N->ops().slice(N: 1, M: NumVecs));
2583
2584 if (Narrow)
2585 transform(Range&: Regs, d_first: Regs.begin(),
2586 F: WidenVector(*CurDAG));
2587
2588 SDValue RegSeq = createQTuple(Regs);
2589
2590 const EVT ResTys[] = {MVT::i64, // Type of the write back register
2591 MVT::Other};
2592
2593 unsigned LaneNo = N->getConstantOperandVal(Num: NumVecs + 1);
2594
2595 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(Val: LaneNo, DL: dl, VT: MVT::i64),
2596 N->getOperand(Num: NumVecs + 2), // Base Register
2597 N->getOperand(Num: NumVecs + 3), // Incremental
2598 N->getOperand(Num: 0)};
2599 SDNode *St = CurDAG->getMachineNode(Opcode: Opc, dl, ResultTys: ResTys, Ops);
2600
2601 // Transfer memoperands.
2602 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(Val: N)->getMemOperand();
2603 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: St), NewMemRefs: {MemOp});
2604
2605 ReplaceNode(F: N, T: St);
2606}
2607
2608static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
2609 unsigned &Opc, SDValue &Opd0,
2610 unsigned &LSB, unsigned &MSB,
2611 unsigned NumberOfIgnoredLowBits,
2612 bool BiggerPattern) {
2613 assert(N->getOpcode() == ISD::AND &&
2614 "N must be a AND operation to call this function");
2615
2616 EVT VT = N->getValueType(ResNo: 0);
2617
2618 // Here we can test the type of VT and return false when the type does not
2619 // match, but since it is done prior to that call in the current context
2620 // we turned that into an assert to avoid redundant code.
2621 assert((VT == MVT::i32 || VT == MVT::i64) &&
2622 "Type checking must have been done before calling this function");
2623
2624 // FIXME: simplify-demanded-bits in DAGCombine will probably have
2625 // changed the AND node to a 32-bit mask operation. We'll have to
2626 // undo that as part of the transform here if we want to catch all
2627 // the opportunities.
2628 // Currently the NumberOfIgnoredLowBits argument helps to recover
2629 // from these situations when matching bigger pattern (bitfield insert).
2630
2631 // For unsigned extracts, check for a shift right and mask
2632 uint64_t AndImm = 0;
2633 if (!isOpcWithIntImmediate(N, Opc: ISD::AND, Imm&: AndImm))
2634 return false;
2635
2636 const SDNode *Op0 = N->getOperand(Num: 0).getNode();
2637
2638 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
2639 // simplified. Try to undo that
2640 AndImm |= maskTrailingOnes<uint64_t>(N: NumberOfIgnoredLowBits);
2641
2642 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
2643 if (AndImm & (AndImm + 1))
2644 return false;
2645
2646 bool ClampMSB = false;
2647 uint64_t SrlImm = 0;
2648 // Handle the SRL + ANY_EXTEND case.
2649 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
2650 isOpcWithIntImmediate(N: Op0->getOperand(Num: 0).getNode(), Opc: ISD::SRL, Imm&: SrlImm)) {
2651 // Extend the incoming operand of the SRL to 64-bit.
2652 Opd0 = Widen(CurDAG, N: Op0->getOperand(Num: 0).getOperand(i: 0));
2653 // Make sure to clamp the MSB so that we preserve the semantics of the
2654 // original operations.
2655 ClampMSB = true;
2656 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
2657 isOpcWithIntImmediate(N: Op0->getOperand(Num: 0).getNode(), Opc: ISD::SRL,
2658 Imm&: SrlImm)) {
2659 // If the shift result was truncated, we can still combine them.
2660 Opd0 = Op0->getOperand(Num: 0).getOperand(i: 0);
2661
2662 // Use the type of SRL node.
2663 VT = Opd0->getValueType(ResNo: 0);
2664 } else if (isOpcWithIntImmediate(N: Op0, Opc: ISD::SRL, Imm&: SrlImm)) {
2665 Opd0 = Op0->getOperand(Num: 0);
2666 ClampMSB = (VT == MVT::i32);
2667 } else if (BiggerPattern) {
2668 // Let's pretend a 0 shift right has been performed.
2669 // The resulting code will be at least as good as the original one
2670 // plus it may expose more opportunities for bitfield insert pattern.
2671 // FIXME: Currently we limit this to the bigger pattern, because
2672 // some optimizations expect AND and not UBFM.
2673 Opd0 = N->getOperand(Num: 0);
2674 } else
2675 return false;
2676
2677 // Bail out on large immediates. This happens when no proper
2678 // combining/constant folding was performed.
2679 if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
2680 LLVM_DEBUG(
2681 (dbgs() << N
2682 << ": Found large shift immediate, this should not happen\n"));
2683 return false;
2684 }
2685
2686 LSB = SrlImm;
2687 MSB = SrlImm +
2688 (VT == MVT::i32 ? llvm::countr_one<uint32_t>(Value: AndImm)
2689 : llvm::countr_one<uint64_t>(Value: AndImm)) -
2690 1;
2691 if (ClampMSB)
2692 // Since we're moving the extend before the right shift operation, we need
2693 // to clamp the MSB to make sure we don't shift in undefined bits instead of
2694 // the zeros which would get shifted in with the original right shift
2695 // operation.
2696 MSB = MSB > 31 ? 31 : MSB;
2697
2698 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2699 return true;
2700}
2701
2702static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
2703 SDValue &Opd0, unsigned &Immr,
2704 unsigned &Imms) {
2705 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
2706
2707 EVT VT = N->getValueType(ResNo: 0);
2708 unsigned BitWidth = VT.getSizeInBits();
2709 assert((VT == MVT::i32 || VT == MVT::i64) &&
2710 "Type checking must have been done before calling this function");
2711
2712 SDValue Op = N->getOperand(Num: 0);
2713 if (Op->getOpcode() == ISD::TRUNCATE) {
2714 Op = Op->getOperand(Num: 0);
2715 VT = Op->getValueType(ResNo: 0);
2716 BitWidth = VT.getSizeInBits();
2717 }
2718
2719 uint64_t ShiftImm;
2720 if (!isOpcWithIntImmediate(N: Op.getNode(), Opc: ISD::SRL, Imm&: ShiftImm) &&
2721 !isOpcWithIntImmediate(N: Op.getNode(), Opc: ISD::SRA, Imm&: ShiftImm))
2722 return false;
2723
2724 unsigned Width = cast<VTSDNode>(Val: N->getOperand(Num: 1))->getVT().getSizeInBits();
2725 if (ShiftImm + Width > BitWidth)
2726 return false;
2727
2728 Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
2729 Opd0 = Op.getOperand(i: 0);
2730 Immr = ShiftImm;
2731 Imms = ShiftImm + Width - 1;
2732 return true;
2733}
2734
2735static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
2736 SDValue &Opd0, unsigned &LSB,
2737 unsigned &MSB) {
2738 // We are looking for the following pattern which basically extracts several
2739 // continuous bits from the source value and places it from the LSB of the
2740 // destination value, all other bits of the destination value or set to zero:
2741 //
2742 // Value2 = AND Value, MaskImm
2743 // SRL Value2, ShiftImm
2744 //
2745 // with MaskImm >> ShiftImm to search for the bit width.
2746 //
2747 // This gets selected into a single UBFM:
2748 //
2749 // UBFM Value, ShiftImm, Log2_64(MaskImm)
2750 //
2751
2752 if (N->getOpcode() != ISD::SRL)
2753 return false;
2754
2755 uint64_t AndMask = 0;
2756 if (!isOpcWithIntImmediate(N: N->getOperand(Num: 0).getNode(), Opc: ISD::AND, Imm&: AndMask))
2757 return false;
2758
2759 Opd0 = N->getOperand(Num: 0).getOperand(i: 0);
2760
2761 uint64_t SrlImm = 0;
2762 if (!isIntImmediate(N: N->getOperand(Num: 1), Imm&: SrlImm))
2763 return false;
2764
2765 // Check whether we really have several bits extract here.
2766 if (!isMask_64(Value: AndMask >> SrlImm))
2767 return false;
2768
2769 Opc = N->getValueType(ResNo: 0) == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2770 LSB = SrlImm;
2771 MSB = llvm::Log2_64(Value: AndMask);
2772 return true;
2773}
2774
2775static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
2776 unsigned &Immr, unsigned &Imms,
2777 bool BiggerPattern) {
2778 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
2779 "N must be a SHR/SRA operation to call this function");
2780
2781 EVT VT = N->getValueType(ResNo: 0);
2782
2783 // Here we can test the type of VT and return false when the type does not
2784 // match, but since it is done prior to that call in the current context
2785 // we turned that into an assert to avoid redundant code.
2786 assert((VT == MVT::i32 || VT == MVT::i64) &&
2787 "Type checking must have been done before calling this function");
2788
2789 // Check for AND + SRL doing several bits extract.
2790 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, LSB&: Immr, MSB&: Imms))
2791 return true;
2792
2793 // We're looking for a shift of a shift.
2794 uint64_t ShlImm = 0;
2795 uint64_t TruncBits = 0;
2796 if (isOpcWithIntImmediate(N: N->getOperand(Num: 0).getNode(), Opc: ISD::SHL, Imm&: ShlImm)) {
2797 Opd0 = N->getOperand(Num: 0).getOperand(i: 0);
2798 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
2799 N->getOperand(Num: 0).getNode()->getOpcode() == ISD::TRUNCATE) {
2800 // We are looking for a shift of truncate. Truncate from i64 to i32 could
2801 // be considered as setting high 32 bits as zero. Our strategy here is to
2802 // always generate 64bit UBFM. This consistency will help the CSE pass
2803 // later find more redundancy.
2804 Opd0 = N->getOperand(Num: 0).getOperand(i: 0);
2805 TruncBits = Opd0->getValueType(ResNo: 0).getSizeInBits() - VT.getSizeInBits();
2806 VT = Opd0.getValueType();
2807 assert(VT == MVT::i64 && "the promoted type should be i64");
2808 } else if (BiggerPattern) {
2809 // Let's pretend a 0 shift left has been performed.
2810 // FIXME: Currently we limit this to the bigger pattern case,
2811 // because some optimizations expect AND and not UBFM
2812 Opd0 = N->getOperand(Num: 0);
2813 } else
2814 return false;
2815
2816 // Missing combines/constant folding may have left us with strange
2817 // constants.
2818 if (ShlImm >= VT.getSizeInBits()) {
2819 LLVM_DEBUG(
2820 (dbgs() << N
2821 << ": Found large shift immediate, this should not happen\n"));
2822 return false;
2823 }
2824
2825 uint64_t SrlImm = 0;
2826 if (!isIntImmediate(N: N->getOperand(Num: 1), Imm&: SrlImm))
2827 return false;
2828
2829 assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
2830 "bad amount in shift node!");
2831 int immr = SrlImm - ShlImm;
2832 Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
2833 Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
2834 // SRA requires a signed extraction
2835 if (VT == MVT::i32)
2836 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
2837 else
2838 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
2839 return true;
2840}
2841
2842bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
2843 assert(N->getOpcode() == ISD::SIGN_EXTEND);
2844
2845 EVT VT = N->getValueType(ResNo: 0);
2846 EVT NarrowVT = N->getOperand(Num: 0)->getValueType(ResNo: 0);
2847 if (VT != MVT::i64 || NarrowVT != MVT::i32)
2848 return false;
2849
2850 uint64_t ShiftImm;
2851 SDValue Op = N->getOperand(Num: 0);
2852 if (!isOpcWithIntImmediate(N: Op.getNode(), Opc: ISD::SRA, Imm&: ShiftImm))
2853 return false;
2854
2855 SDLoc dl(N);
2856 // Extend the incoming operand of the shift to 64-bits.
2857 SDValue Opd0 = Widen(CurDAG, N: Op.getOperand(i: 0));
2858 unsigned Immr = ShiftImm;
2859 unsigned Imms = NarrowVT.getSizeInBits() - 1;
2860 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Val: Immr, DL: dl, VT),
2861 CurDAG->getTargetConstant(Val: Imms, DL: dl, VT)};
2862 CurDAG->SelectNodeTo(N, MachineOpc: AArch64::SBFMXri, VT, Ops);
2863 return true;
2864}
2865
2866static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
2867 SDValue &Opd0, unsigned &Immr, unsigned &Imms,
2868 unsigned NumberOfIgnoredLowBits = 0,
2869 bool BiggerPattern = false) {
2870 if (N->getValueType(ResNo: 0) != MVT::i32 && N->getValueType(ResNo: 0) != MVT::i64)
2871 return false;
2872
2873 switch (N->getOpcode()) {
2874 default:
2875 if (!N->isMachineOpcode())
2876 return false;
2877 break;
2878 case ISD::AND:
2879 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB&: Immr, MSB&: Imms,
2880 NumberOfIgnoredLowBits, BiggerPattern);
2881 case ISD::SRL:
2882 case ISD::SRA:
2883 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
2884
2885 case ISD::SIGN_EXTEND_INREG:
2886 return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
2887 }
2888
2889 unsigned NOpc = N->getMachineOpcode();
2890 switch (NOpc) {
2891 default:
2892 return false;
2893 case AArch64::SBFMWri:
2894 case AArch64::UBFMWri:
2895 case AArch64::SBFMXri:
2896 case AArch64::UBFMXri:
2897 Opc = NOpc;
2898 Opd0 = N->getOperand(Num: 0);
2899 Immr = N->getConstantOperandVal(Num: 1);
2900 Imms = N->getConstantOperandVal(Num: 2);
2901 return true;
2902 }
2903 // Unreachable
2904 return false;
2905}
2906
2907bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
2908 unsigned Opc, Immr, Imms;
2909 SDValue Opd0;
2910 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
2911 return false;
2912
2913 EVT VT = N->getValueType(ResNo: 0);
2914 SDLoc dl(N);
2915
2916 // If the bit extract operation is 64bit but the original type is 32bit, we
2917 // need to add one EXTRACT_SUBREG.
2918 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
2919 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Val: Immr, DL: dl, VT: MVT::i64),
2920 CurDAG->getTargetConstant(Val: Imms, DL: dl, VT: MVT::i64)};
2921
2922 SDNode *BFM = CurDAG->getMachineNode(Opcode: Opc, dl, VT: MVT::i64, Ops: Ops64);
2923 SDValue Inner = CurDAG->getTargetExtractSubreg(SRIdx: AArch64::sub_32, DL: dl,
2924 VT: MVT::i32, Operand: SDValue(BFM, 0));
2925 ReplaceNode(F: N, T: Inner.getNode());
2926 return true;
2927 }
2928
2929 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Val: Immr, DL: dl, VT),
2930 CurDAG->getTargetConstant(Val: Imms, DL: dl, VT)};
2931 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
2932 return true;
2933}
2934
2935/// Does DstMask form a complementary pair with the mask provided by
2936/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
2937/// this asks whether DstMask zeroes precisely those bits that will be set by
2938/// the other half.
2939static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
2940 unsigned NumberOfIgnoredHighBits, EVT VT) {
2941 assert((VT == MVT::i32 || VT == MVT::i64) &&
2942 "i32 or i64 mask type expected!");
2943 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
2944
2945 // Enable implicitTrunc as we're intentionally ignoring high bits.
2946 APInt SignificantDstMask =
2947 APInt(BitWidth, DstMask, /*isSigned=*/false, /*implicitTrunc=*/true);
2948 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(width: BitWidth);
2949
2950 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
2951 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnes();
2952}
2953
2954// Look for bits that will be useful for later uses.
2955// A bit is consider useless as soon as it is dropped and never used
2956// before it as been dropped.
2957// E.g., looking for useful bit of x
2958// 1. y = x & 0x7
2959// 2. z = y >> 2
2960// After #1, x useful bits are 0x7, then the useful bits of x, live through
2961// y.
2962// After #2, the useful bits of x are 0x4.
2963// However, if x is used on an unpredictable instruction, then all its bits
2964// are useful.
2965// E.g.
2966// 1. y = x & 0x7
2967// 2. z = y >> 2
2968// 3. str x, [@x]
2969static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
2970
2971static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
2972 unsigned Depth) {
2973 uint64_t Imm =
2974 cast<const ConstantSDNode>(Val: Op.getOperand(i: 1).getNode())->getZExtValue();
2975 Imm = AArch64_AM::decodeLogicalImmediate(val: Imm, regSize: UsefulBits.getBitWidth());
2976 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
2977 getUsefulBits(Op, UsefulBits, Depth: Depth + 1);
2978}
2979
2980static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
2981 uint64_t Imm, uint64_t MSB,
2982 unsigned Depth) {
2983 // inherit the bitwidth value
2984 APInt OpUsefulBits(UsefulBits);
2985 OpUsefulBits = 1;
2986
2987 if (MSB >= Imm) {
2988 OpUsefulBits <<= MSB - Imm + 1;
2989 --OpUsefulBits;
2990 // The interesting part will be in the lower part of the result
2991 getUsefulBits(Op, UsefulBits&: OpUsefulBits, Depth: Depth + 1);
2992 // The interesting part was starting at Imm in the argument
2993 OpUsefulBits <<= Imm;
2994 } else {
2995 OpUsefulBits <<= MSB + 1;
2996 --OpUsefulBits;
2997 // The interesting part will be shifted in the result
2998 OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
2999 getUsefulBits(Op, UsefulBits&: OpUsefulBits, Depth: Depth + 1);
3000 // The interesting part was at zero in the argument
3001 OpUsefulBits.lshrInPlace(ShiftAmt: OpUsefulBits.getBitWidth() - Imm);
3002 }
3003
3004 UsefulBits &= OpUsefulBits;
3005}
3006
3007static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
3008 unsigned Depth) {
3009 uint64_t Imm =
3010 cast<const ConstantSDNode>(Val: Op.getOperand(i: 1).getNode())->getZExtValue();
3011 uint64_t MSB =
3012 cast<const ConstantSDNode>(Val: Op.getOperand(i: 2).getNode())->getZExtValue();
3013
3014 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
3015}
3016
3017static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
3018 unsigned Depth) {
3019 uint64_t ShiftTypeAndValue =
3020 cast<const ConstantSDNode>(Val: Op.getOperand(i: 2).getNode())->getZExtValue();
3021 APInt Mask(UsefulBits);
3022 Mask.clearAllBits();
3023 Mask.flipAllBits();
3024
3025 if (AArch64_AM::getShiftType(Imm: ShiftTypeAndValue) == AArch64_AM::LSL) {
3026 // Shift Left
3027 uint64_t ShiftAmt = AArch64_AM::getShiftValue(Imm: ShiftTypeAndValue);
3028 Mask <<= ShiftAmt;
3029 getUsefulBits(Op, UsefulBits&: Mask, Depth: Depth + 1);
3030 Mask.lshrInPlace(ShiftAmt);
3031 } else if (AArch64_AM::getShiftType(Imm: ShiftTypeAndValue) == AArch64_AM::LSR) {
3032 // Shift Right
3033 // We do not handle AArch64_AM::ASR, because the sign will change the
3034 // number of useful bits
3035 uint64_t ShiftAmt = AArch64_AM::getShiftValue(Imm: ShiftTypeAndValue);
3036 Mask.lshrInPlace(ShiftAmt);
3037 getUsefulBits(Op, UsefulBits&: Mask, Depth: Depth + 1);
3038 Mask <<= ShiftAmt;
3039 } else
3040 return;
3041
3042 UsefulBits &= Mask;
3043}
3044
3045static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
3046 unsigned Depth) {
3047 uint64_t Imm =
3048 cast<const ConstantSDNode>(Val: Op.getOperand(i: 2).getNode())->getZExtValue();
3049 uint64_t MSB =
3050 cast<const ConstantSDNode>(Val: Op.getOperand(i: 3).getNode())->getZExtValue();
3051
3052 APInt OpUsefulBits(UsefulBits);
3053 OpUsefulBits = 1;
3054
3055 APInt ResultUsefulBits(UsefulBits.getBitWidth(), 0);
3056 ResultUsefulBits.flipAllBits();
3057 APInt Mask(UsefulBits.getBitWidth(), 0);
3058
3059 getUsefulBits(Op, UsefulBits&: ResultUsefulBits, Depth: Depth + 1);
3060
3061 if (MSB >= Imm) {
3062 // The instruction is a BFXIL.
3063 uint64_t Width = MSB - Imm + 1;
3064 uint64_t LSB = Imm;
3065
3066 OpUsefulBits <<= Width;
3067 --OpUsefulBits;
3068
3069 if (Op.getOperand(i: 1) == Orig) {
3070 // Copy the low bits from the result to bits starting from LSB.
3071 Mask = ResultUsefulBits & OpUsefulBits;
3072 Mask <<= LSB;
3073 }
3074
3075 if (Op.getOperand(i: 0) == Orig)
3076 // Bits starting from LSB in the input contribute to the result.
3077 Mask |= (ResultUsefulBits & ~OpUsefulBits);
3078 } else {
3079 // The instruction is a BFI.
3080 uint64_t Width = MSB + 1;
3081 uint64_t LSB = UsefulBits.getBitWidth() - Imm;
3082
3083 OpUsefulBits <<= Width;
3084 --OpUsefulBits;
3085 OpUsefulBits <<= LSB;
3086
3087 if (Op.getOperand(i: 1) == Orig) {
3088 // Copy the bits from the result to the zero bits.
3089 Mask = ResultUsefulBits & OpUsefulBits;
3090 Mask.lshrInPlace(ShiftAmt: LSB);
3091 }
3092
3093 if (Op.getOperand(i: 0) == Orig)
3094 Mask |= (ResultUsefulBits & ~OpUsefulBits);
3095 }
3096
3097 UsefulBits &= Mask;
3098}
3099
3100static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
3101 SDValue Orig, unsigned Depth) {
3102
3103 // Users of this node should have already been instruction selected
3104 // FIXME: Can we turn that into an assert?
3105 if (!UserNode->isMachineOpcode())
3106 return;
3107
3108 switch (UserNode->getMachineOpcode()) {
3109 default:
3110 return;
3111 case AArch64::ANDSWri:
3112 case AArch64::ANDSXri:
3113 case AArch64::ANDWri:
3114 case AArch64::ANDXri:
3115 // We increment Depth only when we call the getUsefulBits
3116 return getUsefulBitsFromAndWithImmediate(Op: SDValue(UserNode, 0), UsefulBits,
3117 Depth);
3118 case AArch64::UBFMWri:
3119 case AArch64::UBFMXri:
3120 return getUsefulBitsFromUBFM(Op: SDValue(UserNode, 0), UsefulBits, Depth);
3121
3122 case AArch64::ORRWrs:
3123 case AArch64::ORRXrs:
3124 if (UserNode->getOperand(Num: 0) != Orig && UserNode->getOperand(Num: 1) == Orig)
3125 getUsefulBitsFromOrWithShiftedReg(Op: SDValue(UserNode, 0), UsefulBits,
3126 Depth);
3127 return;
3128 case AArch64::BFMWri:
3129 case AArch64::BFMXri:
3130 return getUsefulBitsFromBFM(Op: SDValue(UserNode, 0), Orig, UsefulBits, Depth);
3131
3132 case AArch64::STRBBui:
3133 case AArch64::STURBBi:
3134 if (UserNode->getOperand(Num: 0) != Orig)
3135 return;
3136 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
3137 return;
3138
3139 case AArch64::STRHHui:
3140 case AArch64::STURHHi:
3141 if (UserNode->getOperand(Num: 0) != Orig)
3142 return;
3143 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
3144 return;
3145 }
3146}
3147
3148static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
3149 if (Depth >= SelectionDAG::MaxRecursionDepth)
3150 return;
3151 // Initialize UsefulBits
3152 if (!Depth) {
3153 unsigned Bitwidth = Op.getScalarValueSizeInBits();
3154 // At the beginning, assume every produced bits is useful
3155 UsefulBits = APInt(Bitwidth, 0);
3156 UsefulBits.flipAllBits();
3157 }
3158 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
3159
3160 for (SDNode *Node : Op.getNode()->users()) {
3161 // A use cannot produce useful bits
3162 APInt UsefulBitsForUse = APInt(UsefulBits);
3163 getUsefulBitsForUse(UserNode: Node, UsefulBits&: UsefulBitsForUse, Orig: Op, Depth);
3164 UsersUsefulBits |= UsefulBitsForUse;
3165 }
3166 // UsefulBits contains the produced bits that are meaningful for the
3167 // current definition, thus a user cannot make a bit meaningful at
3168 // this point
3169 UsefulBits &= UsersUsefulBits;
3170}
3171
3172/// Create a machine node performing a notional SHL of Op by ShlAmount. If
3173/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
3174/// 0, return Op unchanged.
3175static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
3176 if (ShlAmount == 0)
3177 return Op;
3178
3179 EVT VT = Op.getValueType();
3180 SDLoc dl(Op);
3181 unsigned BitWidth = VT.getSizeInBits();
3182 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
3183
3184 SDNode *ShiftNode;
3185 if (ShlAmount > 0) {
3186 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
3187 ShiftNode = CurDAG->getMachineNode(
3188 Opcode: UBFMOpc, dl, VT, Op1: Op,
3189 Op2: CurDAG->getTargetConstant(Val: BitWidth - ShlAmount, DL: dl, VT),
3190 Op3: CurDAG->getTargetConstant(Val: BitWidth - 1 - ShlAmount, DL: dl, VT));
3191 } else {
3192 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
3193 assert(ShlAmount < 0 && "expected right shift");
3194 int ShrAmount = -ShlAmount;
3195 ShiftNode = CurDAG->getMachineNode(
3196 Opcode: UBFMOpc, dl, VT, Op1: Op, Op2: CurDAG->getTargetConstant(Val: ShrAmount, DL: dl, VT),
3197 Op3: CurDAG->getTargetConstant(Val: BitWidth - 1, DL: dl, VT));
3198 }
3199
3200 return SDValue(ShiftNode, 0);
3201}
3202
3203// For bit-field-positioning pattern "(and (shl VAL, N), ShiftedMask)".
3204static bool isBitfieldPositioningOpFromAnd(SelectionDAG *CurDAG, SDValue Op,
3205 bool BiggerPattern,
3206 const uint64_t NonZeroBits,
3207 SDValue &Src, int &DstLSB,
3208 int &Width);
3209
3210// For bit-field-positioning pattern "shl VAL, N)".
3211static bool isBitfieldPositioningOpFromShl(SelectionDAG *CurDAG, SDValue Op,
3212 bool BiggerPattern,
3213 const uint64_t NonZeroBits,
3214 SDValue &Src, int &DstLSB,
3215 int &Width);
3216
3217/// Does this tree qualify as an attempt to move a bitfield into position,
3218/// essentially "(and (shl VAL, N), Mask)" or (shl VAL, N).
3219static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
3220 bool BiggerPattern, SDValue &Src,
3221 int &DstLSB, int &Width) {
3222 EVT VT = Op.getValueType();
3223 unsigned BitWidth = VT.getSizeInBits();
3224 (void)BitWidth;
3225 assert(BitWidth == 32 || BitWidth == 64);
3226
3227 KnownBits Known = CurDAG->computeKnownBits(Op);
3228
3229 // Non-zero in the sense that they're not provably zero, which is the key
3230 // point if we want to use this value
3231 const uint64_t NonZeroBits = (~Known.Zero).getZExtValue();
3232 if (!isShiftedMask_64(Value: NonZeroBits))
3233 return false;
3234
3235 switch (Op.getOpcode()) {
3236 default:
3237 break;
3238 case ISD::AND:
3239 return isBitfieldPositioningOpFromAnd(CurDAG, Op, BiggerPattern,
3240 NonZeroBits, Src, DstLSB, Width);
3241 case ISD::SHL:
3242 return isBitfieldPositioningOpFromShl(CurDAG, Op, BiggerPattern,
3243 NonZeroBits, Src, DstLSB, Width);
3244 }
3245
3246 return false;
3247}
3248
3249static bool isBitfieldPositioningOpFromAnd(SelectionDAG *CurDAG, SDValue Op,
3250 bool BiggerPattern,
3251 const uint64_t NonZeroBits,
3252 SDValue &Src, int &DstLSB,
3253 int &Width) {
3254 assert(isShiftedMask_64(NonZeroBits) && "Caller guaranteed");
3255
3256 EVT VT = Op.getValueType();
3257 assert((VT == MVT::i32 || VT == MVT::i64) &&
3258 "Caller guarantees VT is one of i32 or i64");
3259 (void)VT;
3260
3261 uint64_t AndImm;
3262 if (!isOpcWithIntImmediate(N: Op.getNode(), Opc: ISD::AND, Imm&: AndImm))
3263 return false;
3264
3265 // If (~AndImm & NonZeroBits) is not zero at POS, we know that
3266 // 1) (AndImm & (1 << POS) == 0)
3267 // 2) the result of AND is not zero at POS bit (according to NonZeroBits)
3268 //
3269 // 1) and 2) don't agree so something must be wrong (e.g., in
3270 // 'SelectionDAG::computeKnownBits')
3271 assert((~AndImm & NonZeroBits) == 0 &&
3272 "Something must be wrong (e.g., in SelectionDAG::computeKnownBits)");
3273
3274 SDValue AndOp0 = Op.getOperand(i: 0);
3275
3276 uint64_t ShlImm;
3277 SDValue ShlOp0;
3278 if (isOpcWithIntImmediate(N: AndOp0.getNode(), Opc: ISD::SHL, Imm&: ShlImm)) {
3279 // For pattern "and(shl(val, N), shifted-mask)", 'ShlOp0' is set to 'val'.
3280 ShlOp0 = AndOp0.getOperand(i: 0);
3281 } else if (VT == MVT::i64 && AndOp0.getOpcode() == ISD::ANY_EXTEND &&
3282 isOpcWithIntImmediate(N: AndOp0.getOperand(i: 0).getNode(), Opc: ISD::SHL,
3283 Imm&: ShlImm)) {
3284 // For pattern "and(any_extend(shl(val, N)), shifted-mask)"
3285
3286 // ShlVal == shl(val, N), which is a left shift on a smaller type.
3287 SDValue ShlVal = AndOp0.getOperand(i: 0);
3288
3289 // Since this is after type legalization and ShlVal is extended to MVT::i64,
3290 // expect VT to be MVT::i32.
3291 assert((ShlVal.getValueType() == MVT::i32) && "Expect VT to be MVT::i32.");
3292
3293 // Widens 'val' to MVT::i64 as the source of bit field positioning.
3294 ShlOp0 = Widen(CurDAG, N: ShlVal.getOperand(i: 0));
3295 } else
3296 return false;
3297
3298 // For !BiggerPattern, bail out if the AndOp0 has more than one use, since
3299 // then we'll end up generating AndOp0+UBFIZ instead of just keeping
3300 // AndOp0+AND.
3301 if (!BiggerPattern && !AndOp0.hasOneUse())
3302 return false;
3303
3304 DstLSB = llvm::countr_zero(Val: NonZeroBits);
3305 Width = llvm::countr_one(Value: NonZeroBits >> DstLSB);
3306
3307 // Bail out on large Width. This happens when no proper combining / constant
3308 // folding was performed.
3309 if (Width >= (int)VT.getSizeInBits()) {
3310 // If VT is i64, Width > 64 is insensible since NonZeroBits is uint64_t, and
3311 // Width == 64 indicates a missed dag-combine from "(and val, AllOnes)" to
3312 // "val".
3313 // If VT is i32, what Width >= 32 means:
3314 // - For "(and (any_extend(shl val, N)), shifted-mask)", the`and` Op
3315 // demands at least 'Width' bits (after dag-combiner). This together with
3316 // `any_extend` Op (undefined higher bits) indicates missed combination
3317 // when lowering the 'and' IR instruction to an machine IR instruction.
3318 LLVM_DEBUG(
3319 dbgs()
3320 << "Found large Width in bit-field-positioning -- this indicates no "
3321 "proper combining / constant folding was performed\n");
3322 return false;
3323 }
3324
3325 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
3326 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
3327 // amount. BiggerPattern is true when this pattern is being matched for BFI,
3328 // BiggerPattern is false when this pattern is being matched for UBFIZ, in
3329 // which case it is not profitable to insert an extra shift.
3330 if (ShlImm != uint64_t(DstLSB) && !BiggerPattern)
3331 return false;
3332
3333 Src = getLeftShift(CurDAG, Op: ShlOp0, ShlAmount: ShlImm - DstLSB);
3334 return true;
3335}
3336
3337// For node (shl (and val, mask), N)), returns true if the node is equivalent to
3338// UBFIZ.
3339static bool isSeveralBitsPositioningOpFromShl(const uint64_t ShlImm, SDValue Op,
3340 SDValue &Src, int &DstLSB,
3341 int &Width) {
3342 // Caller should have verified that N is a left shift with constant shift
3343 // amount; asserts that.
3344 assert(Op.getOpcode() == ISD::SHL &&
3345 "Op.getNode() should be a SHL node to call this function");
3346 assert(isIntImmediateEq(Op.getOperand(1), ShlImm) &&
3347 "Op.getNode() should shift ShlImm to call this function");
3348
3349 uint64_t AndImm = 0;
3350 SDValue Op0 = Op.getOperand(i: 0);
3351 if (!isOpcWithIntImmediate(N: Op0.getNode(), Opc: ISD::AND, Imm&: AndImm))
3352 return false;
3353
3354 const uint64_t ShiftedAndImm = ((AndImm << ShlImm) >> ShlImm);
3355 if (isMask_64(Value: ShiftedAndImm)) {
3356 // AndImm is a superset of (AllOnes >> ShlImm); in other words, AndImm
3357 // should end with Mask, and could be prefixed with random bits if those
3358 // bits are shifted out.
3359 //
3360 // For example, xyz11111 (with {x,y,z} being 0 or 1) is fine if ShlImm >= 3;
3361 // the AND result corresponding to those bits are shifted out, so it's fine
3362 // to not extract them.
3363 Width = llvm::countr_one(Value: ShiftedAndImm);
3364 DstLSB = ShlImm;
3365 Src = Op0.getOperand(i: 0);
3366 return true;
3367 }
3368 return false;
3369}
3370
3371static bool isBitfieldPositioningOpFromShl(SelectionDAG *CurDAG, SDValue Op,
3372 bool BiggerPattern,
3373 const uint64_t NonZeroBits,
3374 SDValue &Src, int &DstLSB,
3375 int &Width) {
3376 assert(isShiftedMask_64(NonZeroBits) && "Caller guaranteed");
3377
3378 EVT VT = Op.getValueType();
3379 assert((VT == MVT::i32 || VT == MVT::i64) &&
3380 "Caller guarantees that type is i32 or i64");
3381 (void)VT;
3382
3383 uint64_t ShlImm;
3384 if (!isOpcWithIntImmediate(N: Op.getNode(), Opc: ISD::SHL, Imm&: ShlImm))
3385 return false;
3386
3387 if (!BiggerPattern && !Op.hasOneUse())
3388 return false;
3389
3390 if (isSeveralBitsPositioningOpFromShl(ShlImm, Op, Src, DstLSB, Width))
3391 return true;
3392
3393 DstLSB = llvm::countr_zero(Val: NonZeroBits);
3394 Width = llvm::countr_one(Value: NonZeroBits >> DstLSB);
3395
3396 if (ShlImm != uint64_t(DstLSB) && !BiggerPattern)
3397 return false;
3398
3399 Src = getLeftShift(CurDAG, Op: Op.getOperand(i: 0), ShlAmount: ShlImm - DstLSB);
3400 return true;
3401}
3402
3403static bool isShiftedMask(uint64_t Mask, EVT VT) {
3404 assert(VT == MVT::i32 || VT == MVT::i64);
3405 if (VT == MVT::i32)
3406 return isShiftedMask_32(Value: Mask);
3407 return isShiftedMask_64(Value: Mask);
3408}
3409
3410// Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
3411// inserted only sets known zero bits.
3412static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
3413 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
3414
3415 EVT VT = N->getValueType(ResNo: 0);
3416 if (VT != MVT::i32 && VT != MVT::i64)
3417 return false;
3418
3419 unsigned BitWidth = VT.getSizeInBits();
3420
3421 uint64_t OrImm;
3422 if (!isOpcWithIntImmediate(N, Opc: ISD::OR, Imm&: OrImm))
3423 return false;
3424
3425 // Skip this transformation if the ORR immediate can be encoded in the ORR.
3426 // Otherwise, we'll trade an AND+ORR for ORR+BFI/BFXIL, which is most likely
3427 // performance neutral.
3428 if (AArch64_AM::isLogicalImmediate(imm: OrImm, regSize: BitWidth))
3429 return false;
3430
3431 uint64_t MaskImm;
3432 SDValue And = N->getOperand(Num: 0);
3433 // Must be a single use AND with an immediate operand.
3434 if (!And.hasOneUse() ||
3435 !isOpcWithIntImmediate(N: And.getNode(), Opc: ISD::AND, Imm&: MaskImm))
3436 return false;
3437
3438 // Compute the Known Zero for the AND as this allows us to catch more general
3439 // cases than just looking for AND with imm.
3440 KnownBits Known = CurDAG->computeKnownBits(Op: And);
3441
3442 // Non-zero in the sense that they're not provably zero, which is the key
3443 // point if we want to use this value.
3444 uint64_t NotKnownZero = (~Known.Zero).getZExtValue();
3445
3446 // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
3447 if (!isShiftedMask(Mask: Known.Zero.getZExtValue(), VT))
3448 return false;
3449
3450 // The bits being inserted must only set those bits that are known to be zero.
3451 if ((OrImm & NotKnownZero) != 0) {
3452 // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
3453 // currently handle this case.
3454 return false;
3455 }
3456
3457 // BFI/BFXIL dst, src, #lsb, #width.
3458 int LSB = llvm::countr_one(Value: NotKnownZero);
3459 int Width = BitWidth - APInt(BitWidth, NotKnownZero).popcount();
3460
3461 // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
3462 unsigned ImmR = (BitWidth - LSB) % BitWidth;
3463 unsigned ImmS = Width - 1;
3464
3465 // If we're creating a BFI instruction avoid cases where we need more
3466 // instructions to materialize the BFI constant as compared to the original
3467 // ORR. A BFXIL will use the same constant as the original ORR, so the code
3468 // should be no worse in this case.
3469 bool IsBFI = LSB != 0;
3470 uint64_t BFIImm = OrImm >> LSB;
3471 if (IsBFI && !AArch64_AM::isLogicalImmediate(imm: BFIImm, regSize: BitWidth)) {
3472 // We have a BFI instruction and we know the constant can't be materialized
3473 // with a ORR-immediate with the zero register.
3474 unsigned OrChunks = 0, BFIChunks = 0;
3475 for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
3476 if (((OrImm >> Shift) & 0xFFFF) != 0)
3477 ++OrChunks;
3478 if (((BFIImm >> Shift) & 0xFFFF) != 0)
3479 ++BFIChunks;
3480 }
3481 if (BFIChunks > OrChunks)
3482 return false;
3483 }
3484
3485 // Materialize the constant to be inserted.
3486 SDLoc DL(N);
3487 unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
3488 SDNode *MOVI = CurDAG->getMachineNode(
3489 Opcode: MOVIOpc, dl: DL, VT, Op1: CurDAG->getTargetConstant(Val: BFIImm, DL, VT));
3490
3491 // Create the BFI/BFXIL instruction.
3492 SDValue Ops[] = {And.getOperand(i: 0), SDValue(MOVI, 0),
3493 CurDAG->getTargetConstant(Val: ImmR, DL, VT),
3494 CurDAG->getTargetConstant(Val: ImmS, DL, VT)};
3495 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
3496 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
3497 return true;
3498}
3499
3500static bool isWorthFoldingIntoOrrWithShift(SDValue Dst, SelectionDAG *CurDAG,
3501 SDValue &ShiftedOperand,
3502 uint64_t &EncodedShiftImm) {
3503 // Avoid folding Dst into ORR-with-shift if Dst has other uses than ORR.
3504 if (!Dst.hasOneUse())
3505 return false;
3506
3507 EVT VT = Dst.getValueType();
3508 assert((VT == MVT::i32 || VT == MVT::i64) &&
3509 "Caller should guarantee that VT is one of i32 or i64");
3510 const unsigned SizeInBits = VT.getSizeInBits();
3511
3512 SDLoc DL(Dst.getNode());
3513 uint64_t AndImm, ShlImm;
3514 if (isOpcWithIntImmediate(N: Dst.getNode(), Opc: ISD::AND, Imm&: AndImm) &&
3515 isShiftedMask_64(Value: AndImm)) {
3516 // Avoid transforming 'DstOp0' if it has other uses than the AND node.
3517 SDValue DstOp0 = Dst.getOperand(i: 0);
3518 if (!DstOp0.hasOneUse())
3519 return false;
3520
3521 // An example to illustrate the transformation
3522 // From:
3523 // lsr x8, x1, #1
3524 // and x8, x8, #0x3f80
3525 // bfxil x8, x1, #0, #7
3526 // To:
3527 // and x8, x23, #0x7f
3528 // ubfx x9, x23, #8, #7
3529 // orr x23, x8, x9, lsl #7
3530 //
3531 // The number of instructions remains the same, but ORR is faster than BFXIL
3532 // on many AArch64 processors (or as good as BFXIL if not faster). Besides,
3533 // the dependency chain is improved after the transformation.
3534 uint64_t SrlImm;
3535 if (isOpcWithIntImmediate(N: DstOp0.getNode(), Opc: ISD::SRL, Imm&: SrlImm)) {
3536 uint64_t NumTrailingZeroInShiftedMask = llvm::countr_zero(Val: AndImm);
3537 if ((SrlImm + NumTrailingZeroInShiftedMask) < SizeInBits) {
3538 unsigned MaskWidth =
3539 llvm::countr_one(Value: AndImm >> NumTrailingZeroInShiftedMask);
3540 unsigned UBFMOpc =
3541 (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
3542 SDNode *UBFMNode = CurDAG->getMachineNode(
3543 Opcode: UBFMOpc, dl: DL, VT, Op1: DstOp0.getOperand(i: 0),
3544 Op2: CurDAG->getTargetConstant(Val: SrlImm + NumTrailingZeroInShiftedMask, DL,
3545 VT),
3546 Op3: CurDAG->getTargetConstant(
3547 Val: SrlImm + NumTrailingZeroInShiftedMask + MaskWidth - 1, DL, VT));
3548 ShiftedOperand = SDValue(UBFMNode, 0);
3549 EncodedShiftImm = AArch64_AM::getShifterImm(
3550 ST: AArch64_AM::LSL, Imm: NumTrailingZeroInShiftedMask);
3551 return true;
3552 }
3553 }
3554 return false;
3555 }
3556
3557 if (isOpcWithIntImmediate(N: Dst.getNode(), Opc: ISD::SHL, Imm&: ShlImm)) {
3558 ShiftedOperand = Dst.getOperand(i: 0);
3559 EncodedShiftImm = AArch64_AM::getShifterImm(ST: AArch64_AM::LSL, Imm: ShlImm);
3560 return true;
3561 }
3562
3563 uint64_t SrlImm;
3564 if (isOpcWithIntImmediate(N: Dst.getNode(), Opc: ISD::SRL, Imm&: SrlImm)) {
3565 ShiftedOperand = Dst.getOperand(i: 0);
3566 EncodedShiftImm = AArch64_AM::getShifterImm(ST: AArch64_AM::LSR, Imm: SrlImm);
3567 return true;
3568 }
3569 return false;
3570}
3571
3572// Given an 'ISD::OR' node that is going to be selected as BFM, analyze
3573// the operands and select it to AArch64::ORR with shifted registers if
3574// that's more efficient. Returns true iff selection to AArch64::ORR happens.
3575static bool tryOrrWithShift(SDNode *N, SDValue OrOpd0, SDValue OrOpd1,
3576 SDValue Src, SDValue Dst, SelectionDAG *CurDAG,
3577 const bool BiggerPattern) {
3578 EVT VT = N->getValueType(ResNo: 0);
3579 assert(N->getOpcode() == ISD::OR && "Expect N to be an OR node");
3580 assert(((N->getOperand(0) == OrOpd0 && N->getOperand(1) == OrOpd1) ||
3581 (N->getOperand(1) == OrOpd0 && N->getOperand(0) == OrOpd1)) &&
3582 "Expect OrOpd0 and OrOpd1 to be operands of ISD::OR");
3583 assert((VT == MVT::i32 || VT == MVT::i64) &&
3584 "Expect result type to be i32 or i64 since N is combinable to BFM");
3585 SDLoc DL(N);
3586
3587 // Bail out if BFM simplifies away one node in BFM Dst.
3588 if (OrOpd1 != Dst)
3589 return false;
3590
3591 const unsigned OrrOpc = (VT == MVT::i32) ? AArch64::ORRWrs : AArch64::ORRXrs;
3592 // For "BFM Rd, Rn, #immr, #imms", it's known that BFM simplifies away fewer
3593 // nodes from Rn (or inserts additional shift node) if BiggerPattern is true.
3594 if (BiggerPattern) {
3595 uint64_t SrcAndImm;
3596 if (isOpcWithIntImmediate(N: OrOpd0.getNode(), Opc: ISD::AND, Imm&: SrcAndImm) &&
3597 isMask_64(Value: SrcAndImm) && OrOpd0.getOperand(i: 0) == Src) {
3598 // OrOpd0 = AND Src, #Mask
3599 // So BFM simplifies away one AND node from Src and doesn't simplify away
3600 // nodes from Dst. If ORR with left-shifted operand also simplifies away
3601 // one node (from Rd), ORR is better since it has higher throughput and
3602 // smaller latency than BFM on many AArch64 processors (and for the rest
3603 // ORR is at least as good as BFM).
3604 SDValue ShiftedOperand;
3605 uint64_t EncodedShiftImm;
3606 if (isWorthFoldingIntoOrrWithShift(Dst, CurDAG, ShiftedOperand,
3607 EncodedShiftImm)) {
3608 SDValue Ops[] = {OrOpd0, ShiftedOperand,
3609 CurDAG->getTargetConstant(Val: EncodedShiftImm, DL, VT)};
3610 CurDAG->SelectNodeTo(N, MachineOpc: OrrOpc, VT, Ops);
3611 return true;
3612 }
3613 }
3614 return false;
3615 }
3616
3617 assert((!BiggerPattern) && "BiggerPattern should be handled above");
3618
3619 uint64_t ShlImm;
3620 if (isOpcWithIntImmediate(N: OrOpd0.getNode(), Opc: ISD::SHL, Imm&: ShlImm)) {
3621 if (OrOpd0.getOperand(i: 0) == Src && OrOpd0.hasOneUse()) {
3622 SDValue Ops[] = {
3623 Dst, Src,
3624 CurDAG->getTargetConstant(
3625 Val: AArch64_AM::getShifterImm(ST: AArch64_AM::LSL, Imm: ShlImm), DL, VT)};
3626 CurDAG->SelectNodeTo(N, MachineOpc: OrrOpc, VT, Ops);
3627 return true;
3628 }
3629
3630 // Select the following pattern to left-shifted operand rather than BFI.
3631 // %val1 = op ..
3632 // %val2 = shl %val1, #imm
3633 // %res = or %val1, %val2
3634 //
3635 // If N is selected to be BFI, we know that
3636 // 1) OrOpd0 would be the operand from which extract bits (i.e., folded into
3637 // BFI) 2) OrOpd1 would be the destination operand (i.e., preserved)
3638 //
3639 // Instead of selecting N to BFI, fold OrOpd0 as a left shift directly.
3640 if (OrOpd0.getOperand(i: 0) == OrOpd1) {
3641 SDValue Ops[] = {
3642 OrOpd1, OrOpd1,
3643 CurDAG->getTargetConstant(
3644 Val: AArch64_AM::getShifterImm(ST: AArch64_AM::LSL, Imm: ShlImm), DL, VT)};
3645 CurDAG->SelectNodeTo(N, MachineOpc: OrrOpc, VT, Ops);
3646 return true;
3647 }
3648 }
3649
3650 uint64_t SrlImm;
3651 if (isOpcWithIntImmediate(N: OrOpd0.getNode(), Opc: ISD::SRL, Imm&: SrlImm)) {
3652 // Select the following pattern to right-shifted operand rather than BFXIL.
3653 // %val1 = op ..
3654 // %val2 = lshr %val1, #imm
3655 // %res = or %val1, %val2
3656 //
3657 // If N is selected to be BFXIL, we know that
3658 // 1) OrOpd0 would be the operand from which extract bits (i.e., folded into
3659 // BFXIL) 2) OrOpd1 would be the destination operand (i.e., preserved)
3660 //
3661 // Instead of selecting N to BFXIL, fold OrOpd0 as a right shift directly.
3662 if (OrOpd0.getOperand(i: 0) == OrOpd1) {
3663 SDValue Ops[] = {
3664 OrOpd1, OrOpd1,
3665 CurDAG->getTargetConstant(
3666 Val: AArch64_AM::getShifterImm(ST: AArch64_AM::LSR, Imm: SrlImm), DL, VT)};
3667 CurDAG->SelectNodeTo(N, MachineOpc: OrrOpc, VT, Ops);
3668 return true;
3669 }
3670 }
3671
3672 return false;
3673}
3674
3675static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
3676 SelectionDAG *CurDAG) {
3677 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
3678
3679 EVT VT = N->getValueType(ResNo: 0);
3680 if (VT != MVT::i32 && VT != MVT::i64)
3681 return false;
3682
3683 unsigned BitWidth = VT.getSizeInBits();
3684
3685 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
3686 // have the expected shape. Try to undo that.
3687
3688 unsigned NumberOfIgnoredLowBits = UsefulBits.countr_zero();
3689 unsigned NumberOfIgnoredHighBits = UsefulBits.countl_zero();
3690
3691 // Given a OR operation, check if we have the following pattern
3692 // ubfm c, b, imm, imm2 (or something that does the same jobs, see
3693 // isBitfieldExtractOp)
3694 // d = e & mask2 ; where mask is a binary sequence of 1..10..0 and
3695 // countTrailingZeros(mask2) == imm2 - imm + 1
3696 // f = d | c
3697 // if yes, replace the OR instruction with:
3698 // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2
3699
3700 // OR is commutative, check all combinations of operand order and values of
3701 // BiggerPattern, i.e.
3702 // Opd0, Opd1, BiggerPattern=false
3703 // Opd1, Opd0, BiggerPattern=false
3704 // Opd0, Opd1, BiggerPattern=true
3705 // Opd1, Opd0, BiggerPattern=true
3706 // Several of these combinations may match, so check with BiggerPattern=false
3707 // first since that will produce better results by matching more instructions
3708 // and/or inserting fewer extra instructions.
3709 for (int I = 0; I < 4; ++I) {
3710
3711 SDValue Dst, Src;
3712 unsigned ImmR, ImmS;
3713 bool BiggerPattern = I / 2;
3714 SDValue OrOpd0Val = N->getOperand(Num: I % 2);
3715 SDNode *OrOpd0 = OrOpd0Val.getNode();
3716 SDValue OrOpd1Val = N->getOperand(Num: (I + 1) % 2);
3717 SDNode *OrOpd1 = OrOpd1Val.getNode();
3718
3719 unsigned BFXOpc;
3720 int DstLSB, Width;
3721 if (isBitfieldExtractOp(CurDAG, N: OrOpd0, Opc&: BFXOpc, Opd0&: Src, Immr&: ImmR, Imms&: ImmS,
3722 NumberOfIgnoredLowBits, BiggerPattern)) {
3723 // Check that the returned opcode is compatible with the pattern,
3724 // i.e., same type and zero extended (U and not S)
3725 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
3726 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
3727 continue;
3728
3729 // Compute the width of the bitfield insertion
3730 DstLSB = 0;
3731 Width = ImmS - ImmR + 1;
3732 // FIXME: This constraint is to catch bitfield insertion we may
3733 // want to widen the pattern if we want to grab general bitfield
3734 // move case
3735 if (Width <= 0)
3736 continue;
3737
3738 // If the mask on the insertee is correct, we have a BFXIL operation. We
3739 // can share the ImmR and ImmS values from the already-computed UBFM.
3740 } else if (isBitfieldPositioningOp(CurDAG, Op: OrOpd0Val,
3741 BiggerPattern,
3742 Src, DstLSB, Width)) {
3743 ImmR = (BitWidth - DstLSB) % BitWidth;
3744 ImmS = Width - 1;
3745 } else
3746 continue;
3747
3748 // Check the second part of the pattern
3749 EVT VT = OrOpd1Val.getValueType();
3750 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
3751
3752 // Compute the Known Zero for the candidate of the first operand.
3753 // This allows to catch more general case than just looking for
3754 // AND with imm. Indeed, simplify-demanded-bits may have removed
3755 // the AND instruction because it proves it was useless.
3756 KnownBits Known = CurDAG->computeKnownBits(Op: OrOpd1Val);
3757
3758 // Check if there is enough room for the second operand to appear
3759 // in the first one
3760 APInt BitsToBeInserted =
3761 APInt::getBitsSet(numBits: Known.getBitWidth(), loBit: DstLSB, hiBit: DstLSB + Width);
3762
3763 if ((BitsToBeInserted & ~Known.Zero) != 0)
3764 continue;
3765
3766 // Set the first operand
3767 uint64_t Imm;
3768 if (isOpcWithIntImmediate(N: OrOpd1, Opc: ISD::AND, Imm) &&
3769 isBitfieldDstMask(DstMask: Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
3770 // In that case, we can eliminate the AND
3771 Dst = OrOpd1->getOperand(Num: 0);
3772 else
3773 // Maybe the AND has been removed by simplify-demanded-bits
3774 // or is useful because it discards more bits
3775 Dst = OrOpd1Val;
3776
3777 // Before selecting ISD::OR node to AArch64::BFM, see if an AArch64::ORR
3778 // with shifted operand is more efficient.
3779 if (tryOrrWithShift(N, OrOpd0: OrOpd0Val, OrOpd1: OrOpd1Val, Src, Dst, CurDAG,
3780 BiggerPattern))
3781 return true;
3782
3783 // both parts match
3784 SDLoc DL(N);
3785 SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(Val: ImmR, DL, VT),
3786 CurDAG->getTargetConstant(Val: ImmS, DL, VT)};
3787 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
3788 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
3789 return true;
3790 }
3791
3792 // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
3793 // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
3794 // mask (e.g., 0x000ffff0).
3795 uint64_t Mask0Imm, Mask1Imm;
3796 SDValue And0 = N->getOperand(Num: 0);
3797 SDValue And1 = N->getOperand(Num: 1);
3798 if (And0.hasOneUse() && And1.hasOneUse() &&
3799 isOpcWithIntImmediate(N: And0.getNode(), Opc: ISD::AND, Imm&: Mask0Imm) &&
3800 isOpcWithIntImmediate(N: And1.getNode(), Opc: ISD::AND, Imm&: Mask1Imm) &&
3801 APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
3802 (isShiftedMask(Mask: Mask0Imm, VT) || isShiftedMask(Mask: Mask1Imm, VT))) {
3803
3804 // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
3805 // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
3806 // bits to be inserted.
3807 if (isShiftedMask(Mask: Mask0Imm, VT)) {
3808 std::swap(a&: And0, b&: And1);
3809 std::swap(a&: Mask0Imm, b&: Mask1Imm);
3810 }
3811
3812 SDValue Src = And1->getOperand(Num: 0);
3813 SDValue Dst = And0->getOperand(Num: 0);
3814 unsigned LSB = llvm::countr_zero(Val: Mask1Imm);
3815 int Width = BitWidth - APInt(BitWidth, Mask0Imm).popcount();
3816
3817 // The BFXIL inserts the low-order bits from a source register, so right
3818 // shift the needed bits into place.
3819 SDLoc DL(N);
3820 unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
3821 uint64_t LsrImm = LSB;
3822 if (Src->hasOneUse() &&
3823 isOpcWithIntImmediate(N: Src.getNode(), Opc: ISD::SRL, Imm&: LsrImm) &&
3824 (LsrImm + LSB) < BitWidth) {
3825 Src = Src->getOperand(Num: 0);
3826 LsrImm += LSB;
3827 }
3828
3829 SDNode *LSR = CurDAG->getMachineNode(
3830 Opcode: ShiftOpc, dl: DL, VT, Op1: Src, Op2: CurDAG->getTargetConstant(Val: LsrImm, DL, VT),
3831 Op3: CurDAG->getTargetConstant(Val: BitWidth - 1, DL, VT));
3832
3833 // BFXIL is an alias of BFM, so translate to BFM operands.
3834 unsigned ImmR = (BitWidth - LSB) % BitWidth;
3835 unsigned ImmS = Width - 1;
3836
3837 // Create the BFXIL instruction.
3838 SDValue Ops[] = {Dst, SDValue(LSR, 0),
3839 CurDAG->getTargetConstant(Val: ImmR, DL, VT),
3840 CurDAG->getTargetConstant(Val: ImmS, DL, VT)};
3841 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
3842 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
3843 return true;
3844 }
3845
3846 return false;
3847}
3848
3849bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
3850 if (N->getOpcode() != ISD::OR)
3851 return false;
3852
3853 APInt NUsefulBits;
3854 getUsefulBits(Op: SDValue(N, 0), UsefulBits&: NUsefulBits);
3855
3856 // If all bits are not useful, just return UNDEF.
3857 if (!NUsefulBits) {
3858 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::IMPLICIT_DEF, VT: N->getValueType(ResNo: 0));
3859 return true;
3860 }
3861
3862 if (tryBitfieldInsertOpFromOr(N, UsefulBits: NUsefulBits, CurDAG))
3863 return true;
3864
3865 return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
3866}
3867
3868/// SelectBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
3869/// equivalent of a left shift by a constant amount followed by an and masking
3870/// out a contiguous set of bits.
3871bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
3872 if (N->getOpcode() != ISD::AND)
3873 return false;
3874
3875 EVT VT = N->getValueType(ResNo: 0);
3876 if (VT != MVT::i32 && VT != MVT::i64)
3877 return false;
3878
3879 SDValue Op0;
3880 int DstLSB, Width;
3881 if (!isBitfieldPositioningOp(CurDAG, Op: SDValue(N, 0), /*BiggerPattern=*/false,
3882 Src&: Op0, DstLSB, Width))
3883 return false;
3884
3885 // ImmR is the rotate right amount.
3886 unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
3887 // ImmS is the most significant bit of the source to be moved.
3888 unsigned ImmS = Width - 1;
3889
3890 SDLoc DL(N);
3891 SDValue Ops[] = {Op0, CurDAG->getTargetConstant(Val: ImmR, DL, VT),
3892 CurDAG->getTargetConstant(Val: ImmS, DL, VT)};
3893 unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
3894 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
3895 return true;
3896}
3897
3898/// tryShiftAmountMod - Take advantage of built-in mod of shift amount in
3899/// variable shift/rotate instructions.
3900bool AArch64DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
3901 EVT VT = N->getValueType(ResNo: 0);
3902
3903 unsigned Opc;
3904 switch (N->getOpcode()) {
3905 case ISD::ROTR:
3906 Opc = (VT == MVT::i32) ? AArch64::RORVWr : AArch64::RORVXr;
3907 break;
3908 case ISD::SHL:
3909 Opc = (VT == MVT::i32) ? AArch64::LSLVWr : AArch64::LSLVXr;
3910 break;
3911 case ISD::SRL:
3912 Opc = (VT == MVT::i32) ? AArch64::LSRVWr : AArch64::LSRVXr;
3913 break;
3914 case ISD::SRA:
3915 Opc = (VT == MVT::i32) ? AArch64::ASRVWr : AArch64::ASRVXr;
3916 break;
3917 default:
3918 return false;
3919 }
3920
3921 uint64_t Size;
3922 uint64_t Bits;
3923 if (VT == MVT::i32) {
3924 Bits = 5;
3925 Size = 32;
3926 } else if (VT == MVT::i64) {
3927 Bits = 6;
3928 Size = 64;
3929 } else
3930 return false;
3931
3932 SDValue ShiftAmt = N->getOperand(Num: 1);
3933 SDLoc DL(N);
3934 SDValue NewShiftAmt;
3935
3936 // Skip over an extend of the shift amount.
3937 if (ShiftAmt->getOpcode() == ISD::ZERO_EXTEND ||
3938 ShiftAmt->getOpcode() == ISD::ANY_EXTEND)
3939 ShiftAmt = ShiftAmt->getOperand(Num: 0);
3940
3941 if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
3942 SDValue Add0 = ShiftAmt->getOperand(Num: 0);
3943 SDValue Add1 = ShiftAmt->getOperand(Num: 1);
3944 uint64_t Add0Imm;
3945 uint64_t Add1Imm;
3946 if (isIntImmediate(N: Add1, Imm&: Add1Imm) && (Add1Imm % Size == 0)) {
3947 // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
3948 // to avoid the ADD/SUB.
3949 NewShiftAmt = Add0;
3950 } else if (ShiftAmt->getOpcode() == ISD::SUB &&
3951 isIntImmediate(N: Add0, Imm&: Add0Imm) && Add0Imm != 0 &&
3952 (Add0Imm % Size == 0)) {
3953 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
3954 // to generate a NEG instead of a SUB from a constant.
3955 unsigned NegOpc;
3956 unsigned ZeroReg;
3957 EVT SubVT = ShiftAmt->getValueType(ResNo: 0);
3958 if (SubVT == MVT::i32) {
3959 NegOpc = AArch64::SUBWrr;
3960 ZeroReg = AArch64::WZR;
3961 } else {
3962 assert(SubVT == MVT::i64);
3963 NegOpc = AArch64::SUBXrr;
3964 ZeroReg = AArch64::XZR;
3965 }
3966 SDValue Zero =
3967 CurDAG->getCopyFromReg(Chain: CurDAG->getEntryNode(), dl: DL, Reg: ZeroReg, VT: SubVT);
3968 MachineSDNode *Neg =
3969 CurDAG->getMachineNode(Opcode: NegOpc, dl: DL, VT: SubVT, Op1: Zero, Op2: Add1);
3970 NewShiftAmt = SDValue(Neg, 0);
3971 } else if (ShiftAmt->getOpcode() == ISD::SUB &&
3972 isIntImmediate(N: Add0, Imm&: Add0Imm) && (Add0Imm % Size == Size - 1)) {
3973 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
3974 // to generate a NOT instead of a SUB from a constant.
3975 unsigned NotOpc;
3976 unsigned ZeroReg;
3977 EVT SubVT = ShiftAmt->getValueType(ResNo: 0);
3978 if (SubVT == MVT::i32) {
3979 NotOpc = AArch64::ORNWrr;
3980 ZeroReg = AArch64::WZR;
3981 } else {
3982 assert(SubVT == MVT::i64);
3983 NotOpc = AArch64::ORNXrr;
3984 ZeroReg = AArch64::XZR;
3985 }
3986 SDValue Zero =
3987 CurDAG->getCopyFromReg(Chain: CurDAG->getEntryNode(), dl: DL, Reg: ZeroReg, VT: SubVT);
3988 MachineSDNode *Not =
3989 CurDAG->getMachineNode(Opcode: NotOpc, dl: DL, VT: SubVT, Op1: Zero, Op2: Add1);
3990 NewShiftAmt = SDValue(Not, 0);
3991 } else
3992 return false;
3993 } else {
3994 // If the shift amount is masked with an AND, check that the mask covers the
3995 // bits that are implicitly ANDed off by the above opcodes and if so, skip
3996 // the AND.
3997 uint64_t MaskImm;
3998 if (!isOpcWithIntImmediate(N: ShiftAmt.getNode(), Opc: ISD::AND, Imm&: MaskImm) &&
3999 !isOpcWithIntImmediate(N: ShiftAmt.getNode(), Opc: AArch64ISD::ANDS, Imm&: MaskImm))
4000 return false;
4001
4002 if ((unsigned)llvm::countr_one(Value: MaskImm) < Bits)
4003 return false;
4004
4005 NewShiftAmt = ShiftAmt->getOperand(Num: 0);
4006 }
4007
4008 // Narrow/widen the shift amount to match the size of the shift operation.
4009 if (VT == MVT::i32)
4010 NewShiftAmt = narrowIfNeeded(CurDAG, N: NewShiftAmt);
4011 else if (VT == MVT::i64 && NewShiftAmt->getValueType(ResNo: 0) == MVT::i32) {
4012 SDValue SubReg = CurDAG->getTargetConstant(Val: AArch64::sub_32, DL, VT: MVT::i32);
4013 MachineSDNode *Ext = CurDAG->getMachineNode(Opcode: AArch64::SUBREG_TO_REG, dl: DL, VT,
4014 Op1: NewShiftAmt, Op2: SubReg);
4015 NewShiftAmt = SDValue(Ext, 0);
4016 }
4017
4018 SDValue Ops[] = {N->getOperand(Num: 0), NewShiftAmt};
4019 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
4020 return true;
4021}
4022
4023static bool checkCVTFixedPointOperandWithFBits(SelectionDAG *CurDAG, SDValue N,
4024 SDValue &FixedPos,
4025 unsigned RegWidth,
4026 bool isReciprocal) {
4027 APFloat FVal(0.0);
4028 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(Val&: N))
4029 FVal = CN->getValueAPF();
4030 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(Val&: N)) {
4031 // Some otherwise illegal constants are allowed in this case.
4032 if (LN->getOperand(Num: 1).getOpcode() != AArch64ISD::ADDlow ||
4033 !isa<ConstantPoolSDNode>(Val: LN->getOperand(Num: 1)->getOperand(Num: 1)))
4034 return false;
4035
4036 ConstantPoolSDNode *CN =
4037 dyn_cast<ConstantPoolSDNode>(Val: LN->getOperand(Num: 1)->getOperand(Num: 1));
4038 FVal = cast<ConstantFP>(Val: CN->getConstVal())->getValueAPF();
4039 } else
4040 return false;
4041
4042 if (unsigned FBits =
4043 CheckFixedPointOperandConstant(FVal, RegWidth, isReciprocal)) {
4044 FixedPos = CurDAG->getTargetConstant(Val: FBits, DL: SDLoc(N), VT: MVT::i32);
4045 return true;
4046 }
4047
4048 return false;
4049}
4050
4051bool AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
4052 unsigned RegWidth) {
4053 return checkCVTFixedPointOperandWithFBits(CurDAG, N, FixedPos, RegWidth,
4054 /*isReciprocal*/ false);
4055}
4056
4057bool AArch64DAGToDAGISel::SelectCVTFixedPointVec(SDValue N, SDValue &FixedPos,
4058 unsigned RegWidth) {
4059 if ((N.getOpcode() == AArch64ISD::NVCAST || N.getOpcode() == ISD::BITCAST) &&
4060 N.getValueType().getScalarSizeInBits() ==
4061 N.getOperand(i: 0).getValueType().getScalarSizeInBits())
4062 N = N.getOperand(i: 0);
4063
4064 auto ImmToFloat = [RegWidth](APInt Imm) {
4065 switch (RegWidth) {
4066 case 16:
4067 return APFloat(APFloat::IEEEhalf(), Imm);
4068 case 32:
4069 return APFloat(APFloat::IEEEsingle(), Imm);
4070 case 64:
4071 return APFloat(APFloat::IEEEdouble(), Imm);
4072 default:
4073 llvm_unreachable("Unexpected RegWidth!");
4074 };
4075 };
4076
4077 APFloat FVal(0.0);
4078 switch (N->getOpcode()) {
4079 case AArch64ISD::MOVIshift:
4080 FVal = ImmToFloat(APInt(RegWidth, N.getConstantOperandVal(i: 0)
4081 << N.getConstantOperandVal(i: 1)));
4082 break;
4083 case AArch64ISD::FMOV:
4084 assert(RegWidth == 32 || RegWidth == 64);
4085 if (RegWidth == 32)
4086 FVal = ImmToFloat(
4087 APInt(RegWidth, (uint32_t)AArch64_AM::decodeAdvSIMDModImmType11(
4088 Imm: N.getConstantOperandVal(i: 0))));
4089 else
4090 FVal = ImmToFloat(APInt(RegWidth, AArch64_AM::decodeAdvSIMDModImmType12(
4091 Imm: N.getConstantOperandVal(i: 0))));
4092 break;
4093 case AArch64ISD::DUP:
4094 if (isa<ConstantSDNode>(Val: N.getOperand(i: 0)))
4095 FVal = ImmToFloat(N.getConstantOperandAPInt(i: 0).trunc(width: RegWidth));
4096 else
4097 return false;
4098 break;
4099 default:
4100 return false;
4101 }
4102
4103 if (unsigned FBits = CheckFixedPointOperandConstant(FVal, RegWidth,
4104 /*isReciprocal*/ false)) {
4105 FixedPos = CurDAG->getTargetConstant(Val: FBits, DL: SDLoc(N), VT: MVT::i32);
4106 return true;
4107 }
4108
4109 return false;
4110}
4111
4112bool AArch64DAGToDAGISel::SelectCVTFixedPosRecipOperand(SDValue N,
4113 SDValue &FixedPos,
4114 unsigned RegWidth) {
4115 return checkCVTFixedPointOperandWithFBits(CurDAG, N, FixedPos, RegWidth,
4116 /*isReciprocal*/ true);
4117}
4118
4119// Inspects a register string of the form o0:op1:CRn:CRm:op2 gets the fields
4120// of the string and obtains the integer values from them and combines these
4121// into a single value to be used in the MRS/MSR instruction.
4122static int getIntOperandFromRegisterString(StringRef RegString) {
4123 SmallVector<StringRef, 5> Fields;
4124 RegString.split(A&: Fields, Separator: ':');
4125
4126 if (Fields.size() == 1)
4127 return -1;
4128
4129 assert(Fields.size() == 5
4130 && "Invalid number of fields in read register string");
4131
4132 SmallVector<int, 5> Ops;
4133 bool AllIntFields = true;
4134
4135 for (StringRef Field : Fields) {
4136 unsigned IntField;
4137 AllIntFields &= !Field.getAsInteger(Radix: 10, Result&: IntField);
4138 Ops.push_back(Elt: IntField);
4139 }
4140
4141 assert(AllIntFields &&
4142 "Unexpected non-integer value in special register string.");
4143 (void)AllIntFields;
4144
4145 // Need to combine the integer fields of the string into a single value
4146 // based on the bit encoding of MRS/MSR instruction.
4147 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
4148 (Ops[3] << 3) | (Ops[4]);
4149}
4150
4151// Lower the read_register intrinsic to an MRS instruction node if the special
4152// register string argument is either of the form detailed in the ALCE (the
4153// form described in getIntOperandsFromRegisterString) or is a named register
4154// known by the MRS SysReg mapper.
4155bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
4156 const auto *MD = cast<MDNodeSDNode>(Val: N->getOperand(Num: 1));
4157 const auto *RegString = cast<MDString>(Val: MD->getMD()->getOperand(I: 0));
4158 SDLoc DL(N);
4159
4160 bool ReadIs128Bit = N->getOpcode() == AArch64ISD::MRRS;
4161
4162 unsigned Opcode64Bit = AArch64::MRS;
4163 int Imm = getIntOperandFromRegisterString(RegString: RegString->getString());
4164 if (Imm == -1) {
4165 // No match, Use the sysreg mapper to map the remaining possible strings to
4166 // the value for the register to be used for the instruction operand.
4167 const auto *TheReg =
4168 AArch64SysReg::lookupSysRegByName(Name: RegString->getString());
4169 if (TheReg && TheReg->Readable &&
4170 TheReg->haveFeatures(ActiveFeatures: Subtarget->getFeatureBits()))
4171 Imm = TheReg->Encoding;
4172 else
4173 Imm = AArch64SysReg::parseGenericRegister(Name: RegString->getString());
4174
4175 if (Imm == -1) {
4176 // Still no match, see if this is "pc" or give up.
4177 if (!ReadIs128Bit && RegString->getString() == "pc") {
4178 Opcode64Bit = AArch64::ADR;
4179 Imm = 0;
4180 } else {
4181 return false;
4182 }
4183 }
4184 }
4185
4186 SDValue InChain = N->getOperand(Num: 0);
4187 SDValue SysRegImm = CurDAG->getTargetConstant(Val: Imm, DL, VT: MVT::i32);
4188 if (!ReadIs128Bit) {
4189 CurDAG->SelectNodeTo(N, MachineOpc: Opcode64Bit, VT1: MVT::i64, VT2: MVT::Other /* Chain */,
4190 Ops: {SysRegImm, InChain});
4191 } else {
4192 SDNode *MRRS = CurDAG->getMachineNode(
4193 Opcode: AArch64::MRRS, dl: DL,
4194 ResultTys: {MVT::Untyped /* XSeqPair */, MVT::Other /* Chain */},
4195 Ops: {SysRegImm, InChain});
4196
4197 // Sysregs are not endian. The even register always contains the low half
4198 // of the register.
4199 SDValue Lo = CurDAG->getTargetExtractSubreg(SRIdx: AArch64::sube64, DL, VT: MVT::i64,
4200 Operand: SDValue(MRRS, 0));
4201 SDValue Hi = CurDAG->getTargetExtractSubreg(SRIdx: AArch64::subo64, DL, VT: MVT::i64,
4202 Operand: SDValue(MRRS, 0));
4203 SDValue OutChain = SDValue(MRRS, 1);
4204
4205 ReplaceUses(F: SDValue(N, 0), T: Lo);
4206 ReplaceUses(F: SDValue(N, 1), T: Hi);
4207 ReplaceUses(F: SDValue(N, 2), T: OutChain);
4208 };
4209 return true;
4210}
4211
4212// Lower the write_register intrinsic to an MSR instruction node if the special
4213// register string argument is either of the form detailed in the ALCE (the
4214// form described in getIntOperandsFromRegisterString) or is a named register
4215// known by the MSR SysReg mapper.
4216bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
4217 const auto *MD = cast<MDNodeSDNode>(Val: N->getOperand(Num: 1));
4218 const auto *RegString = cast<MDString>(Val: MD->getMD()->getOperand(I: 0));
4219 SDLoc DL(N);
4220
4221 bool WriteIs128Bit = N->getOpcode() == AArch64ISD::MSRR;
4222
4223 if (!WriteIs128Bit) {
4224 // Check if the register was one of those allowed as the pstatefield value
4225 // in the MSR (immediate) instruction. To accept the values allowed in the
4226 // pstatefield for the MSR (immediate) instruction, we also require that an
4227 // immediate value has been provided as an argument, we know that this is
4228 // the case as it has been ensured by semantic checking.
4229 auto trySelectPState = [&](auto PMapper, unsigned State) {
4230 if (PMapper) {
4231 assert(isa<ConstantSDNode>(N->getOperand(2)) &&
4232 "Expected a constant integer expression.");
4233 unsigned Reg = PMapper->Encoding;
4234 uint64_t Immed = N->getConstantOperandVal(Num: 2);
4235 CurDAG->SelectNodeTo(
4236 N, MachineOpc: State, VT: MVT::Other, Op1: CurDAG->getTargetConstant(Val: Reg, DL, VT: MVT::i32),
4237 Op2: CurDAG->getTargetConstant(Val: Immed, DL, VT: MVT::i16), Op3: N->getOperand(Num: 0));
4238 return true;
4239 }
4240 return false;
4241 };
4242
4243 if (trySelectPState(
4244 AArch64PState::lookupPStateImm0_15ByName(Name: RegString->getString()),
4245 AArch64::MSRpstateImm4))
4246 return true;
4247 if (trySelectPState(
4248 AArch64PState::lookupPStateImm0_1ByName(Name: RegString->getString()),
4249 AArch64::MSRpstateImm1))
4250 return true;
4251 }
4252
4253 int Imm = getIntOperandFromRegisterString(RegString: RegString->getString());
4254 if (Imm == -1) {
4255 // Use the sysreg mapper to attempt to map the remaining possible strings
4256 // to the value for the register to be used for the MSR (register)
4257 // instruction operand.
4258 auto TheReg = AArch64SysReg::lookupSysRegByName(Name: RegString->getString());
4259 if (TheReg && TheReg->Writeable &&
4260 TheReg->haveFeatures(ActiveFeatures: Subtarget->getFeatureBits()))
4261 Imm = TheReg->Encoding;
4262 else
4263 Imm = AArch64SysReg::parseGenericRegister(Name: RegString->getString());
4264
4265 if (Imm == -1)
4266 return false;
4267 }
4268
4269 SDValue InChain = N->getOperand(Num: 0);
4270 if (!WriteIs128Bit) {
4271 CurDAG->SelectNodeTo(N, MachineOpc: AArch64::MSR, VT: MVT::Other,
4272 Op1: CurDAG->getTargetConstant(Val: Imm, DL, VT: MVT::i32),
4273 Op2: N->getOperand(Num: 2), Op3: InChain);
4274 } else {
4275 // No endian swap. The lower half always goes into the even subreg, and the
4276 // higher half always into the odd supreg.
4277 SDNode *Pair = CurDAG->getMachineNode(
4278 Opcode: TargetOpcode::REG_SEQUENCE, dl: DL, VT: MVT::Untyped /* XSeqPair */,
4279 Ops: {CurDAG->getTargetConstant(Val: AArch64::XSeqPairsClassRegClass.getID(), DL,
4280 VT: MVT::i32),
4281 N->getOperand(Num: 2),
4282 CurDAG->getTargetConstant(Val: AArch64::sube64, DL, VT: MVT::i32),
4283 N->getOperand(Num: 3),
4284 CurDAG->getTargetConstant(Val: AArch64::subo64, DL, VT: MVT::i32)});
4285
4286 CurDAG->SelectNodeTo(N, MachineOpc: AArch64::MSRR, VT: MVT::Other,
4287 Op1: CurDAG->getTargetConstant(Val: Imm, DL, VT: MVT::i32),
4288 Op2: SDValue(Pair, 0), Op3: InChain);
4289 }
4290
4291 return true;
4292}
4293
4294/// We've got special pseudo-instructions for these
4295bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
4296 unsigned Opcode;
4297 EVT MemTy = cast<MemSDNode>(Val: N)->getMemoryVT();
4298
4299 // Leave IR for LSE if subtarget supports it.
4300 if (Subtarget->hasLSE()) return false;
4301
4302 if (MemTy == MVT::i8)
4303 Opcode = AArch64::CMP_SWAP_8;
4304 else if (MemTy == MVT::i16)
4305 Opcode = AArch64::CMP_SWAP_16;
4306 else if (MemTy == MVT::i32)
4307 Opcode = AArch64::CMP_SWAP_32;
4308 else if (MemTy == MVT::i64)
4309 Opcode = AArch64::CMP_SWAP_64;
4310 else
4311 llvm_unreachable("Unknown AtomicCmpSwap type");
4312
4313 MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
4314 SDValue Ops[] = {N->getOperand(Num: 1), N->getOperand(Num: 2), N->getOperand(Num: 3),
4315 N->getOperand(Num: 0)};
4316 SDNode *CmpSwap = CurDAG->getMachineNode(
4317 Opcode, dl: SDLoc(N),
4318 VTs: CurDAG->getVTList(VT1: RegTy, VT2: MVT::i32, VT3: MVT::Other), Ops);
4319
4320 MachineMemOperand *MemOp = cast<MemSDNode>(Val: N)->getMemOperand();
4321 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: CmpSwap), NewMemRefs: {MemOp});
4322
4323 ReplaceUses(F: SDValue(N, 0), T: SDValue(CmpSwap, 0));
4324 ReplaceUses(F: SDValue(N, 1), T: SDValue(CmpSwap, 2));
4325 CurDAG->RemoveDeadNode(N);
4326
4327 return true;
4328}
4329
4330bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm,
4331 SDValue &Shift, bool Negate) {
4332 if (!isa<ConstantSDNode>(Val: N))
4333 return false;
4334
4335 SDLoc DL(N);
4336 APInt Val =
4337 cast<ConstantSDNode>(Val&: N)->getAPIntValue().trunc(width: VT.getFixedSizeInBits());
4338
4339 if (Negate)
4340 Val = -Val;
4341
4342 switch (VT.SimpleTy) {
4343 case MVT::i8:
4344 // All immediates are supported.
4345 Shift = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32);
4346 Imm = CurDAG->getTargetConstant(Val: Val.getZExtValue(), DL, VT: MVT::i32);
4347 return true;
4348 case MVT::i16:
4349 case MVT::i32:
4350 case MVT::i64:
4351 // Support 8bit unsigned immediates.
4352 if ((Val & ~0xff) == 0) {
4353 Shift = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32);
4354 Imm = CurDAG->getTargetConstant(Val: Val.getZExtValue(), DL, VT: MVT::i32);
4355 return true;
4356 }
4357 // Support 16bit unsigned immediates that are a multiple of 256.
4358 if ((Val & ~0xff00) == 0) {
4359 Shift = CurDAG->getTargetConstant(Val: 8, DL, VT: MVT::i32);
4360 Imm = CurDAG->getTargetConstant(Val: Val.lshr(shiftAmt: 8).getZExtValue(), DL, VT: MVT::i32);
4361 return true;
4362 }
4363 break;
4364 default:
4365 break;
4366 }
4367
4368 return false;
4369}
4370
4371bool AArch64DAGToDAGISel::SelectSVEAddSubSSatImm(SDValue N, MVT VT,
4372 SDValue &Imm, SDValue &Shift,
4373 bool Negate) {
4374 if (!isa<ConstantSDNode>(Val: N))
4375 return false;
4376
4377 SDLoc DL(N);
4378 int64_t Val = cast<ConstantSDNode>(Val&: N)
4379 ->getAPIntValue()
4380 .trunc(width: VT.getFixedSizeInBits())
4381 .getSExtValue();
4382
4383 if (Negate)
4384 Val = -Val;
4385
4386 // Signed saturating instructions treat their immediate operand as unsigned,
4387 // whereas the related intrinsics define their operands to be signed. This
4388 // means we can only use the immediate form when the operand is non-negative.
4389 if (Val < 0)
4390 return false;
4391
4392 switch (VT.SimpleTy) {
4393 case MVT::i8:
4394 // All positive immediates are supported.
4395 Shift = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32);
4396 Imm = CurDAG->getTargetConstant(Val, DL, VT: MVT::i32);
4397 return true;
4398 case MVT::i16:
4399 case MVT::i32:
4400 case MVT::i64:
4401 // Support 8bit positive immediates.
4402 if (Val <= 255) {
4403 Shift = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32);
4404 Imm = CurDAG->getTargetConstant(Val, DL, VT: MVT::i32);
4405 return true;
4406 }
4407 // Support 16bit positive immediates that are a multiple of 256.
4408 if (Val <= 65280 && Val % 256 == 0) {
4409 Shift = CurDAG->getTargetConstant(Val: 8, DL, VT: MVT::i32);
4410 Imm = CurDAG->getTargetConstant(Val: Val >> 8, DL, VT: MVT::i32);
4411 return true;
4412 }
4413 break;
4414 default:
4415 break;
4416 }
4417
4418 return false;
4419}
4420
4421bool AArch64DAGToDAGISel::SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm,
4422 SDValue &Shift) {
4423 if (!isa<ConstantSDNode>(Val: N))
4424 return false;
4425
4426 SDLoc DL(N);
4427 int64_t Val = cast<ConstantSDNode>(Val&: N)
4428 ->getAPIntValue()
4429 .trunc(width: VT.getFixedSizeInBits())
4430 .getSExtValue();
4431 int32_t ImmVal, ShiftVal;
4432 if (!AArch64_AM::isSVECpyDupImm(SizeInBits: VT.getScalarSizeInBits(), Val, Imm&: ImmVal,
4433 Shift&: ShiftVal))
4434 return false;
4435
4436 Shift = CurDAG->getTargetConstant(Val: ShiftVal, DL, VT: MVT::i32);
4437 Imm = CurDAG->getTargetConstant(Val: ImmVal, DL, VT: MVT::i32);
4438 return true;
4439}
4440
4441bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
4442 if (auto CNode = dyn_cast<ConstantSDNode>(Val&: N)) {
4443 int64_t ImmVal = CNode->getSExtValue();
4444 SDLoc DL(N);
4445 if (ImmVal >= -128 && ImmVal < 128) {
4446 Imm = CurDAG->getSignedTargetConstant(Val: ImmVal, DL, VT: MVT::i32);
4447 return true;
4448 }
4449 }
4450 return false;
4451}
4452
4453bool AArch64DAGToDAGISel::SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm) {
4454 if (auto CNode = dyn_cast<ConstantSDNode>(Val&: N)) {
4455 uint64_t ImmVal = CNode->getZExtValue();
4456
4457 switch (VT.SimpleTy) {
4458 case MVT::i8:
4459 ImmVal &= 0xFF;
4460 break;
4461 case MVT::i16:
4462 ImmVal &= 0xFFFF;
4463 break;
4464 case MVT::i32:
4465 ImmVal &= 0xFFFFFFFF;
4466 break;
4467 case MVT::i64:
4468 break;
4469 default:
4470 llvm_unreachable("Unexpected type");
4471 }
4472
4473 if (ImmVal < 256) {
4474 Imm = CurDAG->getTargetConstant(Val: ImmVal, DL: SDLoc(N), VT: MVT::i32);
4475 return true;
4476 }
4477 }
4478 return false;
4479}
4480
4481bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm,
4482 bool Invert) {
4483 uint64_t ImmVal;
4484 if (auto CI = dyn_cast<ConstantSDNode>(Val&: N))
4485 ImmVal = CI->getZExtValue();
4486 else if (auto CFP = dyn_cast<ConstantFPSDNode>(Val&: N))
4487 ImmVal = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
4488 else
4489 return false;
4490
4491 if (Invert)
4492 ImmVal = ~ImmVal;
4493
4494 uint64_t encoding;
4495 if (!AArch64_AM::isSVELogicalImm(SizeInBits: VT.getScalarSizeInBits(), ImmVal, Encoding&: encoding))
4496 return false;
4497
4498 Imm = CurDAG->getTargetConstant(Val: encoding, DL: SDLoc(N), VT: MVT::i64);
4499 return true;
4500}
4501
4502// SVE shift intrinsics allow shift amounts larger than the element's bitwidth.
4503// Rather than attempt to normalise everything we can sometimes saturate the
4504// shift amount during selection. This function also allows for consistent
4505// isel patterns by ensuring the resulting "Imm" node is of the i32 type
4506// required by the instructions.
4507bool AArch64DAGToDAGISel::SelectSVEShiftImm(SDValue N, uint64_t Low,
4508 uint64_t High, bool AllowSaturation,
4509 SDValue &Imm) {
4510 if (auto *CN = dyn_cast<ConstantSDNode>(Val&: N)) {
4511 uint64_t ImmVal = CN->getZExtValue();
4512
4513 // Reject shift amounts that are too small.
4514 if (ImmVal < Low)
4515 return false;
4516
4517 // Reject or saturate shift amounts that are too big.
4518 if (ImmVal > High) {
4519 if (!AllowSaturation)
4520 return false;
4521 ImmVal = High;
4522 }
4523
4524 Imm = CurDAG->getTargetConstant(Val: ImmVal, DL: SDLoc(N), VT: MVT::i32);
4525 return true;
4526 }
4527
4528 return false;
4529}
4530
4531bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
4532 // tagp(FrameIndex, IRGstack, tag_offset):
4533 // since the offset between FrameIndex and IRGstack is a compile-time
4534 // constant, this can be lowered to a single ADDG instruction.
4535 if (!(isa<FrameIndexSDNode>(Val: N->getOperand(Num: 1)))) {
4536 return false;
4537 }
4538
4539 SDValue IRG_SP = N->getOperand(Num: 2);
4540 if (IRG_SP->getOpcode() != ISD::INTRINSIC_W_CHAIN ||
4541 IRG_SP->getConstantOperandVal(Num: 1) != Intrinsic::aarch64_irg_sp) {
4542 return false;
4543 }
4544
4545 const TargetLowering *TLI = getTargetLowering();
4546 SDLoc DL(N);
4547 int FI = cast<FrameIndexSDNode>(Val: N->getOperand(Num: 1))->getIndex();
4548 SDValue FiOp = CurDAG->getTargetFrameIndex(
4549 FI, VT: TLI->getPointerTy(DL: CurDAG->getDataLayout()));
4550 int TagOffset = N->getConstantOperandVal(Num: 3);
4551
4552 SDNode *Out = CurDAG->getMachineNode(
4553 Opcode: AArch64::TAGPstack, dl: DL, VT: MVT::i64,
4554 Ops: {FiOp, CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64), N->getOperand(Num: 2),
4555 CurDAG->getTargetConstant(Val: TagOffset, DL, VT: MVT::i64)});
4556 ReplaceNode(F: N, T: Out);
4557 return true;
4558}
4559
4560void AArch64DAGToDAGISel::SelectTagP(SDNode *N) {
4561 assert(isa<ConstantSDNode>(N->getOperand(3)) &&
4562 "llvm.aarch64.tagp third argument must be an immediate");
4563 if (trySelectStackSlotTagP(N))
4564 return;
4565 // FIXME: above applies in any case when offset between Op1 and Op2 is a
4566 // compile-time constant, not just for stack allocations.
4567
4568 // General case for unrelated pointers in Op1 and Op2.
4569 SDLoc DL(N);
4570 int TagOffset = N->getConstantOperandVal(Num: 3);
4571 SDNode *N1 = CurDAG->getMachineNode(Opcode: AArch64::SUBP, dl: DL, VT: MVT::i64,
4572 Ops: {N->getOperand(Num: 1), N->getOperand(Num: 2)});
4573 SDNode *N2 = CurDAG->getMachineNode(Opcode: AArch64::ADDXrr, dl: DL, VT: MVT::i64,
4574 Ops: {SDValue(N1, 0), N->getOperand(Num: 2)});
4575 SDNode *N3 = CurDAG->getMachineNode(
4576 Opcode: AArch64::ADDG, dl: DL, VT: MVT::i64,
4577 Ops: {SDValue(N2, 0), CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64),
4578 CurDAG->getTargetConstant(Val: TagOffset, DL, VT: MVT::i64)});
4579 ReplaceNode(F: N, T: N3);
4580}
4581
4582bool AArch64DAGToDAGISel::trySelectCastFixedLengthToScalableVector(SDNode *N) {
4583 assert(N->getOpcode() == ISD::INSERT_SUBVECTOR && "Invalid Node!");
4584
4585 // Bail when not a "cast" like insert_subvector.
4586 if (N->getConstantOperandVal(Num: 2) != 0)
4587 return false;
4588 if (!N->getOperand(Num: 0).isUndef())
4589 return false;
4590
4591 // Bail when normal isel should do the job.
4592 EVT VT = N->getValueType(ResNo: 0);
4593 EVT InVT = N->getOperand(Num: 1).getValueType();
4594 if (VT.isFixedLengthVector() || InVT.isScalableVector())
4595 return false;
4596 if (InVT.getSizeInBits() <= 128)
4597 return false;
4598
4599 // NOTE: We can only get here when doing fixed length SVE code generation.
4600 // We do manual selection because the types involved are not linked to real
4601 // registers (despite being legal) and must be coerced into SVE registers.
4602
4603 assert(VT.getSizeInBits().getKnownMinValue() == AArch64::SVEBitsPerBlock &&
4604 "Expected to insert into a packed scalable vector!");
4605
4606 SDLoc DL(N);
4607 auto RC = CurDAG->getTargetConstant(Val: AArch64::ZPRRegClassID, DL, VT: MVT::i64);
4608 ReplaceNode(F: N, T: CurDAG->getMachineNode(Opcode: TargetOpcode::COPY_TO_REGCLASS, dl: DL, VT,
4609 Op1: N->getOperand(Num: 1), Op2: RC));
4610 return true;
4611}
4612
4613bool AArch64DAGToDAGISel::trySelectCastScalableToFixedLengthVector(SDNode *N) {
4614 assert(N->getOpcode() == ISD::EXTRACT_SUBVECTOR && "Invalid Node!");
4615
4616 // Bail when not a "cast" like extract_subvector.
4617 if (N->getConstantOperandVal(Num: 1) != 0)
4618 return false;
4619
4620 // Bail when normal isel can do the job.
4621 EVT VT = N->getValueType(ResNo: 0);
4622 EVT InVT = N->getOperand(Num: 0).getValueType();
4623 if (VT.isScalableVector() || InVT.isFixedLengthVector())
4624 return false;
4625 if (VT.getSizeInBits() <= 128)
4626 return false;
4627
4628 // NOTE: We can only get here when doing fixed length SVE code generation.
4629 // We do manual selection because the types involved are not linked to real
4630 // registers (despite being legal) and must be coerced into SVE registers.
4631
4632 assert(InVT.getSizeInBits().getKnownMinValue() == AArch64::SVEBitsPerBlock &&
4633 "Expected to extract from a packed scalable vector!");
4634
4635 SDLoc DL(N);
4636 auto RC = CurDAG->getTargetConstant(Val: AArch64::ZPRRegClassID, DL, VT: MVT::i64);
4637 ReplaceNode(F: N, T: CurDAG->getMachineNode(Opcode: TargetOpcode::COPY_TO_REGCLASS, dl: DL, VT,
4638 Op1: N->getOperand(Num: 0), Op2: RC));
4639 return true;
4640}
4641
4642bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {
4643 assert(N->getOpcode() == ISD::OR && "Expected OR instruction");
4644
4645 SDValue N0 = N->getOperand(Num: 0);
4646 SDValue N1 = N->getOperand(Num: 1);
4647
4648 EVT VT = N->getValueType(ResNo: 0);
4649 SDLoc DL(N);
4650
4651 // Essentially: rotr (xor(x, y), imm) -> xar (x, y, imm)
4652 // Rotate by a constant is a funnel shift in IR which is exanded to
4653 // an OR with shifted operands.
4654 // We do the following transform:
4655 // OR N0, N1 -> xar (x, y, imm)
4656 // Where:
4657 // N1 = SRL_PRED true, V, splat(imm) --> rotr amount
4658 // N0 = SHL_PRED true, V, splat(bits-imm)
4659 // V = (xor x, y)
4660 if (VT.isScalableVector() &&
4661 (Subtarget->hasSVE2() ||
4662 (Subtarget->hasSME() && Subtarget->isStreaming()))) {
4663 if (N0.getOpcode() != AArch64ISD::SHL_PRED ||
4664 N1.getOpcode() != AArch64ISD::SRL_PRED)
4665 std::swap(a&: N0, b&: N1);
4666 if (N0.getOpcode() != AArch64ISD::SHL_PRED ||
4667 N1.getOpcode() != AArch64ISD::SRL_PRED)
4668 return false;
4669
4670 auto *TLI = static_cast<const AArch64TargetLowering *>(getTargetLowering());
4671 if (!TLI->isAllActivePredicate(DAG&: *CurDAG, N: N0.getOperand(i: 0)) ||
4672 !TLI->isAllActivePredicate(DAG&: *CurDAG, N: N1.getOperand(i: 0)))
4673 return false;
4674
4675 if (N0.getOperand(i: 1) != N1.getOperand(i: 1))
4676 return false;
4677
4678 SDValue R1, R2;
4679 bool IsXOROperand = true;
4680 if (N0.getOperand(i: 1).getOpcode() != ISD::XOR) {
4681 IsXOROperand = false;
4682 } else {
4683 R1 = N0.getOperand(i: 1).getOperand(i: 0);
4684 R2 = N1.getOperand(i: 1).getOperand(i: 1);
4685 }
4686
4687 APInt ShlAmt, ShrAmt;
4688 if (!ISD::isConstantSplatVector(N: N0.getOperand(i: 2).getNode(), SplatValue&: ShlAmt) ||
4689 !ISD::isConstantSplatVector(N: N1.getOperand(i: 2).getNode(), SplatValue&: ShrAmt))
4690 return false;
4691
4692 if (ShlAmt + ShrAmt != VT.getScalarSizeInBits())
4693 return false;
4694
4695 if (!IsXOROperand) {
4696 SDValue Zero = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64);
4697 SDNode *MOV = CurDAG->getMachineNode(Opcode: AArch64::MOVIv2d_ns, dl: DL, VT, Op1: Zero);
4698 SDValue MOVIV = SDValue(MOV, 0);
4699
4700 SDValue ZSub = CurDAG->getTargetConstant(Val: AArch64::zsub, DL, VT: MVT::i32);
4701 SDNode *SubRegToReg =
4702 CurDAG->getMachineNode(Opcode: AArch64::SUBREG_TO_REG, dl: DL, VT, Op1: MOVIV, Op2: ZSub);
4703
4704 R1 = N1->getOperand(Num: 1);
4705 R2 = SDValue(SubRegToReg, 0);
4706 }
4707
4708 SDValue Imm =
4709 CurDAG->getTargetConstant(Val: ShrAmt.getZExtValue(), DL, VT: MVT::i32);
4710
4711 SDValue Ops[] = {R1, R2, Imm};
4712 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::Int>(
4713 VT, Opcodes: {AArch64::XAR_ZZZI_B, AArch64::XAR_ZZZI_H, AArch64::XAR_ZZZI_S,
4714 AArch64::XAR_ZZZI_D})) {
4715 CurDAG->SelectNodeTo(N, MachineOpc: Opc, VT, Ops);
4716 return true;
4717 }
4718 return false;
4719 }
4720
4721 // We have Neon SHA3 XAR operation for v2i64 but for types
4722 // v4i32, v8i16, v16i8 we can use SVE operations when SVE2-SHA3
4723 // is available.
4724 EVT SVT;
4725 switch (VT.getSimpleVT().SimpleTy) {
4726 case MVT::v4i32:
4727 case MVT::v2i32:
4728 SVT = MVT::nxv4i32;
4729 break;
4730 case MVT::v8i16:
4731 case MVT::v4i16:
4732 SVT = MVT::nxv8i16;
4733 break;
4734 case MVT::v16i8:
4735 case MVT::v8i8:
4736 SVT = MVT::nxv16i8;
4737 break;
4738 case MVT::v2i64:
4739 case MVT::v1i64:
4740 SVT = Subtarget->hasSHA3() ? MVT::v2i64 : MVT::nxv2i64;
4741 break;
4742 default:
4743 return false;
4744 }
4745
4746 if ((!SVT.isScalableVector() && !Subtarget->hasSHA3()) ||
4747 (SVT.isScalableVector() && !Subtarget->hasSVE2()))
4748 return false;
4749
4750 if (N0->getOpcode() != AArch64ISD::VSHL ||
4751 N1->getOpcode() != AArch64ISD::VLSHR)
4752 return false;
4753
4754 if (N0->getOperand(Num: 0) != N1->getOperand(Num: 0))
4755 return false;
4756
4757 SDValue R1, R2;
4758 bool IsXOROperand = true;
4759 if (N1->getOperand(Num: 0)->getOpcode() != ISD::XOR) {
4760 IsXOROperand = false;
4761 } else {
4762 SDValue XOR = N0.getOperand(i: 0);
4763 R1 = XOR.getOperand(i: 0);
4764 R2 = XOR.getOperand(i: 1);
4765 }
4766
4767 unsigned HsAmt = N0.getConstantOperandVal(i: 1);
4768 unsigned ShAmt = N1.getConstantOperandVal(i: 1);
4769
4770 SDValue Imm = CurDAG->getTargetConstant(
4771 Val: ShAmt, DL, VT: N0.getOperand(i: 1).getValueType(), isOpaque: false);
4772
4773 unsigned VTSizeInBits = VT.getScalarSizeInBits();
4774 if (ShAmt + HsAmt != VTSizeInBits)
4775 return false;
4776
4777 if (!IsXOROperand) {
4778 SDValue Zero = CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i64);
4779 SDNode *MOV =
4780 CurDAG->getMachineNode(Opcode: AArch64::MOVIv2d_ns, dl: DL, VT: MVT::v2i64, Op1: Zero);
4781 SDValue MOVIV = SDValue(MOV, 0);
4782
4783 R1 = N1->getOperand(Num: 0);
4784 R2 = MOVIV;
4785 }
4786
4787 if (SVT != VT) {
4788 SDValue Undef =
4789 SDValue(CurDAG->getMachineNode(Opcode: TargetOpcode::IMPLICIT_DEF, dl: DL, VT: SVT), 0);
4790
4791 if (SVT.isScalableVector() && VT.is64BitVector()) {
4792 EVT QVT = VT.getDoubleNumVectorElementsVT(Context&: *CurDAG->getContext());
4793
4794 SDValue UndefQ = SDValue(
4795 CurDAG->getMachineNode(Opcode: TargetOpcode::IMPLICIT_DEF, dl: DL, VT: QVT), 0);
4796 SDValue DSub = CurDAG->getTargetConstant(Val: AArch64::dsub, DL, VT: MVT::i32);
4797
4798 R1 = SDValue(CurDAG->getMachineNode(Opcode: AArch64::INSERT_SUBREG, dl: DL, VT: QVT,
4799 Op1: UndefQ, Op2: R1, Op3: DSub),
4800 0);
4801 if (R2.getValueType() == VT)
4802 R2 = SDValue(CurDAG->getMachineNode(Opcode: AArch64::INSERT_SUBREG, dl: DL, VT: QVT,
4803 Op1: UndefQ, Op2: R2, Op3: DSub),
4804 0);
4805 }
4806
4807 SDValue SubReg = CurDAG->getTargetConstant(
4808 Val: (SVT.isScalableVector() ? AArch64::zsub : AArch64::dsub), DL, VT: MVT::i32);
4809
4810 R1 = SDValue(CurDAG->getMachineNode(Opcode: AArch64::INSERT_SUBREG, dl: DL, VT: SVT, Op1: Undef,
4811 Op2: R1, Op3: SubReg),
4812 0);
4813
4814 if (SVT.isScalableVector() || R2.getValueType() != SVT)
4815 R2 = SDValue(CurDAG->getMachineNode(Opcode: AArch64::INSERT_SUBREG, dl: DL, VT: SVT,
4816 Op1: Undef, Op2: R2, Op3: SubReg),
4817 0);
4818 }
4819
4820 SDValue Ops[] = {R1, R2, Imm};
4821 SDNode *XAR = nullptr;
4822
4823 if (SVT.isScalableVector()) {
4824 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::Int>(
4825 VT: SVT, Opcodes: {AArch64::XAR_ZZZI_B, AArch64::XAR_ZZZI_H, AArch64::XAR_ZZZI_S,
4826 AArch64::XAR_ZZZI_D}))
4827 XAR = CurDAG->getMachineNode(Opcode: Opc, dl: DL, VT: SVT, Ops);
4828 } else {
4829 XAR = CurDAG->getMachineNode(Opcode: AArch64::XAR, dl: DL, VT: SVT, Ops);
4830 }
4831
4832 assert(XAR && "Unexpected NULL value for XAR instruction in DAG");
4833
4834 if (SVT != VT) {
4835 if (VT.is64BitVector() && SVT.isScalableVector()) {
4836 EVT QVT = VT.getDoubleNumVectorElementsVT(Context&: *CurDAG->getContext());
4837
4838 SDValue ZSub = CurDAG->getTargetConstant(Val: AArch64::zsub, DL, VT: MVT::i32);
4839 SDNode *Q = CurDAG->getMachineNode(Opcode: AArch64::EXTRACT_SUBREG, dl: DL, VT: QVT,
4840 Op1: SDValue(XAR, 0), Op2: ZSub);
4841
4842 SDValue DSub = CurDAG->getTargetConstant(Val: AArch64::dsub, DL, VT: MVT::i32);
4843 XAR = CurDAG->getMachineNode(Opcode: AArch64::EXTRACT_SUBREG, dl: DL, VT,
4844 Op1: SDValue(Q, 0), Op2: DSub);
4845 } else {
4846 SDValue SubReg = CurDAG->getTargetConstant(
4847 Val: (SVT.isScalableVector() ? AArch64::zsub : AArch64::dsub), DL,
4848 VT: MVT::i32);
4849 XAR = CurDAG->getMachineNode(Opcode: AArch64::EXTRACT_SUBREG, dl: DL, VT,
4850 Op1: SDValue(XAR, 0), Op2: SubReg);
4851 }
4852 }
4853 ReplaceNode(F: N, T: XAR);
4854 return true;
4855}
4856
4857void AArch64DAGToDAGISel::Select(SDNode *Node) {
4858 // If we have a custom node, we already have selected!
4859 if (Node->isMachineOpcode()) {
4860 LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
4861 Node->setNodeId(-1);
4862 return;
4863 }
4864
4865 // Few custom selection stuff.
4866 EVT VT = Node->getValueType(ResNo: 0);
4867
4868 switch (Node->getOpcode()) {
4869 default:
4870 break;
4871
4872 case ISD::ATOMIC_CMP_SWAP:
4873 if (SelectCMP_SWAP(N: Node))
4874 return;
4875 break;
4876
4877 case ISD::READ_REGISTER:
4878 case AArch64ISD::MRRS:
4879 if (tryReadRegister(N: Node))
4880 return;
4881 break;
4882
4883 case ISD::WRITE_REGISTER:
4884 case AArch64ISD::MSRR:
4885 if (tryWriteRegister(N: Node))
4886 return;
4887 break;
4888
4889 case ISD::LOAD: {
4890 // Try to select as an indexed load. Fall through to normal processing
4891 // if we can't.
4892 if (tryIndexedLoad(N: Node))
4893 return;
4894 break;
4895 }
4896
4897 case ISD::SRL:
4898 case ISD::AND:
4899 case ISD::SRA:
4900 case ISD::SIGN_EXTEND_INREG:
4901 if (tryBitfieldExtractOp(N: Node))
4902 return;
4903 if (tryBitfieldInsertInZeroOp(N: Node))
4904 return;
4905 [[fallthrough]];
4906 case ISD::ROTR:
4907 case ISD::SHL:
4908 if (tryShiftAmountMod(N: Node))
4909 return;
4910 break;
4911
4912 case ISD::SIGN_EXTEND:
4913 if (tryBitfieldExtractOpFromSExt(N: Node))
4914 return;
4915 break;
4916
4917 case ISD::OR:
4918 if (tryBitfieldInsertOp(N: Node))
4919 return;
4920 if (trySelectXAR(N: Node))
4921 return;
4922 break;
4923
4924 case ISD::EXTRACT_SUBVECTOR: {
4925 if (trySelectCastScalableToFixedLengthVector(N: Node))
4926 return;
4927 break;
4928 }
4929
4930 case ISD::INSERT_SUBVECTOR: {
4931 if (trySelectCastFixedLengthToScalableVector(N: Node))
4932 return;
4933 break;
4934 }
4935
4936 case ISD::Constant: {
4937 // Materialize zero constants as copies from WZR/XZR. This allows
4938 // the coalescer to propagate these into other instructions.
4939 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Val: Node);
4940 if (ConstNode->isZero()) {
4941 if (VT == MVT::i32) {
4942 SDValue New = CurDAG->getCopyFromReg(
4943 Chain: CurDAG->getEntryNode(), dl: SDLoc(Node), Reg: AArch64::WZR, VT: MVT::i32);
4944 ReplaceNode(F: Node, T: New.getNode());
4945 return;
4946 } else if (VT == MVT::i64) {
4947 SDValue New = CurDAG->getCopyFromReg(
4948 Chain: CurDAG->getEntryNode(), dl: SDLoc(Node), Reg: AArch64::XZR, VT: MVT::i64);
4949 ReplaceNode(F: Node, T: New.getNode());
4950 return;
4951 }
4952 }
4953 break;
4954 }
4955
4956 case ISD::FrameIndex: {
4957 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
4958 int FI = cast<FrameIndexSDNode>(Val: Node)->getIndex();
4959 unsigned Shifter = AArch64_AM::getShifterImm(ST: AArch64_AM::LSL, Imm: 0);
4960 const TargetLowering *TLI = getTargetLowering();
4961 SDValue TFI = CurDAG->getTargetFrameIndex(
4962 FI, VT: TLI->getPointerTy(DL: CurDAG->getDataLayout()));
4963 SDLoc DL(Node);
4964 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32),
4965 CurDAG->getTargetConstant(Val: Shifter, DL, VT: MVT::i32) };
4966 CurDAG->SelectNodeTo(N: Node, MachineOpc: AArch64::ADDXri, VT: MVT::i64, Ops);
4967 return;
4968 }
4969 case ISD::INTRINSIC_W_CHAIN: {
4970 unsigned IntNo = Node->getConstantOperandVal(Num: 1);
4971 switch (IntNo) {
4972 default:
4973 break;
4974 case Intrinsic::aarch64_gcsss: {
4975 SDLoc DL(Node);
4976 SDValue Chain = Node->getOperand(Num: 0);
4977 SDValue Val = Node->getOperand(Num: 2);
4978 SDValue Zero = CurDAG->getCopyFromReg(Chain, dl: DL, Reg: AArch64::XZR, VT: MVT::i64);
4979 SDNode *SS1 =
4980 CurDAG->getMachineNode(Opcode: AArch64::GCSSS1, dl: DL, VT: MVT::Other, Op1: Val, Op2: Chain);
4981 SDNode *SS2 = CurDAG->getMachineNode(Opcode: AArch64::GCSSS2, dl: DL, VT1: MVT::i64,
4982 VT2: MVT::Other, Op1: Zero, Op2: SDValue(SS1, 0));
4983 ReplaceNode(F: Node, T: SS2);
4984 return;
4985 }
4986 case Intrinsic::aarch64_ldaxp:
4987 case Intrinsic::aarch64_ldxp: {
4988 unsigned Op =
4989 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
4990 SDValue MemAddr = Node->getOperand(Num: 2);
4991 SDLoc DL(Node);
4992 SDValue Chain = Node->getOperand(Num: 0);
4993
4994 SDNode *Ld = CurDAG->getMachineNode(Opcode: Op, dl: DL, VT1: MVT::i64, VT2: MVT::i64,
4995 VT3: MVT::Other, Op1: MemAddr, Op2: Chain);
4996
4997 // Transfer memoperands.
4998 MachineMemOperand *MemOp =
4999 cast<MemIntrinsicSDNode>(Val: Node)->getMemOperand();
5000 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: Ld), NewMemRefs: {MemOp});
5001 ReplaceNode(F: Node, T: Ld);
5002 return;
5003 }
5004 case Intrinsic::aarch64_stlxp:
5005 case Intrinsic::aarch64_stxp: {
5006 unsigned Op =
5007 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
5008 SDLoc DL(Node);
5009 SDValue Chain = Node->getOperand(Num: 0);
5010 SDValue ValLo = Node->getOperand(Num: 2);
5011 SDValue ValHi = Node->getOperand(Num: 3);
5012 SDValue MemAddr = Node->getOperand(Num: 4);
5013
5014 // Place arguments in the right order.
5015 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
5016
5017 SDNode *St = CurDAG->getMachineNode(Opcode: Op, dl: DL, VT1: MVT::i32, VT2: MVT::Other, Ops);
5018 // Transfer memoperands.
5019 MachineMemOperand *MemOp =
5020 cast<MemIntrinsicSDNode>(Val: Node)->getMemOperand();
5021 CurDAG->setNodeMemRefs(N: cast<MachineSDNode>(Val: St), NewMemRefs: {MemOp});
5022
5023 ReplaceNode(F: Node, T: St);
5024 return;
5025 }
5026 case Intrinsic::aarch64_neon_ld1x2:
5027 if (VT == MVT::v8i8) {
5028 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov8b, SubRegIdx: AArch64::dsub0);
5029 return;
5030 } else if (VT == MVT::v16i8) {
5031 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov16b, SubRegIdx: AArch64::qsub0);
5032 return;
5033 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5034 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov4h, SubRegIdx: AArch64::dsub0);
5035 return;
5036 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5037 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov8h, SubRegIdx: AArch64::qsub0);
5038 return;
5039 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5040 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov2s, SubRegIdx: AArch64::dsub0);
5041 return;
5042 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5043 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov4s, SubRegIdx: AArch64::qsub0);
5044 return;
5045 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5046 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov1d, SubRegIdx: AArch64::dsub0);
5047 return;
5048 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5049 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov2d, SubRegIdx: AArch64::qsub0);
5050 return;
5051 }
5052 break;
5053 case Intrinsic::aarch64_neon_ld1x3:
5054 if (VT == MVT::v8i8) {
5055 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev8b, SubRegIdx: AArch64::dsub0);
5056 return;
5057 } else if (VT == MVT::v16i8) {
5058 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev16b, SubRegIdx: AArch64::qsub0);
5059 return;
5060 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5061 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev4h, SubRegIdx: AArch64::dsub0);
5062 return;
5063 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5064 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev8h, SubRegIdx: AArch64::qsub0);
5065 return;
5066 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5067 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev2s, SubRegIdx: AArch64::dsub0);
5068 return;
5069 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5070 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev4s, SubRegIdx: AArch64::qsub0);
5071 return;
5072 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5073 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev1d, SubRegIdx: AArch64::dsub0);
5074 return;
5075 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5076 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev2d, SubRegIdx: AArch64::qsub0);
5077 return;
5078 }
5079 break;
5080 case Intrinsic::aarch64_neon_ld1x4:
5081 if (VT == MVT::v8i8) {
5082 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv8b, SubRegIdx: AArch64::dsub0);
5083 return;
5084 } else if (VT == MVT::v16i8) {
5085 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv16b, SubRegIdx: AArch64::qsub0);
5086 return;
5087 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5088 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv4h, SubRegIdx: AArch64::dsub0);
5089 return;
5090 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5091 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv8h, SubRegIdx: AArch64::qsub0);
5092 return;
5093 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5094 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv2s, SubRegIdx: AArch64::dsub0);
5095 return;
5096 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5097 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv4s, SubRegIdx: AArch64::qsub0);
5098 return;
5099 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5100 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv1d, SubRegIdx: AArch64::dsub0);
5101 return;
5102 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5103 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv2d, SubRegIdx: AArch64::qsub0);
5104 return;
5105 }
5106 break;
5107 case Intrinsic::aarch64_neon_ld2:
5108 if (VT == MVT::v8i8) {
5109 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov8b, SubRegIdx: AArch64::dsub0);
5110 return;
5111 } else if (VT == MVT::v16i8) {
5112 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov16b, SubRegIdx: AArch64::qsub0);
5113 return;
5114 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5115 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov4h, SubRegIdx: AArch64::dsub0);
5116 return;
5117 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5118 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov8h, SubRegIdx: AArch64::qsub0);
5119 return;
5120 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5121 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov2s, SubRegIdx: AArch64::dsub0);
5122 return;
5123 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5124 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov4s, SubRegIdx: AArch64::qsub0);
5125 return;
5126 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5127 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov1d, SubRegIdx: AArch64::dsub0);
5128 return;
5129 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5130 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov2d, SubRegIdx: AArch64::qsub0);
5131 return;
5132 }
5133 break;
5134 case Intrinsic::aarch64_neon_ld3:
5135 if (VT == MVT::v8i8) {
5136 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev8b, SubRegIdx: AArch64::dsub0);
5137 return;
5138 } else if (VT == MVT::v16i8) {
5139 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev16b, SubRegIdx: AArch64::qsub0);
5140 return;
5141 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5142 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev4h, SubRegIdx: AArch64::dsub0);
5143 return;
5144 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5145 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev8h, SubRegIdx: AArch64::qsub0);
5146 return;
5147 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5148 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev2s, SubRegIdx: AArch64::dsub0);
5149 return;
5150 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5151 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev4s, SubRegIdx: AArch64::qsub0);
5152 return;
5153 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5154 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev1d, SubRegIdx: AArch64::dsub0);
5155 return;
5156 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5157 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev2d, SubRegIdx: AArch64::qsub0);
5158 return;
5159 }
5160 break;
5161 case Intrinsic::aarch64_neon_ld4:
5162 if (VT == MVT::v8i8) {
5163 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv8b, SubRegIdx: AArch64::dsub0);
5164 return;
5165 } else if (VT == MVT::v16i8) {
5166 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv16b, SubRegIdx: AArch64::qsub0);
5167 return;
5168 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5169 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv4h, SubRegIdx: AArch64::dsub0);
5170 return;
5171 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5172 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv8h, SubRegIdx: AArch64::qsub0);
5173 return;
5174 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5175 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv2s, SubRegIdx: AArch64::dsub0);
5176 return;
5177 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5178 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv4s, SubRegIdx: AArch64::qsub0);
5179 return;
5180 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5181 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv1d, SubRegIdx: AArch64::dsub0);
5182 return;
5183 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5184 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv2d, SubRegIdx: AArch64::qsub0);
5185 return;
5186 }
5187 break;
5188 case Intrinsic::aarch64_neon_ld2r:
5189 if (VT == MVT::v8i8) {
5190 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv8b, SubRegIdx: AArch64::dsub0);
5191 return;
5192 } else if (VT == MVT::v16i8) {
5193 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv16b, SubRegIdx: AArch64::qsub0);
5194 return;
5195 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5196 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv4h, SubRegIdx: AArch64::dsub0);
5197 return;
5198 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5199 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv8h, SubRegIdx: AArch64::qsub0);
5200 return;
5201 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5202 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv2s, SubRegIdx: AArch64::dsub0);
5203 return;
5204 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5205 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv4s, SubRegIdx: AArch64::qsub0);
5206 return;
5207 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5208 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv1d, SubRegIdx: AArch64::dsub0);
5209 return;
5210 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5211 SelectLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv2d, SubRegIdx: AArch64::qsub0);
5212 return;
5213 }
5214 break;
5215 case Intrinsic::aarch64_neon_ld3r:
5216 if (VT == MVT::v8i8) {
5217 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv8b, SubRegIdx: AArch64::dsub0);
5218 return;
5219 } else if (VT == MVT::v16i8) {
5220 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv16b, SubRegIdx: AArch64::qsub0);
5221 return;
5222 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5223 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv4h, SubRegIdx: AArch64::dsub0);
5224 return;
5225 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5226 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv8h, SubRegIdx: AArch64::qsub0);
5227 return;
5228 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5229 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv2s, SubRegIdx: AArch64::dsub0);
5230 return;
5231 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5232 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv4s, SubRegIdx: AArch64::qsub0);
5233 return;
5234 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5235 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv1d, SubRegIdx: AArch64::dsub0);
5236 return;
5237 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5238 SelectLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv2d, SubRegIdx: AArch64::qsub0);
5239 return;
5240 }
5241 break;
5242 case Intrinsic::aarch64_neon_ld4r:
5243 if (VT == MVT::v8i8) {
5244 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv8b, SubRegIdx: AArch64::dsub0);
5245 return;
5246 } else if (VT == MVT::v16i8) {
5247 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv16b, SubRegIdx: AArch64::qsub0);
5248 return;
5249 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
5250 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv4h, SubRegIdx: AArch64::dsub0);
5251 return;
5252 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
5253 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv8h, SubRegIdx: AArch64::qsub0);
5254 return;
5255 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
5256 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv2s, SubRegIdx: AArch64::dsub0);
5257 return;
5258 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
5259 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv4s, SubRegIdx: AArch64::qsub0);
5260 return;
5261 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
5262 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv1d, SubRegIdx: AArch64::dsub0);
5263 return;
5264 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
5265 SelectLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv2d, SubRegIdx: AArch64::qsub0);
5266 return;
5267 }
5268 break;
5269 case Intrinsic::aarch64_neon_ld2lane:
5270 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
5271 SelectLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i8);
5272 return;
5273 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
5274 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
5275 SelectLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i16);
5276 return;
5277 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
5278 VT == MVT::v2f32) {
5279 SelectLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i32);
5280 return;
5281 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
5282 VT == MVT::v1f64) {
5283 SelectLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i64);
5284 return;
5285 }
5286 break;
5287 case Intrinsic::aarch64_neon_ld3lane:
5288 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
5289 SelectLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i8);
5290 return;
5291 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
5292 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
5293 SelectLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i16);
5294 return;
5295 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
5296 VT == MVT::v2f32) {
5297 SelectLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i32);
5298 return;
5299 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
5300 VT == MVT::v1f64) {
5301 SelectLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i64);
5302 return;
5303 }
5304 break;
5305 case Intrinsic::aarch64_neon_ld4lane:
5306 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
5307 SelectLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i8);
5308 return;
5309 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
5310 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
5311 SelectLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i16);
5312 return;
5313 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
5314 VT == MVT::v2f32) {
5315 SelectLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i32);
5316 return;
5317 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
5318 VT == MVT::v1f64) {
5319 SelectLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i64);
5320 return;
5321 }
5322 break;
5323 case Intrinsic::aarch64_ld64b:
5324 SelectLoad(N: Node, NumVecs: 8, Opc: AArch64::LD64B, SubRegIdx: AArch64::x8sub_0);
5325 return;
5326 case Intrinsic::aarch64_sve_ld2q_sret: {
5327 SelectPredicatedLoad(N: Node, NumVecs: 2, Scale: 4, Opc_ri: AArch64::LD2Q_IMM, Opc_rr: AArch64::LD2Q, IsIntr: true);
5328 return;
5329 }
5330 case Intrinsic::aarch64_sve_ld3q_sret: {
5331 SelectPredicatedLoad(N: Node, NumVecs: 3, Scale: 4, Opc_ri: AArch64::LD3Q_IMM, Opc_rr: AArch64::LD3Q, IsIntr: true);
5332 return;
5333 }
5334 case Intrinsic::aarch64_sve_ld4q_sret: {
5335 SelectPredicatedLoad(N: Node, NumVecs: 4, Scale: 4, Opc_ri: AArch64::LD4Q_IMM, Opc_rr: AArch64::LD4Q, IsIntr: true);
5336 return;
5337 }
5338 case Intrinsic::aarch64_sve_ld2_sret: {
5339 if (VT == MVT::nxv16i8) {
5340 SelectPredicatedLoad(N: Node, NumVecs: 2, Scale: 0, Opc_ri: AArch64::LD2B_IMM, Opc_rr: AArch64::LD2B,
5341 IsIntr: true);
5342 return;
5343 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5344 VT == MVT::nxv8bf16) {
5345 SelectPredicatedLoad(N: Node, NumVecs: 2, Scale: 1, Opc_ri: AArch64::LD2H_IMM, Opc_rr: AArch64::LD2H,
5346 IsIntr: true);
5347 return;
5348 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5349 SelectPredicatedLoad(N: Node, NumVecs: 2, Scale: 2, Opc_ri: AArch64::LD2W_IMM, Opc_rr: AArch64::LD2W,
5350 IsIntr: true);
5351 return;
5352 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5353 SelectPredicatedLoad(N: Node, NumVecs: 2, Scale: 3, Opc_ri: AArch64::LD2D_IMM, Opc_rr: AArch64::LD2D,
5354 IsIntr: true);
5355 return;
5356 }
5357 break;
5358 }
5359 case Intrinsic::aarch64_sve_ld1_pn_x2: {
5360 if (VT == MVT::nxv16i8) {
5361 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5362 SelectContiguousMultiVectorLoad(
5363 N: Node, NumVecs: 2, Scale: 0, Opc_ri: AArch64::LD1B_2Z_IMM_PSEUDO, Opc_rr: AArch64::LD1B_2Z_PSEUDO);
5364 else if (Subtarget->hasSVE2p1())
5365 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 0, Opc_ri: AArch64::LD1B_2Z_IMM,
5366 Opc_rr: AArch64::LD1B_2Z);
5367 else
5368 break;
5369 return;
5370 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5371 VT == MVT::nxv8bf16) {
5372 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5373 SelectContiguousMultiVectorLoad(
5374 N: Node, NumVecs: 2, Scale: 1, Opc_ri: AArch64::LD1H_2Z_IMM_PSEUDO, Opc_rr: AArch64::LD1H_2Z_PSEUDO);
5375 else if (Subtarget->hasSVE2p1())
5376 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 1, Opc_ri: AArch64::LD1H_2Z_IMM,
5377 Opc_rr: AArch64::LD1H_2Z);
5378 else
5379 break;
5380 return;
5381 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5382 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5383 SelectContiguousMultiVectorLoad(
5384 N: Node, NumVecs: 2, Scale: 2, Opc_ri: AArch64::LD1W_2Z_IMM_PSEUDO, Opc_rr: AArch64::LD1W_2Z_PSEUDO);
5385 else if (Subtarget->hasSVE2p1())
5386 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 2, Opc_ri: AArch64::LD1W_2Z_IMM,
5387 Opc_rr: AArch64::LD1W_2Z);
5388 else
5389 break;
5390 return;
5391 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5392 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5393 SelectContiguousMultiVectorLoad(
5394 N: Node, NumVecs: 2, Scale: 3, Opc_ri: AArch64::LD1D_2Z_IMM_PSEUDO, Opc_rr: AArch64::LD1D_2Z_PSEUDO);
5395 else if (Subtarget->hasSVE2p1())
5396 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 3, Opc_ri: AArch64::LD1D_2Z_IMM,
5397 Opc_rr: AArch64::LD1D_2Z);
5398 else
5399 break;
5400 return;
5401 }
5402 break;
5403 }
5404 case Intrinsic::aarch64_sve_ld1_pn_x4: {
5405 if (VT == MVT::nxv16i8) {
5406 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5407 SelectContiguousMultiVectorLoad(
5408 N: Node, NumVecs: 4, Scale: 0, Opc_ri: AArch64::LD1B_4Z_IMM_PSEUDO, Opc_rr: AArch64::LD1B_4Z_PSEUDO);
5409 else if (Subtarget->hasSVE2p1())
5410 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 0, Opc_ri: AArch64::LD1B_4Z_IMM,
5411 Opc_rr: AArch64::LD1B_4Z);
5412 else
5413 break;
5414 return;
5415 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5416 VT == MVT::nxv8bf16) {
5417 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5418 SelectContiguousMultiVectorLoad(
5419 N: Node, NumVecs: 4, Scale: 1, Opc_ri: AArch64::LD1H_4Z_IMM_PSEUDO, Opc_rr: AArch64::LD1H_4Z_PSEUDO);
5420 else if (Subtarget->hasSVE2p1())
5421 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 1, Opc_ri: AArch64::LD1H_4Z_IMM,
5422 Opc_rr: AArch64::LD1H_4Z);
5423 else
5424 break;
5425 return;
5426 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5427 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5428 SelectContiguousMultiVectorLoad(
5429 N: Node, NumVecs: 4, Scale: 2, Opc_ri: AArch64::LD1W_4Z_IMM_PSEUDO, Opc_rr: AArch64::LD1W_4Z_PSEUDO);
5430 else if (Subtarget->hasSVE2p1())
5431 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 2, Opc_ri: AArch64::LD1W_4Z_IMM,
5432 Opc_rr: AArch64::LD1W_4Z);
5433 else
5434 break;
5435 return;
5436 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5437 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5438 SelectContiguousMultiVectorLoad(
5439 N: Node, NumVecs: 4, Scale: 3, Opc_ri: AArch64::LD1D_4Z_IMM_PSEUDO, Opc_rr: AArch64::LD1D_4Z_PSEUDO);
5440 else if (Subtarget->hasSVE2p1())
5441 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 3, Opc_ri: AArch64::LD1D_4Z_IMM,
5442 Opc_rr: AArch64::LD1D_4Z);
5443 else
5444 break;
5445 return;
5446 }
5447 break;
5448 }
5449 case Intrinsic::aarch64_sve_ldnt1_pn_x2: {
5450 if (VT == MVT::nxv16i8) {
5451 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5452 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 0,
5453 Opc_ri: AArch64::LDNT1B_2Z_IMM_PSEUDO,
5454 Opc_rr: AArch64::LDNT1B_2Z_PSEUDO);
5455 else if (Subtarget->hasSVE2p1())
5456 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 0, Opc_ri: AArch64::LDNT1B_2Z_IMM,
5457 Opc_rr: AArch64::LDNT1B_2Z);
5458 else
5459 break;
5460 return;
5461 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5462 VT == MVT::nxv8bf16) {
5463 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5464 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 1,
5465 Opc_ri: AArch64::LDNT1H_2Z_IMM_PSEUDO,
5466 Opc_rr: AArch64::LDNT1H_2Z_PSEUDO);
5467 else if (Subtarget->hasSVE2p1())
5468 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 1, Opc_ri: AArch64::LDNT1H_2Z_IMM,
5469 Opc_rr: AArch64::LDNT1H_2Z);
5470 else
5471 break;
5472 return;
5473 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5474 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5475 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 2,
5476 Opc_ri: AArch64::LDNT1W_2Z_IMM_PSEUDO,
5477 Opc_rr: AArch64::LDNT1W_2Z_PSEUDO);
5478 else if (Subtarget->hasSVE2p1())
5479 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 2, Opc_ri: AArch64::LDNT1W_2Z_IMM,
5480 Opc_rr: AArch64::LDNT1W_2Z);
5481 else
5482 break;
5483 return;
5484 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5485 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5486 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 3,
5487 Opc_ri: AArch64::LDNT1D_2Z_IMM_PSEUDO,
5488 Opc_rr: AArch64::LDNT1D_2Z_PSEUDO);
5489 else if (Subtarget->hasSVE2p1())
5490 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 2, Scale: 3, Opc_ri: AArch64::LDNT1D_2Z_IMM,
5491 Opc_rr: AArch64::LDNT1D_2Z);
5492 else
5493 break;
5494 return;
5495 }
5496 break;
5497 }
5498 case Intrinsic::aarch64_sve_ldnt1_pn_x4: {
5499 if (VT == MVT::nxv16i8) {
5500 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5501 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 0,
5502 Opc_ri: AArch64::LDNT1B_4Z_IMM_PSEUDO,
5503 Opc_rr: AArch64::LDNT1B_4Z_PSEUDO);
5504 else if (Subtarget->hasSVE2p1())
5505 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 0, Opc_ri: AArch64::LDNT1B_4Z_IMM,
5506 Opc_rr: AArch64::LDNT1B_4Z);
5507 else
5508 break;
5509 return;
5510 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5511 VT == MVT::nxv8bf16) {
5512 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5513 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 1,
5514 Opc_ri: AArch64::LDNT1H_4Z_IMM_PSEUDO,
5515 Opc_rr: AArch64::LDNT1H_4Z_PSEUDO);
5516 else if (Subtarget->hasSVE2p1())
5517 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 1, Opc_ri: AArch64::LDNT1H_4Z_IMM,
5518 Opc_rr: AArch64::LDNT1H_4Z);
5519 else
5520 break;
5521 return;
5522 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5523 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5524 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 2,
5525 Opc_ri: AArch64::LDNT1W_4Z_IMM_PSEUDO,
5526 Opc_rr: AArch64::LDNT1W_4Z_PSEUDO);
5527 else if (Subtarget->hasSVE2p1())
5528 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 2, Opc_ri: AArch64::LDNT1W_4Z_IMM,
5529 Opc_rr: AArch64::LDNT1W_4Z);
5530 else
5531 break;
5532 return;
5533 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5534 if (Subtarget->hasSME2() && Subtarget->isStreaming())
5535 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 3,
5536 Opc_ri: AArch64::LDNT1D_4Z_IMM_PSEUDO,
5537 Opc_rr: AArch64::LDNT1D_4Z_PSEUDO);
5538 else if (Subtarget->hasSVE2p1())
5539 SelectContiguousMultiVectorLoad(N: Node, NumVecs: 4, Scale: 3, Opc_ri: AArch64::LDNT1D_4Z_IMM,
5540 Opc_rr: AArch64::LDNT1D_4Z);
5541 else
5542 break;
5543 return;
5544 }
5545 break;
5546 }
5547 case Intrinsic::aarch64_sve_ld3_sret: {
5548 if (VT == MVT::nxv16i8) {
5549 SelectPredicatedLoad(N: Node, NumVecs: 3, Scale: 0, Opc_ri: AArch64::LD3B_IMM, Opc_rr: AArch64::LD3B,
5550 IsIntr: true);
5551 return;
5552 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5553 VT == MVT::nxv8bf16) {
5554 SelectPredicatedLoad(N: Node, NumVecs: 3, Scale: 1, Opc_ri: AArch64::LD3H_IMM, Opc_rr: AArch64::LD3H,
5555 IsIntr: true);
5556 return;
5557 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5558 SelectPredicatedLoad(N: Node, NumVecs: 3, Scale: 2, Opc_ri: AArch64::LD3W_IMM, Opc_rr: AArch64::LD3W,
5559 IsIntr: true);
5560 return;
5561 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5562 SelectPredicatedLoad(N: Node, NumVecs: 3, Scale: 3, Opc_ri: AArch64::LD3D_IMM, Opc_rr: AArch64::LD3D,
5563 IsIntr: true);
5564 return;
5565 }
5566 break;
5567 }
5568 case Intrinsic::aarch64_sve_ld4_sret: {
5569 if (VT == MVT::nxv16i8) {
5570 SelectPredicatedLoad(N: Node, NumVecs: 4, Scale: 0, Opc_ri: AArch64::LD4B_IMM, Opc_rr: AArch64::LD4B,
5571 IsIntr: true);
5572 return;
5573 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5574 VT == MVT::nxv8bf16) {
5575 SelectPredicatedLoad(N: Node, NumVecs: 4, Scale: 1, Opc_ri: AArch64::LD4H_IMM, Opc_rr: AArch64::LD4H,
5576 IsIntr: true);
5577 return;
5578 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5579 SelectPredicatedLoad(N: Node, NumVecs: 4, Scale: 2, Opc_ri: AArch64::LD4W_IMM, Opc_rr: AArch64::LD4W,
5580 IsIntr: true);
5581 return;
5582 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5583 SelectPredicatedLoad(N: Node, NumVecs: 4, Scale: 3, Opc_ri: AArch64::LD4D_IMM, Opc_rr: AArch64::LD4D,
5584 IsIntr: true);
5585 return;
5586 }
5587 break;
5588 }
5589 case Intrinsic::aarch64_sme_read_hor_vg2: {
5590 if (VT == MVT::nxv16i8) {
5591 SelectMultiVectorMove<14, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAB0,
5592 Op: AArch64::MOVA_2ZMXI_H_B);
5593 return;
5594 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5595 VT == MVT::nxv8bf16) {
5596 SelectMultiVectorMove<6, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAH0,
5597 Op: AArch64::MOVA_2ZMXI_H_H);
5598 return;
5599 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5600 SelectMultiVectorMove<2, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAS0,
5601 Op: AArch64::MOVA_2ZMXI_H_S);
5602 return;
5603 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5604 SelectMultiVectorMove<0, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAD0,
5605 Op: AArch64::MOVA_2ZMXI_H_D);
5606 return;
5607 }
5608 break;
5609 }
5610 case Intrinsic::aarch64_sme_read_ver_vg2: {
5611 if (VT == MVT::nxv16i8) {
5612 SelectMultiVectorMove<14, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAB0,
5613 Op: AArch64::MOVA_2ZMXI_V_B);
5614 return;
5615 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5616 VT == MVT::nxv8bf16) {
5617 SelectMultiVectorMove<6, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAH0,
5618 Op: AArch64::MOVA_2ZMXI_V_H);
5619 return;
5620 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5621 SelectMultiVectorMove<2, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAS0,
5622 Op: AArch64::MOVA_2ZMXI_V_S);
5623 return;
5624 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5625 SelectMultiVectorMove<0, 2>(N: Node, NumVecs: 2, BaseReg: AArch64::ZAD0,
5626 Op: AArch64::MOVA_2ZMXI_V_D);
5627 return;
5628 }
5629 break;
5630 }
5631 case Intrinsic::aarch64_sme_read_hor_vg4: {
5632 if (VT == MVT::nxv16i8) {
5633 SelectMultiVectorMove<12, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAB0,
5634 Op: AArch64::MOVA_4ZMXI_H_B);
5635 return;
5636 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5637 VT == MVT::nxv8bf16) {
5638 SelectMultiVectorMove<4, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAH0,
5639 Op: AArch64::MOVA_4ZMXI_H_H);
5640 return;
5641 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5642 SelectMultiVectorMove<0, 2>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAS0,
5643 Op: AArch64::MOVA_4ZMXI_H_S);
5644 return;
5645 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5646 SelectMultiVectorMove<0, 2>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAD0,
5647 Op: AArch64::MOVA_4ZMXI_H_D);
5648 return;
5649 }
5650 break;
5651 }
5652 case Intrinsic::aarch64_sme_read_ver_vg4: {
5653 if (VT == MVT::nxv16i8) {
5654 SelectMultiVectorMove<12, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAB0,
5655 Op: AArch64::MOVA_4ZMXI_V_B);
5656 return;
5657 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5658 VT == MVT::nxv8bf16) {
5659 SelectMultiVectorMove<4, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAH0,
5660 Op: AArch64::MOVA_4ZMXI_V_H);
5661 return;
5662 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5663 SelectMultiVectorMove<0, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAS0,
5664 Op: AArch64::MOVA_4ZMXI_V_S);
5665 return;
5666 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5667 SelectMultiVectorMove<0, 4>(N: Node, NumVecs: 4, BaseReg: AArch64::ZAD0,
5668 Op: AArch64::MOVA_4ZMXI_V_D);
5669 return;
5670 }
5671 break;
5672 }
5673 case Intrinsic::aarch64_sme_read_vg1x2: {
5674 SelectMultiVectorMove<7, 1>(N: Node, NumVecs: 2, BaseReg: AArch64::ZA,
5675 Op: AArch64::MOVA_VG2_2ZMXI);
5676 return;
5677 }
5678 case Intrinsic::aarch64_sme_read_vg1x4: {
5679 SelectMultiVectorMove<7, 1>(N: Node, NumVecs: 4, BaseReg: AArch64::ZA,
5680 Op: AArch64::MOVA_VG4_4ZMXI);
5681 return;
5682 }
5683 case Intrinsic::aarch64_sme_readz_horiz_x2: {
5684 if (VT == MVT::nxv16i8) {
5685 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_H_B_PSEUDO, MaxIdx: 14, Scale: 2);
5686 return;
5687 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5688 VT == MVT::nxv8bf16) {
5689 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_H_H_PSEUDO, MaxIdx: 6, Scale: 2);
5690 return;
5691 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5692 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_H_S_PSEUDO, MaxIdx: 2, Scale: 2);
5693 return;
5694 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5695 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_H_D_PSEUDO, MaxIdx: 0, Scale: 2);
5696 return;
5697 }
5698 break;
5699 }
5700 case Intrinsic::aarch64_sme_readz_vert_x2: {
5701 if (VT == MVT::nxv16i8) {
5702 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_V_B_PSEUDO, MaxIdx: 14, Scale: 2);
5703 return;
5704 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5705 VT == MVT::nxv8bf16) {
5706 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_V_H_PSEUDO, MaxIdx: 6, Scale: 2);
5707 return;
5708 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5709 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_V_S_PSEUDO, MaxIdx: 2, Scale: 2);
5710 return;
5711 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5712 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_2ZMI_V_D_PSEUDO, MaxIdx: 0, Scale: 2);
5713 return;
5714 }
5715 break;
5716 }
5717 case Intrinsic::aarch64_sme_readz_horiz_x4: {
5718 if (VT == MVT::nxv16i8) {
5719 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_H_B_PSEUDO, MaxIdx: 12, Scale: 4);
5720 return;
5721 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5722 VT == MVT::nxv8bf16) {
5723 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_H_H_PSEUDO, MaxIdx: 4, Scale: 4);
5724 return;
5725 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5726 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_H_S_PSEUDO, MaxIdx: 0, Scale: 4);
5727 return;
5728 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5729 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_H_D_PSEUDO, MaxIdx: 0, Scale: 4);
5730 return;
5731 }
5732 break;
5733 }
5734 case Intrinsic::aarch64_sme_readz_vert_x4: {
5735 if (VT == MVT::nxv16i8) {
5736 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_V_B_PSEUDO, MaxIdx: 12, Scale: 4);
5737 return;
5738 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
5739 VT == MVT::nxv8bf16) {
5740 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_V_H_PSEUDO, MaxIdx: 4, Scale: 4);
5741 return;
5742 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
5743 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_V_S_PSEUDO, MaxIdx: 0, Scale: 4);
5744 return;
5745 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
5746 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_4ZMI_V_D_PSEUDO, MaxIdx: 0, Scale: 4);
5747 return;
5748 }
5749 break;
5750 }
5751 case Intrinsic::aarch64_sme_readz_x2: {
5752 SelectMultiVectorMoveZ(N: Node, NumVecs: 2, Op: AArch64::MOVAZ_VG2_2ZMXI_PSEUDO, MaxIdx: 7, Scale: 1,
5753 BaseReg: AArch64::ZA);
5754 return;
5755 }
5756 case Intrinsic::aarch64_sme_readz_x4: {
5757 SelectMultiVectorMoveZ(N: Node, NumVecs: 4, Op: AArch64::MOVAZ_VG4_4ZMXI_PSEUDO, MaxIdx: 7, Scale: 1,
5758 BaseReg: AArch64::ZA);
5759 return;
5760 }
5761 case Intrinsic::swift_async_context_addr: {
5762 SDLoc DL(Node);
5763 SDValue Chain = Node->getOperand(Num: 0);
5764 SDValue CopyFP = CurDAG->getCopyFromReg(Chain, dl: DL, Reg: AArch64::FP, VT: MVT::i64);
5765 SDValue Res = SDValue(
5766 CurDAG->getMachineNode(Opcode: AArch64::SUBXri, dl: DL, VT: MVT::i64, Op1: CopyFP,
5767 Op2: CurDAG->getTargetConstant(Val: 8, DL, VT: MVT::i32),
5768 Op3: CurDAG->getTargetConstant(Val: 0, DL, VT: MVT::i32)),
5769 0);
5770 ReplaceUses(F: SDValue(Node, 0), T: Res);
5771 ReplaceUses(F: SDValue(Node, 1), T: CopyFP.getValue(R: 1));
5772 CurDAG->RemoveDeadNode(N: Node);
5773
5774 auto &MF = CurDAG->getMachineFunction();
5775 MF.getFrameInfo().setFrameAddressIsTaken(true);
5776 MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
5777 return;
5778 }
5779 case Intrinsic::aarch64_sme_luti2_lane_zt_x4: {
5780 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
5781 VT: Node->getValueType(ResNo: 0),
5782 Opcodes: {AArch64::LUTI2_4ZTZI_B, AArch64::LUTI2_4ZTZI_H,
5783 AArch64::LUTI2_4ZTZI_S}))
5784 // Second Immediate must be <= 3:
5785 SelectMultiVectorLutiLane(Node, NumOutVecs: 4, Opc, MaxImm: 3);
5786 return;
5787 }
5788 case Intrinsic::aarch64_sme_luti4_lane_zt_x4: {
5789 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
5790 VT: Node->getValueType(ResNo: 0),
5791 Opcodes: {0, AArch64::LUTI4_4ZTZI_H, AArch64::LUTI4_4ZTZI_S}))
5792 // Second Immediate must be <= 1:
5793 SelectMultiVectorLutiLane(Node, NumOutVecs: 4, Opc, MaxImm: 1);
5794 return;
5795 }
5796 case Intrinsic::aarch64_sme_luti2_lane_zt_x2: {
5797 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
5798 VT: Node->getValueType(ResNo: 0),
5799 Opcodes: {AArch64::LUTI2_2ZTZI_B, AArch64::LUTI2_2ZTZI_H,
5800 AArch64::LUTI2_2ZTZI_S}))
5801 // Second Immediate must be <= 7:
5802 SelectMultiVectorLutiLane(Node, NumOutVecs: 2, Opc, MaxImm: 7);
5803 return;
5804 }
5805 case Intrinsic::aarch64_sme_luti4_lane_zt_x2: {
5806 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
5807 VT: Node->getValueType(ResNo: 0),
5808 Opcodes: {AArch64::LUTI4_2ZTZI_B, AArch64::LUTI4_2ZTZI_H,
5809 AArch64::LUTI4_2ZTZI_S}))
5810 // Second Immediate must be <= 3:
5811 SelectMultiVectorLutiLane(Node, NumOutVecs: 2, Opc, MaxImm: 3);
5812 return;
5813 }
5814 case Intrinsic::aarch64_sme_luti4_zt_x4: {
5815 SelectMultiVectorLuti(Node, NumOutVecs: 4, Opc: AArch64::LUTI4_4ZZT2Z);
5816 return;
5817 }
5818 case Intrinsic::aarch64_sve_fp8_cvtl1_x2:
5819 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::FP>(
5820 VT: Node->getValueType(ResNo: 0),
5821 Opcodes: {AArch64::BF1CVTL_2ZZ_BtoH, AArch64::F1CVTL_2ZZ_BtoH}))
5822 SelectCVTIntrinsicFP8(N: Node, NumVecs: 2, Opcode: Opc);
5823 return;
5824 case Intrinsic::aarch64_sve_fp8_cvtl2_x2:
5825 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::FP>(
5826 VT: Node->getValueType(ResNo: 0),
5827 Opcodes: {AArch64::BF2CVTL_2ZZ_BtoH, AArch64::F2CVTL_2ZZ_BtoH}))
5828 SelectCVTIntrinsicFP8(N: Node, NumVecs: 2, Opcode: Opc);
5829 return;
5830 case Intrinsic::aarch64_sve_fp8_cvt1_x2:
5831 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::FP>(
5832 VT: Node->getValueType(ResNo: 0),
5833 Opcodes: {AArch64::BF1CVT_2ZZ_BtoH, AArch64::F1CVT_2ZZ_BtoH}))
5834 SelectCVTIntrinsicFP8(N: Node, NumVecs: 2, Opcode: Opc);
5835 return;
5836 case Intrinsic::aarch64_sve_fp8_cvt2_x2:
5837 if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::FP>(
5838 VT: Node->getValueType(ResNo: 0),
5839 Opcodes: {AArch64::BF2CVT_2ZZ_BtoH, AArch64::F2CVT_2ZZ_BtoH}))
5840 SelectCVTIntrinsicFP8(N: Node, NumVecs: 2, Opcode: Opc);
5841 return;
5842 case Intrinsic::ptrauth_resign_load_relative:
5843 SelectPtrauthResign(N: Node);
5844 return;
5845 }
5846 } break;
5847 case ISD::INTRINSIC_WO_CHAIN: {
5848 unsigned IntNo = Node->getConstantOperandVal(Num: 0);
5849 switch (IntNo) {
5850 default:
5851 break;
5852 case Intrinsic::aarch64_tagp:
5853 SelectTagP(N: Node);
5854 return;
5855
5856 case Intrinsic::ptrauth_auth:
5857 SelectPtrauthAuth(N: Node);
5858 return;
5859
5860 case Intrinsic::ptrauth_resign:
5861 SelectPtrauthResign(N: Node);
5862 return;
5863
5864 case Intrinsic::aarch64_neon_tbl2:
5865 SelectTable(N: Node, NumVecs: 2,
5866 Opc: VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
5867 isExt: false);
5868 return;
5869 case Intrinsic::aarch64_neon_tbl3:
5870 SelectTable(N: Node, NumVecs: 3, Opc: VT == MVT::v8i8 ? AArch64::TBLv8i8Three
5871 : AArch64::TBLv16i8Three,
5872 isExt: false);
5873 return;
5874 case Intrinsic::aarch64_neon_tbl4:
5875 SelectTable(N: Node, NumVecs: 4, Opc: VT == MVT::v8i8 ? AArch64::TBLv8i8Four
5876 : AArch64::TBLv16i8Four,
5877 isExt: false);
5878 return;
5879 case Intrinsic::aarch64_neon_tbx2:
5880 SelectTable(N: Node, NumVecs: 2,
5881 Opc: VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
5882 isExt: true);
5883 return;
5884 case Intrinsic::aarch64_neon_tbx3:
5885 SelectTable(N: Node, NumVecs: 3, Opc: VT == MVT::v8i8 ? AArch64::TBXv8i8Three
5886 : AArch64::TBXv16i8Three,
5887 isExt: true);
5888 return;
5889 case Intrinsic::aarch64_neon_tbx4:
5890 SelectTable(N: Node, NumVecs: 4, Opc: VT == MVT::v8i8 ? AArch64::TBXv8i8Four
5891 : AArch64::TBXv16i8Four,
5892 isExt: true);
5893 return;
5894 case Intrinsic::aarch64_sve_srshl_single_x2:
5895 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5896 VT: Node->getValueType(ResNo: 0),
5897 Opcodes: {AArch64::SRSHL_VG2_2ZZ_B, AArch64::SRSHL_VG2_2ZZ_H,
5898 AArch64::SRSHL_VG2_2ZZ_S, AArch64::SRSHL_VG2_2ZZ_D}))
5899 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
5900 return;
5901 case Intrinsic::aarch64_sve_srshl_single_x4:
5902 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5903 VT: Node->getValueType(ResNo: 0),
5904 Opcodes: {AArch64::SRSHL_VG4_4ZZ_B, AArch64::SRSHL_VG4_4ZZ_H,
5905 AArch64::SRSHL_VG4_4ZZ_S, AArch64::SRSHL_VG4_4ZZ_D}))
5906 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
5907 return;
5908 case Intrinsic::aarch64_sve_urshl_single_x2:
5909 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5910 VT: Node->getValueType(ResNo: 0),
5911 Opcodes: {AArch64::URSHL_VG2_2ZZ_B, AArch64::URSHL_VG2_2ZZ_H,
5912 AArch64::URSHL_VG2_2ZZ_S, AArch64::URSHL_VG2_2ZZ_D}))
5913 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
5914 return;
5915 case Intrinsic::aarch64_sve_urshl_single_x4:
5916 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5917 VT: Node->getValueType(ResNo: 0),
5918 Opcodes: {AArch64::URSHL_VG4_4ZZ_B, AArch64::URSHL_VG4_4ZZ_H,
5919 AArch64::URSHL_VG4_4ZZ_S, AArch64::URSHL_VG4_4ZZ_D}))
5920 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
5921 return;
5922 case Intrinsic::aarch64_sve_srshl_x2:
5923 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5924 VT: Node->getValueType(ResNo: 0),
5925 Opcodes: {AArch64::SRSHL_VG2_2Z2Z_B, AArch64::SRSHL_VG2_2Z2Z_H,
5926 AArch64::SRSHL_VG2_2Z2Z_S, AArch64::SRSHL_VG2_2Z2Z_D}))
5927 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
5928 return;
5929 case Intrinsic::aarch64_sve_srshl_x4:
5930 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5931 VT: Node->getValueType(ResNo: 0),
5932 Opcodes: {AArch64::SRSHL_VG4_4Z4Z_B, AArch64::SRSHL_VG4_4Z4Z_H,
5933 AArch64::SRSHL_VG4_4Z4Z_S, AArch64::SRSHL_VG4_4Z4Z_D}))
5934 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
5935 return;
5936 case Intrinsic::aarch64_sve_urshl_x2:
5937 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5938 VT: Node->getValueType(ResNo: 0),
5939 Opcodes: {AArch64::URSHL_VG2_2Z2Z_B, AArch64::URSHL_VG2_2Z2Z_H,
5940 AArch64::URSHL_VG2_2Z2Z_S, AArch64::URSHL_VG2_2Z2Z_D}))
5941 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
5942 return;
5943 case Intrinsic::aarch64_sve_urshl_x4:
5944 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5945 VT: Node->getValueType(ResNo: 0),
5946 Opcodes: {AArch64::URSHL_VG4_4Z4Z_B, AArch64::URSHL_VG4_4Z4Z_H,
5947 AArch64::URSHL_VG4_4Z4Z_S, AArch64::URSHL_VG4_4Z4Z_D}))
5948 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
5949 return;
5950 case Intrinsic::aarch64_sve_sqdmulh_single_vgx2:
5951 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5952 VT: Node->getValueType(ResNo: 0),
5953 Opcodes: {AArch64::SQDMULH_VG2_2ZZ_B, AArch64::SQDMULH_VG2_2ZZ_H,
5954 AArch64::SQDMULH_VG2_2ZZ_S, AArch64::SQDMULH_VG2_2ZZ_D}))
5955 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
5956 return;
5957 case Intrinsic::aarch64_sve_sqdmulh_single_vgx4:
5958 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5959 VT: Node->getValueType(ResNo: 0),
5960 Opcodes: {AArch64::SQDMULH_VG4_4ZZ_B, AArch64::SQDMULH_VG4_4ZZ_H,
5961 AArch64::SQDMULH_VG4_4ZZ_S, AArch64::SQDMULH_VG4_4ZZ_D}))
5962 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
5963 return;
5964 case Intrinsic::aarch64_sve_sqdmulh_vgx2:
5965 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5966 VT: Node->getValueType(ResNo: 0),
5967 Opcodes: {AArch64::SQDMULH_VG2_2Z2Z_B, AArch64::SQDMULH_VG2_2Z2Z_H,
5968 AArch64::SQDMULH_VG2_2Z2Z_S, AArch64::SQDMULH_VG2_2Z2Z_D}))
5969 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
5970 return;
5971 case Intrinsic::aarch64_sve_sqdmulh_vgx4:
5972 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
5973 VT: Node->getValueType(ResNo: 0),
5974 Opcodes: {AArch64::SQDMULH_VG4_4Z4Z_B, AArch64::SQDMULH_VG4_4Z4Z_H,
5975 AArch64::SQDMULH_VG4_4Z4Z_S, AArch64::SQDMULH_VG4_4Z4Z_D}))
5976 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
5977 return;
5978 case Intrinsic::aarch64_sme_fp8_scale_single_x2:
5979 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
5980 VT: Node->getValueType(ResNo: 0),
5981 Opcodes: {0, AArch64::FSCALE_2ZZ_H, AArch64::FSCALE_2ZZ_S,
5982 AArch64::FSCALE_2ZZ_D}))
5983 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
5984 return;
5985 case Intrinsic::aarch64_sme_fp8_scale_single_x4:
5986 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
5987 VT: Node->getValueType(ResNo: 0),
5988 Opcodes: {0, AArch64::FSCALE_4ZZ_H, AArch64::FSCALE_4ZZ_S,
5989 AArch64::FSCALE_4ZZ_D}))
5990 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
5991 return;
5992 case Intrinsic::aarch64_sme_fp8_scale_x2:
5993 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
5994 VT: Node->getValueType(ResNo: 0),
5995 Opcodes: {0, AArch64::FSCALE_2Z2Z_H, AArch64::FSCALE_2Z2Z_S,
5996 AArch64::FSCALE_2Z2Z_D}))
5997 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
5998 return;
5999 case Intrinsic::aarch64_sme_fp8_scale_x4:
6000 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6001 VT: Node->getValueType(ResNo: 0),
6002 Opcodes: {0, AArch64::FSCALE_4Z4Z_H, AArch64::FSCALE_4Z4Z_S,
6003 AArch64::FSCALE_4Z4Z_D}))
6004 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6005 return;
6006 case Intrinsic::aarch64_sve_whilege_x2:
6007 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
6008 VT: Node->getValueType(ResNo: 0),
6009 Opcodes: {AArch64::WHILEGE_2PXX_B, AArch64::WHILEGE_2PXX_H,
6010 AArch64::WHILEGE_2PXX_S, AArch64::WHILEGE_2PXX_D}))
6011 SelectWhilePair(N: Node, Opc: Op);
6012 return;
6013 case Intrinsic::aarch64_sve_whilegt_x2:
6014 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
6015 VT: Node->getValueType(ResNo: 0),
6016 Opcodes: {AArch64::WHILEGT_2PXX_B, AArch64::WHILEGT_2PXX_H,
6017 AArch64::WHILEGT_2PXX_S, AArch64::WHILEGT_2PXX_D}))
6018 SelectWhilePair(N: Node, Opc: Op);
6019 return;
6020 case Intrinsic::aarch64_sve_whilehi_x2:
6021 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
6022 VT: Node->getValueType(ResNo: 0),
6023 Opcodes: {AArch64::WHILEHI_2PXX_B, AArch64::WHILEHI_2PXX_H,
6024 AArch64::WHILEHI_2PXX_S, AArch64::WHILEHI_2PXX_D}))
6025 SelectWhilePair(N: Node, Opc: Op);
6026 return;
6027 case Intrinsic::aarch64_sve_whilehs_x2:
6028 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
6029 VT: Node->getValueType(ResNo: 0),
6030 Opcodes: {AArch64::WHILEHS_2PXX_B, AArch64::WHILEHS_2PXX_H,
6031 AArch64::WHILEHS_2PXX_S, AArch64::WHILEHS_2PXX_D}))
6032 SelectWhilePair(N: Node, Opc: Op);
6033 return;
6034 case Intrinsic::aarch64_sve_whilele_x2:
6035 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
6036 VT: Node->getValueType(ResNo: 0),
6037 Opcodes: {AArch64::WHILELE_2PXX_B, AArch64::WHILELE_2PXX_H,
6038 AArch64::WHILELE_2PXX_S, AArch64::WHILELE_2PXX_D}))
6039 SelectWhilePair(N: Node, Opc: Op);
6040 return;
6041 case Intrinsic::aarch64_sve_whilelo_x2:
6042 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
6043 VT: Node->getValueType(ResNo: 0),
6044 Opcodes: {AArch64::WHILELO_2PXX_B, AArch64::WHILELO_2PXX_H,
6045 AArch64::WHILELO_2PXX_S, AArch64::WHILELO_2PXX_D}))
6046 SelectWhilePair(N: Node, Opc: Op);
6047 return;
6048 case Intrinsic::aarch64_sve_whilels_x2:
6049 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
6050 VT: Node->getValueType(ResNo: 0),
6051 Opcodes: {AArch64::WHILELS_2PXX_B, AArch64::WHILELS_2PXX_H,
6052 AArch64::WHILELS_2PXX_S, AArch64::WHILELS_2PXX_D}))
6053 SelectWhilePair(N: Node, Opc: Op);
6054 return;
6055 case Intrinsic::aarch64_sve_whilelt_x2:
6056 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int1>(
6057 VT: Node->getValueType(ResNo: 0),
6058 Opcodes: {AArch64::WHILELT_2PXX_B, AArch64::WHILELT_2PXX_H,
6059 AArch64::WHILELT_2PXX_S, AArch64::WHILELT_2PXX_D}))
6060 SelectWhilePair(N: Node, Opc: Op);
6061 return;
6062 case Intrinsic::aarch64_sve_smax_single_x2:
6063 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6064 VT: Node->getValueType(ResNo: 0),
6065 Opcodes: {AArch64::SMAX_VG2_2ZZ_B, AArch64::SMAX_VG2_2ZZ_H,
6066 AArch64::SMAX_VG2_2ZZ_S, AArch64::SMAX_VG2_2ZZ_D}))
6067 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6068 return;
6069 case Intrinsic::aarch64_sve_umax_single_x2:
6070 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6071 VT: Node->getValueType(ResNo: 0),
6072 Opcodes: {AArch64::UMAX_VG2_2ZZ_B, AArch64::UMAX_VG2_2ZZ_H,
6073 AArch64::UMAX_VG2_2ZZ_S, AArch64::UMAX_VG2_2ZZ_D}))
6074 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6075 return;
6076 case Intrinsic::aarch64_sve_fmax_single_x2:
6077 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6078 VT: Node->getValueType(ResNo: 0),
6079 Opcodes: {AArch64::BFMAX_VG2_2ZZ_H, AArch64::FMAX_VG2_2ZZ_H,
6080 AArch64::FMAX_VG2_2ZZ_S, AArch64::FMAX_VG2_2ZZ_D}))
6081 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6082 return;
6083 case Intrinsic::aarch64_sve_smax_single_x4:
6084 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6085 VT: Node->getValueType(ResNo: 0),
6086 Opcodes: {AArch64::SMAX_VG4_4ZZ_B, AArch64::SMAX_VG4_4ZZ_H,
6087 AArch64::SMAX_VG4_4ZZ_S, AArch64::SMAX_VG4_4ZZ_D}))
6088 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6089 return;
6090 case Intrinsic::aarch64_sve_umax_single_x4:
6091 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6092 VT: Node->getValueType(ResNo: 0),
6093 Opcodes: {AArch64::UMAX_VG4_4ZZ_B, AArch64::UMAX_VG4_4ZZ_H,
6094 AArch64::UMAX_VG4_4ZZ_S, AArch64::UMAX_VG4_4ZZ_D}))
6095 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6096 return;
6097 case Intrinsic::aarch64_sve_fmax_single_x4:
6098 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6099 VT: Node->getValueType(ResNo: 0),
6100 Opcodes: {AArch64::BFMAX_VG4_4ZZ_H, AArch64::FMAX_VG4_4ZZ_H,
6101 AArch64::FMAX_VG4_4ZZ_S, AArch64::FMAX_VG4_4ZZ_D}))
6102 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6103 return;
6104 case Intrinsic::aarch64_sve_smin_single_x2:
6105 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6106 VT: Node->getValueType(ResNo: 0),
6107 Opcodes: {AArch64::SMIN_VG2_2ZZ_B, AArch64::SMIN_VG2_2ZZ_H,
6108 AArch64::SMIN_VG2_2ZZ_S, AArch64::SMIN_VG2_2ZZ_D}))
6109 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6110 return;
6111 case Intrinsic::aarch64_sve_umin_single_x2:
6112 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6113 VT: Node->getValueType(ResNo: 0),
6114 Opcodes: {AArch64::UMIN_VG2_2ZZ_B, AArch64::UMIN_VG2_2ZZ_H,
6115 AArch64::UMIN_VG2_2ZZ_S, AArch64::UMIN_VG2_2ZZ_D}))
6116 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6117 return;
6118 case Intrinsic::aarch64_sve_fmin_single_x2:
6119 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6120 VT: Node->getValueType(ResNo: 0),
6121 Opcodes: {AArch64::BFMIN_VG2_2ZZ_H, AArch64::FMIN_VG2_2ZZ_H,
6122 AArch64::FMIN_VG2_2ZZ_S, AArch64::FMIN_VG2_2ZZ_D}))
6123 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6124 return;
6125 case Intrinsic::aarch64_sve_smin_single_x4:
6126 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6127 VT: Node->getValueType(ResNo: 0),
6128 Opcodes: {AArch64::SMIN_VG4_4ZZ_B, AArch64::SMIN_VG4_4ZZ_H,
6129 AArch64::SMIN_VG4_4ZZ_S, AArch64::SMIN_VG4_4ZZ_D}))
6130 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6131 return;
6132 case Intrinsic::aarch64_sve_umin_single_x4:
6133 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6134 VT: Node->getValueType(ResNo: 0),
6135 Opcodes: {AArch64::UMIN_VG4_4ZZ_B, AArch64::UMIN_VG4_4ZZ_H,
6136 AArch64::UMIN_VG4_4ZZ_S, AArch64::UMIN_VG4_4ZZ_D}))
6137 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6138 return;
6139 case Intrinsic::aarch64_sve_fmin_single_x4:
6140 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6141 VT: Node->getValueType(ResNo: 0),
6142 Opcodes: {AArch64::BFMIN_VG4_4ZZ_H, AArch64::FMIN_VG4_4ZZ_H,
6143 AArch64::FMIN_VG4_4ZZ_S, AArch64::FMIN_VG4_4ZZ_D}))
6144 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6145 return;
6146 case Intrinsic::aarch64_sve_smax_x2:
6147 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6148 VT: Node->getValueType(ResNo: 0),
6149 Opcodes: {AArch64::SMAX_VG2_2Z2Z_B, AArch64::SMAX_VG2_2Z2Z_H,
6150 AArch64::SMAX_VG2_2Z2Z_S, AArch64::SMAX_VG2_2Z2Z_D}))
6151 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6152 return;
6153 case Intrinsic::aarch64_sve_umax_x2:
6154 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6155 VT: Node->getValueType(ResNo: 0),
6156 Opcodes: {AArch64::UMAX_VG2_2Z2Z_B, AArch64::UMAX_VG2_2Z2Z_H,
6157 AArch64::UMAX_VG2_2Z2Z_S, AArch64::UMAX_VG2_2Z2Z_D}))
6158 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6159 return;
6160 case Intrinsic::aarch64_sve_fmax_x2:
6161 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6162 VT: Node->getValueType(ResNo: 0),
6163 Opcodes: {AArch64::BFMAX_VG2_2Z2Z_H, AArch64::FMAX_VG2_2Z2Z_H,
6164 AArch64::FMAX_VG2_2Z2Z_S, AArch64::FMAX_VG2_2Z2Z_D}))
6165 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6166 return;
6167 case Intrinsic::aarch64_sve_smax_x4:
6168 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6169 VT: Node->getValueType(ResNo: 0),
6170 Opcodes: {AArch64::SMAX_VG4_4Z4Z_B, AArch64::SMAX_VG4_4Z4Z_H,
6171 AArch64::SMAX_VG4_4Z4Z_S, AArch64::SMAX_VG4_4Z4Z_D}))
6172 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6173 return;
6174 case Intrinsic::aarch64_sve_umax_x4:
6175 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6176 VT: Node->getValueType(ResNo: 0),
6177 Opcodes: {AArch64::UMAX_VG4_4Z4Z_B, AArch64::UMAX_VG4_4Z4Z_H,
6178 AArch64::UMAX_VG4_4Z4Z_S, AArch64::UMAX_VG4_4Z4Z_D}))
6179 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6180 return;
6181 case Intrinsic::aarch64_sve_fmax_x4:
6182 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6183 VT: Node->getValueType(ResNo: 0),
6184 Opcodes: {AArch64::BFMAX_VG4_4Z2Z_H, AArch64::FMAX_VG4_4Z4Z_H,
6185 AArch64::FMAX_VG4_4Z4Z_S, AArch64::FMAX_VG4_4Z4Z_D}))
6186 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6187 return;
6188 case Intrinsic::aarch64_sme_famax_x2:
6189 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6190 VT: Node->getValueType(ResNo: 0),
6191 Opcodes: {0, AArch64::FAMAX_2Z2Z_H, AArch64::FAMAX_2Z2Z_S,
6192 AArch64::FAMAX_2Z2Z_D}))
6193 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6194 return;
6195 case Intrinsic::aarch64_sme_famax_x4:
6196 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6197 VT: Node->getValueType(ResNo: 0),
6198 Opcodes: {0, AArch64::FAMAX_4Z4Z_H, AArch64::FAMAX_4Z4Z_S,
6199 AArch64::FAMAX_4Z4Z_D}))
6200 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6201 return;
6202 case Intrinsic::aarch64_sme_famin_x2:
6203 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6204 VT: Node->getValueType(ResNo: 0),
6205 Opcodes: {0, AArch64::FAMIN_2Z2Z_H, AArch64::FAMIN_2Z2Z_S,
6206 AArch64::FAMIN_2Z2Z_D}))
6207 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6208 return;
6209 case Intrinsic::aarch64_sme_famin_x4:
6210 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6211 VT: Node->getValueType(ResNo: 0),
6212 Opcodes: {0, AArch64::FAMIN_4Z4Z_H, AArch64::FAMIN_4Z4Z_S,
6213 AArch64::FAMIN_4Z4Z_D}))
6214 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6215 return;
6216 case Intrinsic::aarch64_sve_smin_x2:
6217 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6218 VT: Node->getValueType(ResNo: 0),
6219 Opcodes: {AArch64::SMIN_VG2_2Z2Z_B, AArch64::SMIN_VG2_2Z2Z_H,
6220 AArch64::SMIN_VG2_2Z2Z_S, AArch64::SMIN_VG2_2Z2Z_D}))
6221 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6222 return;
6223 case Intrinsic::aarch64_sve_umin_x2:
6224 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6225 VT: Node->getValueType(ResNo: 0),
6226 Opcodes: {AArch64::UMIN_VG2_2Z2Z_B, AArch64::UMIN_VG2_2Z2Z_H,
6227 AArch64::UMIN_VG2_2Z2Z_S, AArch64::UMIN_VG2_2Z2Z_D}))
6228 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6229 return;
6230 case Intrinsic::aarch64_sve_fmin_x2:
6231 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6232 VT: Node->getValueType(ResNo: 0),
6233 Opcodes: {AArch64::BFMIN_VG2_2Z2Z_H, AArch64::FMIN_VG2_2Z2Z_H,
6234 AArch64::FMIN_VG2_2Z2Z_S, AArch64::FMIN_VG2_2Z2Z_D}))
6235 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6236 return;
6237 case Intrinsic::aarch64_sve_smin_x4:
6238 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6239 VT: Node->getValueType(ResNo: 0),
6240 Opcodes: {AArch64::SMIN_VG4_4Z4Z_B, AArch64::SMIN_VG4_4Z4Z_H,
6241 AArch64::SMIN_VG4_4Z4Z_S, AArch64::SMIN_VG4_4Z4Z_D}))
6242 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6243 return;
6244 case Intrinsic::aarch64_sve_umin_x4:
6245 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6246 VT: Node->getValueType(ResNo: 0),
6247 Opcodes: {AArch64::UMIN_VG4_4Z4Z_B, AArch64::UMIN_VG4_4Z4Z_H,
6248 AArch64::UMIN_VG4_4Z4Z_S, AArch64::UMIN_VG4_4Z4Z_D}))
6249 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6250 return;
6251 case Intrinsic::aarch64_sve_fmin_x4:
6252 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6253 VT: Node->getValueType(ResNo: 0),
6254 Opcodes: {AArch64::BFMIN_VG4_4Z2Z_H, AArch64::FMIN_VG4_4Z4Z_H,
6255 AArch64::FMIN_VG4_4Z4Z_S, AArch64::FMIN_VG4_4Z4Z_D}))
6256 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6257 return;
6258 case Intrinsic::aarch64_sve_fmaxnm_single_x2 :
6259 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6260 VT: Node->getValueType(ResNo: 0),
6261 Opcodes: {AArch64::BFMAXNM_VG2_2ZZ_H, AArch64::FMAXNM_VG2_2ZZ_H,
6262 AArch64::FMAXNM_VG2_2ZZ_S, AArch64::FMAXNM_VG2_2ZZ_D}))
6263 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6264 return;
6265 case Intrinsic::aarch64_sve_fmaxnm_single_x4 :
6266 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6267 VT: Node->getValueType(ResNo: 0),
6268 Opcodes: {AArch64::BFMAXNM_VG4_4ZZ_H, AArch64::FMAXNM_VG4_4ZZ_H,
6269 AArch64::FMAXNM_VG4_4ZZ_S, AArch64::FMAXNM_VG4_4ZZ_D}))
6270 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6271 return;
6272 case Intrinsic::aarch64_sve_fminnm_single_x2:
6273 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6274 VT: Node->getValueType(ResNo: 0),
6275 Opcodes: {AArch64::BFMINNM_VG2_2ZZ_H, AArch64::FMINNM_VG2_2ZZ_H,
6276 AArch64::FMINNM_VG2_2ZZ_S, AArch64::FMINNM_VG2_2ZZ_D}))
6277 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6278 return;
6279 case Intrinsic::aarch64_sve_fminnm_single_x4:
6280 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6281 VT: Node->getValueType(ResNo: 0),
6282 Opcodes: {AArch64::BFMINNM_VG4_4ZZ_H, AArch64::FMINNM_VG4_4ZZ_H,
6283 AArch64::FMINNM_VG4_4ZZ_S, AArch64::FMINNM_VG4_4ZZ_D}))
6284 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6285 return;
6286 case Intrinsic::aarch64_sve_fscale_single_x4:
6287 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: AArch64::BFSCALE_4ZZ);
6288 return;
6289 case Intrinsic::aarch64_sve_fscale_single_x2:
6290 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: AArch64::BFSCALE_2ZZ);
6291 return;
6292 case Intrinsic::aarch64_sve_fmul_single_x4:
6293 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6294 VT: Node->getValueType(ResNo: 0),
6295 Opcodes: {AArch64::BFMUL_4ZZ, AArch64::FMUL_4ZZ_H, AArch64::FMUL_4ZZ_S,
6296 AArch64::FMUL_4ZZ_D}))
6297 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6298 return;
6299 case Intrinsic::aarch64_sve_fmul_single_x2:
6300 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6301 VT: Node->getValueType(ResNo: 0),
6302 Opcodes: {AArch64::BFMUL_2ZZ, AArch64::FMUL_2ZZ_H, AArch64::FMUL_2ZZ_S,
6303 AArch64::FMUL_2ZZ_D}))
6304 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6305 return;
6306 case Intrinsic::aarch64_sve_fmaxnm_x2:
6307 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6308 VT: Node->getValueType(ResNo: 0),
6309 Opcodes: {AArch64::BFMAXNM_VG2_2Z2Z_H, AArch64::FMAXNM_VG2_2Z2Z_H,
6310 AArch64::FMAXNM_VG2_2Z2Z_S, AArch64::FMAXNM_VG2_2Z2Z_D}))
6311 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6312 return;
6313 case Intrinsic::aarch64_sve_fmaxnm_x4:
6314 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6315 VT: Node->getValueType(ResNo: 0),
6316 Opcodes: {AArch64::BFMAXNM_VG4_4Z2Z_H, AArch64::FMAXNM_VG4_4Z4Z_H,
6317 AArch64::FMAXNM_VG4_4Z4Z_S, AArch64::FMAXNM_VG4_4Z4Z_D}))
6318 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6319 return;
6320 case Intrinsic::aarch64_sve_fminnm_x2:
6321 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6322 VT: Node->getValueType(ResNo: 0),
6323 Opcodes: {AArch64::BFMINNM_VG2_2Z2Z_H, AArch64::FMINNM_VG2_2Z2Z_H,
6324 AArch64::FMINNM_VG2_2Z2Z_S, AArch64::FMINNM_VG2_2Z2Z_D}))
6325 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6326 return;
6327 case Intrinsic::aarch64_sve_fminnm_x4:
6328 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6329 VT: Node->getValueType(ResNo: 0),
6330 Opcodes: {AArch64::BFMINNM_VG4_4Z2Z_H, AArch64::FMINNM_VG4_4Z4Z_H,
6331 AArch64::FMINNM_VG4_4Z4Z_S, AArch64::FMINNM_VG4_4Z4Z_D}))
6332 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6333 return;
6334 case Intrinsic::aarch64_sve_aese_lane_x2:
6335 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: AArch64::AESE_2ZZI_B);
6336 return;
6337 case Intrinsic::aarch64_sve_aesd_lane_x2:
6338 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: AArch64::AESD_2ZZI_B);
6339 return;
6340 case Intrinsic::aarch64_sve_aesemc_lane_x2:
6341 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: AArch64::AESEMC_2ZZI_B);
6342 return;
6343 case Intrinsic::aarch64_sve_aesdimc_lane_x2:
6344 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: AArch64::AESDIMC_2ZZI_B);
6345 return;
6346 case Intrinsic::aarch64_sve_aese_lane_x4:
6347 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: AArch64::AESE_4ZZI_B);
6348 return;
6349 case Intrinsic::aarch64_sve_aesd_lane_x4:
6350 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: AArch64::AESD_4ZZI_B);
6351 return;
6352 case Intrinsic::aarch64_sve_aesemc_lane_x4:
6353 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: AArch64::AESEMC_4ZZI_B);
6354 return;
6355 case Intrinsic::aarch64_sve_aesdimc_lane_x4:
6356 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: AArch64::AESDIMC_4ZZI_B);
6357 return;
6358 case Intrinsic::aarch64_sve_pmlal_pair_x2:
6359 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: AArch64::PMLAL_2ZZZ_Q);
6360 return;
6361 case Intrinsic::aarch64_sve_pmull_pair_x2: {
6362 SDLoc DL(Node);
6363 SmallVector<SDValue, 4> Regs(Node->ops().slice(N: 1, M: 2));
6364 SDNode *Res =
6365 CurDAG->getMachineNode(Opcode: AArch64::PMULL_2ZZZ_Q, dl: DL, VT: MVT::Untyped, Ops: Regs);
6366 SDValue SuperReg = SDValue(Res, 0);
6367 for (unsigned I = 0; I < 2; I++)
6368 ReplaceUses(F: SDValue(Node, I),
6369 T: CurDAG->getTargetExtractSubreg(SRIdx: AArch64::zsub0 + I, DL, VT,
6370 Operand: SuperReg));
6371 CurDAG->RemoveDeadNode(N: Node);
6372 return;
6373 }
6374 case Intrinsic::aarch64_sve_fscale_x4:
6375 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: AArch64::BFSCALE_4Z4Z);
6376 return;
6377 case Intrinsic::aarch64_sve_fscale_x2:
6378 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: AArch64::BFSCALE_2Z2Z);
6379 return;
6380 case Intrinsic::aarch64_sve_fmul_x4:
6381 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6382 VT: Node->getValueType(ResNo: 0),
6383 Opcodes: {AArch64::BFMUL_4Z4Z, AArch64::FMUL_4Z4Z_H, AArch64::FMUL_4Z4Z_S,
6384 AArch64::FMUL_4Z4Z_D}))
6385 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op);
6386 return;
6387 case Intrinsic::aarch64_sve_fmul_x2:
6388 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6389 VT: Node->getValueType(ResNo: 0),
6390 Opcodes: {AArch64::BFMUL_2Z2Z, AArch64::FMUL_2Z2Z_H, AArch64::FMUL_2Z2Z_S,
6391 AArch64::FMUL_2Z2Z_D}))
6392 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op);
6393 return;
6394 case Intrinsic::aarch64_sve_fcvtzs_x2:
6395 SelectCVTIntrinsic(N: Node, NumVecs: 2, Opcode: AArch64::FCVTZS_2Z2Z_StoS);
6396 return;
6397 case Intrinsic::aarch64_sve_scvtf_x2:
6398 SelectCVTIntrinsic(N: Node, NumVecs: 2, Opcode: AArch64::SCVTF_2Z2Z_StoS);
6399 return;
6400 case Intrinsic::aarch64_sve_fcvtzu_x2:
6401 SelectCVTIntrinsic(N: Node, NumVecs: 2, Opcode: AArch64::FCVTZU_2Z2Z_StoS);
6402 return;
6403 case Intrinsic::aarch64_sve_ucvtf_x2:
6404 SelectCVTIntrinsic(N: Node, NumVecs: 2, Opcode: AArch64::UCVTF_2Z2Z_StoS);
6405 return;
6406 case Intrinsic::aarch64_sve_fcvtzs_x4:
6407 SelectCVTIntrinsic(N: Node, NumVecs: 4, Opcode: AArch64::FCVTZS_4Z4Z_StoS);
6408 return;
6409 case Intrinsic::aarch64_sve_scvtf_x4:
6410 SelectCVTIntrinsic(N: Node, NumVecs: 4, Opcode: AArch64::SCVTF_4Z4Z_StoS);
6411 return;
6412 case Intrinsic::aarch64_sve_fcvtzu_x4:
6413 SelectCVTIntrinsic(N: Node, NumVecs: 4, Opcode: AArch64::FCVTZU_4Z4Z_StoS);
6414 return;
6415 case Intrinsic::aarch64_sve_ucvtf_x4:
6416 SelectCVTIntrinsic(N: Node, NumVecs: 4, Opcode: AArch64::UCVTF_4Z4Z_StoS);
6417 return;
6418 case Intrinsic::aarch64_sve_fcvt_widen_x2:
6419 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, IsTupleInput: false, Opc: AArch64::FCVT_2ZZ_H_S);
6420 return;
6421 case Intrinsic::aarch64_sve_fcvtl_widen_x2:
6422 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, IsTupleInput: false, Opc: AArch64::FCVTL_2ZZ_H_S);
6423 return;
6424 case Intrinsic::aarch64_sve_sclamp_single_x2:
6425 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6426 VT: Node->getValueType(ResNo: 0),
6427 Opcodes: {AArch64::SCLAMP_VG2_2Z2Z_B, AArch64::SCLAMP_VG2_2Z2Z_H,
6428 AArch64::SCLAMP_VG2_2Z2Z_S, AArch64::SCLAMP_VG2_2Z2Z_D}))
6429 SelectClamp(N: Node, NumVecs: 2, Op);
6430 return;
6431 case Intrinsic::aarch64_sve_uclamp_single_x2:
6432 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6433 VT: Node->getValueType(ResNo: 0),
6434 Opcodes: {AArch64::UCLAMP_VG2_2Z2Z_B, AArch64::UCLAMP_VG2_2Z2Z_H,
6435 AArch64::UCLAMP_VG2_2Z2Z_S, AArch64::UCLAMP_VG2_2Z2Z_D}))
6436 SelectClamp(N: Node, NumVecs: 2, Op);
6437 return;
6438 case Intrinsic::aarch64_sve_fclamp_single_x2:
6439 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6440 VT: Node->getValueType(ResNo: 0),
6441 Opcodes: {0, AArch64::FCLAMP_VG2_2Z2Z_H, AArch64::FCLAMP_VG2_2Z2Z_S,
6442 AArch64::FCLAMP_VG2_2Z2Z_D}))
6443 SelectClamp(N: Node, NumVecs: 2, Op);
6444 return;
6445 case Intrinsic::aarch64_sve_bfclamp_single_x2:
6446 SelectClamp(N: Node, NumVecs: 2, Op: AArch64::BFCLAMP_VG2_2ZZZ_H);
6447 return;
6448 case Intrinsic::aarch64_sve_sclamp_single_x4:
6449 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6450 VT: Node->getValueType(ResNo: 0),
6451 Opcodes: {AArch64::SCLAMP_VG4_4Z4Z_B, AArch64::SCLAMP_VG4_4Z4Z_H,
6452 AArch64::SCLAMP_VG4_4Z4Z_S, AArch64::SCLAMP_VG4_4Z4Z_D}))
6453 SelectClamp(N: Node, NumVecs: 4, Op);
6454 return;
6455 case Intrinsic::aarch64_sve_uclamp_single_x4:
6456 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6457 VT: Node->getValueType(ResNo: 0),
6458 Opcodes: {AArch64::UCLAMP_VG4_4Z4Z_B, AArch64::UCLAMP_VG4_4Z4Z_H,
6459 AArch64::UCLAMP_VG4_4Z4Z_S, AArch64::UCLAMP_VG4_4Z4Z_D}))
6460 SelectClamp(N: Node, NumVecs: 4, Op);
6461 return;
6462 case Intrinsic::aarch64_sve_fclamp_single_x4:
6463 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::FP>(
6464 VT: Node->getValueType(ResNo: 0),
6465 Opcodes: {0, AArch64::FCLAMP_VG4_4Z4Z_H, AArch64::FCLAMP_VG4_4Z4Z_S,
6466 AArch64::FCLAMP_VG4_4Z4Z_D}))
6467 SelectClamp(N: Node, NumVecs: 4, Op);
6468 return;
6469 case Intrinsic::aarch64_sve_bfclamp_single_x4:
6470 SelectClamp(N: Node, NumVecs: 4, Op: AArch64::BFCLAMP_VG4_4ZZZ_H);
6471 return;
6472 case Intrinsic::aarch64_sve_add_single_x2:
6473 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6474 VT: Node->getValueType(ResNo: 0),
6475 Opcodes: {AArch64::ADD_VG2_2ZZ_B, AArch64::ADD_VG2_2ZZ_H,
6476 AArch64::ADD_VG2_2ZZ_S, AArch64::ADD_VG2_2ZZ_D}))
6477 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: false, Opcode: Op);
6478 return;
6479 case Intrinsic::aarch64_sve_add_single_x4:
6480 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6481 VT: Node->getValueType(ResNo: 0),
6482 Opcodes: {AArch64::ADD_VG4_4ZZ_B, AArch64::ADD_VG4_4ZZ_H,
6483 AArch64::ADD_VG4_4ZZ_S, AArch64::ADD_VG4_4ZZ_D}))
6484 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: false, Opcode: Op);
6485 return;
6486 case Intrinsic::aarch64_sve_zip_x2:
6487 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6488 VT: Node->getValueType(ResNo: 0),
6489 Opcodes: {AArch64::ZIP_VG2_2ZZZ_B, AArch64::ZIP_VG2_2ZZZ_H,
6490 AArch64::ZIP_VG2_2ZZZ_S, AArch64::ZIP_VG2_2ZZZ_D}))
6491 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false, Opc: Op);
6492 return;
6493 case Intrinsic::aarch64_sve_zipq_x2:
6494 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false,
6495 Opc: AArch64::ZIP_VG2_2ZZZ_Q);
6496 return;
6497 case Intrinsic::aarch64_sve_zip_x4:
6498 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6499 VT: Node->getValueType(ResNo: 0),
6500 Opcodes: {AArch64::ZIP_VG4_4Z4Z_B, AArch64::ZIP_VG4_4Z4Z_H,
6501 AArch64::ZIP_VG4_4Z4Z_S, AArch64::ZIP_VG4_4Z4Z_D}))
6502 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true, Opc: Op);
6503 return;
6504 case Intrinsic::aarch64_sve_zipq_x4:
6505 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true,
6506 Opc: AArch64::ZIP_VG4_4Z4Z_Q);
6507 return;
6508 case Intrinsic::aarch64_sve_uzp_x2:
6509 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6510 VT: Node->getValueType(ResNo: 0),
6511 Opcodes: {AArch64::UZP_VG2_2ZZZ_B, AArch64::UZP_VG2_2ZZZ_H,
6512 AArch64::UZP_VG2_2ZZZ_S, AArch64::UZP_VG2_2ZZZ_D}))
6513 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false, Opc: Op);
6514 return;
6515 case Intrinsic::aarch64_sve_uzpq_x2:
6516 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false,
6517 Opc: AArch64::UZP_VG2_2ZZZ_Q);
6518 return;
6519 case Intrinsic::aarch64_sve_uzp_x4:
6520 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6521 VT: Node->getValueType(ResNo: 0),
6522 Opcodes: {AArch64::UZP_VG4_4Z4Z_B, AArch64::UZP_VG4_4Z4Z_H,
6523 AArch64::UZP_VG4_4Z4Z_S, AArch64::UZP_VG4_4Z4Z_D}))
6524 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true, Opc: Op);
6525 return;
6526 case Intrinsic::aarch64_sve_uzpq_x4:
6527 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true,
6528 Opc: AArch64::UZP_VG4_4Z4Z_Q);
6529 return;
6530 case Intrinsic::aarch64_sve_sel_x2:
6531 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6532 VT: Node->getValueType(ResNo: 0),
6533 Opcodes: {AArch64::SEL_VG2_2ZC2Z2Z_B, AArch64::SEL_VG2_2ZC2Z2Z_H,
6534 AArch64::SEL_VG2_2ZC2Z2Z_S, AArch64::SEL_VG2_2ZC2Z2Z_D}))
6535 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 2, IsZmMulti: true, Opcode: Op, /*HasPred=*/true);
6536 return;
6537 case Intrinsic::aarch64_sve_sel_x4:
6538 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6539 VT: Node->getValueType(ResNo: 0),
6540 Opcodes: {AArch64::SEL_VG4_4ZC4Z4Z_B, AArch64::SEL_VG4_4ZC4Z4Z_H,
6541 AArch64::SEL_VG4_4ZC4Z4Z_S, AArch64::SEL_VG4_4ZC4Z4Z_D}))
6542 SelectDestructiveMultiIntrinsic(N: Node, NumVecs: 4, IsZmMulti: true, Opcode: Op, /*HasPred=*/true);
6543 return;
6544 case Intrinsic::aarch64_sve_frinta_x2:
6545 SelectFrintFromVT(N: Node, NumVecs: 2, Opcode: AArch64::FRINTA_2Z2Z_S);
6546 return;
6547 case Intrinsic::aarch64_sve_frinta_x4:
6548 SelectFrintFromVT(N: Node, NumVecs: 4, Opcode: AArch64::FRINTA_4Z4Z_S);
6549 return;
6550 case Intrinsic::aarch64_sve_frintm_x2:
6551 SelectFrintFromVT(N: Node, NumVecs: 2, Opcode: AArch64::FRINTM_2Z2Z_S);
6552 return;
6553 case Intrinsic::aarch64_sve_frintm_x4:
6554 SelectFrintFromVT(N: Node, NumVecs: 4, Opcode: AArch64::FRINTM_4Z4Z_S);
6555 return;
6556 case Intrinsic::aarch64_sve_frintn_x2:
6557 SelectFrintFromVT(N: Node, NumVecs: 2, Opcode: AArch64::FRINTN_2Z2Z_S);
6558 return;
6559 case Intrinsic::aarch64_sve_frintn_x4:
6560 SelectFrintFromVT(N: Node, NumVecs: 4, Opcode: AArch64::FRINTN_4Z4Z_S);
6561 return;
6562 case Intrinsic::aarch64_sve_frintp_x2:
6563 SelectFrintFromVT(N: Node, NumVecs: 2, Opcode: AArch64::FRINTP_2Z2Z_S);
6564 return;
6565 case Intrinsic::aarch64_sve_frintp_x4:
6566 SelectFrintFromVT(N: Node, NumVecs: 4, Opcode: AArch64::FRINTP_4Z4Z_S);
6567 return;
6568 case Intrinsic::aarch64_sve_sunpk_x2:
6569 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6570 VT: Node->getValueType(ResNo: 0),
6571 Opcodes: {0, AArch64::SUNPK_VG2_2ZZ_H, AArch64::SUNPK_VG2_2ZZ_S,
6572 AArch64::SUNPK_VG2_2ZZ_D}))
6573 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false, Opc: Op);
6574 return;
6575 case Intrinsic::aarch64_sve_uunpk_x2:
6576 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6577 VT: Node->getValueType(ResNo: 0),
6578 Opcodes: {0, AArch64::UUNPK_VG2_2ZZ_H, AArch64::UUNPK_VG2_2ZZ_S,
6579 AArch64::UUNPK_VG2_2ZZ_D}))
6580 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 2, /*IsTupleInput=*/false, Opc: Op);
6581 return;
6582 case Intrinsic::aarch64_sve_sunpk_x4:
6583 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6584 VT: Node->getValueType(ResNo: 0),
6585 Opcodes: {0, AArch64::SUNPK_VG4_4Z2Z_H, AArch64::SUNPK_VG4_4Z2Z_S,
6586 AArch64::SUNPK_VG4_4Z2Z_D}))
6587 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true, Opc: Op);
6588 return;
6589 case Intrinsic::aarch64_sve_uunpk_x4:
6590 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::Int>(
6591 VT: Node->getValueType(ResNo: 0),
6592 Opcodes: {0, AArch64::UUNPK_VG4_4Z2Z_H, AArch64::UUNPK_VG4_4Z2Z_S,
6593 AArch64::UUNPK_VG4_4Z2Z_D}))
6594 SelectUnaryMultiIntrinsic(N: Node, NumOutVecs: 4, /*IsTupleInput=*/true, Opc: Op);
6595 return;
6596 case Intrinsic::aarch64_sve_pext_x2: {
6597 if (auto Op = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
6598 VT: Node->getValueType(ResNo: 0),
6599 Opcodes: {AArch64::PEXT_2PCI_B, AArch64::PEXT_2PCI_H, AArch64::PEXT_2PCI_S,
6600 AArch64::PEXT_2PCI_D}))
6601 SelectPExtPair(N: Node, Opc: Op);
6602 return;
6603 }
6604 }
6605 break;
6606 }
6607 case ISD::INTRINSIC_VOID: {
6608 unsigned IntNo = Node->getConstantOperandVal(Num: 1);
6609 if (Node->getNumOperands() >= 3)
6610 VT = Node->getOperand(Num: 2)->getValueType(ResNo: 0);
6611 switch (IntNo) {
6612 default:
6613 break;
6614 case Intrinsic::aarch64_neon_st1x2: {
6615 if (VT == MVT::v8i8) {
6616 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov8b);
6617 return;
6618 } else if (VT == MVT::v16i8) {
6619 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov16b);
6620 return;
6621 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6622 VT == MVT::v4bf16) {
6623 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov4h);
6624 return;
6625 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6626 VT == MVT::v8bf16) {
6627 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov8h);
6628 return;
6629 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6630 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov2s);
6631 return;
6632 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6633 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov4s);
6634 return;
6635 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6636 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov2d);
6637 return;
6638 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6639 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov1d);
6640 return;
6641 }
6642 break;
6643 }
6644 case Intrinsic::aarch64_neon_st1x3: {
6645 if (VT == MVT::v8i8) {
6646 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev8b);
6647 return;
6648 } else if (VT == MVT::v16i8) {
6649 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev16b);
6650 return;
6651 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6652 VT == MVT::v4bf16) {
6653 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev4h);
6654 return;
6655 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6656 VT == MVT::v8bf16) {
6657 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev8h);
6658 return;
6659 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6660 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev2s);
6661 return;
6662 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6663 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev4s);
6664 return;
6665 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6666 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev2d);
6667 return;
6668 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6669 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev1d);
6670 return;
6671 }
6672 break;
6673 }
6674 case Intrinsic::aarch64_neon_st1x4: {
6675 if (VT == MVT::v8i8) {
6676 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv8b);
6677 return;
6678 } else if (VT == MVT::v16i8) {
6679 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv16b);
6680 return;
6681 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6682 VT == MVT::v4bf16) {
6683 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv4h);
6684 return;
6685 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6686 VT == MVT::v8bf16) {
6687 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv8h);
6688 return;
6689 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6690 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv2s);
6691 return;
6692 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6693 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv4s);
6694 return;
6695 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6696 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv2d);
6697 return;
6698 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6699 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv1d);
6700 return;
6701 }
6702 break;
6703 }
6704 case Intrinsic::aarch64_neon_st2: {
6705 if (VT == MVT::v8i8) {
6706 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov8b);
6707 return;
6708 } else if (VT == MVT::v16i8) {
6709 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov16b);
6710 return;
6711 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6712 VT == MVT::v4bf16) {
6713 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov4h);
6714 return;
6715 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6716 VT == MVT::v8bf16) {
6717 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov8h);
6718 return;
6719 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6720 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov2s);
6721 return;
6722 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6723 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov4s);
6724 return;
6725 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6726 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov2d);
6727 return;
6728 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6729 SelectStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov1d);
6730 return;
6731 }
6732 break;
6733 }
6734 case Intrinsic::aarch64_neon_st3: {
6735 if (VT == MVT::v8i8) {
6736 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev8b);
6737 return;
6738 } else if (VT == MVT::v16i8) {
6739 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev16b);
6740 return;
6741 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6742 VT == MVT::v4bf16) {
6743 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev4h);
6744 return;
6745 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6746 VT == MVT::v8bf16) {
6747 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev8h);
6748 return;
6749 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6750 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev2s);
6751 return;
6752 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6753 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev4s);
6754 return;
6755 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6756 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev2d);
6757 return;
6758 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6759 SelectStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev1d);
6760 return;
6761 }
6762 break;
6763 }
6764 case Intrinsic::aarch64_neon_st4: {
6765 if (VT == MVT::v8i8) {
6766 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv8b);
6767 return;
6768 } else if (VT == MVT::v16i8) {
6769 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv16b);
6770 return;
6771 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
6772 VT == MVT::v4bf16) {
6773 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv4h);
6774 return;
6775 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
6776 VT == MVT::v8bf16) {
6777 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv8h);
6778 return;
6779 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6780 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv2s);
6781 return;
6782 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6783 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv4s);
6784 return;
6785 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6786 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv2d);
6787 return;
6788 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6789 SelectStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv1d);
6790 return;
6791 }
6792 break;
6793 }
6794 case Intrinsic::aarch64_neon_st2lane: {
6795 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
6796 SelectStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i8);
6797 return;
6798 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
6799 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
6800 SelectStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i16);
6801 return;
6802 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
6803 VT == MVT::v2f32) {
6804 SelectStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i32);
6805 return;
6806 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
6807 VT == MVT::v1f64) {
6808 SelectStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i64);
6809 return;
6810 }
6811 break;
6812 }
6813 case Intrinsic::aarch64_neon_st3lane: {
6814 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
6815 SelectStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i8);
6816 return;
6817 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
6818 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
6819 SelectStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i16);
6820 return;
6821 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
6822 VT == MVT::v2f32) {
6823 SelectStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i32);
6824 return;
6825 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
6826 VT == MVT::v1f64) {
6827 SelectStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i64);
6828 return;
6829 }
6830 break;
6831 }
6832 case Intrinsic::aarch64_neon_st4lane: {
6833 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
6834 SelectStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i8);
6835 return;
6836 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
6837 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
6838 SelectStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i16);
6839 return;
6840 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
6841 VT == MVT::v2f32) {
6842 SelectStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i32);
6843 return;
6844 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
6845 VT == MVT::v1f64) {
6846 SelectStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i64);
6847 return;
6848 }
6849 break;
6850 }
6851 case Intrinsic::aarch64_sve_st2q: {
6852 SelectPredicatedStore(N: Node, NumVecs: 2, Scale: 4, Opc_rr: AArch64::ST2Q, Opc_ri: AArch64::ST2Q_IMM);
6853 return;
6854 }
6855 case Intrinsic::aarch64_sve_st3q: {
6856 SelectPredicatedStore(N: Node, NumVecs: 3, Scale: 4, Opc_rr: AArch64::ST3Q, Opc_ri: AArch64::ST3Q_IMM);
6857 return;
6858 }
6859 case Intrinsic::aarch64_sve_st4q: {
6860 SelectPredicatedStore(N: Node, NumVecs: 4, Scale: 4, Opc_rr: AArch64::ST4Q, Opc_ri: AArch64::ST4Q_IMM);
6861 return;
6862 }
6863 case Intrinsic::aarch64_sve_st2: {
6864 if (VT == MVT::nxv16i8) {
6865 SelectPredicatedStore(N: Node, NumVecs: 2, Scale: 0, Opc_rr: AArch64::ST2B, Opc_ri: AArch64::ST2B_IMM);
6866 return;
6867 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
6868 VT == MVT::nxv8bf16) {
6869 SelectPredicatedStore(N: Node, NumVecs: 2, Scale: 1, Opc_rr: AArch64::ST2H, Opc_ri: AArch64::ST2H_IMM);
6870 return;
6871 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
6872 SelectPredicatedStore(N: Node, NumVecs: 2, Scale: 2, Opc_rr: AArch64::ST2W, Opc_ri: AArch64::ST2W_IMM);
6873 return;
6874 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
6875 SelectPredicatedStore(N: Node, NumVecs: 2, Scale: 3, Opc_rr: AArch64::ST2D, Opc_ri: AArch64::ST2D_IMM);
6876 return;
6877 }
6878 break;
6879 }
6880 case Intrinsic::aarch64_sve_st3: {
6881 if (VT == MVT::nxv16i8) {
6882 SelectPredicatedStore(N: Node, NumVecs: 3, Scale: 0, Opc_rr: AArch64::ST3B, Opc_ri: AArch64::ST3B_IMM);
6883 return;
6884 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
6885 VT == MVT::nxv8bf16) {
6886 SelectPredicatedStore(N: Node, NumVecs: 3, Scale: 1, Opc_rr: AArch64::ST3H, Opc_ri: AArch64::ST3H_IMM);
6887 return;
6888 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
6889 SelectPredicatedStore(N: Node, NumVecs: 3, Scale: 2, Opc_rr: AArch64::ST3W, Opc_ri: AArch64::ST3W_IMM);
6890 return;
6891 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
6892 SelectPredicatedStore(N: Node, NumVecs: 3, Scale: 3, Opc_rr: AArch64::ST3D, Opc_ri: AArch64::ST3D_IMM);
6893 return;
6894 }
6895 break;
6896 }
6897 case Intrinsic::aarch64_sve_st4: {
6898 if (VT == MVT::nxv16i8) {
6899 SelectPredicatedStore(N: Node, NumVecs: 4, Scale: 0, Opc_rr: AArch64::ST4B, Opc_ri: AArch64::ST4B_IMM);
6900 return;
6901 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
6902 VT == MVT::nxv8bf16) {
6903 SelectPredicatedStore(N: Node, NumVecs: 4, Scale: 1, Opc_rr: AArch64::ST4H, Opc_ri: AArch64::ST4H_IMM);
6904 return;
6905 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
6906 SelectPredicatedStore(N: Node, NumVecs: 4, Scale: 2, Opc_rr: AArch64::ST4W, Opc_ri: AArch64::ST4W_IMM);
6907 return;
6908 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
6909 SelectPredicatedStore(N: Node, NumVecs: 4, Scale: 3, Opc_rr: AArch64::ST4D, Opc_ri: AArch64::ST4D_IMM);
6910 return;
6911 }
6912 break;
6913 }
6914 }
6915 break;
6916 }
6917 case AArch64ISD::LD2post: {
6918 if (VT == MVT::v8i8) {
6919 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov8b_POST, SubRegIdx: AArch64::dsub0);
6920 return;
6921 } else if (VT == MVT::v16i8) {
6922 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov16b_POST, SubRegIdx: AArch64::qsub0);
6923 return;
6924 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6925 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov4h_POST, SubRegIdx: AArch64::dsub0);
6926 return;
6927 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6928 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov8h_POST, SubRegIdx: AArch64::qsub0);
6929 return;
6930 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6931 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov2s_POST, SubRegIdx: AArch64::dsub0);
6932 return;
6933 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6934 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov4s_POST, SubRegIdx: AArch64::qsub0);
6935 return;
6936 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6937 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov1d_POST, SubRegIdx: AArch64::dsub0);
6938 return;
6939 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6940 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Twov2d_POST, SubRegIdx: AArch64::qsub0);
6941 return;
6942 }
6943 break;
6944 }
6945 case AArch64ISD::LD3post: {
6946 if (VT == MVT::v8i8) {
6947 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev8b_POST, SubRegIdx: AArch64::dsub0);
6948 return;
6949 } else if (VT == MVT::v16i8) {
6950 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev16b_POST, SubRegIdx: AArch64::qsub0);
6951 return;
6952 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6953 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev4h_POST, SubRegIdx: AArch64::dsub0);
6954 return;
6955 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6956 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev8h_POST, SubRegIdx: AArch64::qsub0);
6957 return;
6958 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6959 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev2s_POST, SubRegIdx: AArch64::dsub0);
6960 return;
6961 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6962 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev4s_POST, SubRegIdx: AArch64::qsub0);
6963 return;
6964 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6965 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev1d_POST, SubRegIdx: AArch64::dsub0);
6966 return;
6967 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6968 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Threev2d_POST, SubRegIdx: AArch64::qsub0);
6969 return;
6970 }
6971 break;
6972 }
6973 case AArch64ISD::LD4post: {
6974 if (VT == MVT::v8i8) {
6975 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv8b_POST, SubRegIdx: AArch64::dsub0);
6976 return;
6977 } else if (VT == MVT::v16i8) {
6978 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv16b_POST, SubRegIdx: AArch64::qsub0);
6979 return;
6980 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
6981 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv4h_POST, SubRegIdx: AArch64::dsub0);
6982 return;
6983 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
6984 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv8h_POST, SubRegIdx: AArch64::qsub0);
6985 return;
6986 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
6987 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv2s_POST, SubRegIdx: AArch64::dsub0);
6988 return;
6989 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
6990 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv4s_POST, SubRegIdx: AArch64::qsub0);
6991 return;
6992 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
6993 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv1d_POST, SubRegIdx: AArch64::dsub0);
6994 return;
6995 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
6996 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Fourv2d_POST, SubRegIdx: AArch64::qsub0);
6997 return;
6998 }
6999 break;
7000 }
7001 case AArch64ISD::LD1x2post: {
7002 if (VT == MVT::v8i8) {
7003 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov8b_POST, SubRegIdx: AArch64::dsub0);
7004 return;
7005 } else if (VT == MVT::v16i8) {
7006 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov16b_POST, SubRegIdx: AArch64::qsub0);
7007 return;
7008 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7009 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov4h_POST, SubRegIdx: AArch64::dsub0);
7010 return;
7011 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7012 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov8h_POST, SubRegIdx: AArch64::qsub0);
7013 return;
7014 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7015 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov2s_POST, SubRegIdx: AArch64::dsub0);
7016 return;
7017 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7018 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov4s_POST, SubRegIdx: AArch64::qsub0);
7019 return;
7020 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7021 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov1d_POST, SubRegIdx: AArch64::dsub0);
7022 return;
7023 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7024 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD1Twov2d_POST, SubRegIdx: AArch64::qsub0);
7025 return;
7026 }
7027 break;
7028 }
7029 case AArch64ISD::LD1x3post: {
7030 if (VT == MVT::v8i8) {
7031 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev8b_POST, SubRegIdx: AArch64::dsub0);
7032 return;
7033 } else if (VT == MVT::v16i8) {
7034 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev16b_POST, SubRegIdx: AArch64::qsub0);
7035 return;
7036 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7037 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev4h_POST, SubRegIdx: AArch64::dsub0);
7038 return;
7039 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7040 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev8h_POST, SubRegIdx: AArch64::qsub0);
7041 return;
7042 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7043 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev2s_POST, SubRegIdx: AArch64::dsub0);
7044 return;
7045 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7046 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev4s_POST, SubRegIdx: AArch64::qsub0);
7047 return;
7048 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7049 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev1d_POST, SubRegIdx: AArch64::dsub0);
7050 return;
7051 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7052 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD1Threev2d_POST, SubRegIdx: AArch64::qsub0);
7053 return;
7054 }
7055 break;
7056 }
7057 case AArch64ISD::LD1x4post: {
7058 if (VT == MVT::v8i8) {
7059 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv8b_POST, SubRegIdx: AArch64::dsub0);
7060 return;
7061 } else if (VT == MVT::v16i8) {
7062 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv16b_POST, SubRegIdx: AArch64::qsub0);
7063 return;
7064 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7065 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv4h_POST, SubRegIdx: AArch64::dsub0);
7066 return;
7067 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7068 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv8h_POST, SubRegIdx: AArch64::qsub0);
7069 return;
7070 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7071 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv2s_POST, SubRegIdx: AArch64::dsub0);
7072 return;
7073 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7074 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv4s_POST, SubRegIdx: AArch64::qsub0);
7075 return;
7076 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7077 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv1d_POST, SubRegIdx: AArch64::dsub0);
7078 return;
7079 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7080 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD1Fourv2d_POST, SubRegIdx: AArch64::qsub0);
7081 return;
7082 }
7083 break;
7084 }
7085 case AArch64ISD::LD1DUPpost: {
7086 if (VT == MVT::v8i8) {
7087 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv8b_POST, SubRegIdx: AArch64::dsub0);
7088 return;
7089 } else if (VT == MVT::v16i8) {
7090 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv16b_POST, SubRegIdx: AArch64::qsub0);
7091 return;
7092 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7093 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv4h_POST, SubRegIdx: AArch64::dsub0);
7094 return;
7095 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7096 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv8h_POST, SubRegIdx: AArch64::qsub0);
7097 return;
7098 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7099 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv2s_POST, SubRegIdx: AArch64::dsub0);
7100 return;
7101 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7102 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv4s_POST, SubRegIdx: AArch64::qsub0);
7103 return;
7104 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7105 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv1d_POST, SubRegIdx: AArch64::dsub0);
7106 return;
7107 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7108 SelectPostLoad(N: Node, NumVecs: 1, Opc: AArch64::LD1Rv2d_POST, SubRegIdx: AArch64::qsub0);
7109 return;
7110 }
7111 break;
7112 }
7113 case AArch64ISD::LD2DUPpost: {
7114 if (VT == MVT::v8i8) {
7115 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv8b_POST, SubRegIdx: AArch64::dsub0);
7116 return;
7117 } else if (VT == MVT::v16i8) {
7118 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv16b_POST, SubRegIdx: AArch64::qsub0);
7119 return;
7120 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7121 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv4h_POST, SubRegIdx: AArch64::dsub0);
7122 return;
7123 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7124 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv8h_POST, SubRegIdx: AArch64::qsub0);
7125 return;
7126 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7127 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv2s_POST, SubRegIdx: AArch64::dsub0);
7128 return;
7129 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7130 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv4s_POST, SubRegIdx: AArch64::qsub0);
7131 return;
7132 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7133 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv1d_POST, SubRegIdx: AArch64::dsub0);
7134 return;
7135 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7136 SelectPostLoad(N: Node, NumVecs: 2, Opc: AArch64::LD2Rv2d_POST, SubRegIdx: AArch64::qsub0);
7137 return;
7138 }
7139 break;
7140 }
7141 case AArch64ISD::LD3DUPpost: {
7142 if (VT == MVT::v8i8) {
7143 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv8b_POST, SubRegIdx: AArch64::dsub0);
7144 return;
7145 } else if (VT == MVT::v16i8) {
7146 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv16b_POST, SubRegIdx: AArch64::qsub0);
7147 return;
7148 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7149 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv4h_POST, SubRegIdx: AArch64::dsub0);
7150 return;
7151 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7152 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv8h_POST, SubRegIdx: AArch64::qsub0);
7153 return;
7154 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7155 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv2s_POST, SubRegIdx: AArch64::dsub0);
7156 return;
7157 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7158 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv4s_POST, SubRegIdx: AArch64::qsub0);
7159 return;
7160 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7161 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv1d_POST, SubRegIdx: AArch64::dsub0);
7162 return;
7163 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7164 SelectPostLoad(N: Node, NumVecs: 3, Opc: AArch64::LD3Rv2d_POST, SubRegIdx: AArch64::qsub0);
7165 return;
7166 }
7167 break;
7168 }
7169 case AArch64ISD::LD4DUPpost: {
7170 if (VT == MVT::v8i8) {
7171 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv8b_POST, SubRegIdx: AArch64::dsub0);
7172 return;
7173 } else if (VT == MVT::v16i8) {
7174 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv16b_POST, SubRegIdx: AArch64::qsub0);
7175 return;
7176 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7177 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv4h_POST, SubRegIdx: AArch64::dsub0);
7178 return;
7179 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7180 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv8h_POST, SubRegIdx: AArch64::qsub0);
7181 return;
7182 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7183 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv2s_POST, SubRegIdx: AArch64::dsub0);
7184 return;
7185 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7186 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv4s_POST, SubRegIdx: AArch64::qsub0);
7187 return;
7188 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7189 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv1d_POST, SubRegIdx: AArch64::dsub0);
7190 return;
7191 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7192 SelectPostLoad(N: Node, NumVecs: 4, Opc: AArch64::LD4Rv2d_POST, SubRegIdx: AArch64::qsub0);
7193 return;
7194 }
7195 break;
7196 }
7197 case AArch64ISD::LD1LANEpost: {
7198 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
7199 SelectPostLoadLane(N: Node, NumVecs: 1, Opc: AArch64::LD1i8_POST);
7200 return;
7201 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
7202 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
7203 SelectPostLoadLane(N: Node, NumVecs: 1, Opc: AArch64::LD1i16_POST);
7204 return;
7205 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
7206 VT == MVT::v2f32) {
7207 SelectPostLoadLane(N: Node, NumVecs: 1, Opc: AArch64::LD1i32_POST);
7208 return;
7209 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
7210 VT == MVT::v1f64) {
7211 SelectPostLoadLane(N: Node, NumVecs: 1, Opc: AArch64::LD1i64_POST);
7212 return;
7213 }
7214 break;
7215 }
7216 case AArch64ISD::LD2LANEpost: {
7217 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
7218 SelectPostLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i8_POST);
7219 return;
7220 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
7221 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
7222 SelectPostLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i16_POST);
7223 return;
7224 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
7225 VT == MVT::v2f32) {
7226 SelectPostLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i32_POST);
7227 return;
7228 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
7229 VT == MVT::v1f64) {
7230 SelectPostLoadLane(N: Node, NumVecs: 2, Opc: AArch64::LD2i64_POST);
7231 return;
7232 }
7233 break;
7234 }
7235 case AArch64ISD::LD3LANEpost: {
7236 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
7237 SelectPostLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i8_POST);
7238 return;
7239 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
7240 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
7241 SelectPostLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i16_POST);
7242 return;
7243 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
7244 VT == MVT::v2f32) {
7245 SelectPostLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i32_POST);
7246 return;
7247 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
7248 VT == MVT::v1f64) {
7249 SelectPostLoadLane(N: Node, NumVecs: 3, Opc: AArch64::LD3i64_POST);
7250 return;
7251 }
7252 break;
7253 }
7254 case AArch64ISD::LD4LANEpost: {
7255 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
7256 SelectPostLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i8_POST);
7257 return;
7258 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
7259 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
7260 SelectPostLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i16_POST);
7261 return;
7262 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
7263 VT == MVT::v2f32) {
7264 SelectPostLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i32_POST);
7265 return;
7266 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
7267 VT == MVT::v1f64) {
7268 SelectPostLoadLane(N: Node, NumVecs: 4, Opc: AArch64::LD4i64_POST);
7269 return;
7270 }
7271 break;
7272 }
7273 case AArch64ISD::ST2post: {
7274 VT = Node->getOperand(Num: 1).getValueType();
7275 if (VT == MVT::v8i8) {
7276 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov8b_POST);
7277 return;
7278 } else if (VT == MVT::v16i8) {
7279 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov16b_POST);
7280 return;
7281 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7282 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov4h_POST);
7283 return;
7284 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7285 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov8h_POST);
7286 return;
7287 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7288 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov2s_POST);
7289 return;
7290 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7291 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov4s_POST);
7292 return;
7293 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7294 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST2Twov2d_POST);
7295 return;
7296 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7297 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov1d_POST);
7298 return;
7299 }
7300 break;
7301 }
7302 case AArch64ISD::ST3post: {
7303 VT = Node->getOperand(Num: 1).getValueType();
7304 if (VT == MVT::v8i8) {
7305 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev8b_POST);
7306 return;
7307 } else if (VT == MVT::v16i8) {
7308 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev16b_POST);
7309 return;
7310 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7311 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev4h_POST);
7312 return;
7313 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7314 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev8h_POST);
7315 return;
7316 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7317 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev2s_POST);
7318 return;
7319 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7320 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev4s_POST);
7321 return;
7322 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7323 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST3Threev2d_POST);
7324 return;
7325 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7326 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev1d_POST);
7327 return;
7328 }
7329 break;
7330 }
7331 case AArch64ISD::ST4post: {
7332 VT = Node->getOperand(Num: 1).getValueType();
7333 if (VT == MVT::v8i8) {
7334 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv8b_POST);
7335 return;
7336 } else if (VT == MVT::v16i8) {
7337 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv16b_POST);
7338 return;
7339 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7340 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv4h_POST);
7341 return;
7342 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7343 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv8h_POST);
7344 return;
7345 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7346 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv2s_POST);
7347 return;
7348 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7349 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv4s_POST);
7350 return;
7351 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7352 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST4Fourv2d_POST);
7353 return;
7354 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7355 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv1d_POST);
7356 return;
7357 }
7358 break;
7359 }
7360 case AArch64ISD::ST1x2post: {
7361 VT = Node->getOperand(Num: 1).getValueType();
7362 if (VT == MVT::v8i8) {
7363 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov8b_POST);
7364 return;
7365 } else if (VT == MVT::v16i8) {
7366 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov16b_POST);
7367 return;
7368 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7369 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov4h_POST);
7370 return;
7371 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7372 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov8h_POST);
7373 return;
7374 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7375 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov2s_POST);
7376 return;
7377 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7378 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov4s_POST);
7379 return;
7380 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7381 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov1d_POST);
7382 return;
7383 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7384 SelectPostStore(N: Node, NumVecs: 2, Opc: AArch64::ST1Twov2d_POST);
7385 return;
7386 }
7387 break;
7388 }
7389 case AArch64ISD::ST1x3post: {
7390 VT = Node->getOperand(Num: 1).getValueType();
7391 if (VT == MVT::v8i8) {
7392 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev8b_POST);
7393 return;
7394 } else if (VT == MVT::v16i8) {
7395 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev16b_POST);
7396 return;
7397 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7398 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev4h_POST);
7399 return;
7400 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16 ) {
7401 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev8h_POST);
7402 return;
7403 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7404 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev2s_POST);
7405 return;
7406 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7407 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev4s_POST);
7408 return;
7409 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7410 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev1d_POST);
7411 return;
7412 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7413 SelectPostStore(N: Node, NumVecs: 3, Opc: AArch64::ST1Threev2d_POST);
7414 return;
7415 }
7416 break;
7417 }
7418 case AArch64ISD::ST1x4post: {
7419 VT = Node->getOperand(Num: 1).getValueType();
7420 if (VT == MVT::v8i8) {
7421 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv8b_POST);
7422 return;
7423 } else if (VT == MVT::v16i8) {
7424 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv16b_POST);
7425 return;
7426 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
7427 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv4h_POST);
7428 return;
7429 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
7430 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv8h_POST);
7431 return;
7432 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
7433 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv2s_POST);
7434 return;
7435 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
7436 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv4s_POST);
7437 return;
7438 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
7439 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv1d_POST);
7440 return;
7441 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
7442 SelectPostStore(N: Node, NumVecs: 4, Opc: AArch64::ST1Fourv2d_POST);
7443 return;
7444 }
7445 break;
7446 }
7447 case AArch64ISD::ST2LANEpost: {
7448 VT = Node->getOperand(Num: 1).getValueType();
7449 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
7450 SelectPostStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i8_POST);
7451 return;
7452 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
7453 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
7454 SelectPostStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i16_POST);
7455 return;
7456 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
7457 VT == MVT::v2f32) {
7458 SelectPostStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i32_POST);
7459 return;
7460 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
7461 VT == MVT::v1f64) {
7462 SelectPostStoreLane(N: Node, NumVecs: 2, Opc: AArch64::ST2i64_POST);
7463 return;
7464 }
7465 break;
7466 }
7467 case AArch64ISD::ST3LANEpost: {
7468 VT = Node->getOperand(Num: 1).getValueType();
7469 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
7470 SelectPostStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i8_POST);
7471 return;
7472 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
7473 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
7474 SelectPostStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i16_POST);
7475 return;
7476 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
7477 VT == MVT::v2f32) {
7478 SelectPostStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i32_POST);
7479 return;
7480 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
7481 VT == MVT::v1f64) {
7482 SelectPostStoreLane(N: Node, NumVecs: 3, Opc: AArch64::ST3i64_POST);
7483 return;
7484 }
7485 break;
7486 }
7487 case AArch64ISD::ST4LANEpost: {
7488 VT = Node->getOperand(Num: 1).getValueType();
7489 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
7490 SelectPostStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i8_POST);
7491 return;
7492 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
7493 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
7494 SelectPostStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i16_POST);
7495 return;
7496 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
7497 VT == MVT::v2f32) {
7498 SelectPostStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i32_POST);
7499 return;
7500 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
7501 VT == MVT::v1f64) {
7502 SelectPostStoreLane(N: Node, NumVecs: 4, Opc: AArch64::ST4i64_POST);
7503 return;
7504 }
7505 break;
7506 }
7507 }
7508
7509 // Select the default instruction
7510 SelectCode(N: Node);
7511}
7512
7513/// createAArch64ISelDag - This pass converts a legalized DAG into a
7514/// AArch64-specific DAG, ready for instruction scheduling.
7515FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
7516 CodeGenOptLevel OptLevel) {
7517 return new AArch64DAGToDAGISelLegacy(TM, OptLevel);
7518}
7519
7520/// When \p PredVT is a scalable vector predicate in the form
7521/// MVT::nx<M>xi1, it builds the correspondent scalable vector of
7522/// integers MVT::nx<M>xi<bits> s.t. M x bits = 128. When targeting
7523/// structured vectors (NumVec >1), the output data type is
7524/// MVT::nx<M*NumVec>xi<bits> s.t. M x bits = 128. If the input
7525/// PredVT is not in the form MVT::nx<M>xi1, it returns an invalid
7526/// EVT.
7527static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
7528 unsigned NumVec) {
7529 assert(NumVec > 0 && NumVec < 5 && "Invalid number of vectors.");
7530 if (!PredVT.isScalableVector() || PredVT.getVectorElementType() != MVT::i1)
7531 return EVT();
7532
7533 if (PredVT != MVT::nxv16i1 && PredVT != MVT::nxv8i1 &&
7534 PredVT != MVT::nxv4i1 && PredVT != MVT::nxv2i1)
7535 return EVT();
7536
7537 ElementCount EC = PredVT.getVectorElementCount();
7538 EVT ScalarVT =
7539 EVT::getIntegerVT(Context&: Ctx, BitWidth: AArch64::SVEBitsPerBlock / EC.getKnownMinValue());
7540 EVT MemVT = EVT::getVectorVT(Context&: Ctx, VT: ScalarVT, EC: EC * NumVec);
7541
7542 return MemVT;
7543}
7544
7545/// Return the EVT of the data associated to a memory operation in \p
7546/// Root. If such EVT cannot be retrieved, it returns an invalid EVT.
7547static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
7548 if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(Val: Root))
7549 return MemIntr->getMemoryVT();
7550
7551 if (isa<MemSDNode>(Val: Root)) {
7552 EVT MemVT = cast<MemSDNode>(Val: Root)->getMemoryVT();
7553
7554 EVT DataVT;
7555 if (auto *Load = dyn_cast<LoadSDNode>(Val: Root))
7556 DataVT = Load->getValueType(ResNo: 0);
7557 else if (auto *Load = dyn_cast<MaskedLoadSDNode>(Val: Root))
7558 DataVT = Load->getValueType(ResNo: 0);
7559 else if (auto *Store = dyn_cast<StoreSDNode>(Val: Root))
7560 DataVT = Store->getValue().getValueType();
7561 else if (auto *Store = dyn_cast<MaskedStoreSDNode>(Val: Root))
7562 DataVT = Store->getValue().getValueType();
7563 else
7564 llvm_unreachable("Unexpected MemSDNode!");
7565
7566 return DataVT.changeVectorElementType(Context&: Ctx, EltVT: MemVT.getVectorElementType());
7567 }
7568
7569 const unsigned Opcode = Root->getOpcode();
7570 // For custom ISD nodes, we have to look at them individually to extract the
7571 // type of the data moved to/from memory.
7572 switch (Opcode) {
7573 case AArch64ISD::LD1_MERGE_ZERO:
7574 case AArch64ISD::LD1S_MERGE_ZERO:
7575 case AArch64ISD::LDNF1_MERGE_ZERO:
7576 case AArch64ISD::LDNF1S_MERGE_ZERO:
7577 return cast<VTSDNode>(Val: Root->getOperand(Num: 3))->getVT();
7578 case AArch64ISD::ST1_PRED:
7579 return cast<VTSDNode>(Val: Root->getOperand(Num: 4))->getVT();
7580 default:
7581 break;
7582 }
7583
7584 if (Opcode != ISD::INTRINSIC_VOID && Opcode != ISD::INTRINSIC_W_CHAIN)
7585 return EVT();
7586
7587 switch (Root->getConstantOperandVal(Num: 1)) {
7588 default:
7589 return EVT();
7590 case Intrinsic::aarch64_sme_ldr:
7591 case Intrinsic::aarch64_sme_str:
7592 return MVT::nxv16i8;
7593 case Intrinsic::aarch64_sve_prf:
7594 // We are using an SVE prefetch intrinsic. Type must be inferred from the
7595 // width of the predicate.
7596 return getPackedVectorTypeFromPredicateType(
7597 Ctx, PredVT: Root->getOperand(Num: 2)->getValueType(ResNo: 0), /*NumVec=*/1);
7598 case Intrinsic::aarch64_sve_ld2_sret:
7599 case Intrinsic::aarch64_sve_ld2q_sret:
7600 return getPackedVectorTypeFromPredicateType(
7601 Ctx, PredVT: Root->getOperand(Num: 2)->getValueType(ResNo: 0), /*NumVec=*/2);
7602 case Intrinsic::aarch64_sve_st2q:
7603 return getPackedVectorTypeFromPredicateType(
7604 Ctx, PredVT: Root->getOperand(Num: 4)->getValueType(ResNo: 0), /*NumVec=*/2);
7605 case Intrinsic::aarch64_sve_ld3_sret:
7606 case Intrinsic::aarch64_sve_ld3q_sret:
7607 return getPackedVectorTypeFromPredicateType(
7608 Ctx, PredVT: Root->getOperand(Num: 2)->getValueType(ResNo: 0), /*NumVec=*/3);
7609 case Intrinsic::aarch64_sve_st3q:
7610 return getPackedVectorTypeFromPredicateType(
7611 Ctx, PredVT: Root->getOperand(Num: 5)->getValueType(ResNo: 0), /*NumVec=*/3);
7612 case Intrinsic::aarch64_sve_ld4_sret:
7613 case Intrinsic::aarch64_sve_ld4q_sret:
7614 return getPackedVectorTypeFromPredicateType(
7615 Ctx, PredVT: Root->getOperand(Num: 2)->getValueType(ResNo: 0), /*NumVec=*/4);
7616 case Intrinsic::aarch64_sve_st4q:
7617 return getPackedVectorTypeFromPredicateType(
7618 Ctx, PredVT: Root->getOperand(Num: 6)->getValueType(ResNo: 0), /*NumVec=*/4);
7619 case Intrinsic::aarch64_sve_ld1udq:
7620 case Intrinsic::aarch64_sve_st1dq:
7621 return EVT(MVT::nxv1i64);
7622 case Intrinsic::aarch64_sve_ld1uwq:
7623 case Intrinsic::aarch64_sve_st1wq:
7624 return EVT(MVT::nxv1i32);
7625 }
7626}
7627
7628/// SelectAddrModeIndexedSVE - Attempt selection of the addressing mode:
7629/// Base + OffImm * sizeof(MemVT) for Min >= OffImm <= Max
7630/// where Root is the memory access using N for its address.
7631template <int64_t Min, int64_t Max>
7632bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
7633 SDValue &Base,
7634 SDValue &OffImm) {
7635 const EVT MemVT = getMemVTFromNode(Ctx&: *(CurDAG->getContext()), Root);
7636 const DataLayout &DL = CurDAG->getDataLayout();
7637 const MachineFrameInfo &MFI = MF->getFrameInfo();
7638
7639 if (N.getOpcode() == ISD::FrameIndex) {
7640 int FI = cast<FrameIndexSDNode>(Val&: N)->getIndex();
7641 // We can only encode VL scaled offsets, so only fold in frame indexes
7642 // referencing SVE objects.
7643 if (MFI.hasScalableStackID(ObjectIdx: FI)) {
7644 Base = CurDAG->getTargetFrameIndex(FI, VT: TLI->getPointerTy(DL));
7645 OffImm = CurDAG->getTargetConstant(Val: 0, DL: SDLoc(N), VT: MVT::i64);
7646 return true;
7647 }
7648
7649 return false;
7650 }
7651
7652 if (MemVT == EVT())
7653 return false;
7654
7655 if (N.getOpcode() != ISD::ADD)
7656 return false;
7657
7658 SDValue VScale = N.getOperand(i: 1);
7659 int64_t MulImm = std::numeric_limits<int64_t>::max();
7660 if (VScale.getOpcode() == ISD::VSCALE) {
7661 MulImm = cast<ConstantSDNode>(Val: VScale.getOperand(i: 0))->getSExtValue();
7662 } else if (auto C = dyn_cast<ConstantSDNode>(Val&: VScale)) {
7663 int64_t ByteOffset = C->getSExtValue();
7664 const auto KnownVScale =
7665 Subtarget->getSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
7666
7667 if (!KnownVScale || ByteOffset % KnownVScale != 0)
7668 return false;
7669
7670 MulImm = ByteOffset / KnownVScale;
7671 } else
7672 return false;
7673
7674 TypeSize TS = MemVT.getSizeInBits();
7675 int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinValue()) / 8;
7676
7677 if ((MulImm % MemWidthBytes) != 0)
7678 return false;
7679
7680 int64_t Offset = MulImm / MemWidthBytes;
7681 if (Offset < Min || Offset > Max)
7682 return false;
7683
7684 Base = N.getOperand(i: 0);
7685 if (Base.getOpcode() == ISD::FrameIndex) {
7686 int FI = cast<FrameIndexSDNode>(Val&: Base)->getIndex();
7687 // We can only encode VL scaled offsets, so only fold in frame indexes
7688 // referencing SVE objects.
7689 if (MFI.hasScalableStackID(ObjectIdx: FI))
7690 Base = CurDAG->getTargetFrameIndex(FI, VT: TLI->getPointerTy(DL));
7691 }
7692
7693 OffImm = CurDAG->getTargetConstant(Val: Offset, DL: SDLoc(N), VT: MVT::i64);
7694 return true;
7695}
7696
7697/// Select register plus register addressing mode for SVE, with scaled
7698/// offset.
7699bool AArch64DAGToDAGISel::SelectSVERegRegAddrMode(SDValue N, unsigned Scale,
7700 SDValue &Base,
7701 SDValue &Offset) {
7702 if (N.getOpcode() != ISD::ADD)
7703 return false;
7704
7705 // Process an ADD node.
7706 const SDValue LHS = N.getOperand(i: 0);
7707 const SDValue RHS = N.getOperand(i: 1);
7708
7709 // 8 bit data does not come with the SHL node, so it is treated
7710 // separately.
7711 if (Scale == 0) {
7712 Base = LHS;
7713 Offset = RHS;
7714 return true;
7715 }
7716
7717 if (auto C = dyn_cast<ConstantSDNode>(Val: RHS)) {
7718 int64_t ImmOff = C->getSExtValue();
7719 unsigned Size = 1 << Scale;
7720
7721 // To use the reg+reg addressing mode, the immediate must be a multiple of
7722 // the vector element's byte size.
7723 if (ImmOff % Size)
7724 return false;
7725
7726 SDLoc DL(N);
7727 Base = LHS;
7728 Offset = CurDAG->getTargetConstant(Val: ImmOff >> Scale, DL, VT: MVT::i64);
7729 SDValue Ops[] = {Offset};
7730 SDNode *MI = CurDAG->getMachineNode(Opcode: AArch64::MOVi64imm, dl: DL, VT: MVT::i64, Ops);
7731 Offset = SDValue(MI, 0);
7732 return true;
7733 }
7734
7735 // Check if the RHS is a shift node with a constant.
7736 if (RHS.getOpcode() != ISD::SHL)
7737 return false;
7738
7739 const SDValue ShiftRHS = RHS.getOperand(i: 1);
7740 if (auto *C = dyn_cast<ConstantSDNode>(Val: ShiftRHS))
7741 if (C->getZExtValue() == Scale) {
7742 Base = LHS;
7743 Offset = RHS.getOperand(i: 0);
7744 return true;
7745 }
7746
7747 return false;
7748}
7749
7750bool AArch64DAGToDAGISel::SelectAllActivePredicate(SDValue N) {
7751 const AArch64TargetLowering *TLI =
7752 static_cast<const AArch64TargetLowering *>(getTargetLowering());
7753
7754 return TLI->isAllActivePredicate(DAG&: *CurDAG, N);
7755}
7756
7757bool AArch64DAGToDAGISel::SelectAnyPredicate(SDValue N) {
7758 EVT VT = N.getValueType();
7759 return VT.isScalableVector() && VT.getVectorElementType() == MVT::i1;
7760}
7761
7762bool AArch64DAGToDAGISel::SelectSMETileSlice(SDValue N, unsigned MaxSize,
7763 SDValue &Base, SDValue &Offset,
7764 unsigned Scale) {
7765 auto MatchConstantOffset = [&](SDValue CN) -> SDValue {
7766 if (auto *C = dyn_cast<ConstantSDNode>(Val&: CN)) {
7767 int64_t ImmOff = C->getSExtValue();
7768 if ((ImmOff > 0 && ImmOff <= MaxSize && (ImmOff % Scale == 0)))
7769 return CurDAG->getTargetConstant(Val: ImmOff / Scale, DL: SDLoc(N), VT: MVT::i64);
7770 }
7771 return SDValue();
7772 };
7773
7774 if (SDValue C = MatchConstantOffset(N)) {
7775 Base = CurDAG->getConstant(Val: 0, DL: SDLoc(N), VT: MVT::i32);
7776 Offset = C;
7777 return true;
7778 }
7779
7780 // Try to untangle an ADD node into a 'reg + offset'
7781 if (CurDAG->isBaseWithConstantOffset(Op: N)) {
7782 if (SDValue C = MatchConstantOffset(N.getOperand(i: 1))) {
7783 Base = N.getOperand(i: 0);
7784 Offset = C;
7785 return true;
7786 }
7787 }
7788
7789 // By default, just match reg + 0.
7790 Base = N;
7791 Offset = CurDAG->getTargetConstant(Val: 0, DL: SDLoc(N), VT: MVT::i64);
7792 return true;
7793}
7794
7795bool AArch64DAGToDAGISel::SelectCmpBranchUImm6Operand(SDNode *P, SDValue N,
7796 SDValue &Imm) {
7797 AArch64CC::CondCode CC =
7798 static_cast<AArch64CC::CondCode>(P->getConstantOperandVal(Num: 1));
7799 if (auto *CN = dyn_cast<ConstantSDNode>(Val&: N)) {
7800 // Check conservatively if the immediate fits the valid range [0, 64).
7801 // Immediate variants for GE and HS definitely need to be decremented
7802 // when lowering the pseudos later, so an immediate of 1 would become 0.
7803 // For the inverse conditions LT and LO we don't know for sure if they
7804 // will need a decrement but should the decision be made to reverse the
7805 // branch condition, we again end up with the need to decrement.
7806 // The same argument holds for LE, LS, GT and HI and possibly
7807 // incremented immediates. This can lead to slightly less optimal
7808 // codegen, e.g. we never codegen the legal case
7809 // cblt w0, #63, A
7810 // because we could end up with the illegal case
7811 // cbge w0, #64, B
7812 // should the decision to reverse the branch direction be made. For the
7813 // lower bound cases this is no problem since we can express comparisons
7814 // against 0 with either tbz/tnbz or using wzr/xzr.
7815 uint64_t LowerBound = 0, UpperBound = 64;
7816 switch (CC) {
7817 case AArch64CC::GE:
7818 case AArch64CC::HS:
7819 case AArch64CC::LT:
7820 case AArch64CC::LO:
7821 LowerBound = 1;
7822 break;
7823 case AArch64CC::LE:
7824 case AArch64CC::LS:
7825 case AArch64CC::GT:
7826 case AArch64CC::HI:
7827 UpperBound = 63;
7828 break;
7829 default:
7830 break;
7831 }
7832
7833 if (CN->getAPIntValue().uge(RHS: LowerBound) &&
7834 CN->getAPIntValue().ult(RHS: UpperBound)) {
7835 SDLoc DL(N);
7836 Imm = CurDAG->getTargetConstant(Val: CN->getZExtValue(), DL, VT: N.getValueType());
7837 return true;
7838 }
7839 }
7840
7841 return false;
7842}
7843
7844template <bool MatchCBB>
7845bool AArch64DAGToDAGISel::SelectCmpBranchExtOperand(SDValue N, SDValue &Reg,
7846 SDValue &ExtType) {
7847
7848 // Use an invalid shift-extend value to indicate we don't need to extend later
7849 if (N.getOpcode() == ISD::AssertZext || N.getOpcode() == ISD::AssertSext) {
7850 EVT Ty = cast<VTSDNode>(Val: N.getOperand(i: 1))->getVT();
7851 if (Ty != (MatchCBB ? MVT::i8 : MVT::i16))
7852 return false;
7853 Reg = N.getOperand(i: 0);
7854 ExtType = CurDAG->getSignedTargetConstant(Val: AArch64_AM::InvalidShiftExtend,
7855 DL: SDLoc(N), VT: MVT::i32);
7856 return true;
7857 }
7858
7859 AArch64_AM::ShiftExtendType ET = getExtendTypeForNode(N);
7860
7861 if ((MatchCBB && (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB)) ||
7862 (!MatchCBB && (ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH))) {
7863 Reg = N.getOperand(i: 0);
7864 ExtType =
7865 CurDAG->getTargetConstant(Val: getExtendEncoding(ET), DL: SDLoc(N), VT: MVT::i32);
7866 return true;
7867 }
7868
7869 return false;
7870}
7871
7872void AArch64DAGToDAGISel::PreprocessISelDAG() {
7873 bool MadeChange = false;
7874 for (SDNode &N : llvm::make_early_inc_range(Range: CurDAG->allnodes())) {
7875 if (N.use_empty())
7876 continue;
7877
7878 SDValue Result;
7879 switch (N.getOpcode()) {
7880 case ISD::SCALAR_TO_VECTOR: {
7881 EVT ScalarTy = N.getValueType(ResNo: 0).getVectorElementType();
7882 if ((ScalarTy == MVT::i32 || ScalarTy == MVT::i64) &&
7883 ScalarTy == N.getOperand(Num: 0).getValueType())
7884 Result = addBitcastHints(DAG&: *CurDAG, N);
7885
7886 break;
7887 }
7888 default:
7889 break;
7890 }
7891
7892 if (Result) {
7893 LLVM_DEBUG(dbgs() << "AArch64 DAG preprocessing replacing:\nOld: ");
7894 LLVM_DEBUG(N.dump(CurDAG));
7895 LLVM_DEBUG(dbgs() << "\nNew: ");
7896 LLVM_DEBUG(Result.dump(CurDAG));
7897 LLVM_DEBUG(dbgs() << "\n");
7898
7899 CurDAG->ReplaceAllUsesOfValueWith(From: SDValue(&N, 0), To: Result);
7900 MadeChange = true;
7901 }
7902 }
7903
7904 if (MadeChange)
7905 CurDAG->RemoveDeadNodes();
7906
7907 SelectionDAGISel::PreprocessISelDAG();
7908}
7909