1//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the AArch64 implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
14#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
15
16#include "AArch64.h"
17#include "AArch64RegisterInfo.h"
18#include "llvm/CodeGen/TargetInstrInfo.h"
19#include "llvm/Support/TypeSize.h"
20#include <optional>
21
22#define GET_INSTRINFO_HEADER
23#include "AArch64GenInstrInfo.inc"
24
25namespace llvm {
26
27class AArch64Subtarget;
28
// Target-specific MachineMemOperand flag: hints that pairing this load/store
// with a neighbor is unprofitable (see isLdStPairSuppressed / suppressLdStPair
// below).
static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;
// Target-specific MachineMemOperand flag: marks the load/store as a strided
// memory access (see isStridedAccess below).
static const MachineMemOperand::Flags MOStridedAccess =
    MachineMemOperand::MOTargetFlag2;

// Metadata name used to tag Falkor strided accesses.
#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"
35
// AArch64 MachineCombiner patterns
//
// Target-specific pattern identifiers consumed by the MachineCombiner hooks
// below (getMachineCombinerPatterns / genAlternativeCodeSequence). Numbering
// starts at MachineCombinerPattern::TARGET_PATTERN_START so these do not
// collide with the target-independent patterns.
enum AArch64MachineCombinerPattern : unsigned {
  // These are patterns used to reduce the length of dependence chain.
  SUBADD_OP1 = MachineCombinerPattern::TARGET_PATTERN_START,
  SUBADD_OP2,

  // These are multiply-add patterns matched by the AArch64 machine combiner.
  // The _OP1/_OP2 suffix identifies which operand of the add/sub is the
  // multiply feeding the pattern.
  MULADDW_OP1,
  MULADDW_OP2,
  MULSUBW_OP1,
  MULSUBW_OP2,
  MULADDWI_OP1,
  MULSUBWI_OP1,
  MULADDX_OP1,
  MULADDX_OP2,
  MULSUBX_OP1,
  MULSUBX_OP2,
  MULADDXI_OP1,
  MULSUBXI_OP1,
  // NEON integers vectors
  MULADDv8i8_OP1,
  MULADDv8i8_OP2,
  MULADDv16i8_OP1,
  MULADDv16i8_OP2,
  MULADDv4i16_OP1,
  MULADDv4i16_OP2,
  MULADDv8i16_OP1,
  MULADDv8i16_OP2,
  MULADDv2i32_OP1,
  MULADDv2i32_OP2,
  MULADDv4i32_OP1,
  MULADDv4i32_OP2,

  MULSUBv8i8_OP1,
  MULSUBv8i8_OP2,
  MULSUBv16i8_OP1,
  MULSUBv16i8_OP2,
  MULSUBv4i16_OP1,
  MULSUBv4i16_OP2,
  MULSUBv8i16_OP1,
  MULSUBv8i16_OP2,
  MULSUBv2i32_OP1,
  MULSUBv2i32_OP2,
  MULSUBv4i32_OP1,
  MULSUBv4i32_OP2,

  MULADDv4i16_indexed_OP1,
  MULADDv4i16_indexed_OP2,
  MULADDv8i16_indexed_OP1,
  MULADDv8i16_indexed_OP2,
  MULADDv2i32_indexed_OP1,
  MULADDv2i32_indexed_OP2,
  MULADDv4i32_indexed_OP1,
  MULADDv4i32_indexed_OP2,

  MULSUBv4i16_indexed_OP1,
  MULSUBv4i16_indexed_OP2,
  MULSUBv8i16_indexed_OP1,
  MULSUBv8i16_indexed_OP2,
  MULSUBv2i32_indexed_OP1,
  MULSUBv2i32_indexed_OP2,
  MULSUBv4i32_indexed_OP1,
  MULSUBv4i32_indexed_OP2,

  // Floating Point
  FMULADDH_OP1,
  FMULADDH_OP2,
  FMULSUBH_OP1,
  FMULSUBH_OP2,
  FMULADDS_OP1,
  FMULADDS_OP2,
  FMULSUBS_OP1,
  FMULSUBS_OP2,
  FMULADDD_OP1,
  FMULADDD_OP2,
  FMULSUBD_OP1,
  FMULSUBD_OP2,
  FNMULSUBH_OP1,
  FNMULSUBS_OP1,
  FNMULSUBD_OP1,
  FMLAv1i32_indexed_OP1,
  FMLAv1i32_indexed_OP2,
  FMLAv1i64_indexed_OP1,
  FMLAv1i64_indexed_OP2,
  FMLAv4f16_OP1,
  FMLAv4f16_OP2,
  FMLAv8f16_OP1,
  FMLAv8f16_OP2,
  FMLAv2f32_OP2,
  FMLAv2f32_OP1,
  FMLAv2f64_OP1,
  FMLAv2f64_OP2,
  FMLAv4i16_indexed_OP1,
  FMLAv4i16_indexed_OP2,
  FMLAv8i16_indexed_OP1,
  FMLAv8i16_indexed_OP2,
  FMLAv2i32_indexed_OP1,
  FMLAv2i32_indexed_OP2,
  FMLAv2i64_indexed_OP1,
  FMLAv2i64_indexed_OP2,
  FMLAv4f32_OP1,
  FMLAv4f32_OP2,
  FMLAv4i32_indexed_OP1,
  FMLAv4i32_indexed_OP2,
  FMLSv1i32_indexed_OP2,
  FMLSv1i64_indexed_OP2,
  FMLSv4f16_OP1,
  FMLSv4f16_OP2,
  FMLSv8f16_OP1,
  FMLSv8f16_OP2,
  FMLSv2f32_OP1,
  FMLSv2f32_OP2,
  FMLSv2f64_OP1,
  FMLSv2f64_OP2,
  FMLSv4i16_indexed_OP1,
  FMLSv4i16_indexed_OP2,
  FMLSv8i16_indexed_OP1,
  FMLSv8i16_indexed_OP2,
  FMLSv2i32_indexed_OP1,
  FMLSv2i32_indexed_OP2,
  FMLSv2i64_indexed_OP1,
  FMLSv2i64_indexed_OP2,
  FMLSv4f32_OP1,
  FMLSv4f32_OP2,
  FMLSv4i32_indexed_OP1,
  FMLSv4i32_indexed_OP2,

  FMULv2i32_indexed_OP1,
  FMULv2i32_indexed_OP2,
  FMULv2i64_indexed_OP1,
  FMULv2i64_indexed_OP2,
  FMULv4i16_indexed_OP1,
  FMULv4i16_indexed_OP2,
  FMULv4i32_indexed_OP1,
  FMULv4i32_indexed_OP2,
  FMULv8i16_indexed_OP1,
  FMULv8i16_indexed_OP2,

  FNMADD,

  GATHER_LANE_i32,
  GATHER_LANE_i16,
  GATHER_LANE_i8
};
180class AArch64InstrInfo final : public AArch64GenInstrInfo {
181 const AArch64RegisterInfo RI;
182 const AArch64Subtarget &Subtarget;
183
184public:
185 explicit AArch64InstrInfo(const AArch64Subtarget &STI);
186
187 /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
188 /// such, whenever a client has an instance of instruction info, it should
189 /// always be able to get register info as well (through this method).
190 const AArch64RegisterInfo &getRegisterInfo() const { return RI; }
191
192 unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
193
194 bool isAsCheapAsAMove(const MachineInstr &MI) const override;
195
196 bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
197 Register &DstReg, unsigned &SubIdx) const override;
198
199 bool
200 areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
201 const MachineInstr &MIb) const override;
202
203 Register isLoadFromStackSlot(const MachineInstr &MI,
204 int &FrameIndex) const override;
205 Register isStoreToStackSlot(const MachineInstr &MI,
206 int &FrameIndex) const override;
207
208 /// Check for post-frame ptr elimination stack locations as well. This uses a
209 /// heuristic so it isn't reliable for correctness.
210 Register isStoreToStackSlotPostFE(const MachineInstr &MI,
211 int &FrameIndex) const override;
212 /// Check for post-frame ptr elimination stack locations as well. This uses a
213 /// heuristic so it isn't reliable for correctness.
214 Register isLoadFromStackSlotPostFE(const MachineInstr &MI,
215 int &FrameIndex) const override;
216
217 /// Does this instruction set its full destination register to zero?
218 static bool isGPRZero(const MachineInstr &MI);
219
220 /// Does this instruction rename a GPR without modifying bits?
221 static bool isGPRCopy(const MachineInstr &MI);
222
223 /// Does this instruction rename an FPR without modifying bits?
224 static bool isFPRCopy(const MachineInstr &MI);
225
226 /// Return true if pairing the given load or store is hinted to be
227 /// unprofitable.
228 static bool isLdStPairSuppressed(const MachineInstr &MI);
229
230 /// Return true if the given load or store is a strided memory access.
231 static bool isStridedAccess(const MachineInstr &MI);
232
233 /// Return true if it has an unscaled load/store offset.
234 static bool hasUnscaledLdStOffset(unsigned Opc);
235 static bool hasUnscaledLdStOffset(MachineInstr &MI) {
236 return hasUnscaledLdStOffset(Opc: MI.getOpcode());
237 }
238
239 /// Returns the unscaled load/store for the scaled load/store opcode,
240 /// if there is a corresponding unscaled variant available.
241 static std::optional<unsigned> getUnscaledLdSt(unsigned Opc);
242
243 /// Scaling factor for (scaled or unscaled) load or store.
244 static int getMemScale(unsigned Opc);
245 static int getMemScale(const MachineInstr &MI) {
246 return getMemScale(Opc: MI.getOpcode());
247 }
248
249 /// Returns whether the instruction is a pre-indexed load.
250 static bool isPreLd(const MachineInstr &MI);
251
252 /// Returns whether the instruction is a pre-indexed store.
253 static bool isPreSt(const MachineInstr &MI);
254
255 /// Returns whether the instruction is a pre-indexed load/store.
256 static bool isPreLdSt(const MachineInstr &MI);
257
258 /// Returns whether the instruction is a zero-extending load.
259 static bool isZExtLoad(const MachineInstr &MI);
260
261 /// Returns whether the instruction is a sign-extending load.
262 static bool isSExtLoad(const MachineInstr &MI);
263
264 /// Returns whether the instruction is a paired load/store.
265 static bool isPairedLdSt(const MachineInstr &MI);
266
267 /// Returns the base register operator of a load/store.
268 static const MachineOperand &getLdStBaseOp(const MachineInstr &MI);
269
270 /// Returns the immediate offset operator of a load/store.
271 static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI);
272
273 /// Returns whether the physical register is FP or NEON.
274 static bool isFpOrNEON(Register Reg);
275
276 /// Returns the shift amount operator of a load/store.
277 static const MachineOperand &getLdStAmountOp(const MachineInstr &MI);
278
279 /// Returns whether the instruction is FP or NEON.
280 static bool isFpOrNEON(const MachineInstr &MI);
281
282 /// Returns whether the instruction is in H form (16 bit operands)
283 static bool isHForm(const MachineInstr &MI);
284
285 /// Returns whether the instruction is in Q form (128 bit operands)
286 static bool isQForm(const MachineInstr &MI);
287
288 /// Returns whether the instruction can be compatible with non-zero BTYPE.
289 static bool hasBTISemantics(const MachineInstr &MI);
290
291 /// Returns the index for the immediate for a given instruction.
292 static unsigned getLoadStoreImmIdx(unsigned Opc);
293
294 /// Return true if pairing the given load or store may be paired with another.
295 static bool isPairableLdStInst(const MachineInstr &MI);
296
297 /// Returns true if MI is one of the TCRETURN* instructions.
298 static bool isTailCallReturnInst(const MachineInstr &MI);
299
300 /// Return the opcode that set flags when possible. The caller is
301 /// responsible for ensuring the opc has a flag setting equivalent.
302 static unsigned convertToFlagSettingOpc(unsigned Opc);
303
304 /// Return true if this is a load/store that can be potentially paired/merged.
305 bool isCandidateToMergeOrPair(const MachineInstr &MI) const;
306
307 /// Hint that pairing the given load or store is unprofitable.
308 static void suppressLdStPair(MachineInstr &MI);
309
310 std::optional<ExtAddrMode>
311 getAddrModeFromMemoryOp(const MachineInstr &MemI,
312 const TargetRegisterInfo *TRI) const override;
313
314 bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
315 const MachineInstr &AddrI,
316 ExtAddrMode &AM) const override;
317
318 MachineInstr *emitLdStWithAddr(MachineInstr &MemI,
319 const ExtAddrMode &AM) const override;
320
321 bool getMemOperandsWithOffsetWidth(
322 const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
323 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
324 const TargetRegisterInfo *TRI) const override;
325
326 /// If \p OffsetIsScalable is set to 'true', the offset is scaled by `vscale`.
327 /// This is true for some SVE instructions like ldr/str that have a
328 /// 'reg + imm' addressing mode where the immediate is an index to the
329 /// scalable vector located at 'reg + imm * vscale x #bytes'.
330 bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
331 const MachineOperand *&BaseOp,
332 int64_t &Offset, bool &OffsetIsScalable,
333 TypeSize &Width,
334 const TargetRegisterInfo *TRI) const;
335
336 /// Return the immediate offset of the base register in a load/store \p LdSt.
337 MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;
338
339 /// Returns true if opcode \p Opc is a memory operation. If it is, set
340 /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
341 ///
342 /// For unscaled instructions, \p Scale is set to 1. All values are in bytes.
343 /// MinOffset/MaxOffset are the un-scaled limits of the immediate in the
344 /// instruction, the actual offset limit is [MinOffset*Scale,
345 /// MaxOffset*Scale].
346 static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, TypeSize &Width,
347 int64_t &MinOffset, int64_t &MaxOffset);
348
349 bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
350 int64_t Offset1, bool OffsetIsScalable1,
351 ArrayRef<const MachineOperand *> BaseOps2,
352 int64_t Offset2, bool OffsetIsScalable2,
353 unsigned ClusterSize,
354 unsigned NumBytes) const override;
355
356 void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
357 const DebugLoc &DL, MCRegister DestReg,
358 MCRegister SrcReg, bool KillSrc, unsigned Opcode,
359 llvm::ArrayRef<unsigned> Indices) const;
360 void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
361 const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
362 bool KillSrc, unsigned Opcode, unsigned ZeroReg,
363 llvm::ArrayRef<unsigned> Indices) const;
364 void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
365 const DebugLoc &DL, Register DestReg, Register SrcReg,
366 bool KillSrc, bool RenamableDest = false,
367 bool RenamableSrc = false) const override;
368
369 void storeRegToStackSlot(
370 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
371 bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
372 MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
373
374 void loadRegFromStackSlot(
375 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
376 Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
377 Register VReg, unsigned SubReg = 0,
378 MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
379
380 // This tells target independent code that it is okay to pass instructions
381 // with subreg operands to foldMemoryOperandImpl.
382 bool isSubregFoldable() const override { return true; }
383
384 using TargetInstrInfo::foldMemoryOperandImpl;
385 MachineInstr *
386 foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
387 ArrayRef<unsigned> Ops,
388 MachineBasicBlock::iterator InsertPt, int FrameIndex,
389 LiveIntervals *LIS = nullptr,
390 VirtRegMap *VRM = nullptr) const override;
391
392 /// \returns true if a branch from an instruction with opcode \p BranchOpc
393 /// bytes is capable of jumping to a position \p BrOffset bytes away.
394 bool isBranchOffsetInRange(unsigned BranchOpc,
395 int64_t BrOffset) const override;
396
397 MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;
398
399 void insertIndirectBranch(MachineBasicBlock &MBB,
400 MachineBasicBlock &NewDestBB,
401 MachineBasicBlock &RestoreBB, const DebugLoc &DL,
402 int64_t BrOffset, RegScavenger *RS) const override;
403
404 bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
405 MachineBasicBlock *&FBB,
406 SmallVectorImpl<MachineOperand> &Cond,
407 bool AllowModify = false) const override;
408 bool analyzeBranchPredicate(MachineBasicBlock &MBB,
409 MachineBranchPredicate &MBP,
410 bool AllowModify) const override;
411 unsigned removeBranch(MachineBasicBlock &MBB,
412 int *BytesRemoved = nullptr) const override;
413 unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
414 MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
415 const DebugLoc &DL,
416 int *BytesAdded = nullptr) const override;
417
418 std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
419 analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;
420
421 bool
422 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
423 bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
424 Register, Register, Register, int &, int &,
425 int &) const override;
426 void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
427 const DebugLoc &DL, Register DstReg,
428 ArrayRef<MachineOperand> Cond, Register TrueReg,
429 Register FalseReg) const override;
430
431 void insertNoop(MachineBasicBlock &MBB,
432 MachineBasicBlock::iterator MI) const override;
433
434 MCInst getNop() const override;
435
436 bool isSchedulingBoundary(const MachineInstr &MI,
437 const MachineBasicBlock *MBB,
438 const MachineFunction &MF) const override;
439
440 /// analyzeCompare - For a comparison instruction, return the source registers
441 /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
442 /// Return true if the comparison instruction can be analyzed.
443 bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
444 Register &SrcReg2, int64_t &CmpMask,
445 int64_t &CmpValue) const override;
446 /// optimizeCompareInstr - Convert the instruction supplying the argument to
447 /// the comparison into one that sets the zero bit in the flags register.
448 bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
449 Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
450 const MachineRegisterInfo *MRI) const override;
451 bool optimizeCondBranch(MachineInstr &MI) const override;
452
453 CombinerObjective getCombinerObjective(unsigned Pattern) const override;
454 /// Return true when a code sequence can improve throughput. It
455 /// should be called only for instructions in loops.
456 /// \param Pattern - combiner pattern
457 bool isThroughputPattern(unsigned Pattern) const override;
458 /// Return true when there is potentially a faster code sequence
459 /// for an instruction chain ending in ``Root``. All potential patterns are
460 /// listed in the ``Patterns`` array.
461 bool getMachineCombinerPatterns(MachineInstr &Root,
462 SmallVectorImpl<unsigned> &Patterns,
463 bool DoRegPressureReduce) const override;
464 /// Return true when Inst is associative and commutative so that it can be
465 /// reassociated. If Invert is true, then the inverse of Inst operation must
466 /// be checked.
467 bool isAssociativeAndCommutative(const MachineInstr &Inst,
468 bool Invert) const override;
469
470 /// Returns true if \P Opcode is an instruction which performs accumulation
471 /// into a destination register.
472 bool isAccumulationOpcode(unsigned Opcode) const override;
473
474 /// Returns an opcode which defines the accumulator used by \P Opcode.
475 unsigned getAccumulationStartOpcode(unsigned Opcode) const override;
476
477 unsigned
478 getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const override;
479
480 /// When getMachineCombinerPatterns() finds patterns, this function
481 /// generates the instructions that could replace the original code
482 /// sequence
483 void genAlternativeCodeSequence(
484 MachineInstr &Root, unsigned Pattern,
485 SmallVectorImpl<MachineInstr *> &InsInstrs,
486 SmallVectorImpl<MachineInstr *> &DelInstrs,
487 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const override;
488 /// AArch64 supports MachineCombiner.
489 bool useMachineCombiner() const override;
490
491 bool expandPostRAPseudo(MachineInstr &MI) const override;
492
493 std::pair<unsigned, unsigned>
494 decomposeMachineOperandsTargetFlags(unsigned TF) const override;
495 ArrayRef<std::pair<unsigned, const char *>>
496 getSerializableDirectMachineOperandTargetFlags() const override;
497 ArrayRef<std::pair<unsigned, const char *>>
498 getSerializableBitmaskMachineOperandTargetFlags() const override;
499 ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
500 getSerializableMachineMemOperandTargetFlags() const override;
501
502 bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
503 bool OutlineFromLinkOnceODRs) const override;
504 std::optional<std::unique_ptr<outliner::OutlinedFunction>>
505 getOutliningCandidateInfo(
506 const MachineModuleInfo &MMI,
507 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
508 unsigned MinRepeats) const override;
509 void mergeOutliningCandidateAttributes(
510 Function &F, std::vector<outliner::Candidate> &Candidates) const override;
511 outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI,
512 MachineBasicBlock::iterator &MIT,
513 unsigned Flags) const override;
514 SmallVector<
515 std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
516 getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const override;
517 void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
518 const outliner::OutlinedFunction &OF) const override;
519 MachineBasicBlock::iterator
520 insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
521 MachineBasicBlock::iterator &It, MachineFunction &MF,
522 outliner::Candidate &C) const override;
523 bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
524
525 void buildClearRegister(Register Reg, MachineBasicBlock &MBB,
526 MachineBasicBlock::iterator Iter, DebugLoc &DL,
527 bool AllowSideEffects = true) const override;
528
529 /// Returns the vector element size (B, H, S or D) of an SVE opcode.
530 uint64_t getElementSizeForOpcode(unsigned Opc) const;
531 /// Returns true if the opcode is for an SVE instruction that sets the
532 /// condition codes as if it's results had been fed to a PTEST instruction
533 /// along with the same general predicate.
534 bool isPTestLikeOpcode(unsigned Opc) const;
535 /// Returns true if the opcode is for an SVE WHILE## instruction.
536 bool isWhileOpcode(unsigned Opc) const;
537 /// Returns true if the instruction has a shift by immediate that can be
538 /// executed in one cycle less.
539 static bool isFalkorShiftExtFast(const MachineInstr &MI);
540 /// Return true if the instructions is a SEH instruction used for unwinding
541 /// on Windows.
542 static bool isSEHInstruction(const MachineInstr &MI);
543
544 std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
545 Register Reg) const override;
546
547 bool isFunctionSafeToSplit(const MachineFunction &MF) const override;
548
549 bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const override;
550
551 std::optional<ParamLoadedValue>
552 describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
553
554 unsigned int getTailDuplicateSize(CodeGenOptLevel OptLevel) const override;
555
556 bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
557 MachineRegisterInfo &MRI) const override;
558
559 static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
560 int64_t &NumBytes,
561 int64_t &NumPredicateVectors,
562 int64_t &NumDataVectors);
563 static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset,
564 int64_t &ByteSized,
565 int64_t &VGSized);
566
567 // Return true if address of the form BaseReg + Scale * ScaledReg + Offset can
568 // be used for a load/store of NumBytes. BaseReg is always present and
569 // implicit.
570 bool isLegalAddressingMode(unsigned NumBytes, int64_t Offset,
571 unsigned Scale) const;
572
573 // Decrement the SP, issuing probes along the way. `TargetReg` is the new top
574 // of the stack. `FrameSetup` is passed as true, if the allocation is a part
575 // of constructing the activation frame of a function.
576 MachineBasicBlock::iterator probedStackAlloc(MachineBasicBlock::iterator MBBI,
577 Register TargetReg,
578 bool FrameSetup) const;
579
580 static int
581 findCondCodeUseOperandIdxForBranchOrSelect(const MachineInstr &Instr);
582
583#define GET_INSTRINFO_HELPER_DECLS
584#include "AArch64GenInstrInfo.inc"
585
586protected:
587 /// If the specific machine instruction is an instruction that moves/copies
588 /// value from one register to another register return destination and source
589 /// registers as machine operands.
590 std::optional<DestSourcePair>
591 isCopyInstrImpl(const MachineInstr &MI) const override;
592 std::optional<DestSourcePair>
593 isCopyLikeInstrImpl(const MachineInstr &MI) const override;
594
595private:
596 unsigned getInstBundleLength(const MachineInstr &MI) const;
597
598 /// Sets the offsets on outlined instructions in \p MBB which use SP
599 /// so that they will be valid post-outlining.
600 ///
601 /// \param MBB A \p MachineBasicBlock in an outlined function.
602 void fixupPostOutline(MachineBasicBlock &MBB) const;
603
604 void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
605 MachineBasicBlock *TBB,
606 ArrayRef<MachineOperand> Cond) const;
607 bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
608 const MachineRegisterInfo &MRI) const;
609 bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
610 int CmpValue, const MachineRegisterInfo &MRI) const;
611
612 /// Returns an unused general-purpose register which can be used for
613 /// constructing an outlined call if one exists. Returns 0 otherwise.
614 Register findRegisterToSaveLRTo(outliner::Candidate &C) const;
615
616 /// Remove a ptest of a predicate-generating operation that already sets, or
617 /// can be made to set, the condition codes in an identical manner
618 bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
619 unsigned PredReg,
620 const MachineRegisterInfo *MRI) const;
621 std::optional<unsigned>
622 canRemovePTestInstr(MachineInstr *PTest, MachineInstr *Mask,
623 MachineInstr *Pred, const MachineRegisterInfo *MRI) const;
624
625 /// verifyInstruction - Perform target specific instruction verification.
626 bool verifyInstruction(const MachineInstr &MI,
627 StringRef &ErrInfo) const override;
628};
629
/// Records which of the N, Z, C and V condition flags are used. All flags
/// start out unused (false).
struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  /// Merge another set of used flags into this one (per-flag OR).
  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    N |= UsedFlags.N;
    Z |= UsedFlags.Z;
    C |= UsedFlags.C;
    V |= UsedFlags.V;
    return *this;
  }
};
646
/// \returns the condition flags that are used after \p CmpInstr in its
/// MachineBB, provided the NZCV flags are not alive in the successors of the
/// common parent block of \p CmpInstr and \p MI.
/// \returns std::nullopt otherwise.
///
/// If \p CCUseInstrs is provided, collects the instructions that use those
/// flags into it.
std::optional<UsedNZCV>
examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                 const TargetRegisterInfo &TRI,
                 SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);

/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
/// which either reads or clobbers NZCV.
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                     const MachineInstr &UseMI,
                                     const TargetRegisterInfo *TRI);
662
/// Build a CFI instruction that redefines the CFA in terms of \p FrameReg and
/// \p Offset. NOTE(review): the exact encoding — including how
/// \p LastAdjustmentWasScalable affects scalable (SVE) offsets — lives in the
/// implementation; confirm there before relying on details.
MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg,
                              unsigned Reg, const StackOffset &Offset,
                              bool LastAdjustmentWasScalable = true);
/// Build a CFI instruction describing where \p Reg is saved relative to the
/// CFA (\p OffsetFromDefCFA). \p IncomingVGOffsetFromDefCFA relates to
/// VG-scaled (scalable) offsets — see the implementation for its exact role.
MCCFIInstruction
createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
                const StackOffset &OffsetFromDefCFA,
                std::optional<int64_t> IncomingVGOffsetFromDefCFA);

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset. This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                     StackOffset Offset, const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false, bool NeedsWinCFI = false,
                     bool *HasWinCFI = nullptr, bool EmitCFAOffset = false,
                     StackOffset InitialOffset = {},
                     unsigned FrameReg = AArch64::SP);
683
/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference (in \p Offset).
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, StackOffset &Offset,
                              const AArch64InstrInfo *TII);
690
/// Use to report the frame offset status in isAArch64FrameOffsetLegal.
/// The values form a bitmask: AArch64FrameOffsetIsLegal and
/// AArch64FrameOffsetCanUpdate may be combined in a result.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1, ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2 ///< Offset can apply, at least partly.
};
697
/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
/// turned into an unscaled operator, which opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int64_t *EmittableOffset = nullptr);

/// Optimize the terminator instructions of \p MBB. NOTE(review): the exact
/// transformations and the meaning of the bool result are defined by the
/// implementation — presumably "true if anything changed"; confirm there.
bool optimizeTerminators(MachineBasicBlock *MBB, const TargetInstrInfo &TII);
718
719static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }
720
721static inline bool isCondBranchOpcode(int Opc) {
722 switch (Opc) {
723 case AArch64::Bcc:
724 case AArch64::CBZW:
725 case AArch64::CBZX:
726 case AArch64::CBNZW:
727 case AArch64::CBNZX:
728 case AArch64::TBZW:
729 case AArch64::TBZX:
730 case AArch64::TBNZW:
731 case AArch64::TBNZX:
732 case AArch64::CBWPri:
733 case AArch64::CBXPri:
734 case AArch64::CBBAssertExt:
735 case AArch64::CBHAssertExt:
736 case AArch64::CBWPrr:
737 case AArch64::CBXPrr:
738 return true;
739 default:
740 return false;
741 }
742}
743
744static inline bool isIndirectBranchOpcode(int Opc) {
745 switch (Opc) {
746 case AArch64::BR:
747 case AArch64::BRAA:
748 case AArch64::BRAB:
749 case AArch64::BRAAZ:
750 case AArch64::BRABZ:
751 return true;
752 }
753 return false;
754}
755
756static inline bool isIndirectCallOpcode(unsigned Opc) {
757 switch (Opc) {
758 case AArch64::BLR:
759 case AArch64::BLRAA:
760 case AArch64::BLRAB:
761 case AArch64::BLRAAZ:
762 case AArch64::BLRABZ:
763 return true;
764 default:
765 return false;
766 }
767}
768
769static inline bool isPTrueOpcode(unsigned Opc) {
770 switch (Opc) {
771 case AArch64::PTRUE_B:
772 case AArch64::PTRUE_H:
773 case AArch64::PTRUE_S:
774 case AArch64::PTRUE_D:
775 return true;
776 default:
777 return false;
778 }
779}
780
/// Return opcode to be used for indirect calls.
/// NOTE(review): presumably chooses between BLR and a variant based on
/// \p MF's attributes — confirm in the implementation.
unsigned getBLRCallOpcode(const MachineFunction &MF);
783
784/// Return XPAC opcode to be used for a ptrauth strip using the given key.
785static inline unsigned getXPACOpcodeForKey(AArch64PACKey::ID K) {
786 using namespace AArch64PACKey;
787 switch (K) {
788 case IA: case IB: return AArch64::XPACI;
789 case DA: case DB: return AArch64::XPACD;
790 }
791 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
792}
793
794/// Return AUT opcode to be used for a ptrauth auth using the given key, or its
795/// AUT*Z variant that doesn't take a discriminator operand, using zero instead.
796static inline unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
797 using namespace AArch64PACKey;
798 switch (K) {
799 case IA: return Zero ? AArch64::AUTIZA : AArch64::AUTIA;
800 case IB: return Zero ? AArch64::AUTIZB : AArch64::AUTIB;
801 case DA: return Zero ? AArch64::AUTDZA : AArch64::AUTDA;
802 case DB: return Zero ? AArch64::AUTDZB : AArch64::AUTDB;
803 }
804 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
805}
806
807/// Return PAC opcode to be used for a ptrauth sign using the given key, or its
808/// PAC*Z variant that doesn't take a discriminator operand, using zero instead.
809static inline unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
810 using namespace AArch64PACKey;
811 switch (K) {
812 case IA: return Zero ? AArch64::PACIZA : AArch64::PACIA;
813 case IB: return Zero ? AArch64::PACIZB : AArch64::PACIB;
814 case DA: return Zero ? AArch64::PACDZA : AArch64::PACDA;
815 case DB: return Zero ? AArch64::PACDZB : AArch64::PACDB;
816 }
817 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
818}
819
820/// Return B(L)RA opcode to be used for an authenticated branch or call using
821/// the given key, or its B(L)RA*Z variant that doesn't take a discriminator
822/// operand, using zero instead.
823static inline unsigned getBranchOpcodeForKey(bool IsCall, AArch64PACKey::ID K,
824 bool Zero) {
825 using namespace AArch64PACKey;
826 static const unsigned BranchOpcode[2][2] = {
827 {AArch64::BRAA, AArch64::BRAAZ},
828 {AArch64::BRAB, AArch64::BRABZ},
829 };
830 static const unsigned CallOpcode[2][2] = {
831 {AArch64::BLRAA, AArch64::BLRAAZ},
832 {AArch64::BLRAB, AArch64::BLRABZ},
833 };
834
835 assert((K == IA || K == IB) && "B(L)RA* instructions require IA or IB key");
836 if (IsCall)
837 return CallOpcode[K == IB][Zero];
838 return BranchOpcode[K == IB][Zero];
839}
840
// Layout of the target-specific TSFlags bit-field attached to AArch64
// instruction descriptions. Each macro below positions a value within its
// field; the field widths are noted on the right, and the enums further down
// decode these fields.
// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X) (X) // 3-bits
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 4-bits
#define TSFLAG_FALSE_LANE_TYPE(X) ((X) << 7) // 2-bits
#define TSFLAG_INSTR_FLAGS(X) ((X) << 9) // 2-bits
#define TSFLAG_SME_MATRIX_TYPE(X) ((X) << 11) // 3-bits
// }
848
namespace AArch64 {

// Vector element size (B/H/S/D) of an SVE opcode, encoded in TSFlags
// (decoded via getElementSizeForOpcode).
enum ElementSizeType {
  ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
  ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
  ElementSizeB = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
  ElementSizeH = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
  ElementSizeS = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
  ElementSizeD = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};

// Classification of how an instruction's destructive operand behaves,
// encoded in TSFlags.
enum DestructiveInstType {
  DestructiveInstTypeMask = TSFLAG_DESTRUCTIVE_INST_TYPE(0xf),
  NotDestructive = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
  DestructiveOther = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
  DestructiveUnary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x2),
  DestructiveBinaryImm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x3),
  DestructiveBinaryShImmUnpred = TSFLAG_DESTRUCTIVE_INST_TYPE(0x4),
  DestructiveBinary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x5),
  DestructiveBinaryComm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x6),
  DestructiveBinaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x7),
  DestructiveTernaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x8),
  Destructive2xRegImmUnpred = TSFLAG_DESTRUCTIVE_INST_TYPE(0x9),
  DestructiveUnaryPassthru = TSFLAG_DESTRUCTIVE_INST_TYPE(0xa),
};

// Behavior of the inactive (false) predicate lanes, encoded in TSFlags.
enum FalseLaneType {
  FalseLanesMask = TSFLAG_FALSE_LANE_TYPE(0x3),
  FalseLanesZero = TSFLAG_FALSE_LANE_TYPE(0x1),
  FalseLanesUndef = TSFLAG_FALSE_LANE_TYPE(0x2),
};

// NOTE: This is a bit field.
static const uint64_t InstrFlagIsWhile = TSFLAG_INSTR_FLAGS(0x1);
static const uint64_t InstrFlagIsPTestLike = TSFLAG_INSTR_FLAGS(0x2);

// Kind of SME matrix operand (ZA tile / array) used, encoded in TSFlags.
enum SMEMatrixType {
  SMEMatrixTypeMask = TSFLAG_SME_MATRIX_TYPE(0x7),
  SMEMatrixNone = TSFLAG_SME_MATRIX_TYPE(0x0),
  SMEMatrixTileB = TSFLAG_SME_MATRIX_TYPE(0x1),
  SMEMatrixTileH = TSFLAG_SME_MATRIX_TYPE(0x2),
  SMEMatrixTileS = TSFLAG_SME_MATRIX_TYPE(0x3),
  SMEMatrixTileD = TSFLAG_SME_MATRIX_TYPE(0x4),
  SMEMatrixTileQ = TSFLAG_SME_MATRIX_TYPE(0x5),
  SMEMatrixArray = TSFLAG_SME_MATRIX_TYPE(0x6),
};

#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
#undef TSFLAG_FALSE_LANE_TYPE
#undef TSFLAG_INSTR_FLAGS
#undef TSFLAG_SME_MATRIX_TYPE

// Opcode-to-opcode mapping helpers. NOTE(review): presumably backed by
// TableGen-generated tables (AArch64GenInstrInfo.inc) — confirm there.
int32_t getSVEPseudoMap(uint32_t Opcode);
int32_t getSVERevInstr(uint32_t Opcode);
int32_t getSVENonRevInstr(uint32_t Opcode);

int32_t getSMEPseudoMap(uint32_t Opcode);
} // namespace AArch64
908
909} // end namespace llvm
910
911#endif
912