//=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares AArch64-specific per-machine-function information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H

#include "AArch64Subtarget.h"
#include "Utils/AArch64SMEAttributes.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCSymbol.h"
#include <cassert>
#include <limits>
#include <optional>

namespace llvm {

namespace yaml {
struct AArch64FunctionInfo;
} // end namespace yaml

class AArch64Subtarget;
class MachineInstr;
/// Bookkeeping for the TPIDR2 block used by SME lazy-save.
/// \c FrameIndex is the stack slot holding the TPIDR2 object (INT_MAX is the
/// "not yet assigned" sentinel) and \c Uses counts references to the object.
struct TPIDR2Object {
  int FrameIndex = std::numeric_limits<int>::max();
  unsigned Uses = 0;
};
44
45/// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
46/// contains private AArch64-specific information for each MachineFunction.
47class AArch64FunctionInfo final : public MachineFunctionInfo {
48 /// Number of bytes of arguments this function has on the stack. If the callee
49 /// is expected to restore the argument stack this should be a multiple of 16,
50 /// all usable during a tail call.
51 ///
52 /// The alternative would forbid tail call optimisation in some cases: if we
53 /// want to transfer control from a function with 8-bytes of stack-argument
54 /// space to a function with 16-bytes then misalignment of this value would
55 /// make a stack adjustment necessary, which could not be undone by the
56 /// callee.
57 unsigned BytesInStackArgArea = 0;
58
59 /// The number of bytes to restore to deallocate space for incoming
60 /// arguments. Canonically 0 in the C calling convention, but non-zero when
61 /// callee is expected to pop the args.
62 unsigned ArgumentStackToRestore = 0;
63
64 /// Space just below incoming stack pointer reserved for arguments being
65 /// passed on the stack during a tail call. This will be the difference
66 /// between the largest tail call argument space needed in this function and
67 /// what's already available by reusing space of incoming arguments.
68 unsigned TailCallReservedStack = 0;
69
70 /// HasStackFrame - True if this function has a stack frame. Set by
71 /// determineCalleeSaves().
72 bool HasStackFrame = false;
73
74 /// Amount of stack frame size, not including callee-saved registers.
75 uint64_t LocalStackSize = 0;
76
77 /// The start and end frame indices for the SVE callee saves.
78 int MinSVECSFrameIndex = 0;
79 int MaxSVECSFrameIndex = 0;
80
81 /// Amount of stack frame size used for saving callee-saved registers.
82 unsigned CalleeSavedStackSize = 0;
83 unsigned SVECalleeSavedStackSize = 0;
84 bool HasCalleeSavedStackSize = false;
85
86 /// Number of TLS accesses using the special (combinable)
87 /// _TLS_MODULE_BASE_ symbol.
88 unsigned NumLocalDynamicTLSAccesses = 0;
89
90 /// FrameIndex for start of varargs area for arguments passed on the
91 /// stack.
92 int VarArgsStackIndex = 0;
93
94 /// Offset of start of varargs area for arguments passed on the stack.
95 unsigned VarArgsStackOffset = 0;
96
97 /// FrameIndex for start of varargs area for arguments passed in
98 /// general purpose registers.
99 int VarArgsGPRIndex = 0;
100
101 /// Size of the varargs area for arguments passed in general purpose
102 /// registers.
103 unsigned VarArgsGPRSize = 0;
104
105 /// FrameIndex for start of varargs area for arguments passed in
106 /// floating-point registers.
107 int VarArgsFPRIndex = 0;
108
109 /// Size of the varargs area for arguments passed in floating-point
110 /// registers.
111 unsigned VarArgsFPRSize = 0;
112
113 /// The stack slots used to add space between FPR and GPR accesses when using
114 /// hazard padding. StackHazardCSRSlotIndex is added between GPR and FPR CSRs.
115 /// StackHazardSlotIndex is added between (sorted) stack objects.
116 int StackHazardSlotIndex = std::numeric_limits<int>::max();
117 int StackHazardCSRSlotIndex = std::numeric_limits<int>::max();
118
119 /// True if this function has a subset of CSRs that is handled explicitly via
120 /// copies.
121 bool IsSplitCSR = false;
122
123 /// True when the stack gets realigned dynamically because the size of stack
124 /// frame is unknown at compile time. e.g., in case of VLAs.
125 bool StackRealigned = false;
126
127 /// True when the callee-save stack area has unused gaps that may be used for
128 /// other stack allocations.
129 bool CalleeSaveStackHasFreeSpace = false;
130
131 /// SRetReturnReg - sret lowering includes returning the value of the
132 /// returned struct in a register. This field holds the virtual register into
133 /// which the sret argument is passed.
134 Register SRetReturnReg;
135
136 /// SVE stack size (for predicates and data vectors) are maintained here
137 /// rather than in FrameInfo, as the placement and Stack IDs are target
138 /// specific.
139 uint64_t StackSizeSVE = 0;
140
141 /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid.
142 bool HasCalculatedStackSizeSVE = false;
143
144 /// Has a value when it is known whether or not the function uses a
145 /// redzone, and no value otherwise.
146 /// Initialized during frame lowering, unless the function has the noredzone
147 /// attribute, in which case it is set to false at construction.
148 std::optional<bool> HasRedZone;
149
150 /// ForwardedMustTailRegParms - A list of virtual and physical registers
151 /// that must be forwarded to every musttail call.
152 SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;
153
154 /// FrameIndex for the tagged base pointer.
155 std::optional<int> TaggedBasePointerIndex;
156
157 /// Offset from SP-at-entry to the tagged base pointer.
158 /// Tagged base pointer is set up to point to the first (lowest address)
159 /// tagged stack slot.
160 unsigned TaggedBasePointerOffset;
161
162 /// OutliningStyle denotes, if a function was outined, how it was outlined,
163 /// e.g. Tail Call, Thunk, or Function if none apply.
164 std::optional<std::string> OutliningStyle;
165
166 // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
167 // CalleeSavedStackSize) to the address of the frame record.
168 int CalleeSaveBaseToFrameRecordOffset = 0;
169
170 /// SignReturnAddress is true if PAC-RET is enabled for the function with
171 /// defaults being sign non-leaf functions only, with the B key.
172 bool SignReturnAddress = false;
173
174 /// SignReturnAddressAll modifies the default PAC-RET mode to signing leaf
175 /// functions as well.
176 bool SignReturnAddressAll = false;
177
178 /// SignWithBKey modifies the default PAC-RET mode to signing with the B key.
179 bool SignWithBKey = false;
180
181 /// HasELFSignedGOT is true if the target binary format is ELF and the IR
182 /// module containing the corresponding function has "ptrauth-elf-got" flag
183 /// set to 1.
184 bool HasELFSignedGOT = false;
185
186 /// SigningInstrOffset captures the offset of the PAC-RET signing instruction
187 /// within the prologue, so it can be re-used for authentication in the
188 /// epilogue when using PC as a second salt (FEAT_PAuth_LR)
189 MCSymbol *SignInstrLabel = nullptr;
190
191 /// BranchTargetEnforcement enables placing BTI instructions at potential
192 /// indirect branch destinations.
193 bool BranchTargetEnforcement = false;
194
195 /// Indicates that SP signing should be diversified with PC as-per PAuthLR.
196 /// This is set by -mbranch-protection and will emit NOP instructions unless
197 /// the subtarget feature +pauthlr is also used (in which case non-NOP
198 /// instructions are emitted).
199 bool BranchProtectionPAuthLR = false;
200
201 /// Whether this function has an extended frame record [Ctx, FP, LR]. If so,
202 /// bit 60 of the in-memory FP will be 1 to enable other tools to detect the
203 /// extended record.
204 bool HasSwiftAsyncContext = false;
205
206 /// The stack slot where the Swift asynchronous context is stored.
207 int SwiftAsyncContextFrameIdx = std::numeric_limits<int>::max();
208
209 bool IsMTETagged = false;
210
211 /// The function has Scalable Vector or Scalable Predicate register argument
212 /// or return type
213 bool IsSVECC = false;
214
215 /// The frame-index for the TPIDR2 object used for lazy saves.
216 TPIDR2Object TPIDR2;
217
218 /// Whether this function changes streaming mode within the function.
219 bool HasStreamingModeChanges = false;
220
221 /// True if the function need unwind information.
222 mutable std::optional<bool> NeedsDwarfUnwindInfo;
223
224 /// True if the function need asynchronous unwind information.
225 mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;
226
227 int64_t StackProbeSize = 0;
228
229 // Holds a register containing pstate.sm. This is set
230 // on function entry to record the initial pstate of a function.
231 Register PStateSMReg = MCRegister::NoRegister;
232
233 // Holds a pointer to a buffer that is large enough to represent
234 // all SME ZA state and any additional state required by the
235 // __arm_sme_save/restore support routines.
236 Register SMESaveBufferAddr = MCRegister::NoRegister;
237
238 // true if SMESaveBufferAddr is used.
239 bool SMESaveBufferUsed = false;
240
241 // Has the PNReg used to build PTRUE instruction.
242 // The PTRUE is used for the LD/ST of ZReg pairs in save and restore.
243 unsigned PredicateRegForFillSpill = 0;
244
245 // The stack slots where VG values are stored to.
246 int64_t VGIdx = std::numeric_limits<int>::max();
247 int64_t StreamingVGIdx = std::numeric_limits<int>::max();
248
249 // Holds the SME function attributes (streaming mode, ZA/ZT0 state).
250 SMEAttrs SMEFnAttrs;
251
252public:
253 AArch64FunctionInfo(const Function &F, const AArch64Subtarget *STI);
254
255 MachineFunctionInfo *
256 clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF,
257 const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
258 const override;
259
260 void setPredicateRegForFillSpill(unsigned Reg) {
261 PredicateRegForFillSpill = Reg;
262 }
263 unsigned getPredicateRegForFillSpill() const {
264 return PredicateRegForFillSpill;
265 }
266
267 Register getSMESaveBufferAddr() const { return SMESaveBufferAddr; };
268 void setSMESaveBufferAddr(Register Reg) { SMESaveBufferAddr = Reg; };
269
270 unsigned isSMESaveBufferUsed() const { return SMESaveBufferUsed; };
271 void setSMESaveBufferUsed(bool Used = true) { SMESaveBufferUsed = Used; };
272
273 Register getPStateSMReg() const { return PStateSMReg; };
274 void setPStateSMReg(Register Reg) { PStateSMReg = Reg; };
275
276 int64_t getVGIdx() const { return VGIdx; };
277 void setVGIdx(unsigned Idx) { VGIdx = Idx; };
278
279 int64_t getStreamingVGIdx() const { return StreamingVGIdx; };
280 void setStreamingVGIdx(unsigned FrameIdx) { StreamingVGIdx = FrameIdx; };
281
282 bool isSVECC() const { return IsSVECC; };
283 void setIsSVECC(bool s) { IsSVECC = s; };
284
285 TPIDR2Object &getTPIDR2Obj() { return TPIDR2; }
286
287 void initializeBaseYamlFields(const yaml::AArch64FunctionInfo &YamlMFI);
288
289 unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
290 void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; }
291
292 unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
293 void setArgumentStackToRestore(unsigned bytes) {
294 ArgumentStackToRestore = bytes;
295 }
296
297 unsigned getTailCallReservedStack() const { return TailCallReservedStack; }
298 void setTailCallReservedStack(unsigned bytes) {
299 TailCallReservedStack = bytes;
300 }
301
302 bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }
303
304 void setStackSizeSVE(uint64_t S) {
305 HasCalculatedStackSizeSVE = true;
306 StackSizeSVE = S;
307 }
308
309 uint64_t getStackSizeSVE() const { return StackSizeSVE; }
310
311 bool hasStackFrame() const { return HasStackFrame; }
312 void setHasStackFrame(bool s) { HasStackFrame = s; }
313
314 bool isStackRealigned() const { return StackRealigned; }
315 void setStackRealigned(bool s) { StackRealigned = s; }
316
317 bool hasCalleeSaveStackFreeSpace() const {
318 return CalleeSaveStackHasFreeSpace;
319 }
320 void setCalleeSaveStackHasFreeSpace(bool s) {
321 CalleeSaveStackHasFreeSpace = s;
322 }
323 bool isSplitCSR() const { return IsSplitCSR; }
324 void setIsSplitCSR(bool s) { IsSplitCSR = s; }
325
326 void setLocalStackSize(uint64_t Size) { LocalStackSize = Size; }
327 uint64_t getLocalStackSize() const { return LocalStackSize; }
328
329 void setOutliningStyle(const std::string &Style) { OutliningStyle = Style; }
330 std::optional<std::string> getOutliningStyle() const {
331 return OutliningStyle;
332 }
333
334 void setCalleeSavedStackSize(unsigned Size) {
335 CalleeSavedStackSize = Size;
336 HasCalleeSavedStackSize = true;
337 }
338
339 // When CalleeSavedStackSize has not been set (for example when
340 // some MachineIR pass is run in isolation), then recalculate
341 // the CalleeSavedStackSize directly from the CalleeSavedInfo.
342 // Note: This information can only be recalculated after PEI
343 // has assigned offsets to the callee save objects.
344 unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const {
345 bool ValidateCalleeSavedStackSize = false;
346
347#ifndef NDEBUG
348 // Make sure the calculated size derived from the CalleeSavedInfo
349 // equals the cached size that was calculated elsewhere (e.g. in
350 // determineCalleeSaves).
351 ValidateCalleeSavedStackSize = HasCalleeSavedStackSize;
352#endif
353
354 if (!HasCalleeSavedStackSize || ValidateCalleeSavedStackSize) {
355 assert(MFI.isCalleeSavedInfoValid() && "CalleeSavedInfo not calculated");
356 if (MFI.getCalleeSavedInfo().empty())
357 return 0;
358
359 int64_t MinOffset = std::numeric_limits<int64_t>::max();
360 int64_t MaxOffset = std::numeric_limits<int64_t>::min();
361 for (const auto &Info : MFI.getCalleeSavedInfo()) {
362 int FrameIdx = Info.getFrameIdx();
363 if (MFI.getStackID(ObjectIdx: FrameIdx) != TargetStackID::Default)
364 continue;
365 int64_t Offset = MFI.getObjectOffset(ObjectIdx: FrameIdx);
366 int64_t ObjSize = MFI.getObjectSize(ObjectIdx: FrameIdx);
367 MinOffset = std::min<int64_t>(a: Offset, b: MinOffset);
368 MaxOffset = std::max<int64_t>(a: Offset + ObjSize, b: MaxOffset);
369 }
370
371 if (SwiftAsyncContextFrameIdx != std::numeric_limits<int>::max()) {
372 int64_t Offset = MFI.getObjectOffset(ObjectIdx: getSwiftAsyncContextFrameIdx());
373 int64_t ObjSize = MFI.getObjectSize(ObjectIdx: getSwiftAsyncContextFrameIdx());
374 MinOffset = std::min<int64_t>(a: Offset, b: MinOffset);
375 MaxOffset = std::max<int64_t>(a: Offset + ObjSize, b: MaxOffset);
376 }
377
378 if (StackHazardCSRSlotIndex != std::numeric_limits<int>::max()) {
379 int64_t Offset = MFI.getObjectOffset(ObjectIdx: StackHazardCSRSlotIndex);
380 int64_t ObjSize = MFI.getObjectSize(ObjectIdx: StackHazardCSRSlotIndex);
381 MinOffset = std::min<int64_t>(a: Offset, b: MinOffset);
382 MaxOffset = std::max<int64_t>(a: Offset + ObjSize, b: MaxOffset);
383 }
384
385 unsigned Size = alignTo(Value: MaxOffset - MinOffset, Align: 16);
386 assert((!HasCalleeSavedStackSize || getCalleeSavedStackSize() == Size) &&
387 "Invalid size calculated for callee saves");
388 return Size;
389 }
390
391 return getCalleeSavedStackSize();
392 }
393
394 unsigned getCalleeSavedStackSize() const {
395 assert(HasCalleeSavedStackSize &&
396 "CalleeSavedStackSize has not been calculated");
397 return CalleeSavedStackSize;
398 }
399
400 // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'
401 void setSVECalleeSavedStackSize(unsigned Size) {
402 SVECalleeSavedStackSize = Size;
403 }
404 unsigned getSVECalleeSavedStackSize() const {
405 return SVECalleeSavedStackSize;
406 }
407
408 void setMinMaxSVECSFrameIndex(int Min, int Max) {
409 MinSVECSFrameIndex = Min;
410 MaxSVECSFrameIndex = Max;
411 }
412
413 int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; }
414 int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; }
415
416 void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
417 unsigned getNumLocalDynamicTLSAccesses() const {
418 return NumLocalDynamicTLSAccesses;
419 }
420
421 std::optional<bool> hasRedZone() const { return HasRedZone; }
422 void setHasRedZone(bool s) { HasRedZone = s; }
423
424 int getVarArgsStackIndex() const { return VarArgsStackIndex; }
425 void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }
426
427 unsigned getVarArgsStackOffset() const { return VarArgsStackOffset; }
428 void setVarArgsStackOffset(unsigned Offset) { VarArgsStackOffset = Offset; }
429
430 int getVarArgsGPRIndex() const { return VarArgsGPRIndex; }
431 void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; }
432
433 unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; }
434 void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; }
435
436 int getVarArgsFPRIndex() const { return VarArgsFPRIndex; }
437 void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; }
438
439 unsigned getVarArgsFPRSize() const { return VarArgsFPRSize; }
440 void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; }
441
442 bool hasStackHazardSlotIndex() const {
443 return StackHazardSlotIndex != std::numeric_limits<int>::max();
444 }
445 int getStackHazardSlotIndex() const { return StackHazardSlotIndex; }
446 void setStackHazardSlotIndex(int Index) {
447 assert(StackHazardSlotIndex == std::numeric_limits<int>::max());
448 StackHazardSlotIndex = Index;
449 }
450 int getStackHazardCSRSlotIndex() const { return StackHazardCSRSlotIndex; }
451 void setStackHazardCSRSlotIndex(int Index) {
452 assert(StackHazardCSRSlotIndex == std::numeric_limits<int>::max());
453 StackHazardCSRSlotIndex = Index;
454 }
455
456 SMEAttrs getSMEFnAttrs() const { return SMEFnAttrs; }
457
458 unsigned getSRetReturnReg() const { return SRetReturnReg; }
459 void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
460
461 unsigned getJumpTableEntrySize(int Idx) const {
462 return JumpTableEntryInfo[Idx].first;
463 }
464 MCSymbol *getJumpTableEntryPCRelSymbol(int Idx) const {
465 return JumpTableEntryInfo[Idx].second;
466 }
467 void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym) {
468 if ((unsigned)Idx >= JumpTableEntryInfo.size())
469 JumpTableEntryInfo.resize(N: Idx+1);
470 JumpTableEntryInfo[Idx] = std::make_pair(x&: Size, y&: PCRelSym);
471 }
472
473 using SetOfInstructions = SmallPtrSet<const MachineInstr *, 16>;
474
475 const SetOfInstructions &getLOHRelated() const { return LOHRelated; }
476
477 // Shortcuts for LOH related types.
478 class MILOHDirective {
479 MCLOHType Kind;
480
481 /// Arguments of this directive. Order matters.
482 SmallVector<const MachineInstr *, 3> Args;
483
484 public:
485 using LOHArgs = ArrayRef<const MachineInstr *>;
486
487 MILOHDirective(MCLOHType Kind, LOHArgs Args)
488 : Kind(Kind), Args(Args.begin(), Args.end()) {
489 assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
490 }
491
492 MCLOHType getKind() const { return Kind; }
493 LOHArgs getArgs() const { return Args; }
494 };
495
496 using MILOHArgs = MILOHDirective::LOHArgs;
497 using MILOHContainer = SmallVector<MILOHDirective, 32>;
498
499 const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }
500
501 /// Add a LOH directive of this @p Kind and this @p Args.
502 void addLOHDirective(MCLOHType Kind, MILOHArgs Args) {
503 LOHContainerSet.push_back(Elt: MILOHDirective(Kind, Args));
504 LOHRelated.insert_range(R&: Args);
505 }
506
507 size_t
508 clearLinkerOptimizationHints(const SmallPtrSetImpl<MachineInstr *> &MIs) {
509 size_t InitialSize = LOHContainerSet.size();
510 erase_if(C&: LOHContainerSet, P: [&](const auto &D) {
511 return any_of(D.getArgs(), [&](auto *Arg) { return MIs.contains(Ptr: Arg); });
512 });
513 // In theory there could be an LOH with one label in MIs and another label
514 // outside MIs, however we don't know if the label outside MIs is used in
515 // any other LOHs, so we can't remove them from LOHRelated. In that case, we
516 // might produce a few extra labels, but it won't break anything.
517 LOHRelated.remove_if(P: [&](auto *MI) { return MIs.contains(Ptr: MI); });
518 return InitialSize - LOHContainerSet.size();
519 };
520
521 SmallVectorImpl<ForwardedRegister> &getForwardedMustTailRegParms() {
522 return ForwardedMustTailRegParms;
523 }
524
525 std::optional<int> getTaggedBasePointerIndex() const {
526 return TaggedBasePointerIndex;
527 }
528 void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }
529
530 unsigned getTaggedBasePointerOffset() const {
531 return TaggedBasePointerOffset;
532 }
533 void setTaggedBasePointerOffset(unsigned Offset) {
534 TaggedBasePointerOffset = Offset;
535 }
536
537 int getCalleeSaveBaseToFrameRecordOffset() const {
538 return CalleeSaveBaseToFrameRecordOffset;
539 }
540 void setCalleeSaveBaseToFrameRecordOffset(int Offset) {
541 CalleeSaveBaseToFrameRecordOffset = Offset;
542 }
543
544 bool shouldSignReturnAddress(const MachineFunction &MF) const;
545 bool shouldSignReturnAddress(bool SpillsLR) const;
546
547 bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const;
548
549 bool shouldSignWithBKey() const { return SignWithBKey; }
550
551 bool hasELFSignedGOT() const { return HasELFSignedGOT; }
552
553 MCSymbol *getSigningInstrLabel() const { return SignInstrLabel; }
554 void setSigningInstrLabel(MCSymbol *Label) { SignInstrLabel = Label; }
555
556 bool isMTETagged() const { return IsMTETagged; }
557
558 bool branchTargetEnforcement() const { return BranchTargetEnforcement; }
559
560 bool branchProtectionPAuthLR() const { return BranchProtectionPAuthLR; }
561
562 void setHasSwiftAsyncContext(bool HasContext) {
563 HasSwiftAsyncContext = HasContext;
564 }
565 bool hasSwiftAsyncContext() const { return HasSwiftAsyncContext; }
566
567 void setSwiftAsyncContextFrameIdx(int FI) {
568 SwiftAsyncContextFrameIdx = FI;
569 }
570 int getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; }
571
572 bool needsDwarfUnwindInfo(const MachineFunction &MF) const;
573 bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const;
574
575 bool hasStreamingModeChanges() const { return HasStreamingModeChanges; }
576 void setHasStreamingModeChanges(bool HasChanges) {
577 HasStreamingModeChanges = HasChanges;
578 }
579
580 bool hasStackProbing() const { return StackProbeSize != 0; }
581
582 int64_t getStackProbeSize() const { return StackProbeSize; }
583
584private:
585 // Hold the lists of LOHs.
586 MILOHContainer LOHContainerSet;
587 SetOfInstructions LOHRelated;
588
589 SmallVector<std::pair<unsigned, MCSymbol *>, 2> JumpTableEntryInfo;
590};

namespace yaml {
593struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
594 std::optional<bool> HasRedZone;
595
596 AArch64FunctionInfo() = default;
597 AArch64FunctionInfo(const llvm::AArch64FunctionInfo &MFI);
598
599 void mappingImpl(yaml::IO &YamlIO) override;
600 ~AArch64FunctionInfo() = default;
601};
602
603template <> struct MappingTraits<AArch64FunctionInfo> {
604 static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
605 YamlIO.mapOptional(Key: "hasRedZone", Val&: MFI.HasRedZone);
606 }
607};

} // end namespace yaml

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H