| 1 | //===-- SystemZFrameLowering.cpp - Frame lowering for SystemZ -------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | |
| 9 | #include "SystemZFrameLowering.h" |
| 10 | #include "SystemZCallingConv.h" |
| 11 | #include "SystemZInstrInfo.h" |
| 12 | #include "SystemZMachineFunctionInfo.h" |
| 13 | #include "SystemZRegisterInfo.h" |
| 14 | #include "SystemZSubtarget.h" |
| 15 | #include "llvm/CodeGen/LivePhysRegs.h" |
| 16 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
| 17 | #include "llvm/CodeGen/MachineModuleInfo.h" |
| 18 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
| 19 | #include "llvm/CodeGen/RegisterScavenging.h" |
| 20 | #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" |
| 21 | #include "llvm/IR/CallingConv.h" |
| 22 | #include "llvm/IR/Function.h" |
| 23 | #include "llvm/IR/Module.h" |
| 24 | #include "llvm/Target/TargetMachine.h" |
| 25 | |
| 26 | using namespace llvm; |
| 27 | |
| 28 | namespace { |
// The ABI-defined register save slots, relative to the CFA (i.e.
// incoming stack pointer + SystemZMC::ELFCallFrameSize).
static const TargetFrameLowering::SpillSlot ELFSpillOffsetTable[] = {
  { SystemZ::R2D,  0x10 },
  { SystemZ::R3D,  0x18 },
  { SystemZ::R4D,  0x20 },
  { SystemZ::R5D,  0x28 },
  { SystemZ::R6D,  0x30 },
  { SystemZ::R7D,  0x38 },
  { SystemZ::R8D,  0x40 },
  { SystemZ::R9D,  0x48 },
  { SystemZ::R10D, 0x50 },
  { SystemZ::R11D, 0x58 },
  { SystemZ::R12D, 0x60 },
  { SystemZ::R13D, 0x68 },
  { SystemZ::R14D, 0x70 },
  { SystemZ::R15D, 0x78 },
  { SystemZ::F0D,  0x80 },
  { SystemZ::F2D,  0x88 },
  { SystemZ::F4D,  0x90 },
  { SystemZ::F6D,  0x98 }
};
| 51 | |
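// The XPLINK64 ABI-defined register save slots.  This table is folded into
// RegSpillOffsets by the SystemZXPLINKFrameLowering constructor below.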
static const TargetFrameLowering::SpillSlot XPLINKSpillOffsetTable[] = {
    {SystemZ::R4D, 0x00},  {SystemZ::R5D, 0x08},  {SystemZ::R6D, 0x10},
    {SystemZ::R7D, 0x18},  {SystemZ::R8D, 0x20},  {SystemZ::R9D, 0x28},
    {SystemZ::R10D, 0x30}, {SystemZ::R11D, 0x38}, {SystemZ::R12D, 0x40},
    {SystemZ::R13D, 0x48}, {SystemZ::R14D, 0x50}, {SystemZ::R15D, 0x58}};
| 57 | } // end anonymous namespace |
| 58 | |
| 59 | SystemZFrameLowering::SystemZFrameLowering(StackDirection D, Align StackAl, |
| 60 | int LAO, Align TransAl, |
| 61 | bool StackReal, unsigned PointerSize) |
| 62 | : TargetFrameLowering(D, StackAl, LAO, TransAl, StackReal), |
| 63 | PointerSize(PointerSize) {} |
| 64 | |
| 65 | std::unique_ptr<SystemZFrameLowering> |
| 66 | SystemZFrameLowering::create(const SystemZSubtarget &STI) { |
| 67 | unsigned PtrSz = |
| 68 | STI.getTargetLowering()->getTargetMachine().getPointerSize(AS: 0); |
| 69 | if (STI.isTargetXPLINK64()) |
| 70 | return std::make_unique<SystemZXPLINKFrameLowering>(args&: PtrSz); |
| 71 | return std::make_unique<SystemZELFFrameLowering>(args&: PtrSz); |
| 72 | } |
| 73 | |
| 74 | namespace { |
| 75 | struct SZFrameSortingObj { |
| 76 | bool IsValid = false; // True if we care about this Object. |
| 77 | uint32_t ObjectIndex = 0; // Index of Object into MFI list. |
| 78 | uint64_t ObjectSize = 0; // Size of Object in bytes. |
| 79 | uint32_t D12Count = 0; // 12-bit displacement only. |
| 80 | uint32_t DPairCount = 0; // 12 or 20 bit displacement. |
| 81 | }; |
| 82 | typedef std::vector<SZFrameSortingObj> SZFrameObjVec; |
| 83 | } // namespace |
| 84 | |
| 85 | // TODO: Move to base class. |
| 86 | void SystemZELFFrameLowering::orderFrameObjects( |
| 87 | const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const { |
| 88 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 89 | auto *TII = MF.getSubtarget<SystemZSubtarget>().getInstrInfo(); |
| 90 | |
| 91 | // Make a vector of sorting objects to track all MFI objects and mark those |
| 92 | // to be sorted as valid. |
| 93 | if (ObjectsToAllocate.size() <= 1) |
| 94 | return; |
| 95 | SZFrameObjVec SortingObjects(MFI.getObjectIndexEnd()); |
| 96 | for (auto &Obj : ObjectsToAllocate) { |
| 97 | SortingObjects[Obj].IsValid = true; |
| 98 | SortingObjects[Obj].ObjectIndex = Obj; |
| 99 | SortingObjects[Obj].ObjectSize = MFI.getObjectSize(ObjectIdx: Obj); |
| 100 | } |
| 101 | |
| 102 | // Examine uses for each object and record short (12-bit) and "pair" |
| 103 | // displacement types. |
| 104 | for (auto &MBB : MF) |
| 105 | for (auto &MI : MBB) { |
| 106 | if (MI.isDebugInstr()) |
| 107 | continue; |
| 108 | for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { |
| 109 | const MachineOperand &MO = MI.getOperand(i: I); |
| 110 | if (!MO.isFI()) |
| 111 | continue; |
| 112 | int Index = MO.getIndex(); |
| 113 | if (Index >= 0 && Index < MFI.getObjectIndexEnd() && |
| 114 | SortingObjects[Index].IsValid) { |
| 115 | if (TII->hasDisplacementPairInsn(Opcode: MI.getOpcode())) |
| 116 | SortingObjects[Index].DPairCount++; |
| 117 | else if (!(MI.getDesc().TSFlags & SystemZII::Has20BitOffset)) |
| 118 | SortingObjects[Index].D12Count++; |
| 119 | } |
| 120 | } |
| 121 | } |
| 122 | |
| 123 | // Sort all objects for short/paired displacements, which should be |
| 124 | // sufficient as it seems like all frame objects typically are within the |
| 125 | // long displacement range. Sorting works by computing the "density" as |
| 126 | // Count / ObjectSize. The comparisons of two such fractions are refactored |
| 127 | // by multiplying both sides with A.ObjectSize * B.ObjectSize, in order to |
  // eliminate the (fp) divisions.  A higher-density object needs to come
  // later in the list in order for it to end up lower on the stack.
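  // For example, a 16-byte object with four 12-bit uses (density 1/4) sorts
  // before an 8-byte object with three such uses (density 3/8), since
  // 4 * 8 < 3 * 16, so the denser object ends up lower on the stack.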
| 130 | auto CmpD12 = [](const SZFrameSortingObj &A, const SZFrameSortingObj &B) { |
| 131 | // Put all invalid and variable sized objects at the end. |
| 132 | if (!A.IsValid || !B.IsValid) |
| 133 | return A.IsValid; |
| 134 | if (!A.ObjectSize || !B.ObjectSize) |
| 135 | return A.ObjectSize > 0; |
| 136 | uint64_t ADensityCmp = A.D12Count * B.ObjectSize; |
| 137 | uint64_t BDensityCmp = B.D12Count * A.ObjectSize; |
| 138 | if (ADensityCmp != BDensityCmp) |
| 139 | return ADensityCmp < BDensityCmp; |
| 140 | return A.DPairCount * B.ObjectSize < B.DPairCount * A.ObjectSize; |
| 141 | }; |
| 142 | llvm::stable_sort(Range&: SortingObjects, C: CmpD12); |
| 143 | |
| 144 | // Now modify the original list to represent the final order that |
| 145 | // we want. |
| 146 | unsigned Idx = 0; |
| 147 | for (auto &Obj : SortingObjects) { |
| 148 | // All invalid items are sorted at the end, so it's safe to stop. |
| 149 | if (!Obj.IsValid) |
| 150 | break; |
| 151 | ObjectsToAllocate[Idx++] = Obj.ObjectIndex; |
| 152 | } |
| 153 | } |
| 154 | |
| 155 | bool SystemZFrameLowering::hasReservedCallFrame( |
| 156 | const MachineFunction &MF) const { |
| 157 | // The ELF ABI requires us to allocate 160 bytes of stack space for the |
| 158 | // callee, with any outgoing stack arguments being placed above that. It |
| 159 | // seems better to make that area a permanent feature of the frame even if |
| 160 | // we're using a frame pointer. Similarly, 64-bit XPLINK requires 96 bytes |
| 161 | // of stack space for the register save area. |
| 162 | return true; |
| 163 | } |
| 164 | |
| 165 | bool SystemZELFFrameLowering::assignCalleeSavedSpillSlots( |
| 166 | MachineFunction &MF, const TargetRegisterInfo *TRI, |
| 167 | std::vector<CalleeSavedInfo> &CSI) const { |
| 168 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 169 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 170 | bool IsVarArg = MF.getFunction().isVarArg(); |
| 171 | if (CSI.empty()) |
| 172 | return true; // Early exit if no callee saved registers are modified! |
| 173 | |
| 174 | unsigned LowGPR = 0; |
| 175 | unsigned HighGPR = SystemZ::R15D; |
| 176 | int StartSPOffset = SystemZMC::ELFCallFrameSize; |
| 177 | for (auto &CS : CSI) { |
| 178 | MCRegister Reg = CS.getReg(); |
| 179 | int Offset = getRegSpillOffset(MF, Reg); |
| 180 | if (Offset) { |
| 181 | if (SystemZ::GR64BitRegClass.contains(Reg) && StartSPOffset > Offset) { |
| 182 | LowGPR = Reg; |
| 183 | StartSPOffset = Offset; |
| 184 | } |
| 185 | Offset -= SystemZMC::ELFCallFrameSize; |
| 186 | int FrameIdx = |
| 187 | MFFrame.CreateFixedSpillStackObject(Size: getPointerSize(), SPOffset: Offset); |
| 188 | CS.setFrameIdx(FrameIdx); |
| 189 | } else |
| 190 | CS.setFrameIdx(INT32_MAX); |
| 191 | } |
| 192 | |
| 193 | // Save the range of call-saved registers, for use by the |
| 194 | // prologue/epilogue inserters. |
| 195 | ZFI->setRestoreGPRRegs(Low: LowGPR, High: HighGPR, Offs: StartSPOffset); |
| 196 | if (IsVarArg) { |
| 197 | // Also save the GPR varargs, if any. R6D is call-saved, so would |
| 198 | // already be included, but we also need to handle the call-clobbered |
| 199 | // argument registers. |
| 200 | Register FirstGPR = ZFI->getVarArgsFirstGPR(); |
| 201 | if (FirstGPR < SystemZ::ELFNumArgGPRs) { |
| 202 | unsigned Reg = SystemZ::ELFArgGPRs[FirstGPR]; |
| 203 | int Offset = getRegSpillOffset(MF, Reg); |
| 204 | if (StartSPOffset > Offset) { |
| 205 | LowGPR = Reg; StartSPOffset = Offset; |
| 206 | } |
| 207 | } |
| 208 | } |
| 209 | ZFI->setSpillGPRRegs(Low: LowGPR, High: HighGPR, Offs: StartSPOffset); |
| 210 | |
| 211 | // Create fixed stack objects for the remaining registers. |
| 212 | int CurrOffset = -SystemZMC::ELFCallFrameSize; |
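  // With a packed stack, the remaining (FPR/vector) save slots are created
  // directly below the lowest GPR save slot rather than below the full
  // 160-byte register save area.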
| 213 | if (usePackedStack(MF)) |
| 214 | CurrOffset += StartSPOffset; |
| 215 | |
| 216 | for (auto &CS : CSI) { |
| 217 | if (CS.getFrameIdx() != INT32_MAX) |
| 218 | continue; |
| 219 | MCRegister Reg = CS.getReg(); |
| 220 | const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); |
| 221 | unsigned Size = TRI->getSpillSize(RC: *RC); |
| 222 | CurrOffset -= Size; |
    assert(CurrOffset % 8 == 0 &&
           "8-byte alignment required for all register save slots");
| 225 | int FrameIdx = MFFrame.CreateFixedSpillStackObject(Size, SPOffset: CurrOffset); |
| 226 | CS.setFrameIdx(FrameIdx); |
| 227 | } |
| 228 | |
| 229 | return true; |
| 230 | } |
| 231 | |
| 232 | void SystemZELFFrameLowering::determineCalleeSaves(MachineFunction &MF, |
| 233 | BitVector &SavedRegs, |
| 234 | RegScavenger *RS) const { |
| 235 | TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); |
| 236 | |
| 237 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 238 | const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); |
| 239 | bool HasFP = hasFP(MF); |
| 240 | SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 241 | bool IsVarArg = MF.getFunction().isVarArg(); |
| 242 | |
| 243 | // va_start stores incoming FPR varargs in the normal way, but delegates |
| 244 | // the saving of incoming GPR varargs to spillCalleeSavedRegisters(). |
| 245 | // Record these pending uses, which typically include the call-saved |
| 246 | // argument register R6D. |
| 247 | if (IsVarArg) |
| 248 | for (unsigned I = MFI->getVarArgsFirstGPR(); I < SystemZ::ELFNumArgGPRs; ++I) |
| 249 | SavedRegs.set(SystemZ::ELFArgGPRs[I]); |
| 250 | |
| 251 | // If there are any landing pads, entering them will modify r6/r7. |
| 252 | if (!MF.getLandingPads().empty()) { |
| 253 | SavedRegs.set(SystemZ::R6D); |
| 254 | SavedRegs.set(SystemZ::R7D); |
| 255 | } |
| 256 | |
| 257 | // If the function requires a frame pointer, record that the hard |
| 258 | // frame pointer will be clobbered. |
| 259 | if (HasFP) |
| 260 | SavedRegs.set(SystemZ::R11D); |
| 261 | |
| 262 | // If the function calls other functions, record that the return |
| 263 | // address register will be clobbered. |
| 264 | if (MFFrame.hasCalls()) |
| 265 | SavedRegs.set(SystemZ::R14D); |
| 266 | |
| 267 | // If we are saving GPRs other than the stack pointer, we might as well |
| 268 | // save and restore the stack pointer at the same time, via STMG and LMG. |
| 269 | // This allows the deallocation to be done by the LMG, rather than needing |
| 270 | // a separate %r15 addition. |
| 271 | const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(MF: &MF); |
| 272 | for (unsigned I = 0; CSRegs[I]; ++I) { |
| 273 | unsigned Reg = CSRegs[I]; |
| 274 | if (SystemZ::GR64BitRegClass.contains(Reg) && SavedRegs.test(Idx: Reg)) { |
| 275 | SavedRegs.set(SystemZ::R15D); |
| 276 | break; |
| 277 | } |
| 278 | } |
| 279 | } |
| 280 | |
| 281 | SystemZELFFrameLowering::SystemZELFFrameLowering(unsigned PointerSize) |
| 282 | : SystemZFrameLowering(TargetFrameLowering::StackGrowsDown, Align(8), 0, |
| 283 | Align(8), /* StackRealignable */ false, PointerSize), |
| 284 | RegSpillOffsets(0) { |
| 285 | |
  // Due to the SystemZ ABI, the DWARF CFA (Canonical Frame Address) is not
  // equal to the incoming stack pointer, but to the incoming stack pointer
  // plus 160.  Instead of using a Local Area Offset, the register save area
  // will be occupied by fixed frame objects, and all offsets are actually
  // relative to CFA.
| 291 | |
| 292 | // Create a mapping from register number to save slot offset. |
| 293 | // These offsets are relative to the start of the register save area. |
| 294 | RegSpillOffsets.grow(n: SystemZ::NUM_TARGET_REGS); |
| 295 | for (const auto &Entry : ELFSpillOffsetTable) |
| 296 | RegSpillOffsets[Entry.Reg] = Entry.Offset; |
| 297 | } |
| 298 | |
// Add GPR64 to the save instruction being built by MIB, which is in basic
// block MBB.  IsImplicit says whether GPR64 is added as an implicit operand
// (one that lies between the explicit start and end registers of the store
// multiple) rather than as an explicit operand.
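// This is used below both for the explicit low/high operands of the STMG and
// for the remaining call-saved GPRs, which are added as implicit operands.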
| 303 | static void addSavedGPR(MachineBasicBlock &MBB, MachineInstrBuilder &MIB, |
| 304 | unsigned GPR64, bool IsImplicit) { |
| 305 | const TargetRegisterInfo *RI = |
| 306 | MBB.getParent()->getSubtarget().getRegisterInfo(); |
| 307 | Register GPR32 = RI->getSubReg(Reg: GPR64, Idx: SystemZ::subreg_l32); |
| 308 | bool IsLive = MBB.isLiveIn(Reg: GPR64) || MBB.isLiveIn(Reg: GPR32); |
| 309 | if (!IsLive || !IsImplicit) { |
| 310 | MIB.addReg(RegNo: GPR64, flags: getImplRegState(B: IsImplicit) | getKillRegState(B: !IsLive)); |
| 311 | if (!IsLive) |
| 312 | MBB.addLiveIn(PhysReg: GPR64); |
| 313 | } |
| 314 | } |
| 315 | |
| 316 | bool SystemZELFFrameLowering::spillCalleeSavedRegisters( |
| 317 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, |
| 318 | ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const { |
| 319 | if (CSI.empty()) |
| 320 | return false; |
| 321 | |
| 322 | MachineFunction &MF = *MBB.getParent(); |
| 323 | const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); |
| 324 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 325 | bool IsVarArg = MF.getFunction().isVarArg(); |
| 326 | DebugLoc DL; |
| 327 | |
| 328 | // Save GPRs |
| 329 | SystemZ::GPRRegs SpillGPRs = ZFI->getSpillGPRRegs(); |
| 330 | if (SpillGPRs.LowGPR) { |
| 331 | assert(SpillGPRs.LowGPR != SpillGPRs.HighGPR && |
| 332 | "Should be saving %r15 and something else" ); |
| 333 | |
| 334 | // Build an STMG instruction. |
| 335 | MachineInstrBuilder MIB = BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: SystemZ::STMG)); |
| 336 | |
| 337 | // Add the explicit register operands. |
| 338 | addSavedGPR(MBB, MIB, GPR64: SpillGPRs.LowGPR, IsImplicit: false); |
| 339 | addSavedGPR(MBB, MIB, GPR64: SpillGPRs.HighGPR, IsImplicit: false); |
| 340 | |
| 341 | // Add the address. |
| 342 | MIB.addReg(RegNo: SystemZ::R15D).addImm(Val: SpillGPRs.GPROffset); |
| 343 | |
| 344 | // Make sure all call-saved GPRs are included as operands and are |
| 345 | // marked as live on entry. |
| 346 | for (const CalleeSavedInfo &I : CSI) { |
| 347 | MCRegister Reg = I.getReg(); |
| 348 | if (SystemZ::GR64BitRegClass.contains(Reg)) |
| 349 | addSavedGPR(MBB, MIB, GPR64: Reg, IsImplicit: true); |
| 350 | } |
| 351 | |
| 352 | // ...likewise GPR varargs. |
| 353 | if (IsVarArg) |
| 354 | for (unsigned I = ZFI->getVarArgsFirstGPR(); I < SystemZ::ELFNumArgGPRs; ++I) |
| 355 | addSavedGPR(MBB, MIB, GPR64: SystemZ::ELFArgGPRs[I], IsImplicit: true); |
| 356 | } |
| 357 | |
| 358 | // Save FPRs/VRs in the normal TargetInstrInfo way. |
| 359 | for (const CalleeSavedInfo &I : CSI) { |
| 360 | MCRegister Reg = I.getReg(); |
| 361 | if (SystemZ::FP64BitRegClass.contains(Reg)) { |
| 362 | MBB.addLiveIn(PhysReg: Reg); |
| 363 | TII->storeRegToStackSlot(MBB, MI: MBBI, SrcReg: Reg, isKill: true, FrameIndex: I.getFrameIdx(), |
| 364 | RC: &SystemZ::FP64BitRegClass, TRI, VReg: Register()); |
| 365 | } |
| 366 | if (SystemZ::VR128BitRegClass.contains(Reg)) { |
| 367 | MBB.addLiveIn(PhysReg: Reg); |
| 368 | TII->storeRegToStackSlot(MBB, MI: MBBI, SrcReg: Reg, isKill: true, FrameIndex: I.getFrameIdx(), |
| 369 | RC: &SystemZ::VR128BitRegClass, TRI, VReg: Register()); |
| 370 | } |
| 371 | } |
| 372 | |
| 373 | return true; |
| 374 | } |
| 375 | |
| 376 | bool SystemZELFFrameLowering::restoreCalleeSavedRegisters( |
| 377 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, |
| 378 | MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const { |
| 379 | if (CSI.empty()) |
| 380 | return false; |
| 381 | |
| 382 | MachineFunction &MF = *MBB.getParent(); |
| 383 | const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); |
| 384 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 385 | bool HasFP = hasFP(MF); |
| 386 | DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); |
| 387 | |
| 388 | // Restore FPRs/VRs in the normal TargetInstrInfo way. |
| 389 | for (const CalleeSavedInfo &I : CSI) { |
| 390 | MCRegister Reg = I.getReg(); |
| 391 | if (SystemZ::FP64BitRegClass.contains(Reg)) |
| 392 | TII->loadRegFromStackSlot(MBB, MI: MBBI, DestReg: Reg, FrameIndex: I.getFrameIdx(), |
| 393 | RC: &SystemZ::FP64BitRegClass, TRI, VReg: Register()); |
| 394 | if (SystemZ::VR128BitRegClass.contains(Reg)) |
| 395 | TII->loadRegFromStackSlot(MBB, MI: MBBI, DestReg: Reg, FrameIndex: I.getFrameIdx(), |
| 396 | RC: &SystemZ::VR128BitRegClass, TRI, VReg: Register()); |
| 397 | } |
| 398 | |
| 399 | // Restore call-saved GPRs (but not call-clobbered varargs, which at |
| 400 | // this point might hold return values). |
| 401 | SystemZ::GPRRegs RestoreGPRs = ZFI->getRestoreGPRRegs(); |
| 402 | if (RestoreGPRs.LowGPR) { |
| 403 | // If we saved any of %r2-%r5 as varargs, we should also be saving |
| 404 | // and restoring %r6. If we're saving %r6 or above, we should be |
| 405 | // restoring it too. |
| 406 | assert(RestoreGPRs.LowGPR != RestoreGPRs.HighGPR && |
| 407 | "Should be loading %r15 and something else" ); |
| 408 | |
| 409 | // Build an LMG instruction. |
| 410 | MachineInstrBuilder MIB = BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: SystemZ::LMG)); |
| 411 | |
| 412 | // Add the explicit register operands. |
| 413 | MIB.addReg(RegNo: RestoreGPRs.LowGPR, flags: RegState::Define); |
| 414 | MIB.addReg(RegNo: RestoreGPRs.HighGPR, flags: RegState::Define); |
| 415 | |
| 416 | // Add the address. |
| 417 | MIB.addReg(RegNo: HasFP ? SystemZ::R11D : SystemZ::R15D); |
| 418 | MIB.addImm(Val: RestoreGPRs.GPROffset); |
| 419 | |
    // Do a second scan, adding the remaining call-saved GPRs as implicit
    // defs of the instruction.
| 421 | for (const CalleeSavedInfo &I : CSI) { |
| 422 | MCRegister Reg = I.getReg(); |
| 423 | if (Reg != RestoreGPRs.LowGPR && Reg != RestoreGPRs.HighGPR && |
| 424 | SystemZ::GR64BitRegClass.contains(Reg)) |
| 425 | MIB.addReg(RegNo: Reg, flags: RegState::ImplicitDefine); |
| 426 | } |
| 427 | } |
| 428 | |
| 429 | return true; |
| 430 | } |
| 431 | |
| 432 | void SystemZELFFrameLowering::processFunctionBeforeFrameFinalized( |
| 433 | MachineFunction &MF, RegScavenger *RS) const { |
| 434 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 435 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 436 | MachineRegisterInfo *MRI = &MF.getRegInfo(); |
| 437 | bool BackChain = MF.getSubtarget<SystemZSubtarget>().hasBackChain(); |
| 438 | |
| 439 | if (!usePackedStack(MF) || BackChain) |
| 440 | // Create the incoming register save area. |
| 441 | getOrCreateFramePointerSaveIndex(MF); |
| 442 | |
| 443 | // Get the size of our stack frame to be allocated ... |
| 444 | uint64_t StackSize = (MFFrame.estimateStackSize(MF) + |
| 445 | SystemZMC::ELFCallFrameSize); |
| 446 | // ... and the maximum offset we may need to reach into the |
| 447 | // caller's frame to access the save area or stack arguments. |
| 448 | int64_t MaxArgOffset = 0; |
| 449 | for (int I = MFFrame.getObjectIndexBegin(); I != 0; ++I) |
| 450 | if (MFFrame.getObjectOffset(ObjectIdx: I) >= 0) { |
| 451 | int64_t ArgOffset = MFFrame.getObjectOffset(ObjectIdx: I) + |
| 452 | MFFrame.getObjectSize(ObjectIdx: I); |
| 453 | MaxArgOffset = std::max(a: MaxArgOffset, b: ArgOffset); |
| 454 | } |
| 455 | |
| 456 | uint64_t MaxReach = StackSize + MaxArgOffset; |
| 457 | if (!isUInt<12>(x: MaxReach)) { |
| 458 | // We may need register scavenging slots if some parts of the frame |
| 459 | // are outside the reach of an unsigned 12-bit displacement. |
| 460 | // Create 2 for the case where both addresses in an MVC are |
| 461 | // out of range. |
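    // (An unsigned 12-bit displacement reaches byte offsets 0 through 4095,
    // so smaller frames never need these emergency slots.)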
| 462 | RS->addScavengingFrameIndex( |
| 463 | FI: MFFrame.CreateSpillStackObject(Size: getPointerSize(), Alignment: Align(8))); |
| 464 | RS->addScavengingFrameIndex( |
| 465 | FI: MFFrame.CreateSpillStackObject(Size: getPointerSize(), Alignment: Align(8))); |
| 466 | } |
| 467 | |
  // If R6 is used as an argument register it is still callee-saved.  If in
  // this case it is not clobbered (and restored), it should never be marked
  // as killed.
| 471 | if (MF.front().isLiveIn(Reg: SystemZ::R6D) && |
| 472 | ZFI->getRestoreGPRRegs().LowGPR != SystemZ::R6D) |
| 473 | for (auto &MO : MRI->use_nodbg_operands(Reg: SystemZ::R6D)) |
| 474 | MO.setIsKill(false); |
| 475 | } |
| 476 | |
| 477 | // Emit instructions before MBBI (in MBB) to add NumBytes to Reg. |
| 478 | static void emitIncrement(MachineBasicBlock &MBB, |
| 479 | MachineBasicBlock::iterator &MBBI, const DebugLoc &DL, |
| 480 | Register Reg, int64_t NumBytes, |
| 481 | const TargetInstrInfo *TII) { |
| 482 | while (NumBytes) { |
| 483 | unsigned Opcode; |
| 484 | int64_t ThisVal = NumBytes; |
| 485 | if (isInt<16>(x: NumBytes)) |
| 486 | Opcode = SystemZ::AGHI; |
| 487 | else { |
| 488 | Opcode = SystemZ::AGFI; |
| 489 | // Make sure we maintain 8-byte stack alignment. |
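      // For example, a requested decrement of -0x90000000 is emitted as
      // AGFI -0x80000000 followed by AGFI -0x10000000 on the next iteration
      // of the loop.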
| 490 | int64_t MinVal = -uint64_t(1) << 31; |
| 491 | int64_t MaxVal = (int64_t(1) << 31) - 8; |
| 492 | if (ThisVal < MinVal) |
| 493 | ThisVal = MinVal; |
| 494 | else if (ThisVal > MaxVal) |
| 495 | ThisVal = MaxVal; |
| 496 | } |
| 497 | MachineInstr *MI = BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode), DestReg: Reg) |
| 498 | .addReg(RegNo: Reg).addImm(Val: ThisVal); |
| 499 | // The CC implicit def is dead. |
| 500 | MI->getOperand(i: 3).setIsDead(); |
| 501 | NumBytes -= ThisVal; |
| 502 | } |
| 503 | } |
| 504 | |
| 505 | // Add CFI for the new CFA offset. |
| 506 | static void buildCFAOffs(MachineBasicBlock &MBB, |
| 507 | MachineBasicBlock::iterator MBBI, |
| 508 | const DebugLoc &DL, int Offset, |
| 509 | const SystemZInstrInfo *ZII) { |
| 510 | unsigned CFIIndex = MBB.getParent()->addFrameInst( |
| 511 | Inst: MCCFIInstruction::cfiDefCfaOffset(L: nullptr, Offset: -Offset)); |
| 512 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: TargetOpcode::CFI_INSTRUCTION)) |
| 513 | .addCFIIndex(CFIIndex); |
| 514 | } |
| 515 | |
| 516 | // Add CFI for the new frame location. |
| 517 | static void buildDefCFAReg(MachineBasicBlock &MBB, |
| 518 | MachineBasicBlock::iterator MBBI, |
| 519 | const DebugLoc &DL, unsigned Reg, |
| 520 | const SystemZInstrInfo *ZII) { |
| 521 | MachineFunction &MF = *MBB.getParent(); |
| 522 | const MCRegisterInfo *MRI = MF.getContext().getRegisterInfo(); |
| 523 | unsigned RegNum = MRI->getDwarfRegNum(RegNum: Reg, isEH: true); |
| 524 | unsigned CFIIndex = MF.addFrameInst( |
| 525 | Inst: MCCFIInstruction::createDefCfaRegister(L: nullptr, Register: RegNum)); |
| 526 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: TargetOpcode::CFI_INSTRUCTION)) |
| 527 | .addCFIIndex(CFIIndex); |
| 528 | } |
| 529 | |
| 530 | void SystemZELFFrameLowering::emitPrologue(MachineFunction &MF, |
| 531 | MachineBasicBlock &MBB) const { |
| 532 | assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported" ); |
| 533 | const SystemZSubtarget &STI = MF.getSubtarget<SystemZSubtarget>(); |
| 534 | const SystemZTargetLowering &TLI = *STI.getTargetLowering(); |
| 535 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 536 | auto *ZII = static_cast<const SystemZInstrInfo *>(STI.getInstrInfo()); |
| 537 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 538 | MachineBasicBlock::iterator MBBI = MBB.begin(); |
| 539 | const MCRegisterInfo *MRI = MF.getContext().getRegisterInfo(); |
| 540 | const std::vector<CalleeSavedInfo> &CSI = MFFrame.getCalleeSavedInfo(); |
| 541 | bool HasFP = hasFP(MF); |
| 542 | |
  // In the GHC calling convention, C stack space, including the ABI-defined
  // 160-byte base area, is (de)allocated by GHC itself.  This stack space may
  // be used by LLVM as spill slots for the tail-recursive GHC functions, so
  // do not allocate any stack space here either.
| 547 | if (MF.getFunction().getCallingConv() == CallingConv::GHC) { |
| 548 | if (MFFrame.getStackSize() > 2048 * sizeof(long)) { |
| 549 | report_fatal_error( |
| 550 | reason: "Pre allocated stack space for GHC function is too small" ); |
| 551 | } |
| 552 | if (HasFP) { |
| 553 | report_fatal_error( |
| 554 | reason: "In GHC calling convention a frame pointer is not supported" ); |
| 555 | } |
| 556 | MFFrame.setStackSize(MFFrame.getStackSize() + SystemZMC::ELFCallFrameSize); |
| 557 | return; |
| 558 | } |
| 559 | |
| 560 | // Debug location must be unknown since the first debug location is used |
| 561 | // to determine the end of the prologue. |
| 562 | DebugLoc DL; |
| 563 | // Add mcount instrumentation if necessary. |
| 564 | if (MF.getFunction() |
| 565 | .getFnAttribute(Kind: "systemz-instrument-function-entry" ) |
| 566 | .getValueAsString() == "mcount" ) { |
| 567 | |
| 568 | // Store return address 8 bytes above stack pointer. |
| 569 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::STG)) |
| 570 | .addReg(RegNo: SystemZ::R14D) |
| 571 | .addReg(RegNo: SystemZ::R15D) |
| 572 | .addImm(Val: 8) |
| 573 | .addReg(RegNo: 0); |
| 574 | |
| 575 | // Call mcount (Regmask from CC AnyReg since mcount preserves all normal |
| 576 | // argument registers). |
| 577 | FunctionCallee FC = MF.getFunction().getParent()->getOrInsertFunction( |
| 578 | Name: "mcount" , RetTy: Type::getVoidTy(C&: MF.getFunction().getContext())); |
| 579 | const uint32_t *Mask = MF.getSubtarget<SystemZSubtarget>() |
| 580 | .getSpecialRegisters() |
| 581 | ->getCallPreservedMask(MF, CC: CallingConv::AnyReg); |
| 582 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::CallBRASL)) |
| 583 | .addGlobalAddress(GV: dyn_cast<Function>(Val: FC.getCallee())) |
| 584 | .addRegMask(Mask); |
| 585 | |
| 586 | // Reload return address from 8 bytes above stack pointer. |
| 587 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LG)) |
| 588 | .addReg(RegNo: SystemZ::R14D, flags: RegState::Define) |
| 589 | .addReg(RegNo: SystemZ::R15D) |
| 590 | .addImm(Val: 8) |
| 591 | .addReg(RegNo: 0); |
| 592 | } |
| 593 | |
| 594 | // The current offset of the stack pointer from the CFA. |
| 595 | int64_t SPOffsetFromCFA = -SystemZMC::ELFCFAOffsetFromInitialSP; |
| 596 | |
| 597 | if (ZFI->getSpillGPRRegs().LowGPR) { |
| 598 | // Skip over the GPR saves. |
| 599 | if (MBBI != MBB.end() && MBBI->getOpcode() == SystemZ::STMG) |
| 600 | ++MBBI; |
| 601 | else |
| 602 | llvm_unreachable("Couldn't skip over GPR saves" ); |
| 603 | |
| 604 | // Add CFI for the GPR saves. |
| 605 | for (auto &Save : CSI) { |
| 606 | MCRegister Reg = Save.getReg(); |
| 607 | if (SystemZ::GR64BitRegClass.contains(Reg)) { |
| 608 | int FI = Save.getFrameIdx(); |
| 609 | int64_t Offset = MFFrame.getObjectOffset(ObjectIdx: FI); |
| 610 | unsigned CFIIndex = MF.addFrameInst(Inst: MCCFIInstruction::createOffset( |
| 611 | L: nullptr, Register: MRI->getDwarfRegNum(RegNum: Reg, isEH: true), Offset)); |
| 612 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: TargetOpcode::CFI_INSTRUCTION)) |
| 613 | .addCFIIndex(CFIIndex); |
| 614 | } |
| 615 | } |
| 616 | } |
| 617 | |
| 618 | uint64_t StackSize = MFFrame.getStackSize(); |
| 619 | // We need to allocate the ABI-defined 160-byte base area whenever |
| 620 | // we allocate stack space for our own use and whenever we call another |
| 621 | // function. |
| 622 | bool HasStackObject = false; |
| 623 | for (unsigned i = 0, e = MFFrame.getObjectIndexEnd(); i != e; ++i) |
| 624 | if (!MFFrame.isDeadObjectIndex(ObjectIdx: i)) { |
| 625 | HasStackObject = true; |
| 626 | break; |
| 627 | } |
| 628 | if (HasStackObject || MFFrame.hasCalls()) |
| 629 | StackSize += SystemZMC::ELFCallFrameSize; |
| 630 | // Don't allocate the incoming reg save area. |
| 631 | StackSize = StackSize > SystemZMC::ELFCallFrameSize |
| 632 | ? StackSize - SystemZMC::ELFCallFrameSize |
| 633 | : 0; |
| 634 | MFFrame.setStackSize(StackSize); |
| 635 | |
| 636 | if (StackSize) { |
| 637 | // Allocate StackSize bytes. |
| 638 | int64_t Delta = -int64_t(StackSize); |
| 639 | const unsigned ProbeSize = TLI.getStackProbeSize(MF); |
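    // If the STMG that saves the GPRs stores within ProbeSize bytes of the
    // new stack pointer value, that store can itself serve as the probe, and
    // no explicit probing of the allocation is needed.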
| 640 | bool FreeProbe = (ZFI->getSpillGPRRegs().GPROffset && |
| 641 | (ZFI->getSpillGPRRegs().GPROffset + StackSize) < ProbeSize); |
| 642 | if (!FreeProbe && |
| 643 | MF.getSubtarget().getTargetLowering()->hasInlineStackProbe(MF)) { |
| 644 | // Stack probing may involve looping, but splitting the prologue block |
| 645 | // is not possible at this point since it would invalidate the |
| 646 | // SaveBlocks / RestoreBlocks sets of PEI in the single block function |
| 647 | // case. Build a pseudo to be handled later by inlineStackProbe(). |
| 648 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::PROBED_STACKALLOC)) |
| 649 | .addImm(Val: StackSize); |
| 650 | } |
| 651 | else { |
| 652 | bool StoreBackchain = MF.getSubtarget<SystemZSubtarget>().hasBackChain(); |
| 653 | // If we need backchain, save current stack pointer. R1 is free at |
| 654 | // this point. |
| 655 | if (StoreBackchain) |
| 656 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LGR)) |
| 657 | .addReg(RegNo: SystemZ::R1D, flags: RegState::Define).addReg(RegNo: SystemZ::R15D); |
| 658 | emitIncrement(MBB, MBBI, DL, Reg: SystemZ::R15D, NumBytes: Delta, TII: ZII); |
| 659 | buildCFAOffs(MBB, MBBI, DL, Offset: SPOffsetFromCFA + Delta, ZII); |
| 660 | if (StoreBackchain) |
| 661 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::STG)) |
| 662 | .addReg(RegNo: SystemZ::R1D, flags: RegState::Kill).addReg(RegNo: SystemZ::R15D) |
| 663 | .addImm(Val: getBackchainOffset(MF)).addReg(RegNo: 0); |
| 664 | } |
| 665 | SPOffsetFromCFA += Delta; |
| 666 | } |
| 667 | |
| 668 | if (HasFP) { |
| 669 | // Copy the base of the frame to R11. |
| 670 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LGR), DestReg: SystemZ::R11D) |
| 671 | .addReg(RegNo: SystemZ::R15D); |
| 672 | |
| 673 | // Add CFI for the new frame location. |
| 674 | buildDefCFAReg(MBB, MBBI, DL, Reg: SystemZ::R11D, ZII); |
| 675 | |
| 676 | // Mark the FramePtr as live at the beginning of every block except |
| 677 | // the entry block. (We'll have marked R11 as live on entry when |
| 678 | // saving the GPRs.) |
| 679 | for (MachineBasicBlock &MBBJ : llvm::drop_begin(RangeOrContainer&: MF)) |
| 680 | MBBJ.addLiveIn(PhysReg: SystemZ::R11D); |
| 681 | } |
| 682 | |
| 683 | // Skip over the FPR/VR saves. |
| 684 | SmallVector<unsigned, 8> CFIIndexes; |
| 685 | for (auto &Save : CSI) { |
| 686 | MCRegister Reg = Save.getReg(); |
| 687 | if (SystemZ::FP64BitRegClass.contains(Reg)) { |
| 688 | if (MBBI != MBB.end() && |
| 689 | (MBBI->getOpcode() == SystemZ::STD || |
| 690 | MBBI->getOpcode() == SystemZ::STDY)) |
| 691 | ++MBBI; |
| 692 | else |
| 693 | llvm_unreachable("Couldn't skip over FPR save" ); |
| 694 | } else if (SystemZ::VR128BitRegClass.contains(Reg)) { |
| 695 | if (MBBI != MBB.end() && |
| 696 | MBBI->getOpcode() == SystemZ::VST) |
| 697 | ++MBBI; |
| 698 | else |
| 699 | llvm_unreachable("Couldn't skip over VR save" ); |
| 700 | } else |
| 701 | continue; |
| 702 | |
    // Add CFI for this save.
| 704 | unsigned DwarfReg = MRI->getDwarfRegNum(RegNum: Reg, isEH: true); |
| 705 | Register IgnoredFrameReg; |
| 706 | int64_t Offset = |
| 707 | getFrameIndexReference(MF, FI: Save.getFrameIdx(), FrameReg&: IgnoredFrameReg) |
| 708 | .getFixed(); |
| 709 | |
| 710 | unsigned CFIIndex = MF.addFrameInst(Inst: MCCFIInstruction::createOffset( |
| 711 | L: nullptr, Register: DwarfReg, Offset: SPOffsetFromCFA + Offset)); |
| 712 | CFIIndexes.push_back(Elt: CFIIndex); |
| 713 | } |
| 714 | // Complete the CFI for the FPR/VR saves, modelling them as taking effect |
| 715 | // after the last save. |
| 716 | for (auto CFIIndex : CFIIndexes) { |
| 717 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: TargetOpcode::CFI_INSTRUCTION)) |
| 718 | .addCFIIndex(CFIIndex); |
| 719 | } |
| 720 | } |
| 721 | |
| 722 | void SystemZELFFrameLowering::emitEpilogue(MachineFunction &MF, |
| 723 | MachineBasicBlock &MBB) const { |
| 724 | MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); |
| 725 | auto *ZII = |
| 726 | static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo()); |
| 727 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 728 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 729 | |
| 730 | // See SystemZELFFrameLowering::emitPrologue |
| 731 | if (MF.getFunction().getCallingConv() == CallingConv::GHC) |
| 732 | return; |
| 733 | |
| 734 | // Skip the return instruction. |
| 735 | assert(MBBI->isReturn() && "Can only insert epilogue into returning blocks" ); |
| 736 | |
| 737 | uint64_t StackSize = MFFrame.getStackSize(); |
| 738 | if (ZFI->getRestoreGPRRegs().LowGPR) { |
| 739 | --MBBI; |
| 740 | unsigned Opcode = MBBI->getOpcode(); |
| 741 | if (Opcode != SystemZ::LMG) |
| 742 | llvm_unreachable("Expected to see callee-save register restore code" ); |
| 743 | |
| 744 | unsigned AddrOpNo = 2; |
| 745 | DebugLoc DL = MBBI->getDebugLoc(); |
| 746 | uint64_t Offset = StackSize + MBBI->getOperand(i: AddrOpNo + 1).getImm(); |
| 747 | unsigned NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset); |
| 748 | |
| 749 | // If the offset is too large, use the largest stack-aligned offset |
| 750 | // and add the rest to the base register (the stack or frame pointer). |
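    // For example, if Offset were 0x90000, we would first add 0x10008 to the
    // base register and then use 0x7fff8 (the largest 8-byte-aligned value
    // that still fits in a 20-bit signed displacement) as the remaining
    // offset.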
| 751 | if (!NewOpcode) { |
| 752 | uint64_t NumBytes = Offset - 0x7fff8; |
| 753 | emitIncrement(MBB, MBBI, DL, Reg: MBBI->getOperand(i: AddrOpNo).getReg(), |
| 754 | NumBytes, TII: ZII); |
| 755 | Offset -= NumBytes; |
| 756 | NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset); |
| 757 | assert(NewOpcode && "No restore instruction available" ); |
| 758 | } |
| 759 | |
| 760 | MBBI->setDesc(ZII->get(Opcode: NewOpcode)); |
| 761 | MBBI->getOperand(i: AddrOpNo + 1).ChangeToImmediate(ImmVal: Offset); |
| 762 | } else if (StackSize) { |
| 763 | DebugLoc DL = MBBI->getDebugLoc(); |
| 764 | emitIncrement(MBB, MBBI, DL, Reg: SystemZ::R15D, NumBytes: StackSize, TII: ZII); |
| 765 | } |
| 766 | } |
| 767 | |
| 768 | void SystemZELFFrameLowering::inlineStackProbe( |
| 769 | MachineFunction &MF, MachineBasicBlock &PrologMBB) const { |
| 770 | auto *ZII = |
| 771 | static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo()); |
| 772 | const SystemZSubtarget &STI = MF.getSubtarget<SystemZSubtarget>(); |
| 773 | const SystemZTargetLowering &TLI = *STI.getTargetLowering(); |
| 774 | |
| 775 | MachineInstr *StackAllocMI = nullptr; |
| 776 | for (MachineInstr &MI : PrologMBB) |
| 777 | if (MI.getOpcode() == SystemZ::PROBED_STACKALLOC) { |
| 778 | StackAllocMI = &MI; |
| 779 | break; |
| 780 | } |
| 781 | if (StackAllocMI == nullptr) |
| 782 | return; |
| 783 | uint64_t StackSize = StackAllocMI->getOperand(i: 0).getImm(); |
| 784 | const unsigned ProbeSize = TLI.getStackProbeSize(MF); |
| 785 | uint64_t NumFullBlocks = StackSize / ProbeSize; |
| 786 | uint64_t Residual = StackSize % ProbeSize; |
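  // For instance, with a 4096-byte probe interval, StackSize == 18000 gives
  // NumFullBlocks == 4 and Residual == 1616.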
| 787 | int64_t SPOffsetFromCFA = -SystemZMC::ELFCFAOffsetFromInitialSP; |
| 788 | MachineBasicBlock *MBB = &PrologMBB; |
| 789 | MachineBasicBlock::iterator MBBI = StackAllocMI; |
| 790 | const DebugLoc DL = StackAllocMI->getDebugLoc(); |
| 791 | |
| 792 | // Allocate a block of Size bytes on the stack and probe it. |
| 793 | auto allocateAndProbe = [&](MachineBasicBlock &InsMBB, |
| 794 | MachineBasicBlock::iterator InsPt, unsigned Size, |
| 795 | bool EmitCFI) -> void { |
| 796 | emitIncrement(MBB&: InsMBB, MBBI&: InsPt, DL, Reg: SystemZ::R15D, NumBytes: -int64_t(Size), TII: ZII); |
| 797 | if (EmitCFI) { |
| 798 | SPOffsetFromCFA -= Size; |
| 799 | buildCFAOffs(MBB&: InsMBB, MBBI: InsPt, DL, Offset: SPOffsetFromCFA, ZII); |
| 800 | } |
| 801 | // Probe by means of a volatile compare. |
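    // The probed address is 8 bytes below the stack pointer value prior to
    // the decrement above, so consecutive full-size probes end up spaced
    // Size bytes apart.  The comparison result is ignored; only the volatile
    // memory access matters.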
| 802 | MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo: MachinePointerInfo(), |
| 803 | F: MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad, Size: 8, BaseAlignment: Align(1)); |
| 804 | BuildMI(BB&: InsMBB, I: InsPt, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::CG)) |
| 805 | .addReg(RegNo: SystemZ::R0D, flags: RegState::Undef) |
| 806 | .addReg(RegNo: SystemZ::R15D).addImm(Val: Size - 8).addReg(RegNo: 0) |
| 807 | .addMemOperand(MMO); |
| 808 | }; |
| 809 | |
| 810 | bool StoreBackchain = MF.getSubtarget<SystemZSubtarget>().hasBackChain(); |
| 811 | if (StoreBackchain) |
| 812 | BuildMI(BB&: *MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LGR)) |
| 813 | .addReg(RegNo: SystemZ::R1D, flags: RegState::Define).addReg(RegNo: SystemZ::R15D); |
| 814 | |
| 815 | MachineBasicBlock *DoneMBB = nullptr; |
| 816 | MachineBasicBlock *LoopMBB = nullptr; |
| 817 | if (NumFullBlocks < 3) { |
| 818 | // Emit unrolled probe statements. |
| 819 | for (unsigned int i = 0; i < NumFullBlocks; i++) |
| 820 | allocateAndProbe(*MBB, MBBI, ProbeSize, true/*EmitCFI*/); |
| 821 | } else { |
| 822 | // Emit a loop probing the pages. |
| 823 | uint64_t LoopAlloc = ProbeSize * NumFullBlocks; |
| 824 | SPOffsetFromCFA -= LoopAlloc; |
| 825 | |
| 826 | // Use R0D to hold the exit value. |
| 827 | BuildMI(BB&: *MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LGR), DestReg: SystemZ::R0D) |
| 828 | .addReg(RegNo: SystemZ::R15D); |
| 829 | buildDefCFAReg(MBB&: *MBB, MBBI, DL, Reg: SystemZ::R0D, ZII); |
| 830 | emitIncrement(MBB&: *MBB, MBBI, DL, Reg: SystemZ::R0D, NumBytes: -int64_t(LoopAlloc), TII: ZII); |
| 831 | buildCFAOffs(MBB&: *MBB, MBBI, DL, Offset: -int64_t(SystemZMC::ELFCallFrameSize + LoopAlloc), |
| 832 | ZII); |
| 833 | |
| 834 | DoneMBB = SystemZ::splitBlockBefore(MI: MBBI, MBB); |
| 835 | LoopMBB = SystemZ::emitBlockAfter(MBB); |
| 836 | MBB->addSuccessor(Succ: LoopMBB); |
| 837 | LoopMBB->addSuccessor(Succ: LoopMBB); |
| 838 | LoopMBB->addSuccessor(Succ: DoneMBB); |
| 839 | |
| 840 | MBB = LoopMBB; |
| 841 | allocateAndProbe(*MBB, MBB->end(), ProbeSize, false/*EmitCFI*/); |
| 842 | BuildMI(BB&: *MBB, I: MBB->end(), MIMD: DL, MCID: ZII->get(Opcode: SystemZ::CLGR)) |
| 843 | .addReg(RegNo: SystemZ::R15D).addReg(RegNo: SystemZ::R0D); |
| 844 | BuildMI(BB&: *MBB, I: MBB->end(), MIMD: DL, MCID: ZII->get(Opcode: SystemZ::BRC)) |
| 845 | .addImm(Val: SystemZ::CCMASK_ICMP).addImm(Val: SystemZ::CCMASK_CMP_GT).addMBB(MBB); |
| 846 | |
| 847 | MBB = DoneMBB; |
| 848 | MBBI = DoneMBB->begin(); |
| 849 | buildDefCFAReg(MBB&: *MBB, MBBI, DL, Reg: SystemZ::R15D, ZII); |
| 850 | } |
| 851 | |
| 852 | if (Residual) |
| 853 | allocateAndProbe(*MBB, MBBI, Residual, true/*EmitCFI*/); |
| 854 | |
| 855 | if (StoreBackchain) |
| 856 | BuildMI(BB&: *MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::STG)) |
| 857 | .addReg(RegNo: SystemZ::R1D, flags: RegState::Kill).addReg(RegNo: SystemZ::R15D) |
| 858 | .addImm(Val: getBackchainOffset(MF)).addReg(RegNo: 0); |
| 859 | |
| 860 | StackAllocMI->eraseFromParent(); |
| 861 | if (DoneMBB != nullptr) { |
| 862 | // Compute the live-in lists for the new blocks. |
| 863 | fullyRecomputeLiveIns(MBBs: {DoneMBB, LoopMBB}); |
| 864 | } |
| 865 | } |
| 866 | |
| 867 | bool SystemZELFFrameLowering::hasFPImpl(const MachineFunction &MF) const { |
| 868 | return (MF.getTarget().Options.DisableFramePointerElim(MF) || |
| 869 | MF.getFrameInfo().hasVarSizedObjects()); |
| 870 | } |
| 871 | |
| 872 | StackOffset SystemZELFFrameLowering::getFrameIndexReference( |
| 873 | const MachineFunction &MF, int FI, Register &FrameReg) const { |
| 874 | // Our incoming SP is actually SystemZMC::ELFCallFrameSize below the CFA, so |
| 875 | // add that difference here. |
| 876 | StackOffset Offset = |
| 877 | TargetFrameLowering::getFrameIndexReference(MF, FI, FrameReg); |
| 878 | return Offset + StackOffset::getFixed(Fixed: SystemZMC::ELFCallFrameSize); |
| 879 | } |
| 880 | |
| 881 | unsigned SystemZELFFrameLowering::getRegSpillOffset(MachineFunction &MF, |
| 882 | Register Reg) const { |
| 883 | bool IsVarArg = MF.getFunction().isVarArg(); |
| 884 | const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>(); |
| 885 | bool BackChain = Subtarget.hasBackChain(); |
| 886 | bool SoftFloat = Subtarget.hasSoftFloat(); |
| 887 | unsigned Offset = RegSpillOffsets[Reg]; |
| 888 | if (usePackedStack(MF) && !(IsVarArg && !SoftFloat)) { |
| 889 | if (SystemZ::GR64BitRegClass.contains(Reg)) |
      // Put all GPRs at the top of the register save area with a packed
      // stack.  Make room for the backchain if needed.
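      // With the 160-byte save area this places %r6..%r15 at offsets
      // 80..152, or at 72..144 when the topmost eight bytes are left for
      // the backchain.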
| 892 | Offset += BackChain ? 24 : 32; |
| 893 | else |
| 894 | Offset = 0; |
| 895 | } |
| 896 | return Offset; |
| 897 | } |
| 898 | |
| 899 | int SystemZELFFrameLowering::getOrCreateFramePointerSaveIndex( |
| 900 | MachineFunction &MF) const { |
| 901 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 902 | int FI = ZFI->getFramePointerSaveIndex(); |
| 903 | if (!FI) { |
| 904 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 905 | int Offset = getBackchainOffset(MF) - SystemZMC::ELFCallFrameSize; |
| 906 | FI = MFFrame.CreateFixedObject(Size: getPointerSize(), SPOffset: Offset, IsImmutable: false); |
| 907 | ZFI->setFramePointerSaveIndex(FI); |
| 908 | } |
| 909 | return FI; |
| 910 | } |
| 911 | |
| 912 | bool SystemZELFFrameLowering::usePackedStack(MachineFunction &MF) const { |
| 913 | bool HasPackedStackAttr = MF.getFunction().hasFnAttribute(Kind: "packed-stack" ); |
| 914 | const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>(); |
| 915 | bool BackChain = Subtarget.hasBackChain(); |
| 916 | bool SoftFloat = Subtarget.hasSoftFloat(); |
| 917 | if (HasPackedStackAttr && BackChain && !SoftFloat) |
| 918 | report_fatal_error(reason: "packed-stack + backchain + hard-float is unsupported." ); |
| 919 | bool CallConv = MF.getFunction().getCallingConv() != CallingConv::GHC; |
| 920 | return HasPackedStackAttr && CallConv; |
| 921 | } |
| 922 | |
| 923 | SystemZXPLINKFrameLowering::SystemZXPLINKFrameLowering(unsigned PointerSize) |
| 924 | : SystemZFrameLowering(TargetFrameLowering::StackGrowsDown, Align(32), 0, |
| 925 | Align(32), /* StackRealignable */ false, |
| 926 | PointerSize), |
| 927 | RegSpillOffsets(-1) { |
| 928 | |
  // Create a mapping from register number to save slot offset.
  // These offsets are relative to the start of the register save area.
| 931 | RegSpillOffsets.grow(n: SystemZ::NUM_TARGET_REGS); |
| 932 | for (const auto &Entry : XPLINKSpillOffsetTable) |
| 933 | RegSpillOffsets[Entry.Reg] = Entry.Offset; |
| 934 | } |
| 935 | |
| 936 | int SystemZXPLINKFrameLowering::getOrCreateFramePointerSaveIndex( |
| 937 | MachineFunction &MF) const { |
| 938 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 939 | int FI = ZFI->getFramePointerSaveIndex(); |
| 940 | if (!FI) { |
| 941 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 942 | FI = MFFrame.CreateFixedObject(Size: getPointerSize(), SPOffset: 0, IsImmutable: false); |
| 943 | MFFrame.setStackID(ObjectIdx: FI, ID: TargetStackID::NoAlloc); |
| 944 | ZFI->setFramePointerSaveIndex(FI); |
| 945 | } |
| 946 | return FI; |
| 947 | } |
| 948 | |
// Checks if the function is a potential candidate for being an XPLeaf routine.
static bool isXPLeafCandidate(const MachineFunction &MF) {
  const MachineFrameInfo &MFFrame = MF.getFrameInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  auto *Regs =
      static_cast<SystemZXPLINK64Registers *>(Subtarget.getSpecialRegisters());

  // If the function calls other functions (including alloca), it is not an
  // XPLeaf routine.
  if (MFFrame.hasCalls())
    return false;

  // If the function has variable-sized objects, it is not an XPLeaf routine.
  if (MFFrame.hasVarSizedObjects())
    return false;

  // If the function adjusts the stack, it is not an XPLeaf routine.
  if (MFFrame.adjustsStack())
    return false;

  // If the function modifies the stack pointer register, it is not an XPLeaf
  // routine.
  if (MRI.isPhysRegModified(Regs->getStackPointerRegister()))
    return false;

  // If the function modifies the ADA register, it is not an XPLeaf routine.
  if (MRI.isPhysRegModified(Regs->getAddressOfCalleeRegister()))
    return false;

  // If the function modifies the return address register, it is not an
  // XPLeaf routine.
  if (MRI.isPhysRegModified(Regs->getReturnFunctionAddressRegister()))
    return false;

  // If the backchain pointer should be stored, the function is not an XPLeaf
  // routine.
  if (MF.getSubtarget<SystemZSubtarget>().hasBackChain())
    return false;

  // If the function acquires its own stack frame, it is not an XPLeaf
  // routine.  At the time this function is called, only slots for local
  // variables have been allocated, so this is a very rough estimate.
  if (MFFrame.estimateStackSize(MF) > 0)
    return false;

  return true;
}
| 996 | |
| 997 | bool SystemZXPLINKFrameLowering::assignCalleeSavedSpillSlots( |
| 998 | MachineFunction &MF, const TargetRegisterInfo *TRI, |
| 999 | std::vector<CalleeSavedInfo> &CSI) const { |
| 1000 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 1001 | SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 1002 | const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>(); |
| 1003 | auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>(); |
| 1004 | auto &GRRegClass = SystemZ::GR64BitRegClass; |
| 1005 | |
| 1006 | // At this point, the result of isXPLeafCandidate() is not accurate because |
| 1007 | // the size of the save area has not yet been determined. If |
| 1008 | // isXPLeafCandidate() indicates a potential leaf function, and there are no |
| 1009 | // callee-save registers, then it is indeed a leaf function, and we can early |
| 1010 | // exit. |
| 1011 | // TODO: It is possible for leaf functions to use callee-saved registers. |
| 1012 | // It can use the 0-2k range between R4 and the caller's stack frame without |
| 1013 | // acquiring its own stack frame. |
| 1014 | bool IsLeaf = CSI.empty() && isXPLeafCandidate(MF); |
| 1015 | if (IsLeaf) |
| 1016 | return true; |
| 1017 | |
| 1018 | // For non-leaf functions: |
| 1019 | // - the address of callee (entry point) register R6 must be saved |
| 1020 | CSI.push_back(x: CalleeSavedInfo(Regs.getAddressOfCalleeRegister())); |
| 1021 | CSI.back().setRestored(false); |
| 1022 | |
| 1023 | // The return address register R7 must be saved and restored. |
| 1024 | CSI.push_back(x: CalleeSavedInfo(Regs.getReturnFunctionAddressRegister())); |
| 1025 | |
| 1026 | // If the function needs a frame pointer, or if the backchain pointer should |
| 1027 | // be stored, then save the stack pointer register R4. |
| 1028 | if (hasFP(MF) || Subtarget.hasBackChain()) |
| 1029 | CSI.push_back(x: CalleeSavedInfo(Regs.getStackPointerRegister())); |
| 1030 | |
| 1031 | // If this function has an associated personality function then the |
| 1032 | // environment register R5 must be saved in the DSA. |
| 1033 | if (!MF.getLandingPads().empty()) |
| 1034 | CSI.push_back(x: CalleeSavedInfo(Regs.getADARegister())); |
| 1035 | |
| 1036 | // Scan the call-saved GPRs and find the bounds of the register spill area. |
| 1037 | Register LowRestoreGPR = 0; |
| 1038 | int LowRestoreOffset = INT32_MAX; |
| 1039 | Register LowSpillGPR = 0; |
| 1040 | int LowSpillOffset = INT32_MAX; |
| 1041 | Register HighGPR = 0; |
| 1042 | int HighOffset = -1; |
| 1043 | |
| 1044 | // Query index of the saved frame pointer. |
| 1045 | int FPSI = MFI->getFramePointerSaveIndex(); |
| 1046 | |
| 1047 | for (auto &CS : CSI) { |
| 1048 | MCRegister Reg = CS.getReg(); |
| 1049 | int Offset = RegSpillOffsets[Reg]; |
| 1050 | if (Offset >= 0) { |
| 1051 | if (GRRegClass.contains(Reg)) { |
| 1052 | if (LowSpillOffset > Offset) { |
| 1053 | LowSpillOffset = Offset; |
| 1054 | LowSpillGPR = Reg; |
| 1055 | } |
| 1056 | if (CS.isRestored() && LowRestoreOffset > Offset) { |
| 1057 | LowRestoreOffset = Offset; |
| 1058 | LowRestoreGPR = Reg; |
| 1059 | } |
| 1060 | |
| 1061 | if (Offset > HighOffset) { |
| 1062 | HighOffset = Offset; |
| 1063 | HighGPR = Reg; |
| 1064 | } |
| 1065 | // Non-volatile GPRs are saved in the dedicated register save area at |
| 1066 | // the bottom of the stack and are not truly part of the "normal" stack |
| 1067 | // frame. Mark the frame index as NoAlloc to indicate it as such. |
| 1068 | unsigned RegSize = getPointerSize(); |
| 1069 | int FrameIdx = |
| 1070 | (FPSI && Offset == 0) |
| 1071 | ? FPSI |
| 1072 | : MFFrame.CreateFixedSpillStackObject(Size: RegSize, SPOffset: Offset); |
| 1073 | CS.setFrameIdx(FrameIdx); |
| 1074 | MFFrame.setStackID(ObjectIdx: FrameIdx, ID: TargetStackID::NoAlloc); |
| 1075 | } |
| 1076 | } else { |
| 1077 | MCRegister Reg = CS.getReg(); |
| 1078 | const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); |
| 1079 | Align Alignment = TRI->getSpillAlign(RC: *RC); |
| 1080 | unsigned Size = TRI->getSpillSize(RC: *RC); |
| 1081 | Alignment = std::min(a: Alignment, b: getStackAlign()); |
| 1082 | int FrameIdx = MFFrame.CreateStackObject(Size, Alignment, isSpillSlot: true); |
| 1083 | CS.setFrameIdx(FrameIdx); |
| 1084 | } |
| 1085 | } |
| 1086 | |
| 1087 | // Save the range of call-saved registers, for use by the |
| 1088 | // prologue/epilogue inserters. |
| 1089 | if (LowRestoreGPR) |
| 1090 | MFI->setRestoreGPRRegs(Low: LowRestoreGPR, High: HighGPR, Offs: LowRestoreOffset); |
| 1091 | |
| 1092 | // Save the range of call-saved registers, for use by the epilogue inserter. |
| 1093 | assert(LowSpillGPR && "Expected registers to spill" ); |
| 1094 | MFI->setSpillGPRRegs(Low: LowSpillGPR, High: HighGPR, Offs: LowSpillOffset); |
| 1095 | |
| 1096 | return true; |
| 1097 | } |
| 1098 | |
| 1099 | void SystemZXPLINKFrameLowering::determineCalleeSaves(MachineFunction &MF, |
| 1100 | BitVector &SavedRegs, |
| 1101 | RegScavenger *RS) const { |
| 1102 | TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); |
| 1103 | |
| 1104 | bool HasFP = hasFP(MF); |
| 1105 | const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>(); |
| 1106 | auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>(); |
| 1107 | |
| 1108 | // If the function requires a frame pointer, record that the hard |
| 1109 | // frame pointer will be clobbered. |
| 1110 | if (HasFP) |
| 1111 | SavedRegs.set(Regs.getFramePointerRegister()); |
| 1112 | } |
| 1113 | |
| 1114 | bool SystemZXPLINKFrameLowering::spillCalleeSavedRegisters( |
| 1115 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, |
| 1116 | ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const { |
| 1117 | if (CSI.empty()) |
| 1118 | return true; |
| 1119 | |
| 1120 | MachineFunction &MF = *MBB.getParent(); |
| 1121 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 1122 | const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>(); |
| 1123 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); |
| 1124 | auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>(); |
| 1125 | SystemZ::GPRRegs SpillGPRs = ZFI->getSpillGPRRegs(); |
| 1126 | DebugLoc DL; |
| 1127 | |
| 1128 | // Save GPRs |
| 1129 | if (SpillGPRs.LowGPR) { |
| 1130 | assert(SpillGPRs.LowGPR != SpillGPRs.HighGPR && |
| 1131 | "Should be saving multiple registers" ); |
| 1132 | |
| 1133 | // Build an STM/STMG instruction. |
| 1134 | MachineInstrBuilder MIB = BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: SystemZ::STMG)); |
| 1135 | |
| 1136 | // Add the explicit register operands. |
| 1137 | addSavedGPR(MBB, MIB, GPR64: SpillGPRs.LowGPR, IsImplicit: false); |
| 1138 | addSavedGPR(MBB, MIB, GPR64: SpillGPRs.HighGPR, IsImplicit: false); |
| 1139 | |
    // Add the address (%r4).
    MIB.addReg(Regs.getStackPointerRegister());

    // Add the partial offset; we cannot add the actual offset yet, as the
    // stack layout has not been finalized at this point.
    MIB.addImm(SpillGPRs.GPROffset);
| 1146 | |
| 1147 | // Make sure all call-saved GPRs are included as operands and are |
| 1148 | // marked as live on entry. |
| 1149 | auto &GRRegClass = SystemZ::GR64BitRegClass; |
| 1150 | for (const CalleeSavedInfo &I : CSI) { |
| 1151 | MCRegister Reg = I.getReg(); |
| 1152 | if (GRRegClass.contains(Reg)) |
| 1153 | addSavedGPR(MBB, MIB, GPR64: Reg, IsImplicit: true); |
| 1154 | } |
| 1155 | } |
| 1156 | |
| 1157 | // Spill FPRs to the stack in the normal TargetInstrInfo way |
| 1158 | for (const CalleeSavedInfo &I : CSI) { |
| 1159 | MCRegister Reg = I.getReg(); |
| 1160 | if (SystemZ::FP64BitRegClass.contains(Reg)) { |
| 1161 | MBB.addLiveIn(PhysReg: Reg); |
| 1162 | TII->storeRegToStackSlot(MBB, MI: MBBI, SrcReg: Reg, isKill: true, FrameIndex: I.getFrameIdx(), |
| 1163 | RC: &SystemZ::FP64BitRegClass, TRI, VReg: Register()); |
| 1164 | } |
| 1165 | if (SystemZ::VR128BitRegClass.contains(Reg)) { |
| 1166 | MBB.addLiveIn(PhysReg: Reg); |
| 1167 | TII->storeRegToStackSlot(MBB, MI: MBBI, SrcReg: Reg, isKill: true, FrameIndex: I.getFrameIdx(), |
| 1168 | RC: &SystemZ::VR128BitRegClass, TRI, VReg: Register()); |
| 1169 | } |
| 1170 | } |
| 1171 | |
| 1172 | return true; |
| 1173 | } |
| 1174 | |
| 1175 | bool SystemZXPLINKFrameLowering::restoreCalleeSavedRegisters( |
| 1176 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, |
| 1177 | MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const { |
| 1178 | |
| 1179 | if (CSI.empty()) |
| 1180 | return false; |
| 1181 | |
| 1182 | MachineFunction &MF = *MBB.getParent(); |
| 1183 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 1184 | const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>(); |
| 1185 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); |
| 1186 | auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>(); |
| 1187 | |
| 1188 | DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); |
| 1189 | |
| 1190 | // Restore FPRs in the normal TargetInstrInfo way. |
| 1191 | for (const CalleeSavedInfo &I : CSI) { |
| 1192 | MCRegister Reg = I.getReg(); |
| 1193 | if (SystemZ::FP64BitRegClass.contains(Reg)) |
| 1194 | TII->loadRegFromStackSlot(MBB, MI: MBBI, DestReg: Reg, FrameIndex: I.getFrameIdx(), |
| 1195 | RC: &SystemZ::FP64BitRegClass, TRI, VReg: Register()); |
| 1196 | if (SystemZ::VR128BitRegClass.contains(Reg)) |
| 1197 | TII->loadRegFromStackSlot(MBB, MI: MBBI, DestReg: Reg, FrameIndex: I.getFrameIdx(), |
| 1198 | RC: &SystemZ::VR128BitRegClass, TRI, VReg: Register()); |
| 1199 | } |
| 1200 | |
| 1201 | // Restore call-saved GPRs (but not call-clobbered varargs, which at |
| 1202 | // this point might hold return values). |
| 1203 | SystemZ::GPRRegs RestoreGPRs = ZFI->getRestoreGPRRegs(); |
| 1204 | if (RestoreGPRs.LowGPR) { |
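| | // The biased offset must fit the signed 20-bit displacement of LG/LMG. |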
| 1205 | assert(isInt<20>(Regs.getStackPointerBias() + RestoreGPRs.GPROffset)); |
| 1206 | if (RestoreGPRs.LowGPR == RestoreGPRs.HighGPR) |
| 1207 | // Build an LG/L instruction. |
| 1208 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: SystemZ::LG), DestReg: RestoreGPRs.LowGPR) |
| 1209 | .addReg(RegNo: Regs.getStackPointerRegister()) |
| 1210 | .addImm(Val: Regs.getStackPointerBias() + RestoreGPRs.GPROffset) |
| 1211 | .addReg(RegNo: 0); |
| 1212 | else { |
| 1213 | // Build an LMG/LM instruction. |
| 1214 | MachineInstrBuilder MIB = BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: SystemZ::LMG)); |
| 1215 | |
| 1216 | // Add the explicit register operands. |
| 1217 | MIB.addReg(RegNo: RestoreGPRs.LowGPR, flags: RegState::Define); |
| 1218 | MIB.addReg(RegNo: RestoreGPRs.HighGPR, flags: RegState::Define); |
| 1219 | |
| 1220 | // Add the address. |
| 1221 | MIB.addReg(RegNo: Regs.getStackPointerRegister()); |
| 1222 | MIB.addImm(Val: Regs.getStackPointerBias() + RestoreGPRs.GPROffset); |
| 1223 | |
| 1224 | // Do a second scan, marking the registers inside the restored range as |
| | // implicitly defined by the instruction. |
| 1225 | for (const CalleeSavedInfo &I : CSI) { |
| 1226 | MCRegister Reg = I.getReg(); |
| 1227 | if (Reg > RestoreGPRs.LowGPR && Reg < RestoreGPRs.HighGPR) |
| 1228 | MIB.addReg(RegNo: Reg, flags: RegState::ImplicitDefine); |
| 1229 | } |
| 1230 | } |
| 1231 | } |
| 1232 | |
| 1233 | return true; |
| 1234 | } |
| 1235 | |
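| | // Emit the XPLINK64 prologue: patch the final displacement into the STMG |
| | // emitted by spillCalleeSavedRegisters, allocate the stack frame (emitting |
| | // a stack-extension check pseudo for frames larger than the guard size), |
| | // set up the frame pointer if required, and store the remaining argument |
| | // GPRs of vararg functions. |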
| 1236 | void SystemZXPLINKFrameLowering::emitPrologue(MachineFunction &MF, |
| 1237 | MachineBasicBlock &MBB) const { |
| 1238 | assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported"); |
| 1239 | const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>(); |
| 1240 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 1241 | MachineBasicBlock::iterator MBBI = MBB.begin(); |
| 1242 | auto *ZII = static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
| 1243 | auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>(); |
| 1244 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 1245 | MachineInstr *StoreInstr = nullptr; |
| 1246 | |
| 1247 | determineFrameLayout(MF); |
| 1248 | |
| 1249 | bool HasFP = hasFP(MF); |
| 1250 | // Debug location must be unknown since the first debug location is used |
| 1251 | // to determine the end of the prologue. |
| 1252 | DebugLoc DL; |
| 1253 | uint64_t Offset = 0; |
| 1254 | |
| 1255 | const uint64_t StackSize = MFFrame.getStackSize(); |
| 1256 | |
| 1257 | if (ZFI->getSpillGPRRegs().LowGPR) { |
| 1258 | // Skip over the GPR saves. |
| 1259 | if ((MBBI != MBB.end()) && ((MBBI->getOpcode() == SystemZ::STMG))) { |
| 1260 | const int Operand = 3; |
| 1261 | // Now we can set the offset for the operation, since the stack layout |
| 1262 | // has been finalized. |
| 1263 | Offset = Regs.getStackPointerBias() + MBBI->getOperand(i: Operand).getImm(); |
| 1264 | // Fold the stack allocation into the offset if the result still fits the |
| | // STMG's signed 20-bit displacement; otherwise remember the STMG so that |
| | // the allocation can be inserted before it. |
| 1265 | if (isInt<20>(x: Offset - StackSize)) |
| 1266 | Offset -= StackSize; |
| 1267 | else |
| 1268 | StoreInstr = &*MBBI; |
| 1269 | MBBI->getOperand(i: Operand).setImm(Offset); |
| 1270 | ++MBBI; |
| 1271 | } else |
| 1272 | llvm_unreachable("Couldn't skip over GPR saves"); |
| 1273 | } |
| 1274 | |
| 1275 | if (StackSize) { |
| 1276 | MachineBasicBlock::iterator InsertPt = StoreInstr ? StoreInstr : MBBI; |
| 1277 | // Allocate StackSize bytes. |
| 1278 | int64_t Delta = -int64_t(StackSize); |
| 1279 | |
| 1280 | // If the STM(G) instruction also stores SP (R4) but the displacement is |
| 1281 | // too large, the stack is allocated before the store, so the wrong SP |
| 1282 | // value would be saved and later restored. In that case, temporarily |
| 1283 | // save the incoming SP in r0 and store it to memory separately. |
| 1284 | if (StoreInstr && HasFP) { |
| 1285 | // Insert LGR r0,r4 before the STMG instruction. |
| 1286 | BuildMI(BB&: MBB, I: InsertPt, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LGR)) |
| 1287 | .addReg(RegNo: SystemZ::R0D, flags: RegState::Define) |
| 1288 | .addReg(RegNo: SystemZ::R4D); |
| 1289 | // Insert STG r0,xxx(,r4) after the STMG instruction. |
| 1290 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::STG)) |
| 1291 | .addReg(RegNo: SystemZ::R0D, flags: RegState::Kill) |
| 1292 | .addReg(RegNo: SystemZ::R4D) |
| 1293 | .addImm(Val: Offset) |
| 1294 | .addReg(RegNo: 0); |
| 1295 | } |
| 1296 | |
| 1297 | emitIncrement(MBB, MBBI&: InsertPt, DL, Reg: Regs.getStackPointerRegister(), NumBytes: Delta, |
| 1298 | TII: ZII); |
| 1299 | |
| 1300 | // If the requested stack size is larger than the guard page, then we need |
| 1301 | // to check if we need to call the stack extender. This requires adding a |
| 1302 | // conditional branch, but splitting the prologue block is not possible at |
| 1303 | // this point since it would invalidate the SaveBlocks / RestoreBlocks sets |
| 1304 | // of PEI in the single block function case. Build a pseudo to be handled |
| 1305 | // later by inlineStackProbe(). |
| 1306 | const uint64_t GuardPageSize = 1024 * 1024; |
| 1307 | if (StackSize > GuardPageSize) { |
| 1308 | assert(StoreInstr && "Wrong insertion point"); |
| 1309 | BuildMI(BB&: MBB, I: InsertPt, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::XPLINK_STACKALLOC)); |
| 1310 | } |
| 1311 | } |
| 1312 | |
| 1313 | if (HasFP) { |
| 1314 | // Copy the base of the frame to the frame pointer register. |
| 1315 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LGR), |
| 1316 | DestReg: Regs.getFramePointerRegister()) |
| 1317 | .addReg(RegNo: Regs.getStackPointerRegister()); |
| 1318 | |
| 1319 | // Mark the FramePtr as live at the beginning of every block except |
| 1320 | // the entry block. (We'll have marked R8 as live on entry when |
| 1321 | // saving the GPRs.) |
| 1322 | for (MachineBasicBlock &B : llvm::drop_begin(RangeOrContainer&: MF)) |
| 1323 | B.addLiveIn(PhysReg: Regs.getFramePointerRegister()); |
| 1324 | } |
| 1325 | |
| 1326 | // Save GPRs used for varargs, if any. |
| 1327 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); |
| 1328 | bool IsVarArg = MF.getFunction().isVarArg(); |
| 1329 | |
| 1330 | if (IsVarArg) { |
| 1331 | // FixedRegs is the number of used registers, accounting for shadow |
| 1332 | // registers. |
| 1333 | unsigned FixedRegs = ZFI->getVarArgsFirstGPR() + ZFI->getVarArgsFirstFPR(); |
| 1334 | auto &GPRs = SystemZ::XPLINK64ArgGPRs; |
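| | // Store each remaining argument GPR to its slot in the incoming argument |
| | // area so the vararg handling can find it there. |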
| 1335 | for (unsigned I = FixedRegs; I < SystemZ::XPLINK64NumArgGPRs; I++) { |
| 1336 | uint64_t StartOffset = MFFrame.getOffsetAdjustment() + |
| 1337 | MFFrame.getStackSize() + Regs.getCallFrameSize() + |
| 1338 | getOffsetOfLocalArea() + I * getPointerSize(); |
| 1339 | unsigned Reg = GPRs[I]; |
| 1340 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: SystemZ::STG)) |
| 1341 | .addReg(RegNo: Reg) |
| 1342 | .addReg(RegNo: Regs.getStackPointerRegister()) |
| 1343 | .addImm(Val: StartOffset) |
| 1344 | .addReg(RegNo: 0); |
| 1345 | if (!MBB.isLiveIn(Reg)) |
| 1346 | MBB.addLiveIn(PhysReg: Reg); |
| 1347 | } |
| 1348 | } |
| 1349 | } |
| 1350 | |
| 1351 | void SystemZXPLINKFrameLowering::emitEpilogue(MachineFunction &MF, |
| 1352 | MachineBasicBlock &MBB) const { |
| 1353 | const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>(); |
| 1354 | MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); |
| 1355 | SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
| 1356 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 1357 | auto *ZII = static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); |
| 1358 | auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>(); |
| 1359 | |
| 1360 | // MBBI points at the return instruction; the epilogue is inserted before it. |
| 1361 | assert(MBBI->isReturn() && "Can only insert epilogue into returning blocks"); |
| 1362 | |
| 1363 | uint64_t StackSize = MFFrame.getStackSize(); |
| 1364 | if (StackSize) { |
| 1365 | unsigned SPReg = Regs.getStackPointerRegister(); |
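| | // If the GPR restore sequence reloads the stack pointer itself, the frame |
| | // is deallocated implicitly; otherwise add the stack size back explicitly. |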
| 1366 | if (ZFI->getRestoreGPRRegs().LowGPR != SPReg) { |
| 1367 | DebugLoc DL = MBBI->getDebugLoc(); |
| 1368 | emitIncrement(MBB, MBBI, DL, Reg: SPReg, NumBytes: StackSize, TII: ZII); |
| 1369 | } |
| 1370 | } |
| 1371 | } |
| 1372 | |
| 1373 | // Emit a compare of the stack pointer against the stack floor, and a call to |
| 1374 | // the LE stack extender if needed. |
| 1375 | void SystemZXPLINKFrameLowering::inlineStackProbe( |
| 1376 | MachineFunction &MF, MachineBasicBlock &PrologMBB) const { |
| 1377 | auto *ZII = |
| 1378 | static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo()); |
| 1379 | |
| 1380 | MachineInstr *StackAllocMI = nullptr; |
| 1381 | for (MachineInstr &MI : PrologMBB) |
| 1382 | if (MI.getOpcode() == SystemZ::XPLINK_STACKALLOC) { |
| 1383 | StackAllocMI = &MI; |
| 1384 | break; |
| 1385 | } |
| 1386 | if (StackAllocMI == nullptr) |
| 1387 | return; |
| 1388 | |
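| | // When a frame pointer is used, emitPrologue has stashed the incoming SP |
| | // in r0, so r0 cannot also be used to preserve r3 across the probe; r3 is |
| | // then saved to its slot in the caller's argument area (SaveSlotR3). |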
| 1389 | bool NeedSaveSP = hasFP(MF); |
| 1390 | bool NeedSaveArg = PrologMBB.isLiveIn(Reg: SystemZ::R3D); |
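| | // 2192 is presumably the biased offset of r3's argument slot: the stack |
| | // pointer bias plus the start of the argument area plus the two preceding |
| | // 8-byte slots for r1 and r2. |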
| 1391 | const int64_t SaveSlotR3 = 2192; |
| 1392 | |
| 1393 | MachineBasicBlock &MBB = PrologMBB; |
| 1394 | const DebugLoc DL = StackAllocMI->getDebugLoc(); |
| 1395 | |
| 1396 | // The 2nd half of block MBB after split. |
| 1397 | MachineBasicBlock *NextMBB; |
| 1398 | |
| 1399 | // Add new basic block for the call to the stack overflow function. |
| 1400 | MachineBasicBlock *StackExtMBB = |
| 1401 | MF.CreateMachineBasicBlock(BB: MBB.getBasicBlock()); |
| 1402 | MF.push_back(MBB: StackExtMBB); |
| 1403 | |
| 1404 | // LG r3,72(,r3) |
| 1405 | BuildMI(BB: StackExtMBB, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LG), DestReg: SystemZ::R3D) |
| 1406 | .addReg(RegNo: SystemZ::R3D) |
| 1407 | .addImm(Val: 72) |
| 1408 | .addReg(RegNo: 0); |
| 1409 | // BASR r3,r3 |
| 1410 | BuildMI(BB: StackExtMBB, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::CallBASR_STACKEXT)) |
| 1411 | .addReg(RegNo: SystemZ::R3D); |
| 1412 | if (NeedSaveArg) { |
| 1413 | if (!NeedSaveSP) { |
| 1414 | // LGR r0,r3 |
| 1415 | BuildMI(BB&: MBB, I: StackAllocMI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LGR)) |
| 1416 | .addReg(RegNo: SystemZ::R0D, flags: RegState::Define) |
| 1417 | .addReg(RegNo: SystemZ::R3D); |
| 1418 | } else { |
| 1419 | // In this case, the incoming value of r4 is saved in r0 so the |
| 1420 | // latter register is unavailable. Store r3 in its corresponding |
| 1421 | // slot in the parameter list instead. Do this at the start of |
| 1422 | // the prologue, before r4 is manipulated by anything else. |
| 1423 | // STG r3, 2192(r4) |
| 1424 | BuildMI(BB&: MBB, I: MBB.begin(), MIMD: DL, MCID: ZII->get(Opcode: SystemZ::STG)) |
| 1425 | .addReg(RegNo: SystemZ::R3D) |
| 1426 | .addReg(RegNo: SystemZ::R4D) |
| 1427 | .addImm(Val: SaveSlotR3) |
| 1428 | .addReg(RegNo: 0); |
| 1429 | } |
| 1430 | } |
| 1431 | // LLGT r3,1208 |
| 1432 | BuildMI(BB&: MBB, I: StackAllocMI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LLGT), DestReg: SystemZ::R3D) |
| 1433 | .addReg(RegNo: 0) |
| 1434 | .addImm(Val: 1208) |
| 1435 | .addReg(RegNo: 0); |
| 1436 | // CG r4,64(,r3) |
| 1437 | BuildMI(BB&: MBB, I: StackAllocMI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::CG)) |
| 1438 | .addReg(RegNo: SystemZ::R4D) |
| 1439 | .addReg(RegNo: SystemZ::R3D) |
| 1440 | .addImm(Val: 64) |
| 1441 | .addReg(RegNo: 0); |
| 1442 | // JLL b'0100',F'37' |
| 1443 | BuildMI(BB&: MBB, I: StackAllocMI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::BRC)) |
| 1444 | .addImm(Val: SystemZ::CCMASK_ICMP) |
| 1445 | .addImm(Val: SystemZ::CCMASK_CMP_LT) |
| 1446 | .addMBB(MBB: StackExtMBB); |
| 1447 | |
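| | // Split the prologue block after the compare-and-branch: the fall-through |
| | // path continues in NextMBB, while the overflow path calls the stack |
| | // extender in StackExtMBB. |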
| 1448 | NextMBB = SystemZ::splitBlockBefore(MI: StackAllocMI, MBB: &MBB); |
| 1449 | MBB.addSuccessor(Succ: NextMBB); |
| 1450 | MBB.addSuccessor(Succ: StackExtMBB); |
| 1451 | if (NeedSaveArg) { |
| 1452 | if (!NeedSaveSP) { |
| 1453 | // LGR r3, r0 |
| 1454 | BuildMI(BB&: *NextMBB, I: StackAllocMI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LGR)) |
| 1455 | .addReg(RegNo: SystemZ::R3D, flags: RegState::Define) |
| 1456 | .addReg(RegNo: SystemZ::R0D, flags: RegState::Kill); |
| 1457 | } else { |
| 1458 | // In this case, the incoming value of r4 is saved in r0 so the |
| 1459 | // latter register is unavailable. We stored r3 in its corresponding |
| 1460 | // slot in the parameter list instead and we now restore it from there. |
| 1461 | // LGR r3, r0 |
| 1462 | BuildMI(BB&: *NextMBB, I: StackAllocMI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LGR)) |
| 1463 | .addReg(RegNo: SystemZ::R3D, flags: RegState::Define) |
| 1464 | .addReg(RegNo: SystemZ::R0D); |
| 1465 | // LG r3, 2192(r3) |
| 1466 | BuildMI(BB&: *NextMBB, I: StackAllocMI, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::LG)) |
| 1467 | .addReg(RegNo: SystemZ::R3D, flags: RegState::Define) |
| 1468 | .addReg(RegNo: SystemZ::R3D) |
| 1469 | .addImm(Val: SaveSlotR3) |
| 1470 | .addReg(RegNo: 0); |
| 1471 | } |
| 1472 | } |
| 1473 | |
| 1474 | // Add jump back from stack extension BB. |
| 1475 | BuildMI(BB: StackExtMBB, MIMD: DL, MCID: ZII->get(Opcode: SystemZ::J)).addMBB(MBB: NextMBB); |
| 1476 | StackExtMBB->addSuccessor(Succ: NextMBB); |
| 1477 | |
| 1478 | StackAllocMI->eraseFromParent(); |
| 1479 | |
| 1480 | // Compute the live-in lists for the new blocks. |
| 1481 | fullyRecomputeLiveIns(MBBs: {StackExtMBB, NextMBB}); |
| 1482 | } |
| 1483 | |
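| | // A frame pointer is needed only when the function has variable-sized |
| | // stack objects, since the stack pointer then no longer provides a fixed |
| | // offset to the locals. |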
| 1484 | bool SystemZXPLINKFrameLowering::hasFPImpl(const MachineFunction &MF) const { |
| 1485 | return (MF.getFrameInfo().hasVarSizedObjects()); |
| 1486 | } |
| 1487 | |
| 1488 | void SystemZXPLINKFrameLowering::processFunctionBeforeFrameFinalized( |
| 1489 | MachineFunction &MF, RegScavenger *RS) const { |
| 1490 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 1491 | const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>(); |
| 1492 | auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>(); |
| 1493 | |
| 1494 | // Set up the stack frame offset adjustment (the XPLINK stack pointer bias). |
| 1495 | MFFrame.setOffsetAdjustment(Regs.getStackPointerBias()); |
| 1496 | |
| 1497 | // Nothing to do for leaf functions. |
| 1498 | uint64_t StackSize = MFFrame.estimateStackSize(MF); |
| 1499 | if (StackSize == 0 && MFFrame.getCalleeSavedInfo().empty()) |
| 1500 | return; |
| 1501 | |
| 1502 | // Although the XPLINK specifications for AMODE64 state that the minimum |
| 1503 | // size of the param area is 32 bytes and no further rounding is |
| 1504 | // specified, round this area up to a multiple of 64 bytes to be |
| 1505 | // compatible with existing compilers. |
| 1506 | MFFrame.setMaxCallFrameSize( |
| 1507 | std::max(a: 64U, b: (unsigned)alignTo(Value: MFFrame.getMaxCallFrameSize(), Align: 64))); |
| 1508 | |
| 1509 | // Include frame objects with positive offsets in the reach computation. |
| 1510 | // Since the displacement from SP/FP is ObjectOffset + StackSize + Bias, |
| 1511 | // objects with positive offsets lie in the caller's stack frame and are |
| 1512 | // still accessed by a displacement from SP/FP. |
| 1513 | int64_t LargestArgOffset = 0; |
| 1514 | for (int I = MFFrame.getObjectIndexBegin(); I != 0; ++I) { |
| 1515 | if (MFFrame.getObjectOffset(ObjectIdx: I) >= 0) { |
| 1516 | int64_t ObjOffset = MFFrame.getObjectOffset(ObjectIdx: I) + MFFrame.getObjectSize(ObjectIdx: I); |
| 1517 | LargestArgOffset = std::max(a: ObjOffset, b: LargestArgOffset); |
| 1518 | } |
| 1519 | } |
| 1520 | |
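| | // Conservatively estimate the largest biased displacement the function may |
| | // need: locals plus the outgoing call frame plus the furthest incoming |
| | // argument. |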
| 1521 | uint64_t MaxReach = (StackSize + Regs.getCallFrameSize() + |
| 1522 | Regs.getStackPointerBias() + LargestArgOffset); |
| 1523 | |
| 1524 | if (!isUInt<12>(x: MaxReach)) { |
| 1525 | // We may need register scavenging slots if some parts of the frame |
| 1526 | // are outside the reach of an unsigned 12-bit displacement. |
| 1527 | RS->addScavengingFrameIndex(FI: MFFrame.CreateSpillStackObject(Size: 8, Alignment: Align(8))); |
| 1528 | RS->addScavengingFrameIndex(FI: MFFrame.CreateSpillStackObject(Size: 8, Alignment: Align(8))); |
| 1529 | } |
| 1530 | } |
| 1531 | |
| 1532 | // Determines the final size of the frame and assigns offsets to the |
| | // deferred (NoAlloc) register save slot objects. |
| 1533 | void SystemZXPLINKFrameLowering::determineFrameLayout( |
| 1534 | MachineFunction &MF) const { |
| 1535 | MachineFrameInfo &MFFrame = MF.getFrameInfo(); |
| 1536 | const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>(); |
| 1537 | auto *Regs = |
| 1538 | static_cast<SystemZXPLINK64Registers *>(Subtarget.getSpecialRegisters()); |
| 1539 | |
| 1540 | uint64_t StackSize = MFFrame.getStackSize(); |
| 1541 | if (StackSize == 0) |
| 1542 | return; |
| 1543 | |
| 1544 | // Add the size of the register save area and the reserved area to the size. |
| 1545 | StackSize += Regs->getCallFrameSize(); |
| 1546 | MFFrame.setStackSize(StackSize); |
| 1547 | |
| 1548 | // We now know the stack size. Update the stack objects for the register save |
| 1549 | // area now. This has no impact on the stack frame layout, as this is already |
| 1550 | // computed. However, it makes sure that all callee saved registers have a |
| 1551 | // valid offset assigned. |
| 1552 | for (int FrameIdx = MFFrame.getObjectIndexBegin(); FrameIdx != 0; |
| 1553 | ++FrameIdx) { |
| 1554 | if (MFFrame.getStackID(ObjectIdx: FrameIdx) == TargetStackID::NoAlloc) { |
| 1555 | int64_t SPOffset = MFFrame.getObjectOffset(ObjectIdx: FrameIdx); |
| 1556 | SPOffset -= StackSize; |
| 1557 | MFFrame.setObjectOffset(ObjectIdx: FrameIdx, SPOffset); |
| 1558 | } |
| 1559 | } |
| 1560 | } |
| 1561 | |