| 1 | //===-- PPCRegisterInfo.cpp - PowerPC Register Information ----------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file contains the PowerPC implementation of the TargetRegisterInfo |
| 10 | // class. |
| 11 | // |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | |
| 14 | #include "PPCRegisterInfo.h" |
| 15 | #include "PPCFrameLowering.h" |
| 16 | #include "PPCInstrBuilder.h" |
| 17 | #include "PPCMachineFunctionInfo.h" |
| 18 | #include "PPCSubtarget.h" |
| 19 | #include "PPCTargetMachine.h" |
| 20 | #include "llvm/ADT/BitVector.h" |
| 21 | #include "llvm/ADT/Statistic.h" |
| 22 | #include "llvm/CodeGen/MachineFrameInfo.h" |
| 23 | #include "llvm/CodeGen/MachineFunction.h" |
| 24 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
| 25 | #include "llvm/CodeGen/MachineModuleInfo.h" |
| 26 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
| 27 | #include "llvm/CodeGen/RegisterScavenging.h" |
| 28 | #include "llvm/CodeGen/TargetFrameLowering.h" |
| 29 | #include "llvm/CodeGen/TargetInstrInfo.h" |
| 30 | #include "llvm/CodeGen/VirtRegMap.h" |
| 31 | #include "llvm/IR/CallingConv.h" |
| 32 | #include "llvm/IR/Function.h" |
| 33 | #include "llvm/IR/Type.h" |
| 34 | #include "llvm/Support/CommandLine.h" |
| 35 | #include "llvm/Support/Debug.h" |
| 36 | #include "llvm/Support/ErrorHandling.h" |
| 37 | #include "llvm/Support/MathExtras.h" |
| 38 | #include "llvm/Support/raw_ostream.h" |
| 39 | #include "llvm/Target/TargetMachine.h" |
| 40 | #include "llvm/Target/TargetOptions.h" |
| 41 | #include <cstdlib> |
| 42 | |
| 43 | using namespace llvm; |
| 44 | |
| 45 | #define DEBUG_TYPE "reginfo" |
| 46 | |
| 47 | #define GET_REGINFO_TARGET_DESC |
| 48 | #include "PPCGenRegisterInfo.inc" |
| 49 | |
| 50 | STATISTIC(InflateGPRC, "Number of gprc inputs for getLargestLegalSuperClass" ); |
| 51 | STATISTIC(InflateGP8RC, "Number of g8rc inputs for getLargestLegalSuperClass" ); |
| 52 | |
| 53 | static cl::opt<bool> |
| 54 | EnableBasePointer("ppc-use-base-pointer" , cl::Hidden, cl::init(Val: true), |
| 55 | cl::desc("Enable use of a base pointer for complex stack frames" )); |
| 56 | |
| 57 | static cl::opt<bool> |
| 58 | AlwaysBasePointer("ppc-always-use-base-pointer" , cl::Hidden, cl::init(Val: false), |
| 59 | cl::desc("Force the use of a base pointer in every function" )); |
| 60 | |
| 61 | static cl::opt<bool> |
| 62 | EnableGPRToVecSpills("ppc-enable-gpr-to-vsr-spills" , cl::Hidden, cl::init(Val: false), |
| 63 | cl::desc("Enable spills from gpr to vsr rather than stack" )); |
| 64 | |
| 65 | static cl::opt<bool> |
| 66 | StackPtrConst("ppc-stack-ptr-caller-preserved" , |
| 67 | cl::desc("Consider R1 caller preserved so stack saves of " |
| 68 | "caller preserved registers can be LICM candidates" ), |
| 69 | cl::init(Val: true), cl::Hidden); |
| 70 | |
| 71 | static cl::opt<unsigned> |
| 72 | MaxCRBitSpillDist("ppc-max-crbit-spill-dist" , |
| 73 | cl::desc("Maximum search distance for definition of CR bit " |
| 74 | "spill on ppc" ), |
| 75 | cl::Hidden, cl::init(Val: 100)); |
| 76 | |
| 77 | // Copies/moves of physical accumulators are expensive operations |
| 78 | // that should be avoided whenever possible. MMA instructions are |
| 79 | // meant to be used in performance-sensitive computational kernels. |
| 80 | // This option is provided, at least for the time being, to give the |
| 81 | // user a tool to detect this expensive operation and either rework |
| 82 | // their code or report a compiler bug if that turns out to be the |
| 83 | // cause. |
| 84 | #ifndef NDEBUG |
| 85 | static cl::opt<bool> |
| 86 | ReportAccMoves("ppc-report-acc-moves" , |
| 87 | cl::desc("Emit information about accumulator register spills " |
| 88 | "and copies" ), |
| 89 | cl::Hidden, cl::init(false)); |
| 90 | #endif |
| 91 | |
| 92 | extern cl::opt<bool> DisableAutoPairedVecSt; |
| 93 | |
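| | // Forward declaration: returns the minimum alignment required for the offset |
| | // of memory opcode OpC (e.g. 4 for DS-Form and 16 for DQ-Form accesses). |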
| 94 | static unsigned offsetMinAlignForOpcode(unsigned OpC); |
| 95 | |
| 96 | PPCRegisterInfo::PPCRegisterInfo(const PPCTargetMachine &TM) |
| 97 | : PPCGenRegisterInfo(TM.isPPC64() ? PPC::LR8 : PPC::LR, |
| 98 | TM.isPPC64() ? 0 : 1, |
| 99 | TM.isPPC64() ? 0 : 1), |
| 100 | TM(TM) { |
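| | // ImmToIdxMap maps each immediate-offset (D/DS/DQ-Form) memory opcode to its |
| | // indexed (X-Form) counterpart, so that a frame access can be rewritten to |
| | // use an index register when the offset does not fit the immediate field. |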
| 101 | ImmToIdxMap[PPC::LD] = PPC::LDX; ImmToIdxMap[PPC::STD] = PPC::STDX; |
| 102 | ImmToIdxMap[PPC::LBZ] = PPC::LBZX; ImmToIdxMap[PPC::STB] = PPC::STBX; |
| 103 | ImmToIdxMap[PPC::LHZ] = PPC::LHZX; ImmToIdxMap[PPC::LHA] = PPC::LHAX; |
| 104 | ImmToIdxMap[PPC::LWZ] = PPC::LWZX; ImmToIdxMap[PPC::LWA] = PPC::LWAX; |
| 105 | ImmToIdxMap[PPC::LFS] = PPC::LFSX; ImmToIdxMap[PPC::LFD] = PPC::LFDX; |
| 106 | ImmToIdxMap[PPC::STH] = PPC::STHX; ImmToIdxMap[PPC::STW] = PPC::STWX; |
| 107 | ImmToIdxMap[PPC::STFS] = PPC::STFSX; ImmToIdxMap[PPC::STFD] = PPC::STFDX; |
| 108 | ImmToIdxMap[PPC::ADDI] = PPC::ADD4; |
| 109 | ImmToIdxMap[PPC::LWA_32] = PPC::LWAX_32; |
| 110 | |
| 111 | // 64-bit |
| 112 | ImmToIdxMap[PPC::LHA8] = PPC::LHAX8; ImmToIdxMap[PPC::LBZ8] = PPC::LBZX8; |
| 113 | ImmToIdxMap[PPC::LHZ8] = PPC::LHZX8; ImmToIdxMap[PPC::LWZ8] = PPC::LWZX8; |
| 114 | ImmToIdxMap[PPC::STB8] = PPC::STBX8; ImmToIdxMap[PPC::STH8] = PPC::STHX8; |
| 115 | ImmToIdxMap[PPC::STW8] = PPC::STWX8; ImmToIdxMap[PPC::STDU] = PPC::STDUX; |
| 116 | ImmToIdxMap[PPC::ADDI8] = PPC::ADD8; |
| 117 | ImmToIdxMap[PPC::LQ] = PPC::LQX_PSEUDO; |
| 118 | ImmToIdxMap[PPC::STQ] = PPC::STQX_PSEUDO; |
| 119 | |
| 120 | // VSX |
| 121 | ImmToIdxMap[PPC::DFLOADf32] = PPC::LXSSPX; |
| 122 | ImmToIdxMap[PPC::DFLOADf64] = PPC::LXSDX; |
| 123 | ImmToIdxMap[PPC::SPILLTOVSR_LD] = PPC::SPILLTOVSR_LDX; |
| 124 | ImmToIdxMap[PPC::SPILLTOVSR_ST] = PPC::SPILLTOVSR_STX; |
| 125 | ImmToIdxMap[PPC::DFSTOREf32] = PPC::STXSSPX; |
| 126 | ImmToIdxMap[PPC::DFSTOREf64] = PPC::STXSDX; |
| 127 | ImmToIdxMap[PPC::LXV] = PPC::LXVX; |
| 128 | ImmToIdxMap[PPC::LXSD] = PPC::LXSDX; |
| 129 | ImmToIdxMap[PPC::LXSSP] = PPC::LXSSPX; |
| 130 | ImmToIdxMap[PPC::STXV] = PPC::STXVX; |
| 131 | ImmToIdxMap[PPC::STXSD] = PPC::STXSDX; |
| 132 | ImmToIdxMap[PPC::STXSSP] = PPC::STXSSPX; |
| 133 | |
| 134 | // SPE |
| 135 | ImmToIdxMap[PPC::EVLDD] = PPC::EVLDDX; |
| 136 | ImmToIdxMap[PPC::EVSTDD] = PPC::EVSTDDX; |
| 137 | ImmToIdxMap[PPC::SPESTW] = PPC::SPESTWX; |
| 138 | ImmToIdxMap[PPC::SPELWZ] = PPC::SPELWZX; |
| 139 | |
| 140 | // Power10 |
| 141 | ImmToIdxMap[PPC::PLBZ] = PPC::LBZX; ImmToIdxMap[PPC::PLBZ8] = PPC::LBZX8; |
| 142 | ImmToIdxMap[PPC::PLHZ] = PPC::LHZX; ImmToIdxMap[PPC::PLHZ8] = PPC::LHZX8; |
| 143 | ImmToIdxMap[PPC::PLHA] = PPC::LHAX; ImmToIdxMap[PPC::PLHA8] = PPC::LHAX8; |
| 144 | ImmToIdxMap[PPC::PLWZ] = PPC::LWZX; ImmToIdxMap[PPC::PLWZ8] = PPC::LWZX8; |
| 145 | ImmToIdxMap[PPC::PLWA] = PPC::LWAX; ImmToIdxMap[PPC::PLWA8] = PPC::LWAX; |
| 146 | ImmToIdxMap[PPC::PLD] = PPC::LDX; ImmToIdxMap[PPC::PSTD] = PPC::STDX; |
| 147 | |
| 148 | ImmToIdxMap[PPC::PSTB] = PPC::STBX; ImmToIdxMap[PPC::PSTB8] = PPC::STBX8; |
| 149 | ImmToIdxMap[PPC::PSTH] = PPC::STHX; ImmToIdxMap[PPC::PSTH8] = PPC::STHX8; |
| 150 | ImmToIdxMap[PPC::PSTW] = PPC::STWX; ImmToIdxMap[PPC::PSTW8] = PPC::STWX8; |
| 151 | |
| 152 | ImmToIdxMap[PPC::PLFS] = PPC::LFSX; ImmToIdxMap[PPC::PSTFS] = PPC::STFSX; |
| 153 | ImmToIdxMap[PPC::PLFD] = PPC::LFDX; ImmToIdxMap[PPC::PSTFD] = PPC::STFDX; |
| 154 | ImmToIdxMap[PPC::PLXSSP] = PPC::LXSSPX; ImmToIdxMap[PPC::PSTXSSP] = PPC::STXSSPX; |
| 155 | ImmToIdxMap[PPC::PLXSD] = PPC::LXSDX; ImmToIdxMap[PPC::PSTXSD] = PPC::STXSDX; |
| 156 | ImmToIdxMap[PPC::PLXV] = PPC::LXVX; ImmToIdxMap[PPC::PSTXV] = PPC::STXVX; |
| 157 | |
| 158 | ImmToIdxMap[PPC::LXVP] = PPC::LXVPX; |
| 159 | ImmToIdxMap[PPC::STXVP] = PPC::STXVPX; |
| 160 | ImmToIdxMap[PPC::PLXVP] = PPC::LXVPX; |
| 161 | ImmToIdxMap[PPC::PSTXVP] = PPC::STXVPX; |
| 162 | } |
| 163 | |
| 164 | /// getPointerRegClass - Return the register class to use to hold pointers. |
| 165 | /// This is used for addressing modes. |
| 166 | const TargetRegisterClass * |
| 167 | PPCRegisterInfo::getPointerRegClass(unsigned Kind) const { |
| 168 | // Note that PPCInstrInfo::foldImmediate also directly uses this Kind value |
| 169 | // when it checks for ZERO folding. |
| 170 | if (Kind == 1) { |
| 171 | if (TM.isPPC64()) |
| 172 | return &PPC::G8RC_NOX0RegClass; |
| 173 | return &PPC::GPRC_NOR0RegClass; |
| 174 | } |
| 175 | |
| 176 | if (TM.isPPC64()) |
| 177 | return &PPC::G8RCRegClass; |
| 178 | return &PPC::GPRCRegClass; |
| 179 | } |
| 180 | |
| 181 | const MCPhysReg* |
| 182 | PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { |
| 183 | const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>(); |
| 184 | if (MF->getFunction().getCallingConv() == CallingConv::AnyReg) { |
| 185 | if (!TM.isPPC64() && Subtarget.isAIXABI()) |
| 186 | report_fatal_error(reason: "AnyReg unimplemented on 32-bit AIX." ); |
| 187 | if (Subtarget.hasVSX()) { |
| 188 | if (Subtarget.pairedVectorMemops()) |
| 189 | return CSR_64_AllRegs_VSRP_SaveList; |
| 190 | if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) |
| 191 | return CSR_64_AllRegs_AIX_Dflt_VSX_SaveList; |
| 192 | return CSR_64_AllRegs_VSX_SaveList; |
| 193 | } |
| 194 | if (Subtarget.hasAltivec()) { |
| 195 | if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) |
| 196 | return CSR_64_AllRegs_AIX_Dflt_Altivec_SaveList; |
| 197 | return CSR_64_AllRegs_Altivec_SaveList; |
| 198 | } |
| 199 | return CSR_64_AllRegs_SaveList; |
| 200 | } |
| 201 | |
| 202 | // On PPC64, we might need to save r2 (but only if it is not reserved). |
| 203 | // We do not need to treat R2 as callee-saved when using PC-Relative calls |
| 204 | // because any direct uses of R2 will cause it to be reserved. If the function |
| 205 | // is a leaf or the only uses of R2 are implicit uses for calls, the calls |
| 206 | // will use the @notoc relocation which will cause this function to set the |
| 207 | // st_other bit to 1, thereby communicating to its caller that it arbitrarily |
| 208 | // clobbers the TOC. |
| 209 | bool SaveR2 = MF->getRegInfo().isAllocatable(PhysReg: PPC::X2) && |
| 210 | !Subtarget.isUsingPCRelativeCalls(); |
| 211 | |
| 212 | // Cold calling convention CSRs. |
| 213 | if (MF->getFunction().getCallingConv() == CallingConv::Cold) { |
| 214 | if (Subtarget.isAIXABI()) |
| 215 | report_fatal_error(reason: "Cold calling unimplemented on AIX." ); |
| 216 | if (TM.isPPC64()) { |
| 217 | if (Subtarget.pairedVectorMemops()) |
| 218 | return SaveR2 ? CSR_SVR64_ColdCC_R2_VSRP_SaveList |
| 219 | : CSR_SVR64_ColdCC_VSRP_SaveList; |
| 220 | if (Subtarget.hasAltivec()) |
| 221 | return SaveR2 ? CSR_SVR64_ColdCC_R2_Altivec_SaveList |
| 222 | : CSR_SVR64_ColdCC_Altivec_SaveList; |
| 223 | return SaveR2 ? CSR_SVR64_ColdCC_R2_SaveList |
| 224 | : CSR_SVR64_ColdCC_SaveList; |
| 225 | } |
| 226 | // 32-bit targets. |
| 227 | if (Subtarget.pairedVectorMemops()) |
| 228 | return CSR_SVR32_ColdCC_VSRP_SaveList; |
| 229 | else if (Subtarget.hasAltivec()) |
| 230 | return CSR_SVR32_ColdCC_Altivec_SaveList; |
| 231 | else if (Subtarget.hasSPE()) |
| 232 | return CSR_SVR32_ColdCC_SPE_SaveList; |
| 233 | return CSR_SVR32_ColdCC_SaveList; |
| 234 | } |
| 235 | // Standard calling convention CSRs. |
| 236 | if (TM.isPPC64()) { |
| 237 | if (Subtarget.pairedVectorMemops()) { |
| 238 | if (Subtarget.isAIXABI()) { |
| 239 | if (!TM.getAIXExtendedAltivecABI()) |
| 240 | return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList; |
| 241 | return SaveR2 ? CSR_AIX64_R2_VSRP_SaveList : CSR_AIX64_VSRP_SaveList; |
| 242 | } |
| 243 | return SaveR2 ? CSR_SVR464_R2_VSRP_SaveList : CSR_SVR464_VSRP_SaveList; |
| 244 | } |
| 245 | if (Subtarget.hasAltivec() && |
| 246 | (!Subtarget.isAIXABI() || TM.getAIXExtendedAltivecABI())) { |
| 247 | return SaveR2 ? CSR_PPC64_R2_Altivec_SaveList |
| 248 | : CSR_PPC64_Altivec_SaveList; |
| 249 | } |
| 250 | return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList; |
| 251 | } |
| 252 | // 32-bit targets. |
| 253 | if (Subtarget.isAIXABI()) { |
| 254 | if (Subtarget.pairedVectorMemops()) |
| 255 | return TM.getAIXExtendedAltivecABI() ? CSR_AIX32_VSRP_SaveList |
| 256 | : CSR_AIX32_SaveList; |
| 257 | if (Subtarget.hasAltivec()) |
| 258 | return TM.getAIXExtendedAltivecABI() ? CSR_AIX32_Altivec_SaveList |
| 259 | : CSR_AIX32_SaveList; |
| 260 | return CSR_AIX32_SaveList; |
| 261 | } |
| 262 | if (Subtarget.pairedVectorMemops()) |
| 263 | return CSR_SVR432_VSRP_SaveList; |
| 264 | if (Subtarget.hasAltivec()) |
| 265 | return CSR_SVR432_Altivec_SaveList; |
| 266 | else if (Subtarget.hasSPE()) { |
| 267 | if (TM.isPositionIndependent() && !TM.isPPC64()) |
| 268 | return CSR_SVR432_SPE_NO_S30_31_SaveList; |
| 269 | return CSR_SVR432_SPE_SaveList; |
| 270 | } |
| 271 | return CSR_SVR432_SaveList; |
| 272 | } |
| 273 | |
| 274 | const uint32_t * |
| 275 | PPCRegisterInfo::getCallPreservedMask(const MachineFunction &MF, |
| 276 | CallingConv::ID CC) const { |
| 277 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 278 | if (CC == CallingConv::AnyReg) { |
| 279 | if (Subtarget.hasVSX()) { |
| 280 | if (Subtarget.pairedVectorMemops()) |
| 281 | return CSR_64_AllRegs_VSRP_RegMask; |
| 282 | if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) |
| 283 | return CSR_64_AllRegs_AIX_Dflt_VSX_RegMask; |
| 284 | return CSR_64_AllRegs_VSX_RegMask; |
| 285 | } |
| 286 | if (Subtarget.hasAltivec()) { |
| 287 | if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) |
| 288 | return CSR_64_AllRegs_AIX_Dflt_Altivec_RegMask; |
| 289 | return CSR_64_AllRegs_Altivec_RegMask; |
| 290 | } |
| 291 | return CSR_64_AllRegs_RegMask; |
| 292 | } |
| 293 | |
| 294 | if (Subtarget.isAIXABI()) { |
| 295 | if (Subtarget.pairedVectorMemops()) { |
| 296 | if (!TM.getAIXExtendedAltivecABI()) |
| 297 | return TM.isPPC64() ? CSR_PPC64_RegMask : CSR_AIX32_RegMask; |
| 298 | return TM.isPPC64() ? CSR_AIX64_VSRP_RegMask : CSR_AIX32_VSRP_RegMask; |
| 299 | } |
| 300 | return TM.isPPC64() |
| 301 | ? ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI()) |
| 302 | ? CSR_PPC64_Altivec_RegMask |
| 303 | : CSR_PPC64_RegMask) |
| 304 | : ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI()) |
| 305 | ? CSR_AIX32_Altivec_RegMask |
| 306 | : CSR_AIX32_RegMask); |
| 307 | } |
| 308 | |
| 309 | if (CC == CallingConv::Cold) { |
| 310 | if (TM.isPPC64()) |
| 311 | return Subtarget.pairedVectorMemops() |
| 312 | ? CSR_SVR64_ColdCC_VSRP_RegMask |
| 313 | : (Subtarget.hasAltivec() ? CSR_SVR64_ColdCC_Altivec_RegMask |
| 314 | : CSR_SVR64_ColdCC_RegMask); |
| 315 | else |
| 316 | return Subtarget.pairedVectorMemops() |
| 317 | ? CSR_SVR32_ColdCC_VSRP_RegMask |
| 318 | : (Subtarget.hasAltivec() |
| 319 | ? CSR_SVR32_ColdCC_Altivec_RegMask |
| 320 | : (Subtarget.hasSPE() ? CSR_SVR32_ColdCC_SPE_RegMask |
| 321 | : CSR_SVR32_ColdCC_RegMask)); |
| 322 | } |
| 323 | |
| 324 | if (TM.isPPC64()) |
| 325 | return Subtarget.pairedVectorMemops() |
| 326 | ? CSR_SVR464_VSRP_RegMask |
| 327 | : (Subtarget.hasAltivec() ? CSR_PPC64_Altivec_RegMask |
| 328 | : CSR_PPC64_RegMask); |
| 329 | else |
| 330 | return Subtarget.pairedVectorMemops() |
| 331 | ? CSR_SVR432_VSRP_RegMask |
| 332 | : (Subtarget.hasAltivec() |
| 333 | ? CSR_SVR432_Altivec_RegMask |
| 334 | : (Subtarget.hasSPE() |
| 335 | ? (TM.isPositionIndependent() |
| 336 | ? CSR_SVR432_SPE_NO_S30_31_RegMask |
| 337 | : CSR_SVR432_SPE_RegMask) |
| 338 | : CSR_SVR432_RegMask)); |
| 339 | } |
| 340 | |
| 341 | const uint32_t* |
| 342 | PPCRegisterInfo::getNoPreservedMask() const { |
| 343 | return CSR_NoRegs_RegMask; |
| 344 | } |
| 345 | |
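| | // ZERO, ZERO8 and RM are not real allocatable registers that can carry live |
| | // values (see getReservedRegs); remove them from the stackmap live-out mask. |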
| 346 | void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const { |
| 347 | for (unsigned PseudoReg : {PPC::ZERO, PPC::ZERO8, PPC::RM}) |
| 348 | Mask[PseudoReg / 32] &= ~(1u << (PseudoReg % 32)); |
| 349 | } |
| 350 | |
| 351 | BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const { |
| 352 | BitVector Reserved(getNumRegs()); |
| 353 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 354 | const PPCFrameLowering *TFI = getFrameLowering(MF); |
| 355 | |
| 356 | // The ZERO register is not really a register, but the representation of r0 |
| 357 | // when used in instructions that treat r0 as the constant 0. |
| 358 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::ZERO); |
| 359 | |
| 360 | // The FP register is also not really a register, but is the representation |
| 361 | // of the frame pointer register used by ISD::FRAMEADDR. |
| 362 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::FP); |
| 363 | |
| 364 | // The BP register is also not really a register, but is the representation |
| 365 | // of the base pointer register used by setjmp. |
| 366 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::BP); |
| 367 | |
| 368 | // The counter registers must be reserved so that counter-based loops can |
| 369 | // be correctly formed (and the mtctr instructions are not DCE'd). |
| 370 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::CTR); |
| 371 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::CTR8); |
| 372 | |
| 373 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::R1); |
| 374 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::LR); |
| 375 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::LR8); |
| 376 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::RM); |
| 377 | |
| 378 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::VRSAVE); |
| 379 | |
| 380 | const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); |
| 381 | bool UsesTOCBasePtr = FuncInfo->usesTOCBasePtr(); |
| 382 | // The SVR4 and AIX ABIs reserve r2; the SVR4 ABI also reserves r13. |
| 383 | if (Subtarget.isSVR4ABI() || Subtarget.isAIXABI()) { |
| 384 | // We only reserve r2 if we need to use the TOC pointer. If we have no |
| 385 | // explicit uses of the TOC pointer (meaning we're a leaf function with |
| 386 | // no constant-pool loads, etc.) and we have no potential uses inside an |
| 387 | // inline asm block, then we can treat r2 as an ordinary callee-saved |
| 388 | // register. |
| 389 | if (!TM.isPPC64() || UsesTOCBasePtr || MF.hasInlineAsm()) |
| 390 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::R2); // System-reserved register. |
| 391 | |
| 392 | if (Subtarget.isSVR4ABI()) |
| 393 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::R13); // Small Data Area pointer register. |
| 394 | } |
| 395 | |
| 396 | // On PPC64, r13 is the thread pointer. Never allocate this register. |
| 397 | if (TM.isPPC64()) |
| 398 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::R13); |
| 399 | |
| 400 | if (TFI->needsFP(MF)) |
| 401 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::R31); |
| 402 | |
| 403 | bool IsPositionIndependent = TM.isPositionIndependent(); |
| 404 | if (hasBasePointer(MF)) { |
| 405 | if (Subtarget.is32BitELFABI() && IsPositionIndependent) |
| 406 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::R29); |
| 407 | else |
| 408 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::R30); |
| 409 | } |
| 410 | |
| 411 | if (Subtarget.is32BitELFABI() && IsPositionIndependent) |
| 412 | markSuperRegs(RegisterSet&: Reserved, Reg: PPC::R30); |
| 413 | |
| 414 | // Reserve Altivec registers when Altivec is unavailable. |
| 415 | if (!Subtarget.hasAltivec()) |
| 416 | for (MCRegister Reg : PPC::VRRCRegClass) |
| 417 | markSuperRegs(RegisterSet&: Reserved, Reg); |
| 418 | |
| 419 | if (Subtarget.isAIXABI() && Subtarget.hasAltivec() && |
| 420 | !TM.getAIXExtendedAltivecABI()) { |
| 421 | // In the AIX default Altivec ABI, vector registers VR20-VR31 are reserved |
| 422 | // and cannot be used. |
| 423 | for (auto Reg : CSR_Altivec_SaveList) { |
| 424 | if (Reg == 0) |
| 425 | break; |
| 426 | markSuperRegs(RegisterSet&: Reserved, Reg); |
| 427 | for (MCRegAliasIterator AS(Reg, this, true); AS.isValid(); ++AS) { |
| 428 | Reserved.set(*AS); |
| 429 | } |
| 430 | } |
| 431 | } |
| 432 | |
| 433 | assert(checkAllSuperRegsMarked(Reserved)); |
| 434 | return Reserved; |
| 435 | } |
| 436 | |
| 437 | bool PPCRegisterInfo::isAsmClobberable(const MachineFunction &MF, |
| 438 | MCRegister PhysReg) const { |
| 439 | // CTR and LR registers are always reserved, but they are asm clobberable. |
| 440 | if (PhysReg == PPC::CTR || PhysReg == PPC::CTR8 || PhysReg == PPC::LR || |
| 441 | PhysReg == PPC::LR8) |
| 442 | return true; |
| 443 | |
| 444 | return !getReservedRegs(MF).test(Idx: PhysReg); |
| 445 | } |
| 446 | |
| 447 | bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const { |
| 448 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 449 | const PPCInstrInfo *InstrInfo = Subtarget.getInstrInfo(); |
| 450 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 451 | const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo(); |
| 452 | |
| 453 | LLVM_DEBUG(dbgs() << "requiresFrameIndexScavenging for " << MF.getName() |
| 454 | << ".\n" ); |
| 455 | // If the callee saved info is invalid we have to default to true for safety. |
| 456 | if (!MFI.isCalleeSavedInfoValid()) { |
| 457 | LLVM_DEBUG(dbgs() << "TRUE - Invalid callee saved info.\n" ); |
| 458 | return true; |
| 459 | } |
| 460 | |
| 461 | // We will require the use of X-Forms if the frame is larger than what can |
| 462 | // be represented in the signed 16-bit immediate of a D-Form. |
| 463 | // If we need an X-Form then we need a register to store the address offset. |
| 464 | unsigned FrameSize = MFI.getStackSize(); |
| 465 | // A signed 16-bit immediate means the FrameSize cannot use more than 15 bits. |
| 466 | if (FrameSize & ~0x7FFF) { |
| 467 | LLVM_DEBUG(dbgs() << "TRUE - Frame size is too large for D-Form.\n" ); |
| 468 | return true; |
| 469 | } |
| 470 | |
| 471 | // The callee saved info is valid so it can be traversed. |
| 472 | // Check for registers that need saving but do not have load or store |
| 473 | // forms where the address offset is an immediate. |
| 474 | for (const CalleeSavedInfo &CSI : Info) { |
| 475 | // If the spill is to a register no scavenging is required. |
| 476 | if (CSI.isSpilledToReg()) |
| 477 | continue; |
| 478 | |
| 479 | int FrIdx = CSI.getFrameIdx(); |
| 480 | Register Reg = CSI.getReg(); |
| 481 | |
| 482 | const TargetRegisterClass *RC = getMinimalPhysRegClass(Reg); |
| 483 | unsigned Opcode = InstrInfo->getStoreOpcodeForSpill(RC); |
| 484 | if (!MFI.isFixedObjectIndex(ObjectIdx: FrIdx)) { |
| 485 | // This is not a fixed object. If it requires alignment then we may still |
| 486 | // need to use the XForm. |
| 487 | if (offsetMinAlignForOpcode(OpC: Opcode) > 1) { |
| 488 | LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode) |
| 489 | << " for register " << printReg(Reg, this) << ".\n" ); |
| 490 | LLVM_DEBUG(dbgs() << "TRUE - Not fixed frame object that requires " |
| 491 | << "alignment.\n" ); |
| 492 | return true; |
| 493 | } |
| 494 | } |
| 495 | |
| 496 | // This is either: |
| 497 | // 1) A fixed frame index object, which we know is aligned, so as long as |
| 498 | // we have a valid DForm/DSForm/DQForm (non-XForm) we don't need to |
| 499 | // consider the alignment here. |
| 500 | // 2) A non-fixed object, in which case the previous check tells us that |
| 501 | // the minimum required alignment is no more than 1. |
| 502 | if (InstrInfo->isXFormMemOp(Opcode)) { |
| 503 | LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode) |
| 504 | << " for register " << printReg(Reg, this) << ".\n" ); |
| 505 | LLVM_DEBUG(dbgs() << "TRUE - Memory operand is X-Form.\n" ); |
| 506 | return true; |
| 507 | } |
| 508 | |
| 509 | // This is a spill/restore of a quadword. |
| 510 | if ((Opcode == PPC::RESTORE_QUADWORD) || (Opcode == PPC::SPILL_QUADWORD)) { |
| 511 | LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode) |
| 512 | << " for register " << printReg(Reg, this) << ".\n" ); |
| 513 | LLVM_DEBUG(dbgs() << "TRUE - Memory operand is a quadword.\n" ); |
| 514 | return true; |
| 515 | } |
| 516 | } |
| 517 | LLVM_DEBUG(dbgs() << "FALSE - Scavenging is not required.\n" ); |
| 518 | return false; |
| 519 | } |
| 520 | |
| 521 | bool PPCRegisterInfo::requiresVirtualBaseRegisters( |
| 522 | const MachineFunction &MF) const { |
| 523 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 524 | // Do not use virtual base registers when ROP protection is turned on. |
| 525 | // Virtual base registers break the layout of the local variable space and may |
| 526 | // push the ROP Hash location past the 512 byte range of the ROP store |
| 527 | // instruction. |
| 528 | return !Subtarget.hasROPProtect(); |
| 529 | } |
| 530 | |
| 531 | bool PPCRegisterInfo::isCallerPreservedPhysReg(MCRegister PhysReg, |
| 532 | const MachineFunction &MF) const { |
| 533 | assert(PhysReg.isPhysical()); |
| 534 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 535 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 536 | |
| 537 | if (!Subtarget.is64BitELFABI() && !Subtarget.isAIXABI()) |
| 538 | return false; |
| 539 | if (PhysReg == Subtarget.getTOCPointerRegister()) |
| 540 | // X2/R2 is guaranteed to be preserved within a function if it is reserved. |
| 541 | // The reason it's reserved is that it's the TOC pointer (and the function |
| 542 | // uses the TOC). In functions where it isn't reserved (i.e. leaf functions |
| 543 | // with no TOC access), we can't claim that it is preserved. |
| 544 | return (getReservedRegs(MF).test(Idx: PhysReg)); |
| 545 | if (StackPtrConst && PhysReg == Subtarget.getStackPointerRegister() && |
| 546 | !MFI.hasVarSizedObjects() && !MFI.hasOpaqueSPAdjustment()) |
| 547 | // The value of the stack pointer does not change within a function after |
| 548 | // the prologue and before the epilogue if there are no dynamic allocations |
| 549 | // and no inline asm which clobbers X1/R1. |
| 550 | return true; |
| 551 | return false; |
| 552 | } |
| 553 | |
| 554 | bool PPCRegisterInfo::getRegAllocationHints(Register VirtReg, |
| 555 | ArrayRef<MCPhysReg> Order, |
| 556 | SmallVectorImpl<MCPhysReg> &Hints, |
| 557 | const MachineFunction &MF, |
| 558 | const VirtRegMap *VRM, |
| 559 | const LiveRegMatrix *Matrix) const { |
| 560 | const MachineRegisterInfo *MRI = &MF.getRegInfo(); |
| 561 | |
| 562 | // Call the base implementation first to set any hints based on the usual |
| 563 | // heuristics and decide what the return value should be. We want to return |
| 564 | // the same value returned by the base implementation. If the base |
| 565 | // implementation decides to return true and force the allocation then we |
| 566 | // will leave it as such. On the other hand if the base implementation |
| 567 | // decides to return false the following code will not force the allocation |
| 568 | // as we are just looking to provide a hint. |
| 569 | bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints( |
| 570 | VirtReg, Order, Hints, MF, VRM, Matrix); |
| 571 | |
| 572 | // Don't use the allocation hints for ISAFuture. |
| 573 | // The WACC registers used in ISAFuture are unlike the ACC registers on |
| 574 | // Power10, so this register allocation hint logic does not apply. |
| 575 | if (MF.getSubtarget<PPCSubtarget>().isISAFuture()) |
| 576 | return BaseImplRetVal; |
| 577 | |
| 578 | // We are interested in instructions that copy values to ACC/UACC. |
| 579 | // The copy into UACC will simply be a COPY to a subreg, so we |
| 580 | // want to allocate the corresponding physical subreg for the source. |
| 581 | // The copy into ACC will be a BUILD_UACC, so we want to allocate |
| 582 | // the UACC with the same number for the source. |
| 583 | const TargetRegisterClass *RegClass = MRI->getRegClass(Reg: VirtReg); |
| 584 | for (MachineInstr &Use : MRI->reg_nodbg_instructions(Reg: VirtReg)) { |
| 585 | const MachineOperand *ResultOp = nullptr; |
| 586 | Register ResultReg; |
| 587 | switch (Use.getOpcode()) { |
| 588 | case TargetOpcode::COPY: { |
| 589 | ResultOp = &Use.getOperand(i: 0); |
| 590 | ResultReg = ResultOp->getReg(); |
| 591 | if (ResultReg.isVirtual() && |
| 592 | MRI->getRegClass(Reg: ResultReg)->contains(Reg: PPC::UACC0) && |
| 593 | VRM->hasPhys(virtReg: ResultReg)) { |
| 594 | Register UACCPhys = VRM->getPhys(virtReg: ResultReg); |
| 595 | Register HintReg; |
| 596 | if (RegClass->contains(Reg: PPC::VSRp0)) { |
| 597 | HintReg = getSubReg(Reg: UACCPhys, Idx: ResultOp->getSubReg()); |
| 598 | // Ensure that the hint is a VSRp register. |
| 599 | if (HintReg >= PPC::VSRp0 && HintReg <= PPC::VSRp31) |
| 600 | Hints.push_back(Elt: HintReg); |
| 601 | } else if (RegClass->contains(Reg: PPC::ACC0)) { |
| 602 | HintReg = PPC::ACC0 + (UACCPhys - PPC::UACC0); |
| 603 | if (HintReg >= PPC::ACC0 && HintReg <= PPC::ACC7) |
| 604 | Hints.push_back(Elt: HintReg); |
| 605 | } |
| 606 | } |
| 607 | break; |
| 608 | } |
| 609 | case PPC::BUILD_UACC: { |
| 610 | ResultOp = &Use.getOperand(i: 0); |
| 611 | ResultReg = ResultOp->getReg(); |
| 612 | if (MRI->getRegClass(Reg: ResultReg)->contains(Reg: PPC::ACC0) && |
| 613 | VRM->hasPhys(virtReg: ResultReg)) { |
| 614 | Register ACCPhys = VRM->getPhys(virtReg: ResultReg); |
| 615 | assert((ACCPhys >= PPC::ACC0 && ACCPhys <= PPC::ACC7) && |
| 616 | "Expecting an ACC register for BUILD_UACC." ); |
| 617 | Register HintReg = PPC::UACC0 + (ACCPhys - PPC::ACC0); |
| 618 | Hints.push_back(Elt: HintReg); |
| 619 | } |
| 620 | break; |
| 621 | } |
| 622 | } |
| 623 | } |
| 624 | return BaseImplRetVal; |
| 625 | } |
| 626 | |
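| | // Copies to or from the carry register (CARRYRC) must be routed through a |
| | // general-purpose register of the appropriate width. |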
| 627 | const TargetRegisterClass * |
| 628 | PPCRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const { |
| 629 | if (RC == &PPC::CARRYRCRegClass) |
| 630 | return TM.isPPC64() ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; |
| 631 | return RC; |
| 632 | } |
| 633 | |
| 634 | unsigned PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, |
| 635 | MachineFunction &MF) const { |
| 636 | const PPCFrameLowering *TFI = getFrameLowering(MF); |
| 637 | const unsigned DefaultSafety = 1; |
| 638 | |
| 639 | switch (RC->getID()) { |
| 640 | default: |
| 641 | return 0; |
| 642 | case PPC::G8RC_NOX0RegClassID: |
| 643 | case PPC::GPRC_NOR0RegClassID: |
| 644 | case PPC::SPERCRegClassID: |
| 645 | case PPC::G8RCRegClassID: |
| 646 | case PPC::GPRCRegClassID: { |
| 647 | unsigned FP = TFI->hasFP(MF) ? 1 : 0; |
| 648 | return 32 - FP - DefaultSafety; |
| 649 | } |
| 650 | case PPC::F4RCRegClassID: |
| 651 | case PPC::F8RCRegClassID: |
| 652 | case PPC::VSLRCRegClassID: |
| 653 | return 32 - DefaultSafety; |
| 654 | case PPC::VFRCRegClassID: |
| 655 | case PPC::VRRCRegClassID: { |
| 656 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 657 | // Vector registers VR20-VR31 are reserved and cannot be used in the default |
| 658 | // Altivec ABI on AIX. |
| 659 | if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI()) |
| 660 | return 20 - DefaultSafety; |
| 661 | } |
| 662 | return 32 - DefaultSafety; |
| 663 | case PPC::VSFRCRegClassID: |
| 664 | case PPC::VSSRCRegClassID: |
| 665 | case PPC::VSRCRegClassID: { |
| 666 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 667 | if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI()) |
| 668 | // Vector registers VR20-VR31 are reserved and cannot be used in the |
| 669 | // default Altivec ABI on AIX. |
| 670 | return 52 - DefaultSafety; |
| 671 | } |
| 672 | return 64 - DefaultSafety; |
| 673 | case PPC::CRRCRegClassID: |
| 674 | return 8 - DefaultSafety; |
| 675 | } |
| 676 | } |
| 677 | |
| 678 | const TargetRegisterClass * |
| 679 | PPCRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC, |
| 680 | const MachineFunction &MF) const { |
| 681 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 682 | const auto *DefaultSuperclass = |
| 683 | TargetRegisterInfo::getLargestLegalSuperClass(RC, MF); |
| 684 | if (Subtarget.hasVSX()) { |
| 685 | // With VSX, we can inflate various sub-register classes to the full VSX |
| 686 | // register set. |
| 687 | |
| 688 | // For Power9 we allow the user to enable GPR to vector spills. |
| 689 | // FIXME: Currently limited to spilling GP8RC. A follow on patch will add |
| 690 | // support to spill GPRC. |
| 691 | if (TM.isELFv2ABI() || Subtarget.isAIXABI()) { |
| 692 | if (Subtarget.hasP9Vector() && EnableGPRToVecSpills && |
| 693 | RC == &PPC::G8RCRegClass) { |
| 694 | InflateGP8RC++; |
| 695 | return &PPC::SPILLTOVSRRCRegClass; |
| 696 | } |
| 697 | if (RC == &PPC::GPRCRegClass && EnableGPRToVecSpills) |
| 698 | InflateGPRC++; |
| 699 | } |
| 700 | |
| 701 | for (unsigned SuperID : RC->superclasses()) { |
| 702 | if (getRegSizeInBits(RC: *getRegClass(i: SuperID)) != getRegSizeInBits(RC: *RC)) |
| 703 | continue; |
| 704 | |
| 705 | switch (SuperID) { |
| 706 | case PPC::VSSRCRegClassID: |
| 707 | return Subtarget.hasP8Vector() ? getRegClass(i: SuperID) |
| 708 | : DefaultSuperclass; |
| 709 | case PPC::VSFRCRegClassID: |
| 710 | case PPC::VSRCRegClassID: |
| 711 | return getRegClass(i: SuperID); |
| 712 | case PPC::VSRpRCRegClassID: |
| 713 | return Subtarget.pairedVectorMemops() ? getRegClass(i: SuperID) |
| 714 | : DefaultSuperclass; |
| 715 | case PPC::ACCRCRegClassID: |
| 716 | case PPC::UACCRCRegClassID: |
| 717 | return Subtarget.hasMMA() ? getRegClass(i: SuperID) : DefaultSuperclass; |
| 718 | } |
| 719 | } |
| 720 | } |
| 721 | |
| 722 | return DefaultSuperclass; |
| 723 | } |
| 724 | |
| 725 | //===----------------------------------------------------------------------===// |
| 726 | // Stack Frame Processing methods |
| 727 | //===----------------------------------------------------------------------===// |
| 728 | |
| 729 | /// lowerDynamicAlloc - Generate the code for allocating an object in the |
| 730 | /// current frame. The sequence of code will be in the general form |
| 731 | /// |
| 732 | /// addi R0, SP, \#frameSize ; get the address of the previous frame |
| 733 | /// stwux R0, SP, Rnegsize ; add and update the SP with the negated size |
| 734 | /// addi Rnew, SP, \#maxCallFrameSize ; get the top of the allocation |
| 735 | /// |
| 736 | void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const { |
| 737 | // Get the instruction. |
| 738 | MachineInstr &MI = *II; |
| 739 | // Get the instruction's basic block. |
| 740 | MachineBasicBlock &MBB = *MI.getParent(); |
| 741 | // Get the basic block's function. |
| 742 | MachineFunction &MF = *MBB.getParent(); |
| 743 | // Get the frame info. |
| 744 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 745 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 746 | // Get the instruction info. |
| 747 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 748 | // Determine whether 64-bit pointers are used. |
| 749 | bool LP64 = TM.isPPC64(); |
| 750 | DebugLoc dl = MI.getDebugLoc(); |
| 751 | |
| 752 | // Get the maximum call stack size. |
| 753 | unsigned maxCallFrameSize = MFI.getMaxCallFrameSize(); |
| 754 | Align MaxAlign = MFI.getMaxAlign(); |
| 755 | assert(isAligned(MaxAlign, maxCallFrameSize) && |
| 756 | "Maximum call-frame size not sufficiently aligned" ); |
| 757 | (void)MaxAlign; |
| 758 | |
| 759 | const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; |
| 760 | const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; |
| 761 | Register Reg = MF.getRegInfo().createVirtualRegister(RegClass: LP64 ? G8RC : GPRC); |
| 762 | bool KillNegSizeReg = MI.getOperand(i: 1).isKill(); |
| 763 | Register NegSizeReg = MI.getOperand(i: 1).getReg(); |
| 764 | |
| 765 | prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, FramePointer&: Reg); |
| 766 | // Grow the stack and update the stack pointer link, then determine the |
| 767 | // address of the newly allocated space. |
| 768 | if (LP64) { |
| 769 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::STDUX), DestReg: PPC::X1) |
| 770 | .addReg(RegNo: Reg, Flags: RegState::Kill) |
| 771 | .addReg(RegNo: PPC::X1) |
| 772 | .addReg(RegNo: NegSizeReg, Flags: getKillRegState(B: KillNegSizeReg)); |
| 773 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::ADDI8), DestReg: MI.getOperand(i: 0).getReg()) |
| 774 | .addReg(RegNo: PPC::X1) |
| 775 | .addImm(Val: maxCallFrameSize); |
| 776 | } else { |
| 777 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::STWUX), DestReg: PPC::R1) |
| 778 | .addReg(RegNo: Reg, Flags: RegState::Kill) |
| 779 | .addReg(RegNo: PPC::R1) |
| 780 | .addReg(RegNo: NegSizeReg, Flags: getKillRegState(B: KillNegSizeReg)); |
| 781 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::ADDI), DestReg: MI.getOperand(i: 0).getReg()) |
| 782 | .addReg(RegNo: PPC::R1) |
| 783 | .addImm(Val: maxCallFrameSize); |
| 784 | } |
| 785 | |
| 786 | // Discard the DYNALLOC instruction. |
| 787 | MBB.erase(I: II); |
| 788 | } |
| 789 | |
| 790 | /// To accomplish dynamic stack allocation, we have to calculate the exact size |
| 791 | /// to subtract from the stack pointer according to the alignment information, |
| 792 | /// and get the previous frame pointer. |
| 793 | void PPCRegisterInfo::prepareDynamicAlloca(MachineBasicBlock::iterator II, |
| 794 | Register &NegSizeReg, |
| 795 | bool &KillNegSizeReg, |
| 796 | Register &FramePointer) const { |
| 797 | // Get the instruction. |
| 798 | MachineInstr &MI = *II; |
| 799 | // Get the instruction's basic block. |
| 800 | MachineBasicBlock &MBB = *MI.getParent(); |
| 801 | // Get the basic block's function. |
| 802 | MachineFunction &MF = *MBB.getParent(); |
| 803 | // Get the frame info. |
| 804 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 805 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 806 | // Get the instruction info. |
| 807 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 808 | // Determine whether 64-bit pointers are used. |
| 809 | bool LP64 = TM.isPPC64(); |
| 810 | DebugLoc dl = MI.getDebugLoc(); |
| 811 | // Get the total frame size. |
| 812 | unsigned FrameSize = MFI.getStackSize(); |
| 813 | |
| 814 | // Get stack alignments. |
| 815 | const PPCFrameLowering *TFI = getFrameLowering(MF); |
| 816 | Align TargetAlign = TFI->getStackAlign(); |
| 817 | Align MaxAlign = MFI.getMaxAlign(); |
| 818 | |
| 819 | // Determine the previous frame's address. If FrameSize can't be |
| 820 | // represented as 16 bits or we need special alignment, then we load the |
| 821 | // previous frame's address from 0(SP). Why not do an addis of the hi? |
| 822 | // Because R0 is our only safe tmp register and addi/addis treat R0 as zero. |
| 823 | // Constructing the constant and adding would take 3 instructions. |
| 824 | // Fortunately, a frame greater than 32K is rare. |
| 825 | const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; |
| 826 | const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; |
| 827 | |
| 828 | if (MaxAlign < TargetAlign && isInt<16>(x: FrameSize)) { |
| 829 | if (LP64) |
| 830 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::ADDI8), DestReg: FramePointer) |
| 831 | .addReg(RegNo: PPC::X31) |
| 832 | .addImm(Val: FrameSize); |
| 833 | else |
| 834 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::ADDI), DestReg: FramePointer) |
| 835 | .addReg(RegNo: PPC::R31) |
| 836 | .addImm(Val: FrameSize); |
| 837 | } else if (LP64) { |
| 838 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::LD), DestReg: FramePointer) |
| 839 | .addImm(Val: 0) |
| 840 | .addReg(RegNo: PPC::X1); |
| 841 | } else { |
| 842 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::LWZ), DestReg: FramePointer) |
| 843 | .addImm(Val: 0) |
| 844 | .addReg(RegNo: PPC::R1); |
| 845 | } |
| 846 | // Determine the actual NegSizeReg according to alignment info. |
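| | // When extra alignment is needed, the (negative) size is ANDed with |
| | // ~(MaxAlign - 1); clearing the low bits of a negative value makes it more |
| | // negative, i.e. rounds the allocation size up to a multiple of MaxAlign. |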
| 847 | if (LP64) { |
| 848 | if (MaxAlign > TargetAlign) { |
| 849 | unsigned UnalNegSizeReg = NegSizeReg; |
| 850 | NegSizeReg = MF.getRegInfo().createVirtualRegister(RegClass: G8RC); |
| 851 | |
| 852 | // Unfortunately, there is no andi, only andi., and we can't insert that |
| 853 | // here because we might clobber cr0 while it is live. |
| 854 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::LI8), DestReg: NegSizeReg) |
| 855 | .addImm(Val: ~(MaxAlign.value() - 1)); |
| 856 | |
| 857 | unsigned NegSizeReg1 = NegSizeReg; |
| 858 | NegSizeReg = MF.getRegInfo().createVirtualRegister(RegClass: G8RC); |
| 859 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::AND8), DestReg: NegSizeReg) |
| 860 | .addReg(RegNo: UnalNegSizeReg, Flags: getKillRegState(B: KillNegSizeReg)) |
| 861 | .addReg(RegNo: NegSizeReg1, Flags: RegState::Kill); |
| 862 | KillNegSizeReg = true; |
| 863 | } |
| 864 | } else { |
| 865 | if (MaxAlign > TargetAlign) { |
| 866 | unsigned UnalNegSizeReg = NegSizeReg; |
| 867 | NegSizeReg = MF.getRegInfo().createVirtualRegister(RegClass: GPRC); |
| 868 | |
| 869 | // Unfortunately, there is no andi, only andi., and we can't insert that |
| 870 | // here because we might clobber cr0 while it is live. |
| 871 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::LI), DestReg: NegSizeReg) |
| 872 | .addImm(Val: ~(MaxAlign.value() - 1)); |
| 873 | |
| 874 | unsigned NegSizeReg1 = NegSizeReg; |
| 875 | NegSizeReg = MF.getRegInfo().createVirtualRegister(RegClass: GPRC); |
| 876 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::AND), DestReg: NegSizeReg) |
| 877 | .addReg(RegNo: UnalNegSizeReg, Flags: getKillRegState(B: KillNegSizeReg)) |
| 878 | .addReg(RegNo: NegSizeReg1, Flags: RegState::Kill); |
| 879 | KillNegSizeReg = true; |
| 880 | } |
| 881 | } |
| 882 | } |
| 883 | |
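| | /// lowerPrepareProbedAlloca - Expand the prepare-probed-alloca pseudo: |
| | /// materialize the previous frame pointer and the (alignment-adjusted) |
| | /// negated allocation size into the pseudo's results so that the probed |
| | /// stack allocation sequence can consume them. |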
| 884 | void PPCRegisterInfo::lowerPrepareProbedAlloca( |
| 885 | MachineBasicBlock::iterator II) const { |
| 886 | MachineInstr &MI = *II; |
| 887 | // Get the instruction's basic block. |
| 888 | MachineBasicBlock &MBB = *MI.getParent(); |
| 889 | // Get the basic block's function. |
| 890 | MachineFunction &MF = *MBB.getParent(); |
| 891 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 892 | // Get the instruction info. |
| 893 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 894 | // Determine whether 64-bit pointers are used. |
| 895 | bool LP64 = TM.isPPC64(); |
| 896 | DebugLoc dl = MI.getDebugLoc(); |
| 897 | Register FramePointer = MI.getOperand(i: 0).getReg(); |
| 898 | const Register ActualNegSizeReg = MI.getOperand(i: 1).getReg(); |
| 899 | bool KillNegSizeReg = MI.getOperand(i: 2).isKill(); |
| 900 | Register NegSizeReg = MI.getOperand(i: 2).getReg(); |
| 901 | const MCInstrDesc &CopyInst = TII.get(Opcode: LP64 ? PPC::OR8 : PPC::OR); |
| 902 | // RegAllocator might assign FramePointer and NegSizeReg to the same physreg. |
| 903 | if (FramePointer == NegSizeReg) { |
| 904 | assert(KillNegSizeReg && "FramePointer is a def and NegSizeReg is a use, " |
| 905 | "NegSizeReg should be killed" ); |
| 906 | // FramePointer is clobbered before NegSizeReg is used in |
| 907 | // prepareDynamicAlloca, so save NegSizeReg in ActualNegSizeReg to avoid |
| 908 | // misuse. |
| 909 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: CopyInst, DestReg: ActualNegSizeReg) |
| 910 | .addReg(RegNo: NegSizeReg) |
| 911 | .addReg(RegNo: NegSizeReg); |
| 912 | NegSizeReg = ActualNegSizeReg; |
| 913 | KillNegSizeReg = false; |
| 914 | } |
| 915 | prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, FramePointer); |
| 916 | // NegSizeReg might be updated in prepareDynamicAlloca if MaxAlign > |
| 917 | // TargetAlign. |
| 918 | if (NegSizeReg != ActualNegSizeReg) |
| 919 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: CopyInst, DestReg: ActualNegSizeReg) |
| 920 | .addReg(RegNo: NegSizeReg) |
| 921 | .addReg(RegNo: NegSizeReg); |
| 922 | MBB.erase(I: II); |
| 923 | } |
| 924 | |
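| | /// lowerDynamicAreaOffset - Replace the dynamic-area-offset pseudo with a |
| | /// load-immediate of the maximum call-frame size, i.e. the offset from the |
| | /// stack pointer to the dynamically allocated area. |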
| 925 | void PPCRegisterInfo::lowerDynamicAreaOffset( |
| 926 | MachineBasicBlock::iterator II) const { |
| 927 | // Get the instruction. |
| 928 | MachineInstr &MI = *II; |
| 929 | // Get the instruction's basic block. |
| 930 | MachineBasicBlock &MBB = *MI.getParent(); |
| 931 | // Get the basic block's function. |
| 932 | MachineFunction &MF = *MBB.getParent(); |
| 933 | // Get the frame info. |
| 934 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 935 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 936 | // Get the instruction info. |
| 937 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 938 | |
| 939 | unsigned maxCallFrameSize = MFI.getMaxCallFrameSize(); |
| 940 | bool is64Bit = TM.isPPC64(); |
| 941 | DebugLoc dl = MI.getDebugLoc(); |
| 942 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: is64Bit ? PPC::LI8 : PPC::LI), |
| 943 | DestReg: MI.getOperand(i: 0).getReg()) |
| 944 | .addImm(Val: maxCallFrameSize); |
| 945 | MBB.erase(I: II); |
| 946 | } |
| 947 | |
| 948 | /// lowerCRSpilling - Generate the code for spilling a CR register. Instead of |
| 949 | /// reserving a whole register (R0), we scrounge for one here. This generates |
| 950 | /// code like this: |
| 951 | /// |
| 952 | /// mfcr rA ; Move the conditional register into GPR rA. |
| 953 | /// rlwinm rA, rA, SB, 0, 31 ; Shift the bits left so they are in CR0's slot. |
| 954 | /// stw rA, FI ; Store rA to the frame. |
| 955 | /// |
| 956 | void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II, |
| 957 | unsigned FrameIndex) const { |
| 958 | // Get the instruction. |
| 959 | MachineInstr &MI = *II; // ; SPILL_CR <SrcReg>, <offset> |
| 960 | // Get the instruction's basic block. |
| 961 | MachineBasicBlock &MBB = *MI.getParent(); |
| 962 | MachineFunction &MF = *MBB.getParent(); |
| 963 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 964 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 965 | DebugLoc dl = MI.getDebugLoc(); |
| 966 | |
| 967 | bool LP64 = TM.isPPC64(); |
| 968 | const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; |
| 969 | const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; |
| 970 | |
| 971 | Register Reg = MF.getRegInfo().createVirtualRegister(RegClass: LP64 ? G8RC : GPRC); |
| 972 | Register SrcReg = MI.getOperand(i: 0).getReg(); |
| 973 | |
| 974 | // We need to store the CR in the low 4-bits of the saved value. First, issue |
| 975 | // an MFOCRF to save all of the CRBits and, if needed, kill the SrcReg. |
| 976 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), DestReg: Reg) |
| 977 | .addReg(RegNo: SrcReg, Flags: getKillRegState(B: MI.getOperand(i: 0).isKill())); |
| 978 | |
| 979 | // If the saved register wasn't CR0, shift the bits left so that they are in |
| 980 | // CR0's slot. |
| 981 | if (SrcReg != PPC::CR0) { |
| 982 | Register Reg1 = Reg; |
| 983 | Reg = MF.getRegInfo().createVirtualRegister(RegClass: LP64 ? G8RC : GPRC); |
| 984 | |
| 985 | // rlwinm rA, rA, ShiftBits, 0, 31. |
| 986 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::RLWINM8 : PPC::RLWINM), DestReg: Reg) |
| 987 | .addReg(RegNo: Reg1, Flags: RegState::Kill) |
| 988 | .addImm(Val: getEncodingValue(Reg: SrcReg) * 4) |
| 989 | .addImm(Val: 0) |
| 990 | .addImm(Val: 31); |
| 991 | } |
| 992 | |
| 993 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::STW8 : PPC::STW)) |
| 994 | .addReg(RegNo: Reg, Flags: RegState::Kill), |
| 995 | FI: FrameIndex); |
| 996 | |
| 997 | // Discard the pseudo instruction. |
| 998 | MBB.erase(I: II); |
| 999 | } |
| 1000 | |
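| | /// lowerCRRestore - Reload a CR register spilled by lowerCRSpilling. The |
| | /// generated code has the general form |
| | /// |
| | /// lwz rA, FI ; Load the saved word from the frame. |
| | /// rlwinm rA, rA, 32-SB, 0, 31 ; Rotate the field back into its own slot |
| | /// ; (only if the destination is not CR0). |
| | /// mtocrf CRx, rA ; Move the field into the CR register. |
| | /// |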
| 1001 | void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II, |
| 1002 | unsigned FrameIndex) const { |
| 1003 | // Get the instruction. |
| 1004 | MachineInstr &MI = *II; // ; <DestReg> = RESTORE_CR <offset> |
| 1005 | // Get the instruction's basic block. |
| 1006 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1007 | MachineFunction &MF = *MBB.getParent(); |
| 1008 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1009 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1010 | DebugLoc dl = MI.getDebugLoc(); |
| 1011 | |
| 1012 | bool LP64 = TM.isPPC64(); |
| 1013 | const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; |
| 1014 | const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; |
| 1015 | |
| 1016 | Register Reg = MF.getRegInfo().createVirtualRegister(RegClass: LP64 ? G8RC : GPRC); |
| 1017 | Register DestReg = MI.getOperand(i: 0).getReg(); |
| 1018 | assert(MI.definesRegister(DestReg, /*TRI=*/nullptr) && |
| 1019 | "RESTORE_CR does not define its destination" ); |
| 1020 | |
| 1021 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::LWZ8 : PPC::LWZ), |
| 1022 | DestReg: Reg), FI: FrameIndex); |
| 1023 | |
| 1024 | // If the reloaded register isn't CR0, shift the bits right so that they are |
| 1025 | // in the right CR's slot. |
| 1026 | if (DestReg != PPC::CR0) { |
| 1027 | Register Reg1 = Reg; |
| 1028 | Reg = MF.getRegInfo().createVirtualRegister(RegClass: LP64 ? G8RC : GPRC); |
| 1029 | |
| 1030 | unsigned ShiftBits = getEncodingValue(Reg: DestReg)*4; |
| 1031 | // rlwinm r11, r11, 32-ShiftBits, 0, 31. |
| 1032 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::RLWINM8 : PPC::RLWINM), DestReg: Reg) |
| 1033 | .addReg(RegNo: Reg1, Flags: RegState::Kill).addImm(Val: 32-ShiftBits).addImm(Val: 0) |
| 1034 | .addImm(Val: 31); |
| 1035 | } |
| 1036 | |
| 1037 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), DestReg) |
| 1038 | .addReg(RegNo: Reg, Flags: RegState::Kill); |
| 1039 | |
| 1040 | // Discard the pseudo instruction. |
| 1041 | MBB.erase(I: II); |
| 1042 | } |
| 1043 | |
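| | /// lowerCRBitSpilling - Spill a single CR bit. If the bit's value is already |
| | /// known (CRSET/CRUNSET), a constant is stored instead; on ISA 3.1 SETNBC |
| | /// (and on ISA 3.0 SETB, for LT bits) extracts the bit directly; otherwise |
| | /// the containing CR field is moved to a GPR, rotated and masked before the |
| | /// store. |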
| 1044 | void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II, |
| 1045 | unsigned FrameIndex) const { |
| 1046 | // Get the instruction. |
| 1047 | MachineInstr &MI = *II; // ; SPILL_CRBIT <SrcReg>, <offset> |
| 1048 | // Get the instruction's basic block. |
| 1049 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1050 | MachineFunction &MF = *MBB.getParent(); |
| 1051 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1052 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1053 | const TargetRegisterInfo* TRI = Subtarget.getRegisterInfo(); |
| 1054 | DebugLoc dl = MI.getDebugLoc(); |
| 1055 | |
| 1056 | bool LP64 = TM.isPPC64(); |
| 1057 | const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; |
| 1058 | const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; |
| 1059 | |
| 1060 | Register Reg = MF.getRegInfo().createVirtualRegister(RegClass: LP64 ? G8RC : GPRC); |
| 1061 | Register SrcReg = MI.getOperand(i: 0).getReg(); |
| 1062 | |
| 1063 | // Search up the BB to find the definition of the CR bit. |
| 1064 | MachineBasicBlock::reverse_iterator Ins = MI; |
| 1065 | MachineBasicBlock::reverse_iterator Rend = MBB.rend(); |
| 1066 | ++Ins; |
| 1067 | unsigned CRBitSpillDistance = 0; |
| 1068 | bool SeenUse = false; |
| 1069 | for (; Ins != Rend; ++Ins) { |
| 1070 | // Definition found. |
| 1071 | if (Ins->modifiesRegister(Reg: SrcReg, TRI)) |
| 1072 | break; |
| 1073 | // Use found. |
| 1074 | if (Ins->readsRegister(Reg: SrcReg, TRI)) |
| 1075 | SeenUse = true; |
| 1076 | // Unable to find CR bit definition within maximum search distance. |
| 1077 | if (CRBitSpillDistance == MaxCRBitSpillDist) { |
| 1078 | Ins = MI; |
| 1079 | break; |
| 1080 | } |
| 1081 | // Skip debug instructions when counting CR bit spill distance. |
| 1082 | if (!Ins->isDebugInstr()) |
| 1083 | CRBitSpillDistance++; |
| 1084 | } |
| 1085 | |
| 1086 | // Unable to find the definition of the CR bit in the MBB. |
| 1087 | if (Ins == MBB.rend()) |
| 1088 | Ins = MI; |
| 1089 | |
| 1090 | bool SpillsKnownBit = false; |
| 1091 | // There is no need to extract the CR bit if its value is already known. |
| 1092 | switch (Ins->getOpcode()) { |
| 1093 | case PPC::CRUNSET: |
| 1094 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::LI8 : PPC::LI), DestReg: Reg) |
| 1095 | .addImm(Val: 0); |
| 1096 | SpillsKnownBit = true; |
| 1097 | break; |
| 1098 | case PPC::CRSET: |
| 1099 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::LIS8 : PPC::LIS), DestReg: Reg) |
| 1100 | .addImm(Val: -32768); |
| 1101 | SpillsKnownBit = true; |
| 1102 | break; |
| 1103 | default: |
| 1104 | // When spilling a CR bit, the super register may not be explicitly defined |
| 1105 | // (i.e. it can be defined by a CR-logical that only defines the subreg) so |
| 1106 | // we state that the CR field is undef. Also, in order to preserve the kill |
| 1107 | // flag on the CR bit, we add it as an implicit use. |
| 1108 | |
| 1109 | // On Power10, we can use SETNBC to spill all CR bits. SETNBC will set all |
| 1110 | // bits (specifically, it produces a -1 if the CR bit is set). Ultimately, |
| 1111 | // the bit that is of importance to us is bit 32 (bit 0 of a 32-bit |
| 1112 | // register), and SETNBC will set this. |
| 1113 | if (Subtarget.isISA3_1()) { |
| 1114 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::SETNBC8 : PPC::SETNBC), DestReg: Reg) |
| 1115 | .addReg(RegNo: SrcReg, Flags: RegState::Undef) |
| 1116 | .addReg(RegNo: SrcReg, Flags: RegState::Implicit | |
| 1117 | getKillRegState(B: MI.getOperand(i: 0).isKill())); |
| 1118 | break; |
| 1119 | } |
| 1120 | |
| 1121 | // On Power9, we can use SETB to extract the LT bit. This only works for |
| 1122 | // the LT bit since SETB produces -1/1/0 for LT/GT/<neither>. So the value |
| 1123 | // of the bit we care about (32-bit sign bit) will be set to the value of |
| 1124 | // the LT bit (regardless of the other bits in the CR field). |
| 1125 | if (Subtarget.isISA3_0()) { |
| 1126 | if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR1LT || |
| 1127 | SrcReg == PPC::CR2LT || SrcReg == PPC::CR3LT || |
| 1128 | SrcReg == PPC::CR4LT || SrcReg == PPC::CR5LT || |
| 1129 | SrcReg == PPC::CR6LT || SrcReg == PPC::CR7LT) { |
| 1130 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::SETB8 : PPC::SETB), DestReg: Reg) |
| 1131 | .addReg(RegNo: getCRFromCRBit(SrcReg), Flags: RegState::Undef) |
| 1132 | .addReg(RegNo: SrcReg, Flags: RegState::Implicit | |
| 1133 | getKillRegState(B: MI.getOperand(i: 0).isKill())); |
| 1134 | break; |
| 1135 | } |
| 1136 | } |
| 1137 | |
| 1138 | // We need to move the CR field that contains the CR bit we are spilling. |
| 1139 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), DestReg: Reg) |
| 1140 | .addReg(RegNo: getCRFromCRBit(SrcReg), Flags: RegState::Undef) |
| 1141 | .addReg(RegNo: SrcReg, |
| 1142 | Flags: RegState::Implicit | getKillRegState(B: MI.getOperand(i: 0).isKill())); |
| 1143 | |
| 1144 | // If the saved register wasn't CR0LT, shift the bits left so that the bit |
| 1145 | // to store is the first one. Mask all but that bit. |
| 1146 | Register Reg1 = Reg; |
| 1147 | Reg = MF.getRegInfo().createVirtualRegister(RegClass: LP64 ? G8RC : GPRC); |
| 1148 | |
| 1149 | // rlwinm rA, rA, ShiftBits, 0, 0. |
| 1150 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::RLWINM8 : PPC::RLWINM), DestReg: Reg) |
| 1151 | .addReg(RegNo: Reg1, Flags: RegState::Kill) |
| 1152 | .addImm(Val: getEncodingValue(Reg: SrcReg)) |
| 1153 | .addImm(Val: 0).addImm(Val: 0); |
| 1154 | } |
| 1155 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::STW8 : PPC::STW)) |
| 1156 | .addReg(RegNo: Reg, Flags: RegState::Kill), |
| 1157 | FI: FrameIndex); |
| 1158 | |
| 1159 | bool KillsCRBit = MI.killsRegister(Reg: SrcReg, TRI); |
| 1160 | // Discard the pseudo instruction. |
| 1161 | MBB.erase(I: II); |
| 1162 | if (SpillsKnownBit && KillsCRBit && !SeenUse) { |
| 1163 | Ins->setDesc(TII.get(Opcode: PPC::UNENCODED_NOP)); |
| 1164 | Ins->removeOperand(OpNo: 0); |
| 1165 | } |
| 1166 | } |
| 1167 | |
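| | /// lowerCRBitRestore - Reload a CR bit spilled by lowerCRBitSpilling: load |
| | /// the saved word, then use mfocrf/rlwimi/mtocrf to insert the bit back into |
| | /// its CR field without disturbing the field's other bits. |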
| 1168 | void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II, |
| 1169 | unsigned FrameIndex) const { |
| 1170 | // Get the instruction. |
| 1171 | MachineInstr &MI = *II; // ; <DestReg> = RESTORE_CRBIT <offset> |
| 1172 | // Get the instruction's basic block. |
| 1173 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1174 | MachineFunction &MF = *MBB.getParent(); |
| 1175 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1176 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1177 | DebugLoc dl = MI.getDebugLoc(); |
| 1178 | |
| 1179 | bool LP64 = TM.isPPC64(); |
| 1180 | const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; |
| 1181 | const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; |
| 1182 | |
| 1183 | Register Reg = MF.getRegInfo().createVirtualRegister(RegClass: LP64 ? G8RC : GPRC); |
| 1184 | Register DestReg = MI.getOperand(i: 0).getReg(); |
| 1185 | assert(MI.definesRegister(DestReg, /*TRI=*/nullptr) && |
| 1186 | "RESTORE_CRBIT does not define its destination" ); |
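| | // Roughly, the restore expands to: |
| | //   lwz    rA, <FI>(r1) |
| | //   mfocrf rB, <CR field containing DestReg> |
| | //   rlwimi rB, rA, 32 - ShiftBits, ShiftBits, ShiftBits |
| | //   mtocrf <CR field>, rB |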
| 1187 | |
| 1188 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::LWZ8 : PPC::LWZ), |
| 1189 | DestReg: Reg), FI: FrameIndex); |
| 1190 | |
| 1191 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: TargetOpcode::IMPLICIT_DEF), DestReg); |
| 1192 | |
| 1193 | Register RegO = MF.getRegInfo().createVirtualRegister(RegClass: LP64 ? G8RC : GPRC); |
| 1194 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), DestReg: RegO) |
| 1195 | .addReg(RegNo: getCRFromCRBit(SrcReg: DestReg)); |
| 1196 | |
| 1197 | unsigned ShiftBits = getEncodingValue(Reg: DestReg); |
| 1198 | // rlwimi r11, r10, 32-ShiftBits, ..., ... |
| 1199 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::RLWIMI8 : PPC::RLWIMI), DestReg: RegO) |
| 1200 | .addReg(RegNo: RegO, Flags: RegState::Kill) |
| 1201 | .addReg(RegNo: Reg, Flags: RegState::Kill) |
| 1202 | .addImm(Val: ShiftBits ? 32 - ShiftBits : 0) |
| 1203 | .addImm(Val: ShiftBits) |
| 1204 | .addImm(Val: ShiftBits); |
| 1205 | |
| 1206 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), |
| 1207 | DestReg: getCRFromCRBit(SrcReg: DestReg)) |
| 1208 | .addReg(RegNo: RegO, Flags: RegState::Kill) |
| 1209 | // Make sure we have a use dependency all the way through this |
| 1210 | // sequence of instructions. We can't have the other bits in the CR |
| 1211 | // modified in between the mfocrf and the mtocrf. |
| 1212 | .addReg(RegNo: getCRFromCRBit(SrcReg: DestReg), Flags: RegState::Implicit); |
| 1213 | |
| 1214 | // Discard the pseudo instruction. |
| 1215 | MBB.erase(I: II); |
| 1216 | } |
| 1217 | |
| 1218 | void PPCRegisterInfo::emitAccCopyInfo(MachineBasicBlock &MBB, |
| 1219 | MCRegister DestReg, MCRegister SrcReg) { |
| 1220 | #ifdef NDEBUG |
| 1221 | return; |
| 1222 | #else |
| 1223 | if (ReportAccMoves) { |
| 1224 | std::string Dest = PPC::ACCRCRegClass.contains(DestReg) ? "acc" : "uacc" ; |
| 1225 | std::string Src = PPC::ACCRCRegClass.contains(SrcReg) ? "acc" : "uacc" ; |
| 1226 | dbgs() << "Emitting copy from " << Src << " to " << Dest << ":\n" ; |
| 1227 | MBB.dump(); |
| 1228 | } |
| 1229 | #endif |
| 1230 | } |
| 1231 | |
| 1232 | static void emitAccSpillRestoreInfo(MachineBasicBlock &MBB, bool IsPrimed, |
| 1233 | bool IsRestore) { |
| 1234 | #ifdef NDEBUG |
| 1235 | return; |
| 1236 | #else |
| 1237 | if (ReportAccMoves) { |
| 1238 | dbgs() << "Emitting " << (IsPrimed ? "acc" : "uacc" ) << " register " |
| 1239 | << (IsRestore ? "restore" : "spill" ) << ":\n" ; |
| 1240 | MBB.dump(); |
| 1241 | } |
| 1242 | #endif |
| 1243 | } |
| 1244 | |
| 1245 | void PPCRegisterInfo::spillRegPair(MachineBasicBlock &MBB, |
| 1246 | MachineBasicBlock::iterator II, DebugLoc DL, |
| 1247 | const TargetInstrInfo &TII, |
| 1248 | unsigned FrameIndex, bool IsLittleEndian, |
| 1249 | bool IsKilled, Register Reg, |
| 1250 | int Offset) const { |
| 1251 | |
| 1252 | // This function does not support virtual registers. |
| 1253 | assert(!Reg.isVirtual() && |
| 1254 | "Spilling register pairs does not support virtual registers." ); |
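| | // The pair is split into two STXV stores of the sub_vsx0 and sub_vsx1 halves; |
| | // the second half goes 16 bytes below the given offset on little-endian |
| | // targets and 16 bytes above on big-endian ones, mirroring the layout a |
| | // single STXVP of the pair would produce. |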
| 1255 | |
| 1256 | addFrameReference( |
| 1257 | MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::STXV)) |
| 1258 | .addReg(RegNo: TargetRegisterInfo::getSubReg(Reg, Idx: PPC::sub_vsx0), |
| 1259 | Flags: getKillRegState(B: IsKilled)), |
| 1260 | FI: FrameIndex, Offset); |
| 1261 | |
| 1262 | addFrameReference( |
| 1263 | MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::STXV)) |
| 1264 | .addReg(RegNo: TargetRegisterInfo::getSubReg(Reg, Idx: PPC::sub_vsx1), |
| 1265 | Flags: getKillRegState(B: IsKilled)), |
| 1266 | FI: FrameIndex, Offset: IsLittleEndian ? Offset - 16 : Offset + 16); |
| 1267 | } |
| 1268 | |
| 1269 | /// Split any STXVP[X] instructions into a pair of STXV[X] instructions if |
| 1270 | /// --disable-auto-paired-vec-st is specified on the command line. |
| 1272 | void PPCRegisterInfo::lowerOctWordSpilling(MachineBasicBlock::iterator II, |
| 1273 | unsigned FrameIndex) const { |
| 1274 | assert(DisableAutoPairedVecSt && |
| 1275 | "Expecting to do this only if paired vector stores are disabled." ); |
| 1276 | MachineInstr &MI = *II; // STXVP <SrcReg>, <offset> |
| 1277 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1278 | MachineFunction &MF = *MBB.getParent(); |
| 1279 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1280 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1281 | DebugLoc DL = MI.getDebugLoc(); |
| 1282 | Register SrcReg = MI.getOperand(i: 0).getReg(); |
| 1283 | bool IsLittleEndian = Subtarget.isLittleEndian(); |
| 1284 | bool IsKilled = MI.getOperand(i: 0).isKill(); |
| 1285 | |
| 1286 | spillRegPair(MBB, II, DL, TII, FrameIndex, IsLittleEndian, IsKilled, Reg: SrcReg, |
| 1287 | Offset: IsLittleEndian ? 16 : 0); |
| 1288 | |
| 1289 | // Discard the original instruction. |
| 1290 | MBB.erase(I: II); |
| 1291 | } |
| 1292 | |
| 1293 | static void emitWAccSpillRestoreInfo(MachineBasicBlock &MBB, bool IsRestore) { |
| 1294 | #ifdef NDEBUG |
| 1295 | return; |
| 1296 | #else |
| 1297 | if (ReportAccMoves) { |
| 1298 | dbgs() << "Emitting wacc register " << (IsRestore ? "restore" : "spill" ) |
| 1299 | << ":\n" ; |
| 1300 | MBB.dump(); |
| 1301 | } |
| 1302 | #endif |
| 1303 | } |
| 1304 | |
| 1305 | /// lowerACCSpilling - Generate the code for spilling the accumulator register. |
| 1306 | /// Similarly to other spills/reloads that use pseudo-ops, we do not actually |
| 1307 | /// eliminate the FrameIndex here nor compute the stack offset. We simply |
| 1308 | /// create a real instruction with an FI and rely on eliminateFrameIndex to |
| 1309 | /// handle the FI elimination. |
| 1310 | void PPCRegisterInfo::lowerACCSpilling(MachineBasicBlock::iterator II, |
| 1311 | unsigned FrameIndex) const { |
| 1312 | MachineInstr &MI = *II; // SPILL_ACC <SrcReg>, <offset> |
| 1313 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1314 | MachineFunction &MF = *MBB.getParent(); |
| 1315 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1316 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1317 | DebugLoc DL = MI.getDebugLoc(); |
| 1318 | Register SrcReg = MI.getOperand(i: 0).getReg(); |
| 1319 | bool IsKilled = MI.getOperand(i: 0).isKill(); |
| 1320 | |
| 1321 | bool IsPrimed = PPC::ACCRCRegClass.contains(Reg: SrcReg); |
| 1322 | bool IsLittleEndian = Subtarget.isLittleEndian(); |
| 1323 | |
| 1324 | emitAccSpillRestoreInfo(MBB, IsPrimed, IsRestore: false); |
| 1325 | |
| 1326 | // De-prime the register being spilled, create two stores for the pair |
| 1327 | // subregisters accounting for endianness and then re-prime the register if |
| 1328 | // it isn't killed. This uses the Offset parameter to addFrameReference() to |
| 1329 | // adjust the offset of the store that is within the 64-byte stack slot. |
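| | // Roughly, for a primed (acc) source on a little-endian target: |
| | //   xxmfacc <acc>             ; de-prime into the underlying VSRs |
| | //   stxvp   <pair0>, 32(<FI>) |
| | //   stxvp   <pair1>,  0(<FI>) |
| | //   xxmtacc <acc>             ; re-prime, only if the source is not killed |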
| 1330 | if (IsPrimed) |
| 1331 | BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::XXMFACC), DestReg: SrcReg).addReg(RegNo: SrcReg); |
| 1332 | if (DisableAutoPairedVecSt) { |
| 1333 | spillRegPair(MBB, II, DL, TII, FrameIndex, IsLittleEndian, IsKilled, |
| 1334 | Reg: TargetRegisterInfo::getSubReg(Reg: SrcReg, Idx: PPC::sub_pair0), |
| 1335 | Offset: IsLittleEndian ? 48 : 0); |
| 1336 | spillRegPair(MBB, II, DL, TII, FrameIndex, IsLittleEndian, IsKilled, |
| 1337 | Reg: TargetRegisterInfo::getSubReg(Reg: SrcReg, Idx: PPC::sub_pair1), |
| 1338 | Offset: IsLittleEndian ? 16 : 32); |
| 1339 | } else { |
| 1340 | addFrameReference( |
| 1341 | MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::STXVP)) |
| 1342 | .addReg(RegNo: TargetRegisterInfo::getSubReg(Reg: SrcReg, Idx: PPC::sub_pair0), |
| 1343 | Flags: getKillRegState(B: IsKilled)), |
| 1344 | FI: FrameIndex, Offset: IsLittleEndian ? 32 : 0); |
| 1345 | addFrameReference( |
| 1346 | MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::STXVP)) |
| 1347 | .addReg(RegNo: TargetRegisterInfo::getSubReg(Reg: SrcReg, Idx: PPC::sub_pair1), |
| 1348 | Flags: getKillRegState(B: IsKilled)), |
| 1349 | FI: FrameIndex, Offset: IsLittleEndian ? 0 : 32); |
| 1350 | } |
| 1351 | if (IsPrimed && !IsKilled) |
| 1352 | BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::XXMTACC), DestReg: SrcReg).addReg(RegNo: SrcReg); |
| 1353 | |
| 1354 | // Discard the pseudo instruction. |
| 1355 | MBB.erase(I: II); |
| 1356 | } |
| 1357 | |
| 1358 | /// lowerACCRestore - Generate the code to restore the accumulator register. |
| 1359 | void PPCRegisterInfo::lowerACCRestore(MachineBasicBlock::iterator II, |
| 1360 | unsigned FrameIndex) const { |
| 1361 | MachineInstr &MI = *II; // <DestReg> = RESTORE_ACC <offset> |
| 1362 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1363 | MachineFunction &MF = *MBB.getParent(); |
| 1364 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1365 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1366 | DebugLoc DL = MI.getDebugLoc(); |
| 1367 | |
| 1368 | Register DestReg = MI.getOperand(i: 0).getReg(); |
| 1369 | assert(MI.definesRegister(DestReg, /*TRI=*/nullptr) && |
| 1370 | "RESTORE_ACC does not define its destination" ); |
| 1371 | |
| 1372 | bool IsPrimed = PPC::ACCRCRegClass.contains(Reg: DestReg); |
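| | // Each accumulator overlaps two consecutive VSR pairs, so ACCn/UACCn is |
| | // restored through VSRp(2n) and VSRp(2n+1). |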
| 1373 | Register Reg = |
| 1374 | PPC::VSRp0 + (DestReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2; |
| 1375 | bool IsLittleEndian = Subtarget.isLittleEndian(); |
| 1376 | |
| 1377 | emitAccSpillRestoreInfo(MBB, IsPrimed, IsRestore: true); |
| 1378 | |
| 1379 | // Create two loads for the pair subregisters accounting for endianness and |
| 1380 | // then prime the accumulator register being restored. |
| 1381 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::LXVP), DestReg: Reg), |
| 1382 | FI: FrameIndex, Offset: IsLittleEndian ? 32 : 0); |
| 1383 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::LXVP), DestReg: Reg + 1), |
| 1384 | FI: FrameIndex, Offset: IsLittleEndian ? 0 : 32); |
| 1385 | if (IsPrimed) |
| 1386 | BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::XXMTACC), DestReg).addReg(RegNo: DestReg); |
| 1387 | |
| 1388 | // Discard the pseudo instruction. |
| 1389 | MBB.erase(I: II); |
| 1390 | } |
| 1391 | |
| 1392 | /// lowerWACCSpilling - Generate the code for spilling the wide accumulator |
| 1393 | /// register. |
| 1394 | void PPCRegisterInfo::lowerWACCSpilling(MachineBasicBlock::iterator II, |
| 1395 | unsigned FrameIndex) const { |
| 1396 | MachineInstr &MI = *II; // SPILL_WACC <SrcReg>, <offset> |
| 1397 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1398 | MachineFunction &MF = *MBB.getParent(); |
| 1399 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1400 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1401 | DebugLoc DL = MI.getDebugLoc(); |
| 1402 | bool IsLittleEndian = Subtarget.isLittleEndian(); |
| 1403 | |
| 1404 | emitWAccSpillRestoreInfo(MBB, IsRestore: false); |
| 1405 | |
| 1406 | const TargetRegisterClass *RC = &PPC::VSRpRCRegClass; |
| 1407 | Register VSRpReg0 = MF.getRegInfo().createVirtualRegister(RegClass: RC); |
| 1408 | Register VSRpReg1 = MF.getRegInfo().createVirtualRegister(RegClass: RC); |
| 1409 | Register SrcReg = MI.getOperand(i: 0).getReg(); |
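| | // Roughly: DMXXEXTFDMR512 extracts the 512-bit wacc contents into two VSR |
| | // pairs, which are then stored with two stxvp instructions at offsets 0 and |
| | // 32 of the slot (order depending on endianness). |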
| 1410 | |
| 1411 | BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::DMXXEXTFDMR512), DestReg: VSRpReg0) |
| 1412 | .addDef(RegNo: VSRpReg1) |
| 1413 | .addReg(RegNo: SrcReg); |
| 1414 | |
| 1415 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::STXVP)) |
| 1416 | .addReg(RegNo: VSRpReg0, Flags: RegState::Kill), |
| 1417 | FI: FrameIndex, Offset: IsLittleEndian ? 32 : 0); |
| 1418 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::STXVP)) |
| 1419 | .addReg(RegNo: VSRpReg1, Flags: RegState::Kill), |
| 1420 | FI: FrameIndex, Offset: IsLittleEndian ? 0 : 32); |
| 1421 | |
| 1422 | // Discard the pseudo instruction. |
| 1423 | MBB.erase(I: II); |
| 1424 | } |
| 1425 | |
| 1426 | /// lowerWACCRestore - Generate the code to restore the wide accumulator |
| 1427 | /// register. |
| 1428 | void PPCRegisterInfo::lowerWACCRestore(MachineBasicBlock::iterator II, |
| 1429 | unsigned FrameIndex) const { |
| 1430 | MachineInstr &MI = *II; // <DestReg> = RESTORE_WACC <offset> |
| 1431 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1432 | MachineFunction &MF = *MBB.getParent(); |
| 1433 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1434 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1435 | DebugLoc DL = MI.getDebugLoc(); |
| 1436 | bool IsLittleEndian = Subtarget.isLittleEndian(); |
| 1437 | |
| 1438 | emitWAccSpillRestoreInfo(MBB, IsRestore: true); |
| 1439 | |
| 1440 | const TargetRegisterClass *RC = &PPC::VSRpRCRegClass; |
| 1441 | Register VSRpReg0 = MF.getRegInfo().createVirtualRegister(RegClass: RC); |
| 1442 | Register VSRpReg1 = MF.getRegInfo().createVirtualRegister(RegClass: RC); |
| 1443 | Register DestReg = MI.getOperand(i: 0).getReg(); |
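| | // Roughly the inverse of the spill: reload the two VSR pairs with lxvp and |
| | // reassemble the 512-bit wacc contents with DMXXINSTDMR512. |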
| 1444 | |
| 1445 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::LXVP), DestReg: VSRpReg0), |
| 1446 | FI: FrameIndex, Offset: IsLittleEndian ? 32 : 0); |
| 1447 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::LXVP), DestReg: VSRpReg1), |
| 1448 | FI: FrameIndex, Offset: IsLittleEndian ? 0 : 32); |
| 1449 | |
| 1450 | // VSRpReg0 and VSRpReg1 are killed here (RegState::Kill). |
| 1451 | BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::DMXXINSTDMR512), DestReg) |
| 1452 | .addReg(RegNo: VSRpReg0, Flags: RegState::Kill) |
| 1453 | .addReg(RegNo: VSRpReg1, Flags: RegState::Kill); |
| 1454 | |
| 1455 | // Discard the pseudo instruction. |
| 1456 | MBB.erase(I: II); |
| 1457 | } |
| 1458 | |
| 1459 | /// lowerQuadwordSpilling - Generate code to spill a paired general register. |
| 1460 | void PPCRegisterInfo::lowerQuadwordSpilling(MachineBasicBlock::iterator II, |
| 1461 | unsigned FrameIndex) const { |
| 1462 | MachineInstr &MI = *II; |
| 1463 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1464 | MachineFunction &MF = *MBB.getParent(); |
| 1465 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1466 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1467 | DebugLoc DL = MI.getDebugLoc(); |
| 1468 | |
| 1469 | Register SrcReg = MI.getOperand(i: 0).getReg(); |
| 1470 | bool IsKilled = MI.getOperand(i: 0).isKill(); |
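| | // A G8p pair register covers two consecutive GPRs (G8pN maps to X(2N) and |
| | // X(2N+1)); each half is stored with a separate std at offsets 0 and 8, in |
| | // endian-dependent order. |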
| 1471 | |
| 1472 | Register Reg = PPC::X0 + (SrcReg - PPC::G8p0) * 2; |
| 1473 | bool IsLittleEndian = Subtarget.isLittleEndian(); |
| 1474 | |
| 1475 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::STD)) |
| 1476 | .addReg(RegNo: Reg, Flags: getKillRegState(B: IsKilled)), |
| 1477 | FI: FrameIndex, Offset: IsLittleEndian ? 8 : 0); |
| 1478 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::STD)) |
| 1479 | .addReg(RegNo: Reg + 1, Flags: getKillRegState(B: IsKilled)), |
| 1480 | FI: FrameIndex, Offset: IsLittleEndian ? 0 : 8); |
| 1481 | |
| 1482 | // Discard the pseudo instruction. |
| 1483 | MBB.erase(I: II); |
| 1484 | } |
| 1485 | |
| 1486 | /// lowerQuadwordRestore - Generate code to restore a paired general register. |
| 1487 | void PPCRegisterInfo::lowerQuadwordRestore(MachineBasicBlock::iterator II, |
| 1488 | unsigned FrameIndex) const { |
| 1489 | MachineInstr &MI = *II; |
| 1490 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1491 | MachineFunction &MF = *MBB.getParent(); |
| 1492 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1493 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1494 | DebugLoc DL = MI.getDebugLoc(); |
| 1495 | |
| 1496 | Register DestReg = MI.getOperand(i: 0).getReg(); |
| 1497 | assert(MI.definesRegister(DestReg, /*TRI=*/nullptr) && |
| 1498 | "RESTORE_QUADWORD does not define its destination" ); |
| 1499 | |
| 1500 | Register Reg = PPC::X0 + (DestReg - PPC::G8p0) * 2; |
| 1501 | bool IsLittleEndian = Subtarget.isLittleEndian(); |
| 1502 | |
| 1503 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::LD), DestReg: Reg), FI: FrameIndex, |
| 1504 | Offset: IsLittleEndian ? 8 : 0); |
| 1505 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::LD), DestReg: Reg + 1), FI: FrameIndex, |
| 1506 | Offset: IsLittleEndian ? 0 : 8); |
| 1507 | |
| 1508 | // Discard the pseudo instruction. |
| 1509 | MBB.erase(I: II); |
| 1510 | } |
| 1511 | |
| 1512 | /// lowerDMRSpilling - Generate the code for spilling the DMR register. |
| 1513 | void PPCRegisterInfo::lowerDMRSpilling(MachineBasicBlock::iterator II, |
| 1514 | unsigned FrameIndex) const { |
| 1515 | MachineInstr &MI = *II; // SPILL_DMR <SrcReg>, <offset> |
| 1516 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1517 | MachineFunction &MF = *MBB.getParent(); |
| 1518 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1519 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1520 | DebugLoc DL = MI.getDebugLoc(); |
| 1521 | bool IsLittleEndian = Subtarget.isLittleEndian(); |
| 1522 | |
| 1523 | // A DMR register is made up of WACC and WACC_HI halves, so use DMXXEXTFDMR512 |
| 1524 | // (and DMXXEXTFDMR512_HI) to extract and spill the corresponding 512 bits. |
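| | // A single DMR spill occupies a 128-byte slot: each 512-bit half is stored |
| | // as two 32-byte stxvp chunks, and the chunk order within the slot is |
| | // reversed on little-endian targets (hence the paired BE/LE indices below). |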
| 1525 | const TargetRegisterClass *RC = &PPC::VSRpRCRegClass; |
| 1526 | auto spillDMR = [&](Register SrcReg, int BEIdx, int LEIdx) { |
| 1527 | auto spillWACC = [&](unsigned Opc, unsigned RegIdx, int IdxBE, int IdxLE) { |
| 1528 | Register VSRpReg0 = MF.getRegInfo().createVirtualRegister(RegClass: RC); |
| 1529 | Register VSRpReg1 = MF.getRegInfo().createVirtualRegister(RegClass: RC); |
| 1530 | |
| 1531 | BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: Opc), DestReg: VSRpReg0) |
| 1532 | .addDef(RegNo: VSRpReg1) |
| 1533 | .addReg(RegNo: TargetRegisterInfo::getSubReg(Reg: SrcReg, Idx: RegIdx)); |
| 1534 | |
| 1535 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::STXVP)) |
| 1536 | .addReg(RegNo: VSRpReg0, Flags: RegState::Kill), |
| 1537 | FI: FrameIndex, Offset: IsLittleEndian ? IdxLE : IdxBE); |
| 1538 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::STXVP)) |
| 1539 | .addReg(RegNo: VSRpReg1, Flags: RegState::Kill), |
| 1540 | FI: FrameIndex, Offset: IsLittleEndian ? IdxLE - 32 : IdxBE + 32); |
| 1541 | }; |
| 1542 | spillWACC(PPC::DMXXEXTFDMR512, PPC::sub_wacc_lo, BEIdx, LEIdx); |
| 1543 | spillWACC(PPC::DMXXEXTFDMR512_HI, PPC::sub_wacc_hi, BEIdx + 64, LEIdx - 64); |
| 1544 | }; |
| 1545 | |
| 1546 | Register SrcReg = MI.getOperand(i: 0).getReg(); |
| 1547 | if (MI.getOpcode() == PPC::SPILL_DMRP) { |
| 1548 | spillDMR(TargetRegisterInfo::getSubReg(Reg: SrcReg, Idx: PPC::sub_dmr1), 0, 96); |
| 1549 | spillDMR(TargetRegisterInfo::getSubReg(Reg: SrcReg, Idx: PPC::sub_dmr0), 128, 224); |
| 1550 | } else |
| 1551 | spillDMR(SrcReg, 0, 96); |
| 1552 | |
| 1553 | // Discard the pseudo instruction. |
| 1554 | MBB.erase(I: II); |
| 1555 | } |
| 1556 | |
| 1557 | /// lowerDMRRestore - Generate the code to restore the DMR register. |
| 1558 | void PPCRegisterInfo::lowerDMRRestore(MachineBasicBlock::iterator II, |
| 1559 | unsigned FrameIndex) const { |
| 1560 | MachineInstr &MI = *II; // <DestReg> = RESTORE_DMR[P] <offset> |
| 1561 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1562 | MachineFunction &MF = *MBB.getParent(); |
| 1563 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1564 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1565 | DebugLoc DL = MI.getDebugLoc(); |
| 1566 | bool IsLittleEndian = Subtarget.isLittleEndian(); |
| 1567 | |
| 1568 | const TargetRegisterClass *RC = &PPC::VSRpRCRegClass; |
| 1569 | auto restoreDMR = [&](Register DestReg, int BEIdx, int LEIdx) { |
| 1570 | auto restoreWACC = [&](unsigned Opc, unsigned RegIdx, int IdxBE, |
| 1571 | int IdxLE) { |
| 1572 | Register VSRpReg0 = MF.getRegInfo().createVirtualRegister(RegClass: RC); |
| 1573 | Register VSRpReg1 = MF.getRegInfo().createVirtualRegister(RegClass: RC); |
| 1574 | |
| 1575 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::LXVP), DestReg: VSRpReg0), |
| 1576 | FI: FrameIndex, Offset: IsLittleEndian ? IdxLE : IdxBE); |
| 1577 | addFrameReference(MIB: BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: PPC::LXVP), DestReg: VSRpReg1), |
| 1578 | FI: FrameIndex, Offset: IsLittleEndian ? IdxLE - 32 : IdxBE + 32); |
| 1579 | |
| 1580 | // The virtual registers are killed here (RegState::Kill). |
| 1581 | BuildMI(BB&: MBB, I: II, MIMD: DL, MCID: TII.get(Opcode: Opc), |
| 1582 | DestReg: TargetRegisterInfo::getSubReg(Reg: DestReg, Idx: RegIdx)) |
| 1583 | .addReg(RegNo: VSRpReg0, Flags: RegState::Kill) |
| 1584 | .addReg(RegNo: VSRpReg1, Flags: RegState::Kill); |
| 1585 | }; |
| 1586 | restoreWACC(PPC::DMXXINSTDMR512, PPC::sub_wacc_lo, BEIdx, LEIdx); |
| 1587 | restoreWACC(PPC::DMXXINSTDMR512_HI, PPC::sub_wacc_hi, BEIdx + 64, |
| 1588 | LEIdx - 64); |
| 1589 | }; |
| 1590 | |
| 1591 | Register DestReg = MI.getOperand(i: 0).getReg(); |
| 1592 | if (MI.getOpcode() == PPC::RESTORE_DMRP) { |
| 1593 | restoreDMR(TargetRegisterInfo::getSubReg(Reg: DestReg, Idx: PPC::sub_dmr1), 0, 96); |
| 1594 | restoreDMR(TargetRegisterInfo::getSubReg(Reg: DestReg, Idx: PPC::sub_dmr0), 128, 224); |
| 1595 | } else |
| 1596 | restoreDMR(DestReg, 0, 96); |
| 1597 | |
| 1598 | // Discard the pseudo instruction. |
| 1599 | MBB.erase(I: II); |
| 1600 | } |
| 1601 | |
| 1602 | bool PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF, |
| 1603 | Register Reg, int &FrameIdx) const { |
| 1604 | // For the nonvolatile condition registers (CR2, CR3, CR4) return true to |
| 1605 | // prevent allocating an additional frame slot. |
| 1606 | // For 64-bit ELF and AIX, the CR save area is in the linkage area at SP+8, |
| 1607 | // for 32-bit AIX the CR save area is in the linkage area at SP+4. |
| 1608 | // We have created a FrameIndex to that spill slot to keep the CalleeSavedInfo |
| 1609 | // entries valid. |
| 1610 | // For 32-bit ELF, we have previously created the stack slot if needed, so |
| 1611 | // return its FrameIdx. |
| 1612 | if (PPC::CR2 <= Reg && Reg <= PPC::CR4) { |
| 1613 | FrameIdx = MF.getInfo<PPCFunctionInfo>()->getCRSpillFrameIndex(); |
| 1614 | return true; |
| 1615 | } |
| 1616 | return false; |
| 1617 | } |
| 1618 | |
| 1619 | // If the offset must be a multiple of some value, return what that value is. |
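| | // DS-form memory ops (e.g. ld/std/lwa) encode a displacement that is |
| | // implicitly shifted left by two, and DQ-form ops (e.g. lxv/stxv/lq/stq) |
| | // shift it by four, hence the 4- and 16-byte multiples below; the SPE |
| | // EVLDD/EVSTDD forms scale their offset by eight. |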
| 1620 | static unsigned offsetMinAlignForOpcode(unsigned OpC) { |
| 1621 | switch (OpC) { |
| 1622 | default: |
| 1623 | return 1; |
| 1624 | case PPC::LWA: |
| 1625 | case PPC::LWA_32: |
| 1626 | case PPC::LD: |
| 1627 | case PPC::LDU: |
| 1628 | case PPC::STD: |
| 1629 | case PPC::STDU: |
| 1630 | case PPC::DFLOADf32: |
| 1631 | case PPC::DFLOADf64: |
| 1632 | case PPC::DFSTOREf32: |
| 1633 | case PPC::DFSTOREf64: |
| 1634 | case PPC::LXSD: |
| 1635 | case PPC::LXSSP: |
| 1636 | case PPC::STXSD: |
| 1637 | case PPC::STXSSP: |
| 1638 | case PPC::STQ: |
| 1639 | return 4; |
| 1640 | case PPC::EVLDD: |
| 1641 | case PPC::EVSTDD: |
| 1642 | return 8; |
| 1643 | case PPC::LXV: |
| 1644 | case PPC::STXV: |
| 1645 | case PPC::LQ: |
| 1646 | case PPC::LXVP: |
| 1647 | case PPC::STXVP: |
| 1648 | return 16; |
| 1649 | } |
| 1650 | } |
| 1651 | |
| 1652 | // If the offset must be a multiple of some value, return what that value is. |
| 1653 | static unsigned offsetMinAlign(const MachineInstr &MI) { |
| 1654 | unsigned OpC = MI.getOpcode(); |
| 1655 | return offsetMinAlignForOpcode(OpC); |
| 1656 | } |
| 1657 | |
| 1658 | // Return the OffsetOperandNo given the FIOperandNum (and the instruction). |
| 1659 | static unsigned getOffsetONFromFION(const MachineInstr &MI, |
| 1660 | unsigned FIOperandNum) { |
| 1661 | // Take into account whether it's an add or mem instruction |
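| | // For D-form memory accesses the operands are (value, offset, base) with the |
| | // frame index in the base position (operand 2) and the offset at operand 1; |
| | // for addi they are (dest, base, imm) with the frame index at operand 1 and |
| | // the offset at operand 2. |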
| 1662 | unsigned OffsetOperandNo = (FIOperandNum == 2) ? 1 : 2; |
| 1663 | if (MI.isInlineAsm()) |
| 1664 | OffsetOperandNo = FIOperandNum - 1; |
| 1665 | else if (MI.getOpcode() == TargetOpcode::STACKMAP || |
| 1666 | MI.getOpcode() == TargetOpcode::PATCHPOINT) |
| 1667 | OffsetOperandNo = FIOperandNum + 1; |
| 1668 | |
| 1669 | return OffsetOperandNo; |
| 1670 | } |
| 1671 | |
| 1672 | bool |
| 1673 | PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, |
| 1674 | int SPAdj, unsigned FIOperandNum, |
| 1675 | RegScavenger *RS) const { |
| 1676 | assert(SPAdj == 0 && "Unexpected" ); |
| 1677 | |
| 1678 | // Get the instruction. |
| 1679 | MachineInstr &MI = *II; |
| 1680 | // Get the instruction's basic block. |
| 1681 | MachineBasicBlock &MBB = *MI.getParent(); |
| 1682 | // Get the basic block's function. |
| 1683 | MachineFunction &MF = *MBB.getParent(); |
| 1684 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1685 | // Get the instruction info. |
| 1686 | const PPCInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 1687 | // Get the frame info. |
| 1688 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 1689 | DebugLoc dl = MI.getDebugLoc(); |
| 1690 | |
| 1691 | unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum); |
| 1692 | |
| 1693 | // Get the frame index. |
| 1694 | int FrameIndex = MI.getOperand(i: FIOperandNum).getIndex(); |
| 1695 | |
| 1696 | // Get the frame pointer save index. Users of this index are primarily |
| 1697 | // DYNALLOC instructions. |
| 1698 | PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); |
| 1699 | int FPSI = FI->getFramePointerSaveIndex(); |
| 1700 | // Get the instruction opcode. |
| 1701 | unsigned OpC = MI.getOpcode(); |
| 1702 | |
| 1703 | switch (OpC) { |
| 1704 | default: |
| 1705 | break; |
| 1706 | case PPC::DYNAREAOFFSET: |
| 1707 | case PPC::DYNAREAOFFSET8: |
| 1708 | lowerDynamicAreaOffset(II); |
| 1709 | // lowerDynamicAreaOffset erases II |
| 1710 | return true; |
| 1711 | case PPC::DYNALLOC: |
| 1712 | case PPC::DYNALLOC8: { |
| 1713 | // Special case for dynamic alloca. |
| 1714 | if (FPSI && FrameIndex == FPSI) { |
| 1715 | lowerDynamicAlloc(II); // lowerDynamicAlloc erases II |
| 1716 | return true; |
| 1717 | } |
| 1718 | break; |
| 1719 | } |
| 1720 | case PPC::PREPARE_PROBED_ALLOCA_64: |
| 1721 | case PPC::PREPARE_PROBED_ALLOCA_32: |
| 1722 | case PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64: |
| 1723 | case PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32: { |
| 1724 | if (FPSI && FrameIndex == FPSI) { |
| 1725 | lowerPrepareProbedAlloca(II); // lowerPrepareProbedAlloca erases II |
| 1726 | return true; |
| 1727 | } |
| 1728 | break; |
| 1729 | } |
| 1730 | case PPC::SPILL_CR: |
| 1731 | // Special case for pseudo-ops SPILL_CR and RESTORE_CR, etc. |
| 1732 | lowerCRSpilling(II, FrameIndex); |
| 1733 | return true; |
| 1734 | case PPC::RESTORE_CR: |
| 1735 | lowerCRRestore(II, FrameIndex); |
| 1736 | return true; |
| 1737 | case PPC::SPILL_CRBIT: |
| 1738 | lowerCRBitSpilling(II, FrameIndex); |
| 1739 | return true; |
| 1740 | case PPC::RESTORE_CRBIT: |
| 1741 | lowerCRBitRestore(II, FrameIndex); |
| 1742 | return true; |
| 1743 | case PPC::SPILL_ACC: |
| 1744 | case PPC::SPILL_UACC: |
| 1745 | lowerACCSpilling(II, FrameIndex); |
| 1746 | return true; |
| 1747 | case PPC::RESTORE_ACC: |
| 1748 | case PPC::RESTORE_UACC: |
| 1749 | lowerACCRestore(II, FrameIndex); |
| 1750 | return true; |
| 1751 | case PPC::STXVP: { |
| 1752 | if (DisableAutoPairedVecSt) { |
| 1753 | lowerOctWordSpilling(II, FrameIndex); |
| 1754 | return true; |
| 1755 | } |
| 1756 | break; |
| 1757 | } |
| 1758 | case PPC::SPILL_WACC: |
| 1759 | lowerWACCSpilling(II, FrameIndex); |
| 1760 | return true; |
| 1761 | case PPC::RESTORE_WACC: |
| 1762 | lowerWACCRestore(II, FrameIndex); |
| 1763 | return true; |
| 1764 | case PPC::SPILL_DMRP: |
| 1765 | case PPC::SPILL_DMR: |
| 1766 | lowerDMRSpilling(II, FrameIndex); |
| 1767 | return true; |
| 1768 | case PPC::RESTORE_DMRP: |
| 1769 | case PPC::RESTORE_DMR: |
| 1770 | lowerDMRRestore(II, FrameIndex); |
| 1771 | return true; |
| 1772 | case PPC::SPILL_QUADWORD: |
| 1773 | lowerQuadwordSpilling(II, FrameIndex); |
| 1774 | return true; |
| 1775 | case PPC::RESTORE_QUADWORD: |
| 1776 | lowerQuadwordRestore(II, FrameIndex); |
| 1777 | return true; |
| 1778 | } |
| 1779 | |
| 1780 | // Replace the FrameIndex with the base register (for fixed, negative frame |
| | // indices) or with the frame register: r1 (SP) or r31 (FP). |
| 1781 | MI.getOperand(i: FIOperandNum).ChangeToRegister( |
| 1782 | Reg: FrameIndex < 0 ? getBaseRegister(MF) : getFrameRegister(MF), isDef: false); |
| 1783 | |
| 1784 | // If the instruction is not present in ImmToIdxMap, then it has no immediate |
| 1785 | // form (and must be r+r). |
| 1786 | bool noImmForm = !MI.isInlineAsm() && OpC != TargetOpcode::STACKMAP && |
| 1787 | OpC != TargetOpcode::PATCHPOINT && !ImmToIdxMap.count(Val: OpC); |
| 1788 | |
| 1789 | // Now add the frame object offset to the offset from r1. |
| 1790 | int64_t Offset = MFI.getObjectOffset(ObjectIdx: FrameIndex); |
| 1791 | Offset += MI.getOperand(i: OffsetOperandNo).getImm(); |
| 1792 | |
| 1793 | // If we're not using a Frame Pointer that has been set to the value of the |
| 1794 | // SP before having the stack size subtracted from it, then add the stack size |
| 1795 | // to Offset to get the correct offset. |
| 1796 | // Naked functions have stack size 0, although getStackSize may not reflect |
| 1797 | // that because we didn't call all the pieces that compute it for naked |
| 1798 | // functions. |
| 1799 | if (!MF.getFunction().hasFnAttribute(Kind: Attribute::Naked)) { |
| 1800 | if (!(hasBasePointer(MF) && FrameIndex < 0)) |
| 1801 | Offset += MFI.getStackSize(); |
| 1802 | } |
| 1803 | |
| 1804 | // If we encounter an LXVP/STXVP with an offset that doesn't fit, we can |
| 1805 | // transform it to the prefixed version so we don't have to use the XForm. |
| 1806 | if ((OpC == PPC::LXVP || OpC == PPC::STXVP) && |
| 1807 | (!isInt<16>(x: Offset) || (Offset % offsetMinAlign(MI)) != 0) && |
| 1808 | Subtarget.hasPrefixInstrs() && Subtarget.hasP10Vector()) { |
| 1809 | unsigned NewOpc = OpC == PPC::LXVP ? PPC::PLXVP : PPC::PSTXVP; |
| 1810 | MI.setDesc(TII.get(Opcode: NewOpc)); |
| 1811 | OpC = NewOpc; |
| 1812 | } |
| 1813 | |
| 1814 | // If we can, encode the offset directly into the instruction. If this is a |
| 1815 | // normal PPC "ri" instruction, any 16-bit value can be safely encoded. If |
| 1816 | // this is a PPC64 "ix" instruction, only a 16-bit value with the low two bits |
| 1817 | // clear can be encoded. This is extremely uncommon, because normally you |
| 1818 | // only "std" to a stack slot that is at least 4-byte aligned, but it can |
| 1819 | // happen in invalid code. |
| 1820 | assert(OpC != PPC::DBG_VALUE && |
| 1821 | "This should be handled in a target-independent way" ); |
| 1822 | // FIXME: This should be factored out to a separate function as prefixed |
| 1823 | // instructions add a number of opcodes for which we can use 34-bit imm. |
| 1824 | bool OffsetFitsMnemonic = (OpC == PPC::EVSTDD || OpC == PPC::EVLDD) ? |
| 1825 | isUInt<8>(x: Offset) : |
| 1826 | isInt<16>(x: Offset); |
| 1827 | if (TII.isPrefixed(Opcode: MI.getOpcode())) |
| 1828 | OffsetFitsMnemonic = isInt<34>(x: Offset); |
| 1829 | if (!noImmForm && ((OffsetFitsMnemonic && |
| 1830 | ((Offset % offsetMinAlign(MI)) == 0)) || |
| 1831 | OpC == TargetOpcode::STACKMAP || |
| 1832 | OpC == TargetOpcode::PATCHPOINT)) { |
| 1833 | MI.getOperand(i: OffsetOperandNo).ChangeToImmediate(ImmVal: Offset); |
| 1834 | return false; |
| 1835 | } |
| 1836 | |
| 1837 | // The offset doesn't fit into the instruction's immediate field, so scavenge |
| 1838 | // a register to materialize the offset in. |
| 1839 | |
| 1840 | bool is64Bit = TM.isPPC64(); |
| 1841 | const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; |
| 1842 | const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; |
| 1843 | const TargetRegisterClass *RC = is64Bit ? G8RC : GPRC; |
| 1844 | unsigned NewOpcode = 0u; |
| 1845 | bool ScavengingFailed = RS && RS->getRegsAvailable(RC).none() && |
| 1846 | RS->getRegsAvailable(RC: &PPC::VSFRCRegClass).any(); |
| 1847 | Register SRegHi, SReg, VSReg; |
| 1848 | |
| 1849 | // The register scavenger is unable to get a GPR but can get a VSR. We |
| 1850 | // need to stash a GPR into a VSR so that we can free one up. |
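| | // Roughly, with direct moves on a 64-bit target this produces: |
| | //   mtvsrd vsN, r4             ; stash r4 in a VSR |
| | //   <materialize Offset into r4> |
| | //   <memop> ..., <base>, r4    ; indexed form |
| | //   mfvsrd r4, vsN             ; restore r4 afterwards |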
| 1851 | if (ScavengingFailed && Subtarget.hasDirectMove()) { |
| 1852 | // Pick a volatile register and if we are spilling/restoring that |
| 1853 | // particular one, pick the next one. |
| 1854 | SRegHi = SReg = is64Bit ? PPC::X4 : PPC::R4; |
| 1855 | if (MI.getOperand(i: 0).getReg() == SReg) |
| 1856 | SRegHi = SReg = SReg + 1; |
| 1857 | VSReg = MF.getRegInfo().createVirtualRegister(RegClass: &PPC::VSFRCRegClass); |
| 1858 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: is64Bit ? PPC::MTVSRD : PPC::MTVSRWZ), DestReg: VSReg) |
| 1859 | .addReg(RegNo: SReg); |
| 1860 | } else { |
| 1861 | SRegHi = MF.getRegInfo().createVirtualRegister(RegClass: RC); |
| 1862 | SReg = MF.getRegInfo().createVirtualRegister(RegClass: RC); |
| 1863 | } |
| 1864 | |
| 1865 | // Insert a set of rA with the full offset value before the ld, st, or add |
| 1866 | if (isInt<16>(x: Offset)) |
| 1867 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: is64Bit ? PPC::LI8 : PPC::LI), DestReg: SReg) |
| 1868 | .addImm(Val: Offset); |
| 1869 | else if (isInt<32>(x: Offset)) { |
| 1870 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: is64Bit ? PPC::LIS8 : PPC::LIS), DestReg: SRegHi) |
| 1871 | .addImm(Val: Offset >> 16); |
| 1872 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: is64Bit ? PPC::ORI8 : PPC::ORI), DestReg: SReg) |
| 1873 | .addReg(RegNo: SRegHi, Flags: RegState::Kill) |
| 1874 | .addImm(Val: Offset); |
| 1875 | } else { |
| 1876 | assert(is64Bit && "Huge stack is only supported on PPC64" ); |
| 1877 | TII.materializeImmPostRA(MBB, MBBI: II, DL: dl, Reg: SReg, Imm: Offset); |
| 1878 | } |
| 1879 | |
| 1880 | // Convert into indexed form of the instruction: |
| 1881 | // |
| 1882 | //   sth 0:rA, 1:imm, 2:(rB) ==> sthx 0:rA, 2:rB, 1:r0 |
| 1883 | //   addi 0:rA, 1:rB, 2:imm ==> add 0:rA, 1:rB, 2:r0 |
| 1884 | unsigned OperandBase; |
| 1885 | |
| 1886 | if (noImmForm) |
| 1887 | OperandBase = 1; |
| 1888 | else if (OpC != TargetOpcode::INLINEASM && |
| 1889 | OpC != TargetOpcode::INLINEASM_BR) { |
| 1890 | assert(ImmToIdxMap.count(OpC) && |
| 1891 | "No indexed form of load or store available!" ); |
| 1892 | NewOpcode = ImmToIdxMap.find(Val: OpC)->second; |
| 1893 | MI.setDesc(TII.get(Opcode: NewOpcode)); |
| 1894 | OperandBase = 1; |
| 1895 | } else { |
| 1896 | OperandBase = OffsetOperandNo; |
| 1897 | } |
| 1898 | |
| 1899 | Register StackReg = MI.getOperand(i: FIOperandNum).getReg(); |
| 1900 | MI.getOperand(i: OperandBase).ChangeToRegister(Reg: StackReg, isDef: false); |
| 1901 | MI.getOperand(i: OperandBase + 1).ChangeToRegister(Reg: SReg, isDef: false, isImp: false, isKill: true); |
| 1902 | |
| 1903 | // If we stashed a value from a GPR into a VSR, we need to get it back after |
| 1904 | // spilling the register. |
| 1905 | if (ScavengingFailed && Subtarget.hasDirectMove()) |
| 1906 | BuildMI(BB&: MBB, I: ++II, MIMD: dl, MCID: TII.get(Opcode: is64Bit ? PPC::MFVSRD : PPC::MFVSRWZ), DestReg: SReg) |
| 1907 | .addReg(RegNo: VSReg); |
| 1908 | |
| 1909 | // Since these are not real X-Form instructions, we must |
| 1910 | // add the registers and access 0(NewReg) rather than |
| 1911 | // emitting the X-Form pseudo. |
| 1912 | if (NewOpcode == PPC::LQX_PSEUDO || NewOpcode == PPC::STQX_PSEUDO) { |
| 1913 | assert(is64Bit && "Quadword loads/stores only supported in 64-bit mode" ); |
| 1914 | Register NewReg = MF.getRegInfo().createVirtualRegister(RegClass: &PPC::G8RCRegClass); |
| 1915 | BuildMI(BB&: MBB, I: II, MIMD: dl, MCID: TII.get(Opcode: PPC::ADD8), DestReg: NewReg) |
| 1916 | .addReg(RegNo: SReg, Flags: RegState::Kill) |
| 1917 | .addReg(RegNo: StackReg); |
| 1918 | MI.setDesc(TII.get(Opcode: NewOpcode == PPC::LQX_PSEUDO ? PPC::LQ : PPC::STQ)); |
| 1919 | MI.getOperand(i: OperandBase + 1).ChangeToRegister(Reg: NewReg, isDef: false); |
| 1920 | MI.getOperand(i: OperandBase).ChangeToImmediate(ImmVal: 0); |
| 1921 | } |
| 1922 | return false; |
| 1923 | } |
| 1924 | |
| 1925 | Register PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const { |
| 1926 | const PPCFrameLowering *TFI = getFrameLowering(MF); |
| 1927 | |
| 1928 | if (!TM.isPPC64()) |
| 1929 | return TFI->hasFP(MF) ? PPC::R31 : PPC::R1; |
| 1930 | else |
| 1931 | return TFI->hasFP(MF) ? PPC::X31 : PPC::X1; |
| 1932 | } |
| 1933 | |
| 1934 | Register PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const { |
| 1935 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 1936 | if (!hasBasePointer(MF)) |
| 1937 | return getFrameRegister(MF); |
| 1938 | |
| 1939 | if (TM.isPPC64()) |
| 1940 | return PPC::X30; |
| 1941 | |
| 1942 | if (Subtarget.isSVR4ABI() && TM.isPositionIndependent()) |
| 1943 | return PPC::R29; |
| 1944 | |
| 1945 | return PPC::R30; |
| 1946 | } |
| 1947 | |
| 1948 | bool PPCRegisterInfo::hasBasePointer(const MachineFunction &MF) const { |
| 1949 | if (!EnableBasePointer) |
| 1950 | return false; |
| 1951 | if (AlwaysBasePointer) |
| 1952 | return true; |
| 1953 | |
| 1954 | // If we need to realign the stack, then the stack pointer can no longer |
| 1955 | // serve as an offset into the caller's stack space. As a result, we need a |
| 1956 | // base pointer. |
| 1957 | return hasStackRealignment(MF); |
| 1958 | } |
| 1959 | |
| 1960 | /// Returns true if the instruction's frame index |
| 1961 | /// reference would be better served by a base register other than FP |
| 1962 | /// or SP. Used by LocalStackFrameAllocation to determine which frame index |
| 1963 | /// references it should create new base registers for. |
| 1964 | bool PPCRegisterInfo:: |
| 1965 | needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const { |
| 1966 | assert(Offset < 0 && "Local offset must be negative" ); |
| 1967 | |
| 1968 | // It's the load/store FI references that cause issues, as it can be difficult |
| 1969 | // to materialize the offset if it won't fit in the literal field. Estimate |
| 1970 | // based on the size of the local frame and some conservative assumptions |
| 1971 | // about the rest of the stack frame (note, this is pre-regalloc, so |
| 1972 | // we don't know everything for certain yet) whether this offset is likely |
| 1973 | // to be out of range of the immediate. Return true if so. |
| 1974 | |
| 1975 | // We only generate virtual base registers for loads and stores that have |
| 1976 | // an r+i form. Return false for everything else. |
| 1977 | unsigned OpC = MI->getOpcode(); |
| 1978 | if (!ImmToIdxMap.count(Val: OpC)) |
| 1979 | return false; |
| 1980 | |
| 1981 | // Don't generate a new virtual base register just to add zero to it. |
| 1982 | if ((OpC == PPC::ADDI || OpC == PPC::ADDI8) && |
| 1983 | MI->getOperand(i: 2).getImm() == 0) |
| 1984 | return false; |
| 1985 | |
| 1986 | MachineBasicBlock &MBB = *MI->getParent(); |
| 1987 | MachineFunction &MF = *MBB.getParent(); |
| 1988 | const PPCFrameLowering *TFI = getFrameLowering(MF); |
| 1989 | unsigned StackEst = TFI->determineFrameLayout(MF, UseEstimate: true); |
| 1990 | |
| 1991 | // If we likely don't need a stack frame, then we probably don't need a |
| 1992 | // virtual base register either. |
| 1993 | if (!StackEst) |
| 1994 | return false; |
| 1995 | |
| 1996 | // Estimate an offset from the stack pointer. |
| 1997 | // The incoming offset is relating to the SP at the start of the function, |
| 1998 | // but when we access the local it'll be relative to the SP after local |
| 1999 | // allocation, so adjust our SP-relative offset by that allocation size. |
| 2000 | Offset += StackEst; |
| 2001 | |
| 2002 | // The frame pointer will point to the end of the stack, so estimate the |
| 2003 | // offset as the difference between the object offset and the FP location. |
| 2004 | return !isFrameOffsetLegal(MI, BaseReg: getBaseRegister(MF), Offset); |
| 2005 | } |
| 2006 | |
| 2007 | /// Insert defining instruction(s) for BaseReg to |
| 2008 | /// be a pointer to FrameIdx at the beginning of the basic block. |
| 2009 | Register PPCRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB, |
| 2010 | int FrameIdx, |
| 2011 | int64_t Offset) const { |
| 2012 | unsigned ADDriOpc = TM.isPPC64() ? PPC::ADDI8 : PPC::ADDI; |
| 2013 | |
| 2014 | MachineBasicBlock::iterator Ins = MBB->begin(); |
| 2015 | DebugLoc DL; // Defaults to "unknown" |
| 2016 | if (Ins != MBB->end()) |
| 2017 | DL = Ins->getDebugLoc(); |
| 2018 | |
| 2019 | const MachineFunction &MF = *MBB->getParent(); |
| 2020 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 2021 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 2022 | const MCInstrDesc &MCID = TII.get(Opcode: ADDriOpc); |
| 2023 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); |
| 2024 | const TargetRegisterClass *RC = getPointerRegClass(); |
| 2025 | Register BaseReg = MRI.createVirtualRegister(RegClass: RC); |
| 2026 | MRI.constrainRegClass(Reg: BaseReg, RC: TII.getRegClass(MCID, OpNum: 0)); |
| 2027 | |
| 2028 | BuildMI(BB&: *MBB, I: Ins, MIMD: DL, MCID, DestReg: BaseReg) |
| 2029 | .addFrameIndex(Idx: FrameIdx).addImm(Val: Offset); |
| 2030 | |
| 2031 | return BaseReg; |
| 2032 | } |
| 2033 | |
| 2034 | void PPCRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg, |
| 2035 | int64_t Offset) const { |
| 2036 | unsigned FIOperandNum = 0; |
| 2037 | while (!MI.getOperand(i: FIOperandNum).isFI()) { |
| 2038 | ++FIOperandNum; |
| 2039 | assert(FIOperandNum < MI.getNumOperands() && |
| 2040 | "Instr doesn't have FrameIndex operand!" ); |
| 2041 | } |
| 2042 | |
| 2043 | MI.getOperand(i: FIOperandNum).ChangeToRegister(Reg: BaseReg, isDef: false); |
| 2044 | unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum); |
| 2045 | Offset += MI.getOperand(i: OffsetOperandNo).getImm(); |
| 2046 | MI.getOperand(i: OffsetOperandNo).ChangeToImmediate(ImmVal: Offset); |
| 2047 | |
| 2048 | MachineBasicBlock &MBB = *MI.getParent(); |
| 2049 | MachineFunction &MF = *MBB.getParent(); |
| 2050 | const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>(); |
| 2051 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); |
| 2052 | const MCInstrDesc &MCID = MI.getDesc(); |
| 2053 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 2054 | MRI.constrainRegClass(Reg: BaseReg, RC: TII.getRegClass(MCID, OpNum: FIOperandNum)); |
| 2055 | } |
| 2056 | |
| 2057 | bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI, |
| 2058 | Register BaseReg, |
| 2059 | int64_t Offset) const { |
| 2060 | unsigned FIOperandNum = 0; |
| 2061 | while (!MI->getOperand(i: FIOperandNum).isFI()) { |
| 2062 | ++FIOperandNum; |
| 2063 | assert(FIOperandNum < MI->getNumOperands() && |
| 2064 | "Instr doesn't have FrameIndex operand!" ); |
| 2065 | } |
| 2066 | |
| 2067 | unsigned OffsetOperandNo = getOffsetONFromFION(MI: *MI, FIOperandNum); |
| 2068 | Offset += MI->getOperand(i: OffsetOperandNo).getImm(); |
| 2069 | |
| 2070 | return MI->getOpcode() == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm |
| 2071 | MI->getOpcode() == TargetOpcode::STACKMAP || |
| 2072 | MI->getOpcode() == TargetOpcode::PATCHPOINT || |
| 2073 | (isInt<16>(x: Offset) && (Offset % offsetMinAlign(MI: *MI)) == 0); |
| 2074 | } |
| 2075 | |