| 1 | //===-- SparcInstrInfo.cpp - Sparc Instruction Information ----------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file contains the Sparc implementation of the TargetInstrInfo class. |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | |
#include "SparcInstrInfo.h"
#include "Sparc.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcSubtarget.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
| 23 | |
| 24 | using namespace llvm; |
| 25 | |
| 26 | #define GET_INSTRINFO_CTOR_DTOR |
| 27 | #include "SparcGenInstrInfo.inc" |
| 28 | |
// Debug-only knobs that artificially narrow the displacement fields assumed
// for BPcc/FBPfcc and BPr branches (consumed by isBranchOffsetInRange below),
// e.g. to force the branch-relaxation machinery to trigger in tests.
static cl::opt<unsigned> BPccDisplacementBits(
    "sparc-bpcc-offset-bits" , cl::Hidden, cl::init(Val: 19),
    cl::desc("Restrict range of BPcc/FBPfcc instructions (DEBUG)" ));

static cl::opt<unsigned>
    BPrDisplacementBits("sparc-bpr-offset-bits" , cl::Hidden, cl::init(Val: 16),
                        cl::desc("Restrict range of BPr instructions (DEBUG)" ));
| 36 | |
// Pin the vtable to this file: defining one out-of-line virtual method here
// gives the class a single home translation unit for its vtable/type info.
void SparcInstrInfo::anchor() {}
| 39 | |
// Note: RI is handed to the SparcGenInstrInfo base before RI's own
// initializer has run; presumably the base only stores the reference at
// construction time — TODO(review): confirm against the generated base class.
SparcInstrInfo::SparcInstrInfo(const SparcSubtarget &ST)
    : SparcGenInstrInfo(ST, RI, SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP),
      RI(ST), Subtarget(ST) {}
| 43 | |
| 44 | /// isLoadFromStackSlot - If the specified machine instruction is a direct |
| 45 | /// load from a stack slot, return the virtual or physical register number of |
| 46 | /// the destination along with the FrameIndex of the loaded stack slot. If |
| 47 | /// not, return 0. This predicate must return 0 if the instruction has |
| 48 | /// any side effects other than loading from the stack slot. |
| 49 | Register SparcInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, |
| 50 | int &FrameIndex, |
| 51 | TypeSize &MemBytes) const { |
| 52 | switch (MI.getOpcode()) { |
| 53 | default: |
| 54 | return 0; |
| 55 | case SP::LDri: |
| 56 | MemBytes = TypeSize::getFixed(ExactSize: 4); |
| 57 | break; |
| 58 | case SP::LDXri: |
| 59 | MemBytes = TypeSize::getFixed(ExactSize: 8); |
| 60 | break; |
| 61 | case SP::LDFri: |
| 62 | MemBytes = TypeSize::getFixed(ExactSize: 4); |
| 63 | break; |
| 64 | case SP::LDDFri: |
| 65 | MemBytes = TypeSize::getFixed(ExactSize: 8); |
| 66 | break; |
| 67 | case SP::LDQFri: |
| 68 | MemBytes = TypeSize::getFixed(ExactSize: 16); |
| 69 | break; |
| 70 | } |
| 71 | if (MI.getOperand(i: 1).isFI() && MI.getOperand(i: 2).isImm() && |
| 72 | MI.getOperand(i: 2).getImm() == 0) { |
| 73 | FrameIndex = MI.getOperand(i: 1).getIndex(); |
| 74 | return MI.getOperand(i: 0).getReg(); |
| 75 | } |
| 76 | return 0; |
| 77 | } |
| 78 | |
| 79 | /// isStoreToStackSlot - If the specified machine instruction is a direct |
| 80 | /// store to a stack slot, return the virtual or physical register number of |
| 81 | /// the source reg along with the FrameIndex of the loaded stack slot. If |
| 82 | /// not, return 0. This predicate must return 0 if the instruction has |
| 83 | /// any side effects other than storing to the stack slot. |
| 84 | Register SparcInstrInfo::isStoreToStackSlot(const MachineInstr &MI, |
| 85 | int &FrameIndex, |
| 86 | TypeSize &MemBytes) const { |
| 87 | switch (MI.getOpcode()) { |
| 88 | default: |
| 89 | return 0; |
| 90 | case SP::STri: |
| 91 | MemBytes = TypeSize::getFixed(ExactSize: 4); |
| 92 | break; |
| 93 | case SP::STXri: |
| 94 | MemBytes = TypeSize::getFixed(ExactSize: 8); |
| 95 | break; |
| 96 | case SP::STFri: |
| 97 | MemBytes = TypeSize::getFixed(ExactSize: 4); |
| 98 | break; |
| 99 | case SP::STDFri: |
| 100 | MemBytes = TypeSize::getFixed(ExactSize: 8); |
| 101 | break; |
| 102 | case SP::STQFri: |
| 103 | MemBytes = TypeSize::getFixed(ExactSize: 16); |
| 104 | break; |
| 105 | } |
| 106 | if (MI.getOperand(i: 0).isFI() && MI.getOperand(i: 1).isImm() && |
| 107 | MI.getOperand(i: 1).getImm() == 0) { |
| 108 | FrameIndex = MI.getOperand(i: 0).getIndex(); |
| 109 | return MI.getOperand(i: 2).getReg(); |
| 110 | } |
| 111 | return 0; |
| 112 | } |
| 113 | |
/// Return the condition code that is taken exactly when CC is not, used by
/// reverseBranchCondition. Co-processor codes cannot be inverted and assert.
static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
{
  switch(CC) {
  // Integer condition codes.
  case SPCC::ICC_A:     return SPCC::ICC_N;
  case SPCC::ICC_N:     return SPCC::ICC_A;
  case SPCC::ICC_NE:    return SPCC::ICC_E;
  case SPCC::ICC_E:     return SPCC::ICC_NE;
  case SPCC::ICC_G:     return SPCC::ICC_LE;
  case SPCC::ICC_LE:    return SPCC::ICC_G;
  case SPCC::ICC_GE:    return SPCC::ICC_L;
  case SPCC::ICC_L:     return SPCC::ICC_GE;
  case SPCC::ICC_GU:    return SPCC::ICC_LEU;
  case SPCC::ICC_LEU:   return SPCC::ICC_GU;
  case SPCC::ICC_CC:    return SPCC::ICC_CS;
  case SPCC::ICC_CS:    return SPCC::ICC_CC;
  case SPCC::ICC_POS:   return SPCC::ICC_NEG;
  case SPCC::ICC_NEG:   return SPCC::ICC_POS;
  case SPCC::ICC_VC:    return SPCC::ICC_VS;
  case SPCC::ICC_VS:    return SPCC::ICC_VC;

  // Floating-point condition codes. Note that the inverse of an ordered
  // comparison is the corresponding unordered one (e.g. G <-> ULE), since
  // "not greater" must also cover the unordered (NaN) case.
  case SPCC::FCC_A:     return SPCC::FCC_N;
  case SPCC::FCC_N:     return SPCC::FCC_A;
  case SPCC::FCC_U:     return SPCC::FCC_O;
  case SPCC::FCC_O:     return SPCC::FCC_U;
  case SPCC::FCC_G:     return SPCC::FCC_ULE;
  case SPCC::FCC_LE:    return SPCC::FCC_UG;
  case SPCC::FCC_UG:    return SPCC::FCC_LE;
  case SPCC::FCC_ULE:   return SPCC::FCC_G;
  case SPCC::FCC_L:     return SPCC::FCC_UGE;
  case SPCC::FCC_GE:    return SPCC::FCC_UL;
  case SPCC::FCC_UL:    return SPCC::FCC_GE;
  case SPCC::FCC_UGE:   return SPCC::FCC_L;
  case SPCC::FCC_LG:    return SPCC::FCC_UE;
  case SPCC::FCC_UE:    return SPCC::FCC_LG;
  case SPCC::FCC_NE:    return SPCC::FCC_E;
  case SPCC::FCC_E:     return SPCC::FCC_NE;

  // Co-processor condition codes.
  case SPCC::CPCC_A:    return SPCC::CPCC_N;
  case SPCC::CPCC_N:    return SPCC::CPCC_A;
  case SPCC::CPCC_3:    [[fallthrough]];
  case SPCC::CPCC_2:    [[fallthrough]];
  case SPCC::CPCC_23:   [[fallthrough]];
  case SPCC::CPCC_1:    [[fallthrough]];
  case SPCC::CPCC_13:   [[fallthrough]];
  case SPCC::CPCC_12:   [[fallthrough]];
  case SPCC::CPCC_123:  [[fallthrough]];
  case SPCC::CPCC_0:    [[fallthrough]];
  case SPCC::CPCC_03:   [[fallthrough]];
  case SPCC::CPCC_02:   [[fallthrough]];
  case SPCC::CPCC_023:  [[fallthrough]];
  case SPCC::CPCC_01:   [[fallthrough]];
  case SPCC::CPCC_013:  [[fallthrough]];
  case SPCC::CPCC_012:
    // "Opposite" code is not meaningful, as we don't know
    // what the CoProc condition means here. The cond-code will
    // only be used in inline assembler, so this code should
    // not be reached in a normal compilation pass.
    llvm_unreachable("Meaningless inversion of co-processor cond code" );

  // Branch-on-register-contents (BPr) condition codes.
  case SPCC::REG_BEGIN:
    llvm_unreachable("Use of reserved cond code" );
  case SPCC::REG_Z:
    return SPCC::REG_NZ;
  case SPCC::REG_LEZ:
    return SPCC::REG_GZ;
  case SPCC::REG_LZ:
    return SPCC::REG_GEZ;
  case SPCC::REG_NZ:
    return SPCC::REG_Z;
  case SPCC::REG_GZ:
    return SPCC::REG_LEZ;
  case SPCC::REG_GEZ:
    return SPCC::REG_LZ;
  }
  llvm_unreachable("Invalid cond code" );
}
| 190 | |
| 191 | static bool isUncondBranchOpcode(int Opc) { return Opc == SP::BA; } |
| 192 | |
| 193 | static bool isI32CondBranchOpcode(int Opc) { |
| 194 | return Opc == SP::BCOND || Opc == SP::BPICC || Opc == SP::BPICCA || |
| 195 | Opc == SP::BPICCNT || Opc == SP::BPICCANT; |
| 196 | } |
| 197 | |
| 198 | static bool isI64CondBranchOpcode(int Opc) { |
| 199 | return Opc == SP::BPXCC || Opc == SP::BPXCCA || Opc == SP::BPXCCNT || |
| 200 | Opc == SP::BPXCCANT; |
| 201 | } |
| 202 | |
| 203 | static bool isRegCondBranchOpcode(int Opc) { |
| 204 | return Opc == SP::BPR || Opc == SP::BPRA || Opc == SP::BPRNT || |
| 205 | Opc == SP::BPRANT; |
| 206 | } |
| 207 | |
| 208 | static bool isFCondBranchOpcode(int Opc) { |
| 209 | return Opc == SP::FBCOND || Opc == SP::FBCONDA || Opc == SP::FBCOND_V9 || |
| 210 | Opc == SP::FBCONDA_V9; |
| 211 | } |
| 212 | |
| 213 | static bool isCondBranchOpcode(int Opc) { |
| 214 | return isI32CondBranchOpcode(Opc) || isI64CondBranchOpcode(Opc) || |
| 215 | isRegCondBranchOpcode(Opc) || isFCondBranchOpcode(Opc); |
| 216 | } |
| 217 | |
| 218 | static bool isIndirectBranchOpcode(int Opc) { |
| 219 | return Opc == SP::BINDrr || Opc == SP::BINDri; |
| 220 | } |
| 221 | |
/// Decode a conditional branch into its target block and a Cond vector of the
/// shape [branch opcode, cond code (, compared register for BPr branches)],
/// matching what insertBranch and reverseBranchCondition expect.
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  unsigned Opc = LastInst->getOpcode();
  int64_t CC = LastInst->getOperand(i: 1).getImm();

  // Push the branch opcode into Cond too so later in insertBranch
  // it can use the information to emit the correct SPARC branch opcode.
  Cond.push_back(Elt: MachineOperand::CreateImm(Val: Opc));
  Cond.push_back(Elt: MachineOperand::CreateImm(Val: CC));

  // Branch on register contents need another argument to indicate
  // the register it branches on.
  if (isRegCondBranchOpcode(Opc)) {
    Register Reg = LastInst->getOperand(i: 2).getReg();
    Cond.push_back(Elt: MachineOperand::CreateReg(Reg, isDef: false));
  }

  Target = LastInst->getOperand(i: 0).getMBB();
}
| 241 | |
/// Return the destination block of a (un)conditional branch MI. All of the
/// branch opcodes handled here keep the target MBB in operand 0.
MachineBasicBlock *
SparcInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!" );
  case SP::BA:
  case SP::BCOND:
  case SP::BCONDA:
  case SP::FBCOND:
  case SP::FBCONDA:
  case SP::BPICC:
  case SP::BPICCA:
  case SP::BPICCNT:
  case SP::BPICCANT:
  case SP::BPXCC:
  case SP::BPXCCA:
  case SP::BPXCCNT:
  case SP::BPXCCANT:
  case SP::BPFCC:
  case SP::BPFCCA:
  case SP::BPFCCNT:
  case SP::BPFCCANT:
  case SP::FBCOND_V9:
  case SP::FBCONDA_V9:
  case SP::BPR:
  case SP::BPRA:
  case SP::BPRNT:
  case SP::BPRANT:
    // The target MBB is always the first operand of the branch.
    return MI.getOperand(i: 0).getMBB();
  }
}
| 273 | |
/// Analyze the terminators of MBB. On success returns false with:
///   - TBB set, Cond empty:         block ends in an unconditional branch;
///   - TBB set, Cond non-empty:     conditional branch to TBB, fall-through;
///   - TBB and FBB set, Cond non-empty: conditional branch to TBB else FBB.
/// Returns true when the terminator sequence cannot be understood.
/// With AllowModify, redundant trailing branches may be erased.
bool SparcInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // A block ending in a non-terminator simply falls through.
  if (!isUnpredicatedTerminator(MI: *I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(MI: *--I)) {
    if (isUncondBranchOpcode(Opc: LastOpc)) {
      TBB = LastInst->getOperand(i: 0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(Opc: LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, Target&: TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(Opc: LastOpc)) {
    while (isUncondBranchOpcode(Opc: SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(MI: *--I)) {
        // Return now the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(i: 0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(MI: *--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(Opc: SecondLastOpc) && isUncondBranchOpcode(Opc: LastOpc)) {
    parseCondBranch(LastInst: SecondLastInst, Target&: TBB, Cond);
    FBB = LastInst->getOperand(i: 0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed.
  if (isUncondBranchOpcode(Opc: SecondLastOpc) && isUncondBranchOpcode(Opc: LastOpc)) {
    TBB = SecondLastInst->getOperand(i: 0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(Opc: SecondLastOpc) && isUncondBranchOpcode(Opc: LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
| 356 | |
/// Insert branch code at the end of MBB: an unconditional branch to TBB when
/// Cond is empty, otherwise a conditional branch (opcode and cond code taken
/// from Cond, as produced by parseCondBranch) plus an optional unconditional
/// branch to FBB. Returns the number of branch instructions inserted; each
/// branch is counted as 8 bytes in *BytesAdded (branch plus its delay slot,
/// matching getInstSizeInBytes).
unsigned SparcInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                      MachineBasicBlock *TBB,
                                      MachineBasicBlock *FBB,
                                      ArrayRef<MachineOperand> Cond,
                                      const DebugLoc &DL,
                                      int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough" );
  assert((Cond.size() <= 3) &&
         "Sparc branch conditions should have at most three components!" );

  if (Cond.empty()) {
    // Unconditional branch only.
    assert(!FBB && "Unconditional branch with multiple successors!" );
    BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: SP::BA)).addMBB(MBB: TBB);
    if (BytesAdded)
      *BytesAdded = 8;
    return 1;
  }

  // Conditional branch
  unsigned Opc = Cond[0].getImm();
  unsigned CC = Cond[1].getImm();
  if (isRegCondBranchOpcode(Opc)) {
    // BPr branches also carry the register being tested (Cond[2]).
    Register Reg = Cond[2].getReg();
    BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: Opc)).addMBB(MBB: TBB).addImm(Val: CC).addReg(RegNo: Reg);
  } else {
    BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: Opc)).addMBB(MBB: TBB).addImm(Val: CC);
  }

  if (!FBB) {
    if (BytesAdded)
      *BytesAdded = 8;
    return 1;
  }

  // Two-way conditional branch: also emit the unconditional part.
  BuildMI(BB: &MBB, MIMD: DL, MCID: get(Opcode: SP::BA)).addMBB(MBB: FBB);
  if (BytesAdded)
    *BytesAdded = 16;
  return 2;
}
| 396 | |
/// Remove the branch instructions at the end of MBB, stopping at the first
/// non-branch. Returns the number of branches removed; the total size of the
/// removed code is reported through *BytesRemoved when non-null.
unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  int Removed = 0;
  while (I != MBB.begin()) {
    --I;

    // Debug instructions are skipped (and preserved), not counted.
    if (I->isDebugInstr())
      continue;

    if (!isCondBranchOpcode(Opc: I->getOpcode()) &&
        !isUncondBranchOpcode(Opc: I->getOpcode()))
      break; // Not a branch

    Removed += getInstSizeInBytes(MI: *I);
    I->eraseFromParent();
    // Erasing invalidated the iterator; restart the scan from the end.
    I = MBB.end();
    ++Count;
  }

  if (BytesRemoved)
    *BytesRemoved = Removed;
  return Count;
}
| 422 | |
| 423 | bool SparcInstrInfo::reverseBranchCondition( |
| 424 | SmallVectorImpl<MachineOperand> &Cond) const { |
| 425 | assert(Cond.size() <= 3); |
| 426 | SPCC::CondCodes CC = static_cast<SPCC::CondCodes>(Cond[1].getImm()); |
| 427 | Cond[1].setImm(GetOppositeBranchCondition(CC)); |
| 428 | return false; |
| 429 | } |
| 430 | |
/// Return true if a branch of the given opcode can reach a byte Offset.
/// Offsets are instruction-aligned (multiple of 4) and the hardware
/// displacement fields count words, hence the >> 2 before the range check.
/// The BPcc/BPr field widths can be artificially narrowed via the
/// -sparc-bpcc-offset-bits / -sparc-bpr-offset-bits debug options above.
bool SparcInstrInfo::isBranchOffsetInRange(unsigned BranchOpc,
                                           int64_t Offset) const {
  assert((Offset & 0b11) == 0 && "Malformed branch offset" );
  switch (BranchOpc) {
  // V8-style branches: 22-bit signed word displacement.
  case SP::BA:
  case SP::BCOND:
  case SP::BCONDA:
  case SP::FBCOND:
  case SP::FBCONDA:
    return isIntN(N: 22, x: Offset >> 2);

  // V9 BPcc/FBPfcc branches: 19-bit displacement by default.
  case SP::BPICC:
  case SP::BPICCA:
  case SP::BPICCNT:
  case SP::BPICCANT:
  case SP::BPXCC:
  case SP::BPXCCA:
  case SP::BPXCCNT:
  case SP::BPXCCANT:
  case SP::BPFCC:
  case SP::BPFCCA:
  case SP::BPFCCNT:
  case SP::BPFCCANT:
  case SP::FBCOND_V9:
  case SP::FBCONDA_V9:
    return isIntN(N: BPccDisplacementBits, x: Offset >> 2);

  // V9 branch-on-register: 16-bit displacement by default.
  case SP::BPR:
  case SP::BPRA:
  case SP::BPRNT:
  case SP::BPRANT:
    return isIntN(N: BPrDisplacementBits, x: Offset >> 2);
  }

  llvm_unreachable("Unknown branch instruction!" );
}
| 467 | |
| 468 | void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB, |
| 469 | MachineBasicBlock::iterator I, |
| 470 | const DebugLoc &DL, Register DestReg, |
| 471 | Register SrcReg, bool KillSrc, |
| 472 | bool RenamableDest, bool RenamableSrc) const { |
| 473 | unsigned numSubRegs = 0; |
| 474 | unsigned movOpc = 0; |
| 475 | const unsigned *subRegIdx = nullptr; |
| 476 | bool = false; |
| 477 | |
| 478 | const unsigned DW_SubRegsIdx[] = { SP::sub_even, SP::sub_odd }; |
| 479 | const unsigned DFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd }; |
| 480 | const unsigned QFP_DFP_SubRegsIdx[] = { SP::sub_even64, SP::sub_odd64 }; |
| 481 | const unsigned QFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd, |
| 482 | SP::sub_odd64_then_sub_even, |
| 483 | SP::sub_odd64_then_sub_odd }; |
| 484 | |
| 485 | if (SP::IntRegsRegClass.contains(Reg1: DestReg, Reg2: SrcReg)) |
| 486 | BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::ORrr), DestReg).addReg(RegNo: SP::G0) |
| 487 | .addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc)); |
| 488 | else if (SP::IntPairRegClass.contains(Reg1: DestReg, Reg2: SrcReg)) { |
| 489 | subRegIdx = DW_SubRegsIdx; |
| 490 | numSubRegs = 2; |
| 491 | movOpc = SP::ORrr; |
| 492 | ExtraG0 = true; |
| 493 | } else if (SP::FPRegsRegClass.contains(Reg1: DestReg, Reg2: SrcReg)) |
| 494 | BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::FMOVS), DestReg) |
| 495 | .addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc)); |
| 496 | else if (SP::DFPRegsRegClass.contains(Reg1: DestReg, Reg2: SrcReg)) { |
| 497 | if (Subtarget.isV9()) { |
| 498 | BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::FMOVD), DestReg) |
| 499 | .addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc)); |
| 500 | } else { |
| 501 | // Use two FMOVS instructions. |
| 502 | subRegIdx = DFP_FP_SubRegsIdx; |
| 503 | numSubRegs = 2; |
| 504 | movOpc = SP::FMOVS; |
| 505 | } |
| 506 | } else if (SP::QFPRegsRegClass.contains(Reg1: DestReg, Reg2: SrcReg)) { |
| 507 | if (Subtarget.isV9()) { |
| 508 | if (Subtarget.hasHardQuad()) { |
| 509 | BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::FMOVQ), DestReg) |
| 510 | .addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc)); |
| 511 | } else { |
| 512 | // Use two FMOVD instructions. |
| 513 | subRegIdx = QFP_DFP_SubRegsIdx; |
| 514 | numSubRegs = 2; |
| 515 | movOpc = SP::FMOVD; |
| 516 | } |
| 517 | } else { |
| 518 | // Use four FMOVS instructions. |
| 519 | subRegIdx = QFP_FP_SubRegsIdx; |
| 520 | numSubRegs = 4; |
| 521 | movOpc = SP::FMOVS; |
| 522 | } |
| 523 | } else if (SP::ASRRegsRegClass.contains(Reg: DestReg) && |
| 524 | SP::IntRegsRegClass.contains(Reg: SrcReg)) { |
| 525 | BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::WRASRrr), DestReg) |
| 526 | .addReg(RegNo: SP::G0) |
| 527 | .addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc)); |
| 528 | } else if (SP::IntRegsRegClass.contains(Reg: DestReg) && |
| 529 | SP::ASRRegsRegClass.contains(Reg: SrcReg)) { |
| 530 | BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::RDASR), DestReg) |
| 531 | .addReg(RegNo: SrcReg, Flags: getKillRegState(B: KillSrc)); |
| 532 | } else |
| 533 | llvm_unreachable("Impossible reg-to-reg copy" ); |
| 534 | |
| 535 | if (numSubRegs == 0 || subRegIdx == nullptr || movOpc == 0) |
| 536 | return; |
| 537 | |
| 538 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
| 539 | MachineInstr *MovMI = nullptr; |
| 540 | |
| 541 | for (unsigned i = 0; i != numSubRegs; ++i) { |
| 542 | Register Dst = TRI->getSubReg(Reg: DestReg, Idx: subRegIdx[i]); |
| 543 | Register Src = TRI->getSubReg(Reg: SrcReg, Idx: subRegIdx[i]); |
| 544 | assert(Dst && Src && "Bad sub-register" ); |
| 545 | |
| 546 | MachineInstrBuilder MIB = BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: movOpc), DestReg: Dst); |
| 547 | if (ExtraG0) |
| 548 | MIB.addReg(RegNo: SP::G0); |
| 549 | MIB.addReg(RegNo: Src); |
| 550 | MovMI = MIB.getInstr(); |
| 551 | } |
| 552 | // Add implicit super-register defs and kills to the last MovMI. |
| 553 | MovMI->addRegisterDefined(Reg: DestReg, RegInfo: TRI); |
| 554 | if (KillSrc) |
| 555 | MovMI->addRegisterKilled(IncomingReg: SrcReg, RegInfo: TRI); |
| 556 | } |
| 557 | |
/// Emit a store of SrcReg to stack slot FI before iterator I, choosing the
/// store opcode from the register class RC.
void SparcInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool isKill, int FI,
                                         const TargetRegisterClass *RC,
                                         Register VReg,
                                         MachineInstr::MIFlag Flags) const {
  // Borrow the debug location of the instruction we insert before, if any.
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  // Memory operand describing the spill slot access.
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), F: MachineMemOperand::MOStore,
      Size: MFI.getObjectSize(ObjectIdx: FI), BaseAlignment: MFI.getObjectAlign(ObjectIdx: FI));

  // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
  if (RC == &SP::I64RegsRegClass)
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::STXri)).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addReg(RegNo: SrcReg, Flags: getKillRegState(B: isKill)).addMemOperand(MMO);
  else if (RC == &SP::IntRegsRegClass)
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::STri)).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addReg(RegNo: SrcReg, Flags: getKillRegState(B: isKill)).addMemOperand(MMO);
  else if (RC == &SP::IntPairRegClass)
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::STDri)).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addReg(RegNo: SrcReg, Flags: getKillRegState(B: isKill)).addMemOperand(MMO);
  else if (RC == &SP::FPRegsRegClass)
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::STFri)).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addReg(RegNo: SrcReg, Flags: getKillRegState(B: isKill)).addMemOperand(MMO);
  else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::STDFri)).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addReg(RegNo: SrcReg, Flags: getKillRegState(B: isKill)).addMemOperand(MMO);
  else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
    // Use STQFri irrespective of its legality. If STQ is not legal, it will be
    // lowered into two STDs in eliminateFrameIndex.
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::STQFri)).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addReg(RegNo: SrcReg, Flags: getKillRegState(B: isKill)).addMemOperand(MMO);
  else
    llvm_unreachable("Can't store this register to stack slot" );
}
| 597 | |
/// Emit a load of stack slot FI into DestReg before iterator I, choosing the
/// load opcode from the register class RC. Mirrors storeRegToStackSlot.
void SparcInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DestReg, int FI,
                                          const TargetRegisterClass *RC,
                                          Register VReg, unsigned SubReg,
                                          MachineInstr::MIFlag Flags) const {
  // Borrow the debug location of the instruction we insert before, if any.
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  // Memory operand describing the reload-slot access.
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), F: MachineMemOperand::MOLoad,
      Size: MFI.getObjectSize(ObjectIdx: FI), BaseAlignment: MFI.getObjectAlign(ObjectIdx: FI));

  if (RC == &SP::I64RegsRegClass)
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::LDXri), DestReg).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addMemOperand(MMO);
  else if (RC == &SP::IntRegsRegClass)
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::LDri), DestReg).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addMemOperand(MMO);
  else if (RC == &SP::IntPairRegClass)
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::LDDri), DestReg).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addMemOperand(MMO);
  else if (RC == &SP::FPRegsRegClass)
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::LDFri), DestReg).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addMemOperand(MMO);
  else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::LDDFri), DestReg).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addMemOperand(MMO);
  else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
    // Use LDQFri irrespective of its legality. If LDQ is not legal, it will be
    // lowered into two LDDs in eliminateFrameIndex.
    BuildMI(BB&: MBB, I, MIMD: DL, MCID: get(Opcode: SP::LDQFri), DestReg).addFrameIndex(Idx: FI).addImm(Val: 0)
      .addMemOperand(MMO);
  else
    llvm_unreachable("Can't load this register from stack slot" );
}
| 636 | |
/// Return the virtual register holding the function's global base address,
/// creating it (and emitting a GETPCX at the top of the entry block) on first
/// use. The register is cached in SparcMachineFunctionInfo so subsequent
/// calls reuse it.
Register SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  SparcMachineFunctionInfo *SparcFI = MF->getInfo<SparcMachineFunctionInfo>();
  Register GlobalBaseReg = SparcFI->getGlobalBaseReg();
  if (GlobalBaseReg)
    return GlobalBaseReg;

  // Insert the set of GlobalBaseReg into the first MBB of the function
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();

  // Pointer-sized register class: 64-bit regs under SPARC V9, else 32-bit.
  const TargetRegisterClass *PtrRC =
      Subtarget.is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
  GlobalBaseReg = RegInfo.createVirtualRegister(RegClass: PtrRC);

  DebugLoc dl;

  BuildMI(BB&: FirstMBB, I: MBBI, MIMD: dl, MCID: get(Opcode: SP::GETPCX), DestReg: GlobalBaseReg);
  SparcFI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}
| 658 | |
| 659 | unsigned SparcInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { |
| 660 | unsigned Opcode = MI.getOpcode(); |
| 661 | |
| 662 | if (MI.isInlineAsm()) { |
| 663 | const MachineFunction *MF = MI.getParent()->getParent(); |
| 664 | const char *AsmStr = MI.getOperand(i: 0).getSymbolName(); |
| 665 | return getInlineAsmLength(Str: AsmStr, MAI: *MF->getTarget().getMCAsmInfo()); |
| 666 | } |
| 667 | |
| 668 | // If the instruction has a delay slot, be conservative and also include |
| 669 | // it for sizing purposes. This is done so that the BranchRelaxation pass |
| 670 | // will not mistakenly mark out-of-range branches as in-range. |
| 671 | if (MI.hasDelaySlot()) |
| 672 | return get(Opcode).getSize() * 2; |
| 673 | return get(Opcode).getSize(); |
| 674 | } |
| 675 | |
/// Recognize compare instructions for optimizeCompareInstr. Only SUBCC forms
/// that discard their result into %g0 (i.e. pure compares) are accepted:
/// SUBCCri against immediate 0, or SUBCCrr against %g0.
bool SparcInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                                    Register &SrcReg2, int64_t &CmpMask,
                                    int64_t &CmpValue) const {
  Register DstReg;
  switch (MI.getOpcode()) {
  default:
    break;
  case SP::SUBCCri:
    DstReg = MI.getOperand(i: 0).getReg();
    SrcReg = MI.getOperand(i: 1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI.getOperand(i: 2).getImm();
    // Only a compare if the result is discarded and the immediate is zero.
    return DstReg == SP::G0 && CmpValue == 0;
  case SP::SUBCCrr:
    DstReg = MI.getOperand(i: 0).getReg();
    SrcReg = MI.getOperand(i: 1).getReg();
    SrcReg2 = MI.getOperand(i: 2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    // Only a compare if the result is discarded and the RHS is %g0 (zero).
    return DstReg == SP::G0 && SrcReg2 == SP::G0;
  }

  return false;
}
| 701 | |
| 702 | bool SparcInstrInfo::optimizeCompareInstr( |
| 703 | MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, |
| 704 | int64_t CmpValue, const MachineRegisterInfo *MRI) const { |
| 705 | |
| 706 | // Get the unique definition of SrcReg. |
| 707 | MachineInstr *MI = MRI->getUniqueVRegDef(Reg: SrcReg); |
| 708 | if (!MI) |
| 709 | return false; |
| 710 | |
| 711 | // Only optimize if defining and comparing instruction in same block. |
| 712 | if (MI->getParent() != CmpInstr.getParent()) |
| 713 | return false; |
| 714 | |
| 715 | unsigned NewOpcode; |
| 716 | switch (MI->getOpcode()) { |
| 717 | case SP::ANDNrr: |
| 718 | NewOpcode = SP::ANDNCCrr; |
| 719 | break; |
| 720 | case SP::ANDNri: |
| 721 | NewOpcode = SP::ANDNCCri; |
| 722 | break; |
| 723 | case SP::ANDrr: |
| 724 | NewOpcode = SP::ANDCCrr; |
| 725 | break; |
| 726 | case SP::ANDri: |
| 727 | NewOpcode = SP::ANDCCri; |
| 728 | break; |
| 729 | case SP::ORrr: |
| 730 | NewOpcode = SP::ORCCrr; |
| 731 | break; |
| 732 | case SP::ORri: |
| 733 | NewOpcode = SP::ORCCri; |
| 734 | break; |
| 735 | case SP::ORNCCrr: |
| 736 | NewOpcode = SP::ORNCCrr; |
| 737 | break; |
| 738 | case SP::ORNri: |
| 739 | NewOpcode = SP::ORNCCri; |
| 740 | break; |
| 741 | case SP::XORrr: |
| 742 | NewOpcode = SP::XORCCrr; |
| 743 | break; |
| 744 | case SP::XNORri: |
| 745 | NewOpcode = SP::XNORCCri; |
| 746 | break; |
| 747 | case SP::XNORrr: |
| 748 | NewOpcode = SP::XNORCCrr; |
| 749 | break; |
| 750 | case SP::ADDrr: |
| 751 | NewOpcode = SP::ADDCCrr; |
| 752 | break; |
| 753 | case SP::ADDri: |
| 754 | NewOpcode = SP::ADDCCri; |
| 755 | break; |
| 756 | case SP::SUBrr: |
| 757 | NewOpcode = SP::SUBCCrr; |
| 758 | break; |
| 759 | case SP::SUBri: |
| 760 | NewOpcode = SP::SUBCCri; |
| 761 | break; |
| 762 | default: |
| 763 | return false; |
| 764 | } |
| 765 | |
| 766 | bool IsICCModified = false; |
| 767 | MachineBasicBlock::iterator I = MI; |
| 768 | MachineBasicBlock::iterator C = CmpInstr; |
| 769 | MachineBasicBlock::iterator E = CmpInstr.getParent()->end(); |
| 770 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
| 771 | |
| 772 | // If ICC is used or modified between MI and CmpInstr we cannot optimize. |
| 773 | while (++I != C) { |
| 774 | if (I->modifiesRegister(Reg: SP::ICC, TRI) || I->readsRegister(Reg: SP::ICC, TRI)) |
| 775 | return false; |
| 776 | } |
| 777 | |
| 778 | while (++I != E) { |
| 779 | // Only allow conditionals on equality. |
| 780 | if (I->readsRegister(Reg: SP::ICC, TRI)) { |
| 781 | bool IsICCBranch = I->getOpcode() == SP::BCOND || |
| 782 | I->getOpcode() == SP::BPICC || |
| 783 | I->getOpcode() == SP::BPXCC; |
| 784 | bool IsICCMove = |
| 785 | I->getOpcode() == SP::MOVICCrr || I->getOpcode() == SP::MOVICCri || |
| 786 | I->getOpcode() == SP::MOVXCCrr || I->getOpcode() == SP::MOVXCCri; |
| 787 | bool IsICCConditional = IsICCBranch || IsICCMove; |
| 788 | if (!IsICCConditional || |
| 789 | (I->getOperand(i: IsICCBranch ? 1 : 3).getImm() != SPCC::ICC_E && |
| 790 | I->getOperand(i: IsICCBranch ? 1 : 3).getImm() != SPCC::ICC_NE)) |
| 791 | return false; |
| 792 | } else if (I->modifiesRegister(Reg: SP::ICC, TRI)) { |
| 793 | IsICCModified = true; |
| 794 | break; |
| 795 | } |
| 796 | } |
| 797 | |
| 798 | if (!IsICCModified) { |
| 799 | MachineBasicBlock *MBB = CmpInstr.getParent(); |
| 800 | if (any_of(Range: MBB->successors(), |
| 801 | P: [](MachineBasicBlock *Succ) { return Succ->isLiveIn(Reg: SP::ICC); })) |
| 802 | return false; |
| 803 | } |
| 804 | |
| 805 | if (MRI->hasOneNonDBGUse(RegNo: SrcReg)) |
| 806 | MI->getOperand(i: 0).setReg(SP::G0); |
| 807 | |
| 808 | MI->setDesc(get(Opcode: NewOpcode)); |
| 809 | MI->addRegisterDefined(Reg: SP::ICC); |
| 810 | CmpInstr.eraseFromParent(); |
| 811 | |
| 812 | return true; |
| 813 | } |
| 814 | |
/// Expand target pseudo-instructions after register allocation. Returns true
/// if MI was expanded (and replaced/erased), false to fall back to the
/// default handling.
bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::LOAD_STACK_GUARD: {
    assert(Subtarget.getTargetTriple().isOSLinux() &&
           "Only Linux target is expected to contain LOAD_STACK_GUARD" );
    // offsetof(tcbhead_t, stack_guard) from sysdeps/sparc/nptl/tls.h in glibc.
    const int64_t Offset = Subtarget.is64Bit() ? 0x28 : 0x14;
    // Rewrite the pseudo in place into "ld[x] [%g7 + Offset], dst"
    // (%g7 is the thread pointer).
    MI.setDesc(get(Opcode: Subtarget.is64Bit() ? SP::LDXri : SP::LDri));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addReg(RegNo: SP::G7)
        .addImm(Val: Offset);
    return true;
  }
  case SP::V8BAR: {
    assert(!Subtarget.isV9() &&
           "V8BAR should not be emitted on V9 processors!" );

    // Emit stbar; ldstub [%sp-1], %g0
    // The sequence acts as a full barrier on V8 systems.
    MachineBasicBlock &MBB = *MI.getParent();
    MachineInstr &InstSTBAR =
        *BuildMI(BB&: MBB, I&: MI, MIMD: MI.getDebugLoc(), MCID: get(Opcode: SP::STBAR));
    MachineInstr &InstLDSTUB =
        *BuildMI(BB&: MBB, I&: MI, MIMD: MI.getDebugLoc(), MCID: get(Opcode: SP::LDSTUBri), DestReg: SP::G0)
             .addReg(RegNo: SP::O6)
             .addImm(Val: -1);
    // Bundle the pair so nothing is scheduled between them, then drop the
    // pseudo.
    MIBundleBuilder(MBB, InstSTBAR, InstLDSTUB);
    MBB.erase(I: MI);
    return true;
  }
  }
  return false;
}
| 848 | |