| 1 | //===------- X86ExpandPseudo.cpp - Expand pseudo instructions -------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file contains a pass that expands pseudo instructions into target |
| 10 | // instructions to allow proper scheduling, if-conversion, other late |
| 11 | // optimizations, or simply the encoding of the instructions. |
| 12 | // |
| 13 | //===----------------------------------------------------------------------===// |
| 14 | |
| 15 | #include "X86.h" |
| 16 | #include "X86FrameLowering.h" |
| 17 | #include "X86InstrInfo.h" |
| 18 | #include "X86MachineFunctionInfo.h" |
| 19 | #include "X86Subtarget.h" |
| 20 | #include "llvm/CodeGen/LivePhysRegs.h" |
| 21 | #include "llvm/CodeGen/MachineDominators.h" |
| 22 | #include "llvm/CodeGen/MachineFunctionAnalysisManager.h" |
| 23 | #include "llvm/CodeGen/MachineFunctionPass.h" |
| 24 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
| 25 | #include "llvm/CodeGen/MachineLoopInfo.h" |
| 26 | #include "llvm/CodeGen/MachinePassManager.h" |
| 27 | #include "llvm/CodeGen/Passes.h" // For IDs of passes that are preserved. |
| 28 | #include "llvm/IR/Analysis.h" |
| 29 | #include "llvm/IR/EHPersonalities.h" |
| 30 | #include "llvm/IR/GlobalValue.h" |
| 31 | #include "llvm/Target/TargetMachine.h" |
| 32 | using namespace llvm; |
| 33 | |
| 34 | #define DEBUG_TYPE "x86-expand-pseudo" |
| 35 | #define X86_EXPAND_PSEUDO_NAME "X86 pseudo instruction expansion pass" |
| 36 | |
| 37 | namespace { |
// Shared implementation of X86 pseudo-instruction expansion, wrapped by the
// legacy-pass-manager pass below (and, per the includes, a new-PM wrapper
// elsewhere in this file).
class X86ExpandPseudoImpl {
public:
  // Target hooks cached for the MachineFunction currently being processed;
  // initialized by runOnMachineFunction before any expansion happens.
  const X86Subtarget *STI = nullptr;
  const X86InstrInfo *TII = nullptr;
  const X86RegisterInfo *TRI = nullptr;
  const X86MachineFunctionInfo *X86FI = nullptr;
  const X86FrameLowering *X86FL = nullptr;

  // Entry point: expand all pseudo instructions in \p MF.
  bool runOnMachineFunction(MachineFunction &MF);

private:
  // Expand TargetOpcode::ICALL_BRANCH_FUNNEL into a tree of compare/branch
  // blocks ending in tail calls.
  void expandICallBranchFunnel(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator MBBI);
  // Expand the CALL64*_RVMARKER pseudos (call + marker mov + ObjC runtime
  // call).
  void expandCALL_RVMARKER(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI);
  // Expand a single pseudo; returns true if \p MBBI was expanded.
  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
  bool expandMBB(MachineBasicBlock &MBB);

  /// This function expands pseudos which affect control flow.
  /// It is done in a separate pass to simplify block navigation in the main
  /// pass (calling expandMBB).
  bool expandPseudosWhichAffectControlFlow(MachineFunction &MF);

  /// Expand X86::VASTART_SAVE_XMM_REGS into a set of xmm copying
  /// instructions, placed into a separate block guarded by a check of the
  /// al register (for the SystemV ABI).
  void expandVastartSaveXmmRegs(
      MachineBasicBlock *EntryBlk,
      MachineBasicBlock::iterator VAStartPseudoInstr) const;
};
| 68 | |
| 69 | class X86ExpandPseudoLegacy : public MachineFunctionPass { |
| 70 | public: |
| 71 | static char ID; |
| 72 | X86ExpandPseudoLegacy() : MachineFunctionPass(ID) {} |
| 73 | |
| 74 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
| 75 | AU.setPreservesCFG(); |
| 76 | AU.addPreservedID(ID&: MachineLoopInfoID); |
| 77 | AU.addPreservedID(ID&: MachineDominatorsID); |
| 78 | MachineFunctionPass::getAnalysisUsage(AU); |
| 79 | } |
| 80 | |
| 81 | const X86Subtarget *STI = nullptr; |
| 82 | const X86InstrInfo *TII = nullptr; |
| 83 | const X86RegisterInfo *TRI = nullptr; |
| 84 | const X86MachineFunctionInfo *X86FI = nullptr; |
| 85 | const X86FrameLowering *X86FL = nullptr; |
| 86 | |
| 87 | bool runOnMachineFunction(MachineFunction &MF) override; |
| 88 | |
| 89 | MachineFunctionProperties getRequiredProperties() const override { |
| 90 | return MachineFunctionProperties().setNoVRegs(); |
| 91 | } |
| 92 | |
| 93 | StringRef getPassName() const override { |
| 94 | return "X86 pseudo instruction expansion pass" ; |
| 95 | } |
| 96 | }; |
| 97 | char X86ExpandPseudoLegacy::ID = 0; |
| 98 | } // End anonymous namespace. |
| 99 | |
// Register the legacy pass with the pass registry (not CFG-only, not an
// analysis), under the name DEBUG_TYPE ("x86-expand-pseudo").
INITIALIZE_PASS(X86ExpandPseudoLegacy, DEBUG_TYPE, X86_EXPAND_PSEUDO_NAME,
                false, false)
| 102 | |
// Expand TargetOpcode::ICALL_BRANCH_FUNNEL: replace the pseudo with a tree
// of LEA/CMP/JCC blocks that compares the selector against jump-table entry
// addresses and ultimately tail-calls one of the targets.
void X86ExpandPseudoImpl::expandICallBranchFunnel(
    MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI) {
  // Remember where the pseudo lives so it can be erased at the end; MBB and
  // MBBI are rebound to freshly created blocks as the funnel is emitted.
  MachineBasicBlock *JTMBB = MBB;
  MachineInstr *JTInst = &*MBBI;
  MachineFunction *MF = MBB->getParent();
  const BasicBlock *BB = MBB->getBasicBlock();
  // All new blocks are inserted immediately after the original one.
  auto InsPt = MachineFunction::iterator(MBB);
  ++InsPt;

  // Blocks whose body (a tail call to target .second) is deferred until the
  // whole funnel has been laid out; filled in by EmitCondJumpTarget.
  std::vector<std::pair<MachineBasicBlock *, unsigned>> TargetMBBs;
  const DebugLoc &DL = JTInst->getDebugLoc();
  // Pseudo operand layout: 0 = selector, 1 = combined jump-table global,
  // then per target k: 2+2k = offset into the global, 3+2k = callee.
  MachineOperand Selector = JTInst->getOperand(i: 0);
  const GlobalValue *CombinedGlobal = JTInst->getOperand(i: 1).getGlobal();

  // Compare the selector against the RIP-relative address of jump-table
  // entry \p Target, materialized into R11 (sets EFLAGS for the jumps).
  auto CmpTarget = [&](unsigned Target) {
    if (Selector.isReg())
      MBB->addLiveIn(PhysReg: Selector.getReg());
    BuildMI(BB&: *MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: X86::LEA64r), DestReg: X86::R11)
        .addReg(RegNo: X86::RIP)
        .addImm(Val: 1)
        .addReg(RegNo: 0)
        .addGlobalAddress(GV: CombinedGlobal,
                          Offset: JTInst->getOperand(i: 2 + 2 * Target).getImm())
        .addReg(RegNo: 0);
    BuildMI(BB&: *MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: X86::CMP64rr))
        .add(MO: Selector)
        .addReg(RegNo: X86::R11);
  };

  // Create a successor of the current block; the preceding comparison's
  // EFLAGS must be live out of the current block.
  auto CreateMBB = [&]() {
    auto *NewMBB = MF->CreateMachineBasicBlock(BB);
    MBB->addSuccessor(Succ: NewMBB);
    if (!MBB->isLiveIn(Reg: X86::EFLAGS))
      MBB->addLiveIn(PhysReg: X86::EFLAGS);
    return NewMBB;
  };

  // Emit a conditional jump to \p ThenMBB, then continue emission in a new
  // fall-through ("else") block.
  auto EmitCondJump = [&](unsigned CC, MachineBasicBlock *ThenMBB) {
    BuildMI(BB&: *MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: X86::JCC_1)).addMBB(MBB: ThenMBB).addImm(Val: CC);

    auto *ElseMBB = CreateMBB();
    MF->insert(MBBI: InsPt, MBB: ElseMBB);
    MBB = ElseMBB;
    MBBI = MBB->end();
  };

  // As above, but the then-block is a deferred tail call to \p Target.
  auto EmitCondJumpTarget = [&](unsigned CC, unsigned Target) {
    auto *ThenMBB = CreateMBB();
    TargetMBBs.push_back(x: {ThenMBB, Target});
    EmitCondJump(CC, ThenMBB);
  };

  // Tail-call target \p Target directly in the current block.
  auto EmitTailCall = [&](unsigned Target) {
    BuildMI(BB&: *MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: X86::TAILJMPd64))
        .add(MO: JTInst->getOperand(i: 3 + 2 * Target));
  };

  // Recursively build the branch tree over the target range
  // [FirstTarget, FirstTarget + NumTargets): small ranges become linear
  // compare/branch chains; larger ones split around the middle target.
  std::function<void(unsigned, unsigned)> EmitBranchFunnel =
      [&](unsigned FirstTarget, unsigned NumTargets) {
        if (NumTargets == 1) {
          EmitTailCall(FirstTarget);
          return;
        }

        if (NumTargets == 2) {
          CmpTarget(FirstTarget + 1);
          EmitCondJumpTarget(X86::COND_B, FirstTarget);
          EmitTailCall(FirstTarget + 1);
          return;
        }

        if (NumTargets < 6) {
          CmpTarget(FirstTarget + 1);
          EmitCondJumpTarget(X86::COND_B, FirstTarget);
          EmitCondJumpTarget(X86::COND_E, FirstTarget + 1);
          EmitBranchFunnel(FirstTarget + 2, NumTargets - 2);
          return;
        }

        // Binary split: below-middle goes to ThenMBB (emitted afterwards),
        // equal-to-middle tail-calls, above-middle recurses in line.
        auto *ThenMBB = CreateMBB();
        CmpTarget(FirstTarget + (NumTargets / 2));
        EmitCondJump(X86::COND_B, ThenMBB);
        EmitCondJumpTarget(X86::COND_E, FirstTarget + (NumTargets / 2));
        EmitBranchFunnel(FirstTarget + (NumTargets / 2) + 1,
                         NumTargets - (NumTargets / 2) - 1);

        MF->insert(MBBI: InsPt, MBB: ThenMBB);
        MBB = ThenMBB;
        MBBI = MBB->end();
        EmitBranchFunnel(FirstTarget, NumTargets / 2);
      };

  // Operands 2..N come in (offset, callee) pairs, so the target count is
  // half the remaining operand count.
  EmitBranchFunnel(0, (JTInst->getNumOperands() - 2) / 2);
  // Materialize the deferred tail-call blocks.
  for (auto P : TargetMBBs) {
    MF->insert(MBBI: InsPt, MBB: P.first);
    BuildMI(BB: P.first, MIMD: DL, MCID: TII->get(Opcode: X86::TAILJMPd64))
        .add(MO: JTInst->getOperand(i: 3 + 2 * P.second));
  }
  // Finally drop the pseudo from its original block.
  JTMBB->erase(I: JTInst);
}
| 203 | |
void X86ExpandPseudoImpl::expandCALL_RVMARKER(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) {
  // Expand CALL_RVMARKER pseudo to call instruction, followed by the special
  // "movq %rax, %rdi" marker, followed by a call to the ObjC runtime
  // function named in operand 0.
  MachineInstr &MI = *MBBI;

  MachineInstr *OriginalCall;
  // Operand 0 is the runtime function; operand 1 is the actual call target.
  assert((MI.getOperand(1).isGlobal() || MI.getOperand(1).isReg()) &&
         "invalid operand for regular call" );
  // Map the pseudo to the real call opcode with the same addressing form.
  unsigned Opc = -1;
  if (MI.getOpcode() == X86::CALL64m_RVMARKER)
    Opc = X86::CALL64m;
  else if (MI.getOpcode() == X86::CALL64r_RVMARKER)
    Opc = X86::CALL64r;
  else if (MI.getOpcode() == X86::CALL64pcrel32_RVMARKER)
    Opc = X86::CALL64pcrel32;
  else
    llvm_unreachable("unexpected opcode" );

  OriginalCall = BuildMI(BB&: MBB, I: MBBI, MIMD: MI.getDebugLoc(), MCID: TII->get(Opcode: Opc)).getInstr();
  bool RAXImplicitDead = false;
  // Copy every operand after the runtime function onto the real call.
  for (MachineOperand &Op : llvm::drop_begin(RangeOrContainer: MI.operands())) {
    // RAX may be 'implicit dead', if there are no other users of the return
    // value. We introduce a new use, so change it to 'implicit def'.
    if (Op.isReg() && Op.isImplicit() && Op.isDead() &&
        TRI->regsOverlap(RegA: Op.getReg(), RegB: X86::RAX)) {
      Op.setIsDead(false);
      Op.setIsDef(true);
      RAXImplicitDead = true;
    }
    OriginalCall->addOperand(Op);
  }

  // Emit marker "movq %rax, %rdi". %rdi is not callee-saved, so it cannot be
  // live across the earlier call. The call to the ObjC runtime function returns
  // the first argument, so the value of %rax is unchanged after the ObjC
  // runtime call. On Windows targets, the runtime call follows the regular
  // x64 calling convention and expects the first argument in %rcx.
  auto TargetReg = STI->getTargetTriple().isOSWindows() ? X86::RCX : X86::RDI;
  auto *Marker = BuildMI(BB&: MBB, I: MBBI, MIMD: MI.getDebugLoc(), MCID: TII->get(Opcode: X86::MOV64rr))
                     .addReg(RegNo: TargetReg, Flags: RegState::Define)
                     .addReg(RegNo: X86::RAX)
                     .getInstr();
  // Additional call info (e.g. call-site info) now belongs to the marker.
  if (MI.shouldUpdateAdditionalCallInfo())
    MBB.getParent()->moveAdditionalCallInfo(Old: &MI, New: Marker);

  // Emit call to ObjC runtime.
  const uint32_t *RegMask =
      TRI->getCallPreservedMask(MF: *MBB.getParent(), CallingConv::C);
  MachineInstr *RtCall =
      BuildMI(BB&: MBB, I: MBBI, MIMD: MI.getDebugLoc(), MCID: TII->get(Opcode: X86::CALL64pcrel32))
          .addGlobalAddress(GV: MI.getOperand(i: 0).getGlobal(), Offset: 0, TargetFlags: 0)
          .addRegMask(Mask: RegMask)
          // RAX carries the value into the runtime call; restore its dead
          // marking here if the original call had it dead.
          .addReg(RegNo: X86::RAX,
                  Flags: RegState::Implicit |
                      (RAXImplicitDead ? (RegState::Dead | RegState::Define)
                                       : RegState::Define))
          .getInstr();
  MI.eraseFromParent();

  auto &TM = MBB.getParent()->getTarget();
  // On Darwin platforms, wrap the expanded sequence in a bundle to prevent
  // later optimizations from breaking up the sequence.
  if (TM.getTargetTriple().isOSDarwin())
    finalizeBundle(MBB, FirstMI: OriginalCall->getIterator(),
                   LastMI: std::next(x: RtCall->getIterator()));
}
| 271 | |
| 272 | /// If \p MBBI is a pseudo instruction, this method expands |
| 273 | /// it to the corresponding (sequence of) actual instruction(s). |
| 274 | /// \returns true if \p MBBI has been expanded. |
| 275 | bool X86ExpandPseudoImpl::expandMI(MachineBasicBlock &MBB, |
| 276 | MachineBasicBlock::iterator MBBI) { |
| 277 | MachineInstr &MI = *MBBI; |
| 278 | unsigned Opcode = MI.getOpcode(); |
| 279 | const DebugLoc &DL = MBBI->getDebugLoc(); |
| 280 | #define GET_EGPR_IF_ENABLED(OPC) (STI->hasEGPR() ? OPC##_EVEX : OPC) |
| 281 | switch (Opcode) { |
| 282 | default: |
| 283 | return false; |
| 284 | case X86::TCRETURNdi: |
| 285 | case X86::TCRETURNdicc: |
| 286 | case X86::TCRETURNri: |
| 287 | case X86::TCRETURN_WIN64ri: |
| 288 | case X86::TCRETURN_HIPE32ri: |
| 289 | case X86::TCRETURNmi: |
| 290 | case X86::TCRETURNdi64: |
| 291 | case X86::TCRETURNdi64cc: |
| 292 | case X86::TCRETURNri64: |
| 293 | case X86::TCRETURNri64_ImpCall: |
| 294 | case X86::TCRETURNmi64: |
| 295 | case X86::TCRETURN_WINmi64: { |
| 296 | bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64 || |
| 297 | Opcode == X86::TCRETURN_WINmi64; |
| 298 | MachineOperand &JumpTarget = MBBI->getOperand(i: 0); |
| 299 | MachineOperand &StackAdjust = MBBI->getOperand(i: isMem ? X86::AddrNumOperands |
| 300 | : 1); |
| 301 | assert(StackAdjust.isImm() && "Expecting immediate value." ); |
| 302 | |
| 303 | // Adjust stack pointer. |
| 304 | int StackAdj = StackAdjust.getImm(); |
| 305 | int MaxTCDelta = X86FI->getTCReturnAddrDelta(); |
| 306 | int64_t Offset = 0; |
| 307 | assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive" ); |
| 308 | |
| 309 | // Incoporate the retaddr area. |
| 310 | Offset = StackAdj - MaxTCDelta; |
| 311 | assert(Offset >= 0 && "Offset should never be negative" ); |
| 312 | |
| 313 | if (Opcode == X86::TCRETURNdicc || Opcode == X86::TCRETURNdi64cc) { |
| 314 | assert(Offset == 0 && "Conditional tail call cannot adjust the stack." ); |
| 315 | } |
| 316 | |
| 317 | if (Offset) { |
| 318 | // Check for possible merge with preceding ADD instruction. |
| 319 | Offset = X86FL->mergeSPAdd(MBB, MBBI, AddOffset: Offset, doMergeWithPrevious: true); |
| 320 | X86FL->emitSPUpdate(MBB, MBBI, DL, NumBytes: Offset, /*InEpilogue=*/true); |
| 321 | } |
| 322 | |
| 323 | // Use this predicate to set REX prefix for X86_64 targets. |
| 324 | bool IsX64 = STI->isTargetWin64() || STI->isTargetUEFI64(); |
| 325 | // Jump to label or value in register. |
| 326 | if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdicc || |
| 327 | Opcode == X86::TCRETURNdi64 || Opcode == X86::TCRETURNdi64cc) { |
| 328 | unsigned Op; |
| 329 | switch (Opcode) { |
| 330 | case X86::TCRETURNdi: |
| 331 | Op = X86::TAILJMPd; |
| 332 | break; |
| 333 | case X86::TCRETURNdicc: |
| 334 | Op = X86::TAILJMPd_CC; |
| 335 | break; |
| 336 | case X86::TCRETURNdi64cc: |
| 337 | assert(!MBB.getParent()->hasWinCFI() && |
| 338 | "Conditional tail calls confuse " |
| 339 | "the Win64 unwinder." ); |
| 340 | Op = X86::TAILJMPd64_CC; |
| 341 | break; |
| 342 | default: |
| 343 | // Note: Win64 uses REX prefixes indirect jumps out of functions, but |
| 344 | // not direct ones. |
| 345 | Op = X86::TAILJMPd64; |
| 346 | break; |
| 347 | } |
| 348 | MachineInstrBuilder MIB = BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: Op)); |
| 349 | if (JumpTarget.isGlobal()) { |
| 350 | MIB.addGlobalAddress(GV: JumpTarget.getGlobal(), Offset: JumpTarget.getOffset(), |
| 351 | TargetFlags: JumpTarget.getTargetFlags()); |
| 352 | } else { |
| 353 | assert(JumpTarget.isSymbol()); |
| 354 | MIB.addExternalSymbol(FnName: JumpTarget.getSymbolName(), |
| 355 | TargetFlags: JumpTarget.getTargetFlags()); |
| 356 | } |
| 357 | if (Op == X86::TAILJMPd_CC || Op == X86::TAILJMPd64_CC) { |
| 358 | MIB.addImm(Val: MBBI->getOperand(i: 2).getImm()); |
| 359 | } |
| 360 | |
| 361 | } else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64 || |
| 362 | Opcode == X86::TCRETURN_WINmi64) { |
| 363 | unsigned Op = (Opcode == X86::TCRETURNmi) |
| 364 | ? X86::TAILJMPm |
| 365 | : (IsX64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64); |
| 366 | MachineInstrBuilder MIB = BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: Op)); |
| 367 | for (unsigned i = 0; i != X86::AddrNumOperands; ++i) |
| 368 | MIB.add(MO: MBBI->getOperand(i)); |
| 369 | } else if (Opcode == X86::TCRETURNri64 || |
| 370 | Opcode == X86::TCRETURNri64_ImpCall || |
| 371 | Opcode == X86::TCRETURN_WIN64ri) { |
| 372 | JumpTarget.setIsKill(); |
| 373 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, |
| 374 | MCID: TII->get(Opcode: IsX64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64)) |
| 375 | .add(MO: JumpTarget); |
| 376 | } else { |
| 377 | assert(!IsX64 && "Win64 and UEFI64 require REX for indirect jumps." ); |
| 378 | JumpTarget.setIsKill(); |
| 379 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: X86::TAILJMPr)) |
| 380 | .add(MO: JumpTarget); |
| 381 | } |
| 382 | |
| 383 | MachineInstr &NewMI = *std::prev(x: MBBI); |
| 384 | NewMI.copyImplicitOps(MF&: *MBBI->getParent()->getParent(), MI: *MBBI); |
| 385 | NewMI.setCFIType(MF&: *MBB.getParent(), Type: MI.getCFIType()); |
| 386 | |
| 387 | // Update the call info. |
| 388 | if (MBBI->isCandidateForAdditionalCallInfo()) |
| 389 | MBB.getParent()->moveAdditionalCallInfo(Old: &*MBBI, New: &NewMI); |
| 390 | |
| 391 | // Delete the pseudo instruction TCRETURN. |
| 392 | MBB.erase(I: MBBI); |
| 393 | |
| 394 | return true; |
| 395 | } |
| 396 | case X86::EH_RETURN: |
| 397 | case X86::EH_RETURN64: { |
| 398 | MachineOperand &DestAddr = MBBI->getOperand(i: 0); |
| 399 | assert(DestAddr.isReg() && "Offset should be in register!" ); |
| 400 | const bool Uses64BitFramePtr = STI->isTarget64BitLP64(); |
| 401 | Register StackPtr = TRI->getStackRegister(); |
| 402 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, |
| 403 | MCID: TII->get(Opcode: Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), DestReg: StackPtr) |
| 404 | .addReg(RegNo: DestAddr.getReg()); |
| 405 | // The EH_RETURN pseudo is really removed during the MC Lowering. |
| 406 | return true; |
| 407 | } |
| 408 | case X86::IRET: { |
| 409 | // Adjust stack to erase error code |
| 410 | int64_t StackAdj = MBBI->getOperand(i: 0).getImm(); |
| 411 | X86FL->emitSPUpdate(MBB, MBBI, DL, NumBytes: StackAdj, InEpilogue: true); |
| 412 | // Replace pseudo with machine iret |
| 413 | unsigned RetOp = STI->is64Bit() ? X86::IRET64 : X86::IRET32; |
| 414 | // Use UIRET if UINTR is present (except for building kernel) |
| 415 | if (STI->is64Bit() && STI->hasUINTR() && |
| 416 | MBB.getParent()->getTarget().getCodeModel() != CodeModel::Kernel) |
| 417 | RetOp = X86::UIRET; |
| 418 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: RetOp)); |
| 419 | MBB.erase(I: MBBI); |
| 420 | return true; |
| 421 | } |
| 422 | case X86::RET: { |
| 423 | // Adjust stack to erase error code |
| 424 | int64_t StackAdj = MBBI->getOperand(i: 0).getImm(); |
| 425 | MachineInstrBuilder MIB; |
| 426 | if (StackAdj == 0) { |
| 427 | MIB = BuildMI(BB&: MBB, I: MBBI, MIMD: DL, |
| 428 | MCID: TII->get(Opcode: STI->is64Bit() ? X86::RET64 : X86::RET32)); |
| 429 | } else if (isUInt<16>(x: StackAdj)) { |
| 430 | MIB = BuildMI(BB&: MBB, I: MBBI, MIMD: DL, |
| 431 | MCID: TII->get(Opcode: STI->is64Bit() ? X86::RETI64 : X86::RETI32)) |
| 432 | .addImm(Val: StackAdj); |
| 433 | } else { |
| 434 | assert(!STI->is64Bit() && |
| 435 | "shouldn't need to do this for x86_64 targets!" ); |
| 436 | // A ret can only handle immediates as big as 2**16-1. If we need to pop |
| 437 | // off bytes before the return address, we must do it manually. |
| 438 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: X86::POP32r)).addReg(RegNo: X86::ECX, Flags: RegState::Define); |
| 439 | X86FL->emitSPUpdate(MBB, MBBI, DL, NumBytes: StackAdj, /*InEpilogue=*/true); |
| 440 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: X86::PUSH32r)).addReg(RegNo: X86::ECX); |
| 441 | MIB = BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: X86::RET32)); |
| 442 | } |
| 443 | for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I) |
| 444 | MIB.add(MO: MBBI->getOperand(i: I)); |
| 445 | MBB.erase(I: MBBI); |
| 446 | return true; |
| 447 | } |
| 448 | case X86::LCMPXCHG16B_SAVE_RBX: { |
| 449 | // Perform the following transformation. |
| 450 | // SaveRbx = pseudocmpxchg Addr, <4 opds for the address>, InArg, SaveRbx |
| 451 | // => |
| 452 | // RBX = InArg |
| 453 | // actualcmpxchg Addr |
| 454 | // RBX = SaveRbx |
| 455 | const MachineOperand &InArg = MBBI->getOperand(i: 6); |
| 456 | Register SaveRbx = MBBI->getOperand(i: 7).getReg(); |
| 457 | |
| 458 | // Copy the input argument of the pseudo into the argument of the |
| 459 | // actual instruction. |
| 460 | // NOTE: We don't copy the kill flag since the input might be the same reg |
| 461 | // as one of the other operands of LCMPXCHG16B. |
| 462 | TII->copyPhysReg(MBB, MI: MBBI, DL, DestReg: X86::RBX, SrcReg: InArg.getReg(), KillSrc: false); |
| 463 | // Create the actual instruction. |
| 464 | MachineInstr *NewInstr = BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: X86::LCMPXCHG16B)); |
| 465 | // Copy the operands related to the address. If we access a frame variable, |
| 466 | // we need to replace the RBX base with SaveRbx, as RBX has another value. |
| 467 | const MachineOperand &Base = MBBI->getOperand(i: 1); |
| 468 | if (Base.getReg() == X86::RBX || Base.getReg() == X86::EBX) |
| 469 | NewInstr->addOperand(Op: MachineOperand::CreateReg( |
| 470 | Reg: Base.getReg() == X86::RBX |
| 471 | ? SaveRbx |
| 472 | : Register(TRI->getSubReg(Reg: SaveRbx, Idx: X86::sub_32bit)), |
| 473 | /*IsDef=*/isDef: false)); |
| 474 | else |
| 475 | NewInstr->addOperand(Op: Base); |
| 476 | for (unsigned Idx = 1 + 1; Idx < 1 + X86::AddrNumOperands; ++Idx) |
| 477 | NewInstr->addOperand(Op: MBBI->getOperand(i: Idx)); |
| 478 | // Finally, restore the value of RBX. |
| 479 | TII->copyPhysReg(MBB, MI: MBBI, DL, DestReg: X86::RBX, SrcReg: SaveRbx, |
| 480 | /*SrcIsKill*/ KillSrc: true); |
| 481 | |
| 482 | // Delete the pseudo. |
| 483 | MBBI->eraseFromParent(); |
| 484 | return true; |
| 485 | } |
| 486 | // Loading/storing mask pairs requires two kmov operations. The second one of |
| 487 | // these needs a 2 byte displacement relative to the specified address (with |
| 488 | // 32 bit spill size). The pairs of 1bit masks up to 16 bit masks all use the |
| 489 | // same spill size, they all are stored using MASKPAIR16STORE, loaded using |
| 490 | // MASKPAIR16LOAD. |
| 491 | // |
| 492 | // The displacement value might wrap around in theory, thus the asserts in |
| 493 | // both cases. |
| 494 | case X86::MASKPAIR16LOAD: { |
| 495 | int64_t Disp = MBBI->getOperand(i: 1 + X86::AddrDisp).getImm(); |
| 496 | assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement" ); |
| 497 | Register Reg = MBBI->getOperand(i: 0).getReg(); |
| 498 | bool DstIsDead = MBBI->getOperand(i: 0).isDead(); |
| 499 | Register Reg0 = TRI->getSubReg(Reg, Idx: X86::sub_mask_0); |
| 500 | Register Reg1 = TRI->getSubReg(Reg, Idx: X86::sub_mask_1); |
| 501 | |
| 502 | auto MIBLo = |
| 503 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWkm))) |
| 504 | .addReg(RegNo: Reg0, Flags: RegState::Define | getDeadRegState(B: DstIsDead)); |
| 505 | auto MIBHi = |
| 506 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWkm))) |
| 507 | .addReg(RegNo: Reg1, Flags: RegState::Define | getDeadRegState(B: DstIsDead)); |
| 508 | |
| 509 | for (int i = 0; i < X86::AddrNumOperands; ++i) { |
| 510 | MIBLo.add(MO: MBBI->getOperand(i: 1 + i)); |
| 511 | if (i == X86::AddrDisp) |
| 512 | MIBHi.addImm(Val: Disp + 2); |
| 513 | else |
| 514 | MIBHi.add(MO: MBBI->getOperand(i: 1 + i)); |
| 515 | } |
| 516 | |
| 517 | // Split the memory operand, adjusting the offset and size for the halves. |
| 518 | MachineMemOperand *OldMMO = MBBI->memoperands().front(); |
| 519 | MachineFunction *MF = MBB.getParent(); |
| 520 | MachineMemOperand *MMOLo = MF->getMachineMemOperand(MMO: OldMMO, Offset: 0, Size: 2); |
| 521 | MachineMemOperand *MMOHi = MF->getMachineMemOperand(MMO: OldMMO, Offset: 2, Size: 2); |
| 522 | |
| 523 | MIBLo.setMemRefs(MMOLo); |
| 524 | MIBHi.setMemRefs(MMOHi); |
| 525 | |
| 526 | // Delete the pseudo. |
| 527 | MBB.erase(I: MBBI); |
| 528 | return true; |
| 529 | } |
| 530 | case X86::MASKPAIR16STORE: { |
| 531 | int64_t Disp = MBBI->getOperand(i: X86::AddrDisp).getImm(); |
| 532 | assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement" ); |
| 533 | Register Reg = MBBI->getOperand(i: X86::AddrNumOperands).getReg(); |
| 534 | bool SrcIsKill = MBBI->getOperand(i: X86::AddrNumOperands).isKill(); |
| 535 | Register Reg0 = TRI->getSubReg(Reg, Idx: X86::sub_mask_0); |
| 536 | Register Reg1 = TRI->getSubReg(Reg, Idx: X86::sub_mask_1); |
| 537 | |
| 538 | auto MIBLo = |
| 539 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWmk))); |
| 540 | auto MIBHi = |
| 541 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(GET_EGPR_IF_ENABLED(X86::KMOVWmk))); |
| 542 | |
| 543 | for (int i = 0; i < X86::AddrNumOperands; ++i) { |
| 544 | MIBLo.add(MO: MBBI->getOperand(i)); |
| 545 | if (i == X86::AddrDisp) |
| 546 | MIBHi.addImm(Val: Disp + 2); |
| 547 | else |
| 548 | MIBHi.add(MO: MBBI->getOperand(i)); |
| 549 | } |
| 550 | MIBLo.addReg(RegNo: Reg0, Flags: getKillRegState(B: SrcIsKill)); |
| 551 | MIBHi.addReg(RegNo: Reg1, Flags: getKillRegState(B: SrcIsKill)); |
| 552 | |
| 553 | // Split the memory operand, adjusting the offset and size for the halves. |
| 554 | MachineMemOperand *OldMMO = MBBI->memoperands().front(); |
| 555 | MachineFunction *MF = MBB.getParent(); |
| 556 | MachineMemOperand *MMOLo = MF->getMachineMemOperand(MMO: OldMMO, Offset: 0, Size: 2); |
| 557 | MachineMemOperand *MMOHi = MF->getMachineMemOperand(MMO: OldMMO, Offset: 2, Size: 2); |
| 558 | |
| 559 | MIBLo.setMemRefs(MMOLo); |
| 560 | MIBHi.setMemRefs(MMOHi); |
| 561 | |
| 562 | // Delete the pseudo. |
| 563 | MBB.erase(I: MBBI); |
| 564 | return true; |
| 565 | } |
| 566 | case X86::MWAITX_SAVE_RBX: { |
| 567 | // Perform the following transformation. |
| 568 | // SaveRbx = pseudomwaitx InArg, SaveRbx |
| 569 | // => |
| 570 | // [E|R]BX = InArg |
| 571 | // actualmwaitx |
| 572 | // [E|R]BX = SaveRbx |
| 573 | const MachineOperand &InArg = MBBI->getOperand(i: 1); |
| 574 | // Copy the input argument of the pseudo into the argument of the |
| 575 | // actual instruction. |
| 576 | TII->copyPhysReg(MBB, MI: MBBI, DL, DestReg: X86::EBX, SrcReg: InArg.getReg(), KillSrc: InArg.isKill()); |
| 577 | // Create the actual instruction. |
| 578 | BuildMI(BB&: MBB, I: MBBI, MIMD: DL, MCID: TII->get(Opcode: X86::MWAITXrrr)); |
| 579 | // Finally, restore the value of RBX. |
| 580 | Register SaveRbx = MBBI->getOperand(i: 2).getReg(); |
| 581 | TII->copyPhysReg(MBB, MI: MBBI, DL, DestReg: X86::RBX, SrcReg: SaveRbx, /*SrcIsKill*/ KillSrc: true); |
| 582 | // Delete the pseudo. |
| 583 | MBBI->eraseFromParent(); |
| 584 | return true; |
| 585 | } |
| 586 | case TargetOpcode::ICALL_BRANCH_FUNNEL: |
| 587 | expandICallBranchFunnel(MBB: &MBB, MBBI); |
| 588 | return true; |
| 589 | case X86::PLDTILECFGV: { |
| 590 | MI.setDesc(TII->get(GET_EGPR_IF_ENABLED(X86::LDTILECFG))); |
| 591 | return true; |
| 592 | } |
| 593 | case X86::PTILELOADDV: |
| 594 | case X86::PTILELOADDT1V: |
| 595 | case X86::PTILELOADDRSV: |
| 596 | case X86::PTILELOADDRST1V: |
| 597 | case X86::PTCVTROWD2PSrreV: |
| 598 | case X86::PTCVTROWD2PSrriV: |
| 599 | case X86::PTCVTROWPS2BF16HrreV: |
| 600 | case X86::PTCVTROWPS2BF16HrriV: |
| 601 | case X86::PTCVTROWPS2BF16LrreV: |
| 602 | case X86::PTCVTROWPS2BF16LrriV: |
| 603 | case X86::PTCVTROWPS2PHHrreV: |
| 604 | case X86::PTCVTROWPS2PHHrriV: |
| 605 | case X86::PTCVTROWPS2PHLrreV: |
| 606 | case X86::PTCVTROWPS2PHLrriV: |
| 607 | case X86::PTILEMOVROWrreV: |
| 608 | case X86::PTILEMOVROWrriV: { |
| 609 | for (unsigned i = 2; i > 0; --i) |
| 610 | MI.removeOperand(OpNo: i); |
| 611 | unsigned Opc; |
| 612 | switch (Opcode) { |
| 613 | case X86::PTILELOADDRSV: |
| 614 | Opc = GET_EGPR_IF_ENABLED(X86::TILELOADDRS); |
| 615 | break; |
| 616 | case X86::PTILELOADDRST1V: |
| 617 | Opc = GET_EGPR_IF_ENABLED(X86::TILELOADDRST1); |
| 618 | break; |
| 619 | case X86::PTILELOADDV: |
| 620 | Opc = GET_EGPR_IF_ENABLED(X86::TILELOADD); |
| 621 | break; |
| 622 | case X86::PTILELOADDT1V: |
| 623 | Opc = GET_EGPR_IF_ENABLED(X86::TILELOADDT1); |
| 624 | break; |
| 625 | case X86::PTCVTROWD2PSrreV: |
| 626 | Opc = X86::TCVTROWD2PSrte; |
| 627 | break; |
| 628 | case X86::PTCVTROWD2PSrriV: |
| 629 | Opc = X86::TCVTROWD2PSrti; |
| 630 | break; |
| 631 | case X86::PTCVTROWPS2BF16HrreV: |
| 632 | Opc = X86::TCVTROWPS2BF16Hrte; |
| 633 | break; |
| 634 | case X86::PTCVTROWPS2BF16HrriV: |
| 635 | Opc = X86::TCVTROWPS2BF16Hrti; |
| 636 | break; |
| 637 | case X86::PTCVTROWPS2BF16LrreV: |
| 638 | Opc = X86::TCVTROWPS2BF16Lrte; |
| 639 | break; |
| 640 | case X86::PTCVTROWPS2BF16LrriV: |
| 641 | Opc = X86::TCVTROWPS2BF16Lrti; |
| 642 | break; |
| 643 | case X86::PTCVTROWPS2PHHrreV: |
| 644 | Opc = X86::TCVTROWPS2PHHrte; |
| 645 | break; |
| 646 | case X86::PTCVTROWPS2PHHrriV: |
| 647 | Opc = X86::TCVTROWPS2PHHrti; |
| 648 | break; |
| 649 | case X86::PTCVTROWPS2PHLrreV: |
| 650 | Opc = X86::TCVTROWPS2PHLrte; |
| 651 | break; |
| 652 | case X86::PTCVTROWPS2PHLrriV: |
| 653 | Opc = X86::TCVTROWPS2PHLrti; |
| 654 | break; |
| 655 | case X86::PTILEMOVROWrreV: |
| 656 | Opc = X86::TILEMOVROWrte; |
| 657 | break; |
| 658 | case X86::PTILEMOVROWrriV: |
| 659 | Opc = X86::TILEMOVROWrti; |
| 660 | break; |
| 661 | default: |
| 662 | llvm_unreachable("Unexpected Opcode" ); |
| 663 | } |
| 664 | MI.setDesc(TII->get(Opcode: Opc)); |
| 665 | return true; |
| 666 | } |
| 667 | case X86::PTCMMIMFP16PSV: |
| 668 | case X86::PTCMMRLFP16PSV: |
| 669 | case X86::PTDPBSSDV: |
| 670 | case X86::PTDPBSUDV: |
| 671 | case X86::PTDPBUSDV: |
| 672 | case X86::PTDPBUUDV: |
| 673 | case X86::PTDPBF16PSV: |
| 674 | case X86::PTDPFP16PSV: |
| 675 | case X86::PTMMULTF32PSV: |
| 676 | case X86::PTDPBF8PSV: |
| 677 | case X86::PTDPBHF8PSV: |
| 678 | case X86::PTDPHBF8PSV: |
| 679 | case X86::PTDPHF8PSV: { |
| 680 | MI.untieRegOperand(OpIdx: 4); |
| 681 | for (unsigned i = 3; i > 0; --i) |
| 682 | MI.removeOperand(OpNo: i); |
| 683 | unsigned Opc; |
| 684 | switch (Opcode) { |
| 685 | // clang-format off |
| 686 | case X86::PTCMMIMFP16PSV: Opc = X86::TCMMIMFP16PS; break; |
| 687 | case X86::PTCMMRLFP16PSV: Opc = X86::TCMMRLFP16PS; break; |
| 688 | case X86::PTDPBSSDV: Opc = X86::TDPBSSD; break; |
| 689 | case X86::PTDPBSUDV: Opc = X86::TDPBSUD; break; |
| 690 | case X86::PTDPBUSDV: Opc = X86::TDPBUSD; break; |
| 691 | case X86::PTDPBUUDV: Opc = X86::TDPBUUD; break; |
| 692 | case X86::PTDPBF16PSV: Opc = X86::TDPBF16PS; break; |
| 693 | case X86::PTDPFP16PSV: Opc = X86::TDPFP16PS; break; |
| 694 | case X86::PTMMULTF32PSV: Opc = X86::TMMULTF32PS; break; |
| 695 | case X86::PTDPBF8PSV: Opc = X86::TDPBF8PS; break; |
| 696 | case X86::PTDPBHF8PSV: Opc = X86::TDPBHF8PS; break; |
| 697 | case X86::PTDPHBF8PSV: Opc = X86::TDPHBF8PS; break; |
| 698 | case X86::PTDPHF8PSV: Opc = X86::TDPHF8PS; break; |
| 699 | // clang-format on |
| 700 | default: |
| 701 | llvm_unreachable("Unexpected Opcode" ); |
| 702 | } |
| 703 | MI.setDesc(TII->get(Opcode: Opc)); |
| 704 | MI.tieOperands(DefIdx: 0, UseIdx: 1); |
| 705 | return true; |
| 706 | } |
| 707 | case X86::PTILESTOREDV: { |
| 708 | for (int i = 1; i >= 0; --i) |
| 709 | MI.removeOperand(OpNo: i); |
| 710 | MI.setDesc(TII->get(GET_EGPR_IF_ENABLED(X86::TILESTORED))); |
| 711 | return true; |
| 712 | } |
| 713 | #undef GET_EGPR_IF_ENABLED |
| 714 | case X86::PTILEZEROV: { |
| 715 | for (int i = 2; i > 0; --i) // Remove row, col |
| 716 | MI.removeOperand(OpNo: i); |
| 717 | MI.setDesc(TII->get(Opcode: X86::TILEZERO)); |
| 718 | return true; |
| 719 | } |
| 720 | case X86::CALL64pcrel32_RVMARKER: |
| 721 | case X86::CALL64r_RVMARKER: |
| 722 | case X86::CALL64m_RVMARKER: |
| 723 | expandCALL_RVMARKER(MBB, MBBI); |
| 724 | return true; |
| 725 | case X86::CALL64r_ImpCall: |
| 726 | MI.setDesc(TII->get(Opcode: X86::CALL64r)); |
| 727 | return true; |
| 728 | case X86::ADD32mi_ND: |
| 729 | case X86::ADD64mi32_ND: |
| 730 | case X86::SUB32mi_ND: |
| 731 | case X86::SUB64mi32_ND: |
| 732 | case X86::AND32mi_ND: |
| 733 | case X86::AND64mi32_ND: |
| 734 | case X86::OR32mi_ND: |
| 735 | case X86::OR64mi32_ND: |
| 736 | case X86::XOR32mi_ND: |
| 737 | case X86::XOR64mi32_ND: |
| 738 | case X86::ADC32mi_ND: |
| 739 | case X86::ADC64mi32_ND: |
| 740 | case X86::SBB32mi_ND: |
| 741 | case X86::SBB64mi32_ND: { |
| 742 | // It's possible for an EVEX-encoded legacy instruction to reach the 15-byte |
| 743 | // instruction length limit: 4 bytes of EVEX prefix + 1 byte of opcode + 1 |
| 744 | // byte of ModRM + 1 byte of SIB + 4 bytes of displacement + 4 bytes of |
| 745 | // immediate = 15 bytes in total, e.g. |
| 746 | // |
| 747 | // subq $184, %fs:257(%rbx, %rcx), %rax |
| 748 | // |
| 749 | // In such a case, no additional (ADSIZE or segment override) prefix can be |
| 750 | // used. To resolve the issue, we split the “long” instruction into 2 |
| 751 | // instructions: |
| 752 | // |
| 753 | // movq %fs:257(%rbx, %rcx),%rax |
| 754 | // subq $184, %rax |
| 755 | // |
| 756 | // Therefore we consider the OPmi_ND to be a pseudo instruction to some |
| 757 | // extent. |
| 758 | const MachineOperand &ImmOp = |
| 759 | MI.getOperand(i: MI.getNumExplicitOperands() - 1); |
| 760 | // If the immediate is a expr, conservatively estimate 4 bytes. |
| 761 | if (ImmOp.isImm() && isInt<8>(x: ImmOp.getImm())) |
| 762 | return false; |
| 763 | int MemOpNo = X86::getFirstAddrOperandIdx(MI); |
| 764 | const MachineOperand &DispOp = MI.getOperand(i: MemOpNo + X86::AddrDisp); |
| 765 | Register Base = MI.getOperand(i: MemOpNo + X86::AddrBaseReg).getReg(); |
| 766 | // If the displacement is a expr, conservatively estimate 4 bytes. |
| 767 | if (Base && DispOp.isImm() && isInt<8>(x: DispOp.getImm())) |
| 768 | return false; |
| 769 | // There can only be one of three: SIB, segment override register, ADSIZE |
| 770 | Register Index = MI.getOperand(i: MemOpNo + X86::AddrIndexReg).getReg(); |
| 771 | unsigned Count = !!MI.getOperand(i: MemOpNo + X86::AddrSegmentReg).getReg(); |
| 772 | if (X86II::needSIB(BaseReg: Base, IndexReg: Index, /*In64BitMode=*/true)) |
| 773 | ++Count; |
| 774 | if (X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg: Base) || |
| 775 | X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg: Index)) |
| 776 | ++Count; |
| 777 | if (Count < 2) |
| 778 | return false; |
| 779 | unsigned Opc, LoadOpc; |
| 780 | switch (Opcode) { |
| 781 | #define MI_TO_RI(OP) \ |
| 782 | case X86::OP##32mi_ND: \ |
| 783 | Opc = X86::OP##32ri; \ |
| 784 | LoadOpc = X86::MOV32rm; \ |
| 785 | break; \ |
| 786 | case X86::OP##64mi32_ND: \ |
| 787 | Opc = X86::OP##64ri32; \ |
| 788 | LoadOpc = X86::MOV64rm; \ |
| 789 | break; |
| 790 | |
| 791 | default: |
| 792 | llvm_unreachable("Unexpected Opcode" ); |
| 793 | MI_TO_RI(ADD); |
| 794 | MI_TO_RI(SUB); |
| 795 | MI_TO_RI(AND); |
| 796 | MI_TO_RI(OR); |
| 797 | MI_TO_RI(XOR); |
| 798 | MI_TO_RI(ADC); |
| 799 | MI_TO_RI(SBB); |
| 800 | #undef MI_TO_RI |
| 801 | } |
| 802 | // Insert OPri. |
| 803 | Register DestReg = MI.getOperand(i: 0).getReg(); |
| 804 | BuildMI(BB&: MBB, I: std::next(x: MBBI), MIMD: DL, MCID: TII->get(Opcode: Opc), DestReg) |
| 805 | .addReg(RegNo: DestReg) |
| 806 | .add(MO: ImmOp); |
| 807 | // Change OPmi_ND to MOVrm. |
| 808 | for (unsigned I = MI.getNumImplicitOperands() + 1; I != 0; --I) |
| 809 | MI.removeOperand(OpNo: MI.getNumOperands() - 1); |
| 810 | MI.setDesc(TII->get(Opcode: LoadOpc)); |
| 811 | return true; |
| 812 | } |
| 813 | } |
| 814 | llvm_unreachable("Previous switch has a fallthrough?" ); |
| 815 | } |
| 816 | |
// This function creates an additional block for storing the varargs guarded
// (XMM) registers. It adds a check of %al to the entry block, so that
// GuardedRegsBlk is skipped if the XMM registers need not be stored.
| 820 | // |
| 821 | // EntryBlk[VAStartPseudoInstr] EntryBlk |
| 822 | // | | . |
| 823 | // | | . |
| 824 | // | | GuardedRegsBlk |
| 825 | // | => | . |
| 826 | // | | . |
| 827 | // | TailBlk |
| 828 | // | | |
| 829 | // | | |
| 830 | // |
| 831 | void X86ExpandPseudoImpl::expandVastartSaveXmmRegs( |
| 832 | MachineBasicBlock *EntryBlk, |
| 833 | MachineBasicBlock::iterator VAStartPseudoInstr) const { |
| 834 | assert(VAStartPseudoInstr->getOpcode() == X86::VASTART_SAVE_XMM_REGS); |
| 835 | |
| 836 | MachineFunction *Func = EntryBlk->getParent(); |
| 837 | const TargetInstrInfo *TII = STI->getInstrInfo(); |
| 838 | const DebugLoc &DL = VAStartPseudoInstr->getDebugLoc(); |
| 839 | Register CountReg = VAStartPseudoInstr->getOperand(i: 0).getReg(); |
| 840 | |
| 841 | // Calculate liveins for newly created blocks. |
| 842 | LivePhysRegs LiveRegs(*STI->getRegisterInfo()); |
| 843 | SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers; |
| 844 | |
| 845 | LiveRegs.addLiveIns(MBB: *EntryBlk); |
| 846 | for (MachineInstr &MI : EntryBlk->instrs()) { |
| 847 | if (MI.getOpcode() == VAStartPseudoInstr->getOpcode()) |
| 848 | break; |
| 849 | |
| 850 | LiveRegs.stepForward(MI, Clobbers); |
| 851 | } |
| 852 | |
| 853 | // Create the new basic blocks. One block contains all the XMM stores, |
| 854 | // and another block is the final destination regardless of whether any |
| 855 | // stores were performed. |
| 856 | const BasicBlock *LLVMBlk = EntryBlk->getBasicBlock(); |
| 857 | MachineFunction::iterator EntryBlkIter = ++EntryBlk->getIterator(); |
| 858 | MachineBasicBlock *GuardedRegsBlk = Func->CreateMachineBasicBlock(BB: LLVMBlk); |
| 859 | MachineBasicBlock *TailBlk = Func->CreateMachineBasicBlock(BB: LLVMBlk); |
| 860 | Func->insert(MBBI: EntryBlkIter, MBB: GuardedRegsBlk); |
| 861 | Func->insert(MBBI: EntryBlkIter, MBB: TailBlk); |
| 862 | |
| 863 | // Transfer the remainder of EntryBlk and its successor edges to TailBlk. |
| 864 | TailBlk->splice(Where: TailBlk->begin(), Other: EntryBlk, |
| 865 | From: std::next(x: MachineBasicBlock::iterator(VAStartPseudoInstr)), |
| 866 | To: EntryBlk->end()); |
| 867 | TailBlk->transferSuccessorsAndUpdatePHIs(FromMBB: EntryBlk); |
| 868 | |
| 869 | uint64_t FrameOffset = VAStartPseudoInstr->getOperand(i: 4).getImm(); |
| 870 | uint64_t VarArgsRegsOffset = VAStartPseudoInstr->getOperand(i: 6).getImm(); |
| 871 | |
| 872 | // TODO: add support for YMM and ZMM here. |
| 873 | unsigned MOVOpc = STI->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr; |
| 874 | |
| 875 | // In the XMM save block, save all the XMM argument registers. |
| 876 | for (int64_t OpndIdx = 7, RegIdx = 0; |
| 877 | OpndIdx < VAStartPseudoInstr->getNumOperands() - 1; |
| 878 | OpndIdx++, RegIdx++) { |
| 879 | auto NewMI = BuildMI(BB: GuardedRegsBlk, MIMD: DL, MCID: TII->get(Opcode: MOVOpc)); |
| 880 | for (int i = 0; i < X86::AddrNumOperands; ++i) { |
| 881 | if (i == X86::AddrDisp) |
| 882 | NewMI.addImm(Val: FrameOffset + VarArgsRegsOffset + RegIdx * 16); |
| 883 | else |
| 884 | NewMI.add(MO: VAStartPseudoInstr->getOperand(i: i + 1)); |
| 885 | } |
| 886 | NewMI.addReg(RegNo: VAStartPseudoInstr->getOperand(i: OpndIdx).getReg()); |
| 887 | assert(VAStartPseudoInstr->getOperand(OpndIdx).getReg().isPhysical()); |
| 888 | } |
| 889 | |
| 890 | // The original block will now fall through to the GuardedRegsBlk. |
| 891 | EntryBlk->addSuccessor(Succ: GuardedRegsBlk); |
| 892 | // The GuardedRegsBlk will fall through to the TailBlk. |
| 893 | GuardedRegsBlk->addSuccessor(Succ: TailBlk); |
| 894 | |
| 895 | if (!STI->isCallingConvWin64(CC: Func->getFunction().getCallingConv())) { |
| 896 | // If %al is 0, branch around the XMM save block. |
| 897 | BuildMI(BB: EntryBlk, MIMD: DL, MCID: TII->get(Opcode: X86::TEST8rr)) |
| 898 | .addReg(RegNo: CountReg) |
| 899 | .addReg(RegNo: CountReg); |
| 900 | BuildMI(BB: EntryBlk, MIMD: DL, MCID: TII->get(Opcode: X86::JCC_1)) |
| 901 | .addMBB(MBB: TailBlk) |
| 902 | .addImm(Val: X86::COND_E); |
| 903 | EntryBlk->addSuccessor(Succ: TailBlk); |
| 904 | } |
| 905 | |
| 906 | // Add liveins to the created block. |
| 907 | addLiveIns(MBB&: *GuardedRegsBlk, LiveRegs); |
| 908 | addLiveIns(MBB&: *TailBlk, LiveRegs); |
| 909 | |
| 910 | // Delete the pseudo. |
| 911 | VAStartPseudoInstr->eraseFromParent(); |
| 912 | } |
| 913 | |
| 914 | /// Expand all pseudo instructions contained in \p MBB. |
| 915 | /// \returns true if any expansion occurred for \p MBB. |
| 916 | bool X86ExpandPseudoImpl::expandMBB(MachineBasicBlock &MBB) { |
| 917 | bool Modified = false; |
| 918 | |
| 919 | // MBBI may be invalidated by the expansion. |
| 920 | MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end(); |
| 921 | while (MBBI != E) { |
| 922 | MachineBasicBlock::iterator NMBBI = std::next(x: MBBI); |
| 923 | Modified |= expandMI(MBB, MBBI); |
| 924 | MBBI = NMBBI; |
| 925 | } |
| 926 | |
| 927 | return Modified; |
| 928 | } |
| 929 | |
| 930 | bool X86ExpandPseudoImpl::expandPseudosWhichAffectControlFlow( |
| 931 | MachineFunction &MF) { |
| 932 | // Currently pseudo which affects control flow is only |
| 933 | // X86::VASTART_SAVE_XMM_REGS which is located in Entry block. |
| 934 | // So we do not need to evaluate other blocks. |
| 935 | for (MachineInstr &Instr : MF.front().instrs()) { |
| 936 | if (Instr.getOpcode() == X86::VASTART_SAVE_XMM_REGS) { |
| 937 | expandVastartSaveXmmRegs(EntryBlk: &(MF.front()), VAStartPseudoInstr: Instr); |
| 938 | return true; |
| 939 | } |
| 940 | } |
| 941 | |
| 942 | return false; |
| 943 | } |
| 944 | |
| 945 | bool X86ExpandPseudoImpl::runOnMachineFunction(MachineFunction &MF) { |
| 946 | STI = &MF.getSubtarget<X86Subtarget>(); |
| 947 | TII = STI->getInstrInfo(); |
| 948 | TRI = STI->getRegisterInfo(); |
| 949 | X86FI = MF.getInfo<X86MachineFunctionInfo>(); |
| 950 | X86FL = STI->getFrameLowering(); |
| 951 | |
| 952 | bool Modified = expandPseudosWhichAffectControlFlow(MF); |
| 953 | |
| 954 | for (MachineBasicBlock &MBB : MF) |
| 955 | Modified |= expandMBB(MBB); |
| 956 | return Modified; |
| 957 | } |
| 958 | |
| 959 | /// Returns an instance of the pseudo instruction expansion pass. |
| 960 | FunctionPass *llvm::createX86ExpandPseudoLegacyPass() { |
| 961 | return new X86ExpandPseudoLegacy(); |
| 962 | } |
| 963 | |
| 964 | bool X86ExpandPseudoLegacy::runOnMachineFunction(MachineFunction &MF) { |
| 965 | X86ExpandPseudoImpl Impl; |
| 966 | return Impl.runOnMachineFunction(MF); |
| 967 | } |
| 968 | |
| 969 | PreservedAnalyses |
| 970 | X86ExpandPseudoPass::run(MachineFunction &MF, |
| 971 | MachineFunctionAnalysisManager &MFAM) { |
| 972 | X86ExpandPseudoImpl Impl; |
| 973 | bool Changed = Impl.runOnMachineFunction(MF); |
| 974 | if (!Changed) |
| 975 | return PreservedAnalyses::all(); |
| 976 | |
| 977 | PreservedAnalyses PA = getMachineFunctionPassPreservedAnalyses(); |
| 978 | PA.preserveSet<CFGAnalyses>(); |
| 979 | return PA; |
| 980 | } |
| 981 | |