//==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the Emit routines for the SelectionDAG class, which creates
// MachineInstrs based on the decisions of the SelectionDAG instruction
// selection.
//
//===----------------------------------------------------------------------===//

#include "InstrEmitter.h"
#include "SDNodeDbgValue.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/PseudoProbe.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

#define DEBUG_TYPE "instr-emitter"

/// MinRCSize - Smallest register class we allow when constraining virtual
/// registers. If satisfying all register class constraints would require
/// using a smaller register class, emit a COPY to a new virtual register
/// instead.
const unsigned MinRCSize = 4;

/// CountResults - The results of target nodes have register or immediate
/// operands first, then an optional chain, and optional glue operands (which do
/// not go into the resulting MachineInstr).
unsigned InstrEmitter::CountResults(SDNode *Node) {
  unsigned N = Node->getNumValues();
  while (N && Node->getValueType(N - 1) == MVT::Glue)
    --N;
  if (N && Node->getValueType(N - 1) == MVT::Other)
    --N; // Skip over chain result.
  return N;
}

/// countOperands - The inputs to target nodes have any actual inputs first,
/// followed by an optional chain operand, then an optional glue operand.
/// Compute the number of actual operands that will go into the resulting
/// MachineInstr.
///
/// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding
/// the chain and glue. These operands may be implicit on the machine instr.
static unsigned countOperands(SDNode *Node, unsigned NumExpUses,
                              unsigned &NumImpUses) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
    --N; // Ignore chain if it exists.

  // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses.
  NumImpUses = N - NumExpUses;
  for (unsigned I = N; I > NumExpUses; --I) {
    if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
      continue;
    if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
      if (RN->getReg().isPhysical())
        continue;
    NumImpUses = N - I;
    break;
  }

  return N;
}

/// EmitCopyFromReg - Generate machine code for a CopyFromReg node or an
/// implicit physical register output.
void InstrEmitter::EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone,
                                   Register SrcReg, VRBaseMapType &VRBaseMap) {
  Register VRBase;
  if (SrcReg.isVirtual()) {
    // Just use the input register directly!
    SDValue Op(Node, ResNo);
    if (IsClone)
      VRBaseMap.erase(Op);
    bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
    (void)isNew; // Silence compiler warning.
    assert(isNew && "Node emitted out of order - early");
    return;
  }

  // If the node is only used by a CopyToReg and the dest reg is a vreg, use
  // the CopyToReg'd destination register instead of creating a new vreg.
  bool MatchReg = true;
  const TargetRegisterClass *UseRC = nullptr;
  MVT VT = Node->getSimpleValueType(ResNo);

  // Stick to the preferred register classes for legal types.
  if (TLI->isTypeLegal(VT))
    UseRC = TLI->getRegClassFor(VT, Node->isDivergent());

  for (SDNode *User : Node->users()) {
    bool Match = true;
    if (User->getOpcode() == ISD::CopyToReg &&
        User->getOperand(2).getNode() == Node &&
        User->getOperand(2).getResNo() == ResNo) {
      Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
      if (DestReg.isVirtual()) {
        VRBase = DestReg;
        Match = false;
      } else if (DestReg != SrcReg)
        Match = false;
    } else {
      for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
        SDValue Op = User->getOperand(i);
        if (Op.getNode() != Node || Op.getResNo() != ResNo)
          continue;
        MVT VT = Node->getSimpleValueType(Op.getResNo());
        if (VT == MVT::Other || VT == MVT::Glue)
          continue;
        Match = false;
        if (User->isMachineOpcode()) {
          const MCInstrDesc &II = TII->get(User->getMachineOpcode());
          const TargetRegisterClass *RC = nullptr;
          if (i + II.getNumDefs() < II.getNumOperands()) {
            RC = TRI->getAllocatableClass(
                TII->getRegClass(II, i + II.getNumDefs(), TRI, *MF));
          }
          if (!UseRC)
            UseRC = RC;
          else if (RC) {
            const TargetRegisterClass *ComRC =
                TRI->getCommonSubClass(UseRC, RC);
            // If multiple uses expect disjoint register classes, we emit
            // copies in AddRegisterOperand.
            if (ComRC)
              UseRC = ComRC;
          }
        }
      }
    }
    MatchReg &= Match;
    if (VRBase)
      break;
  }

  const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
  SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);

  // Figure out the register class to create for the destreg.
  if (VRBase) {
    DstRC = MRI->getRegClass(VRBase);
  } else if (UseRC) {
    assert(TRI->isTypeLegalForClass(*UseRC, VT) &&
           "Incompatible phys register def and uses!");
    DstRC = UseRC;
  } else
    DstRC = SrcRC;

  // If all uses are reading from the src physical register and copying the
  // register is either impossible or very expensive, then don't create a copy.
  if (MatchReg && SrcRC->getCopyCost() < 0) {
    VRBase = SrcReg;
  } else {
    // Create the reg, emit the copy.
    VRBase = MRI->createVirtualRegister(DstRC);
    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
            VRBase).addReg(SrcReg);
  }

  SDValue Op(Node, ResNo);
  if (IsClone)
    VRBaseMap.erase(Op);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}

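/// CreateVirtualRegisters - Add register definitions to MIB for the results of
/// Node, reusing a CopyToReg destination vreg of the matching register class
/// where possible and creating new virtual registers otherwise. Result
/// registers are recorded in VRBaseMap.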
void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
                                          MachineInstrBuilder &MIB,
                                          const MCInstrDesc &II,
                                          bool IsClone, bool IsCloned,
                                          VRBaseMapType &VRBaseMap) {
  assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
         "IMPLICIT_DEF should have been handled as a special case elsewhere!");

  unsigned NumResults = CountResults(Node);
  bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
                             II.isVariadic() && II.variadicOpsAreDefs();
  unsigned NumVRegs = HasVRegVariadicDefs ? NumResults : II.getNumDefs();
  if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT)
    NumVRegs = NumResults;
  for (unsigned i = 0; i < NumVRegs; ++i) {
    // If the specific node value is only used by a CopyToReg and the dest reg
    // is a vreg in the same register class, use the CopyToReg'd destination
    // register instead of creating a new vreg.
    Register VRBase;
    const TargetRegisterClass *RC =
        TRI->getAllocatableClass(TII->getRegClass(II, i, TRI, *MF));
    // Always let the value type influence the used register class. The
    // constraints on the instruction may be too lax to represent the value
    // type correctly. For example, a 64-bit float (X86::FR64) can't live in
    // the 32-bit float super-class (X86::FR32).
    if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
      const TargetRegisterClass *VTRC = TLI->getRegClassFor(
          Node->getSimpleValueType(i),
          (Node->isDivergent() || (RC && TRI->isDivergentRegClass(RC))));
      if (RC)
        VTRC = TRI->getCommonSubClass(RC, VTRC);
      if (VTRC)
        RC = VTRC;
    }

    if (!II.operands().empty() && II.operands()[i].isOptionalDef()) {
      // Optional def must be a physical register.
      VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
      assert(VRBase.isPhysical());
      MIB.addReg(VRBase, RegState::Define);
    }

    if (!VRBase && !IsClone && !IsCloned)
      for (SDNode *User : Node->users()) {
        if (User->getOpcode() == ISD::CopyToReg &&
            User->getOperand(2).getNode() == Node &&
            User->getOperand(2).getResNo() == i) {
          Register Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
          if (Reg.isVirtual()) {
            const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
            if (RegRC == RC) {
              VRBase = Reg;
              MIB.addReg(VRBase, RegState::Define);
              break;
            }
          }
        }
      }

    // Create the result registers for this node and add the result regs to
    // the machine instruction.
    if (VRBase == 0) {
      assert(RC && "Isn't a register operand!");
      VRBase = MRI->createVirtualRegister(RC);
      MIB.addReg(VRBase, RegState::Define);
    }

    // If this def corresponds to a result of the SDNode insert the VRBase into
    // the lookup map.
    if (i < NumResults) {
      SDValue Op(Node, i);
      if (IsClone)
        VRBaseMap.erase(Op);
      bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
      (void)isNew; // Silence compiler warning.
      assert(isNew && "Node emitted out of order - early");
    }
  }
}

/// getVR - Return the virtual register corresponding to the specified result
/// of the specified node.
Register InstrEmitter::getVR(SDValue Op, VRBaseMapType &VRBaseMap) {
  if (Op.isMachineOpcode() &&
      Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
    // Add an IMPLICIT_DEF instruction before every use.
    // IMPLICIT_DEF can produce any type of result so its MCInstrDesc
    // does not include operand register class info.
    const TargetRegisterClass *RC = TLI->getRegClassFor(
        Op.getSimpleValueType(), Op.getNode()->isDivergent());
    Register VReg = MRI->createVirtualRegister(RC);
    BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
            TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
    return VReg;
  }

  VRBaseMapType::iterator I = VRBaseMap.find(Op);
  assert(I != VRBaseMap.end() && "Node emitted out of order - late");
  return I->second;
}

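/// isConvergenceCtrlMachineOp - Return true if Op is a selected
/// CONVERGENCECTRL_* node, i.e. it produces a convergence control token.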
static bool isConvergenceCtrlMachineOp(SDValue Op) {
  if (Op->isMachineOpcode()) {
    switch (Op->getMachineOpcode()) {
    case TargetOpcode::CONVERGENCECTRL_ANCHOR:
    case TargetOpcode::CONVERGENCECTRL_ENTRY:
    case TargetOpcode::CONVERGENCECTRL_LOOP:
    case TargetOpcode::CONVERGENCECTRL_GLUE:
      return true;
    }
    return false;
  }

  // We can reach here when CopyFromReg is encountered. But rather than making a
  // special case for that, we just make sure we don't reach here in some
  // surprising way.
  switch (Op->getOpcode()) {
  case ISD::CONVERGENCECTRL_ANCHOR:
  case ISD::CONVERGENCECTRL_ENTRY:
  case ISD::CONVERGENCECTRL_LOOP:
  case ISD::CONVERGENCECTRL_GLUE:
    llvm_unreachable("Convergence control should have been selected by now.");
  }
  return false;
}

/// AddRegisterOperand - Add the specified register as an operand to the
/// specified machine instr. Insert register copies if the register is
/// not in the required register class.
void
InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
                                 SDValue Op,
                                 unsigned IIOpNum,
                                 const MCInstrDesc *II,
                                 VRBaseMapType &VRBaseMap,
                                 bool IsDebug, bool IsClone, bool IsCloned) {
  assert(Op.getValueType() != MVT::Other &&
         Op.getValueType() != MVT::Glue &&
         "Chain and glue operands should occur at end of operand list!");
  // Get/emit the operand.
  Register VReg = getVR(Op, VRBaseMap);

  const MCInstrDesc &MCID = MIB->getDesc();
  bool isOptDef = IIOpNum < MCID.getNumOperands() &&
                  MCID.operands()[IIOpNum].isOptionalDef();

  // If the instruction requires a register in a different class, create
  // a new virtual register and copy the value into it, but first attempt to
  // shrink VReg's register class within reason. For example, if VReg == GR32
  // and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
  if (II) {
    const TargetRegisterClass *OpRC = nullptr;
    if (IIOpNum < II->getNumOperands())
      OpRC = TII->getRegClass(*II, IIOpNum, TRI, *MF);

    if (OpRC) {
      unsigned MinNumRegs = MinRCSize;
      // Don't apply any RC size limit for IMPLICIT_DEF. Each use has a unique
      // virtual register.
      if (Op.isMachineOpcode() &&
          Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF)
        MinNumRegs = 0;

      const TargetRegisterClass *ConstrainedRC
        = MRI->constrainRegClass(VReg, OpRC, MinNumRegs);
      if (!ConstrainedRC) {
        OpRC = TRI->getAllocatableClass(OpRC);
        assert(OpRC && "Constraints cannot be fulfilled for allocation");
        Register NewVReg = MRI->createVirtualRegister(OpRC);
        BuildMI(*MBB, InsertPos, MIB->getDebugLoc(),
                TII->get(TargetOpcode::COPY), NewVReg)
            .addReg(VReg);
        VReg = NewVReg;
      } else {
        assert(ConstrainedRC->isAllocatable() &&
               "Constraining an allocatable VReg produced an unallocatable class?");
      }
    }
  }

  // If this value has only one use, that use is a kill. This is a
  // conservative approximation. InstrEmitter does trivial coalescing
  // with CopyFromReg nodes, so don't emit kill flags for them.
  // Avoid kill flags on Schedule cloned nodes, since there will be
  // multiple uses.
  // Tied operands are never killed, so we need to check that. And that
  // means we need to determine the index of the operand.
  // Don't kill convergence control tokens. Initially they are only used in glue
  // nodes, and the InstrEmitter later adds implicit uses on the users of the
  // glue node. This can sometimes make it seem like there is only one use,
  // which is the glue node itself.
  bool isKill = Op.hasOneUse() && !isConvergenceCtrlMachineOp(Op) &&
                Op.getNode()->getOpcode() != ISD::CopyFromReg && !IsDebug &&
                !(IsClone || IsCloned);
  if (isKill) {
    unsigned Idx = MIB->getNumOperands();
    while (Idx > 0 &&
           MIB->getOperand(Idx-1).isReg() &&
           MIB->getOperand(Idx-1).isImplicit())
      --Idx;
    bool isTied = MCID.getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
    if (isTied)
      isKill = false;
  }

  MIB.addReg(VReg, getDefRegState(isOptDef) | getKillRegState(isKill) |
                       getDebugRegState(IsDebug));
}

/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
/// operand number (in the II) that we are adding.
void InstrEmitter::AddOperand(MachineInstrBuilder &MIB, SDValue Op,
                              unsigned IIOpNum, const MCInstrDesc *II,
                              VRBaseMapType &VRBaseMap, bool IsDebug,
                              bool IsClone, bool IsCloned) {
  if (Op.isMachineOpcode()) {
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    MIB.addImm(C->getSExtValue());
  } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
    MIB.addFPImm(F->getConstantFPValue());
  } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
    Register VReg = R->getReg();
    MVT OpVT = Op.getSimpleValueType();
    const TargetRegisterClass *IIRC =
        II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum, TRI, *MF))
           : nullptr;
    const TargetRegisterClass *OpRC =
        TLI->isTypeLegal(OpVT)
            ? TLI->getRegClassFor(OpVT,
                                  Op.getNode()->isDivergent() ||
                                      (IIRC && TRI->isDivergentRegClass(IIRC)))
            : nullptr;

    if (OpRC && IIRC && OpRC != IIRC && VReg.isVirtual()) {
      Register NewVReg = MRI->createVirtualRegister(IIRC);
      BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
              TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
      VReg = NewVReg;
    }
    // Turn additional physreg operands into implicit uses on non-variadic
    // instructions. This is used by call and return instructions passing
    // arguments in registers.
    bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
    MIB.addReg(VReg, getImplRegState(Imp));
  } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
    MIB.addRegMask(RM->getRegMask());
  } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
    MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
                         TGA->getTargetFlags());
  } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
    MIB.addMBB(BBNode->getBasicBlock());
  } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
    MIB.addFrameIndex(FI->getIndex());
  } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
    MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
    int Offset = CP->getOffset();
    Align Alignment = CP->getAlign();

    unsigned Idx;
    MachineConstantPool *MCP = MF->getConstantPool();
    if (CP->isMachineConstantPoolEntry())
      Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Alignment);
    else
      Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Alignment);
    MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
  } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
    MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
  } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
    MIB.addSym(SymNode->getMCSymbol());
  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
    MIB.addBlockAddress(BA->getBlockAddress(),
                        BA->getOffset(),
                        BA->getTargetFlags());
  } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
    MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
  } else {
    assert(Op.getValueType() != MVT::Other &&
           Op.getValueType() != MVT::Glue &&
           "Chain and glue operands should occur at end of operand list!");
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  }
}

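/// ConstrainForSubReg - Constrain VReg to a register class that supports
/// SubIdx sub-registers, or emit a COPY into a new virtual register of a
/// suitable class and return that register instead.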
Register InstrEmitter::ConstrainForSubReg(Register VReg, unsigned SubIdx,
                                          MVT VT, bool isDivergent,
                                          const DebugLoc &DL) {
  const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
  const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);

  // RC is a sub-class of VRC that supports SubIdx. Try to constrain VReg
  // within reason.
  if (RC && RC != VRC)
    RC = MRI->constrainRegClass(VReg, RC, MinRCSize);

  // VReg has been adjusted. It can be used with SubIdx operands now.
  if (RC)
    return VReg;

  // VReg couldn't be reasonably constrained. Emit a COPY to a new virtual
  // register instead.
  RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT, isDivergent), SubIdx);
  assert(RC && "No legal register class for VT supports that SubIdx");
  Register NewReg = MRI->createVirtualRegister(RC);
  BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
      .addReg(VReg);
  return NewReg;
}

/// EmitSubregNode - Generate machine code for subreg nodes.
///
void InstrEmitter::EmitSubregNode(SDNode *Node, VRBaseMapType &VRBaseMap,
                                  bool IsClone, bool IsCloned) {
  Register VRBase;
  unsigned Opc = Node->getMachineOpcode();

  // If the node is only used by a CopyToReg and the dest reg is a vreg, use
  // the CopyToReg'd destination register instead of creating a new vreg.
  for (SDNode *User : Node->users()) {
    if (User->getOpcode() == ISD::CopyToReg &&
        User->getOperand(2).getNode() == Node) {
      Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
      if (DestReg.isVirtual()) {
        VRBase = DestReg;
        break;
      }
    }
  }

  if (Opc == TargetOpcode::EXTRACT_SUBREG) {
    // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no
    // constraints on the %dst register, COPY can target all legal register
    // classes.
    unsigned SubIdx = Node->getConstantOperandVal(1);
    const TargetRegisterClass *TRC =
        TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());

    Register Reg;
    MachineInstr *DefMI;
    RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
    if (R && R->getReg().isPhysical()) {
      Reg = R->getReg();
      DefMI = nullptr;
    } else {
      Reg = R ? R->getReg() : getVR(Node->getOperand(0), VRBaseMap);
      DefMI = MRI->getVRegDef(Reg);
    }

    Register SrcReg, DstReg;
    unsigned DefSubIdx;
    if (DefMI &&
        TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
        SubIdx == DefSubIdx &&
        TRC == MRI->getRegClass(SrcReg)) {
      // Optimize these:
      // r1025 = s/zext r1024, 4
      // r1026 = extract_subreg r1025, 4
      // to a copy
      // r1026 = copy r1024
      VRBase = MRI->createVirtualRegister(TRC);
      BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
              TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
      MRI->clearKillFlags(SrcReg);
    } else {
      // Reg may not support a SubIdx sub-register, and we may need to
      // constrain its register class or issue a COPY to a compatible register
      // class.
      if (Reg.isVirtual())
        Reg = ConstrainForSubReg(Reg, SubIdx,
                                 Node->getOperand(0).getSimpleValueType(),
                                 Node->isDivergent(), Node->getDebugLoc());
      // Create the destreg if it is missing.
      if (!VRBase)
        VRBase = MRI->createVirtualRegister(TRC);

      // Create the extract_subreg machine instruction.
      MachineInstrBuilder CopyMI =
          BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
                  TII->get(TargetOpcode::COPY), VRBase);
      if (Reg.isVirtual())
        CopyMI.addReg(Reg, 0, SubIdx);
      else
        CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
    }
  } else if (Opc == TargetOpcode::INSERT_SUBREG ||
             Opc == TargetOpcode::SUBREG_TO_REG) {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);
    unsigned SubIdx = N2->getAsZExtVal();

    // Figure out the register class to create for the destreg. It should be
    // the largest legal register class supporting SubIdx sub-registers.
    // RegisterCoalescer will constrain it further if it decides to eliminate
    // the INSERT_SUBREG instruction.
    //
    //   %dst = INSERT_SUBREG %src, %sub, SubIdx
    //
    // is lowered by TwoAddressInstructionPass to:
    //
    //   %dst = COPY %src
    //   %dst:SubIdx = COPY %sub
    //
    // There is no constraint on the %src register class.
    //
    const TargetRegisterClass *SRC =
        TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
    SRC = TRI->getSubClassWithSubReg(SRC, SubIdx);
    assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG");

    if (VRBase == 0 || !SRC->hasSubClassEq(MRI->getRegClass(VRBase)))
      VRBase = MRI->createVirtualRegister(SRC);

    // Create the insert_subreg or subreg_to_reg machine instruction.
    MachineInstrBuilder MIB =
        BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc), VRBase);

    // If creating a subreg_to_reg, then the first input operand
    // is an implicit value immediate, otherwise it's a register
    if (Opc == TargetOpcode::SUBREG_TO_REG) {
      const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
      MIB.addImm(SD->getZExtValue());
    } else
      AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
                 IsClone, IsCloned);
    // Add the subregister being inserted
    AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
               IsClone, IsCloned);
    MIB.addImm(SubIdx);
    MBB->insert(InsertPos, MIB);
  } else
    llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");

  SDValue Op(Node, 0);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}

/// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
/// COPY_TO_REGCLASS is just a normal copy, except that the destination
/// register is constrained to be in a particular register class.
///
void
InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
                                     VRBaseMapType &VRBaseMap) {
  // Create the new VReg in the destination class and emit a copy.
  unsigned DstRCIdx = Node->getConstantOperandVal(1);
  const TargetRegisterClass *DstRC =
      TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
  Register NewVReg = MRI->createVirtualRegister(DstRC);
  const MCInstrDesc &II = TII->get(TargetOpcode::COPY);
  MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
  AddOperand(MIB, Node->getOperand(0), 1, &II, VRBaseMap, /*IsDebug=*/false,
             /*IsClone=*/false, /*IsCloned*/ false);

  MBB->insert(InsertPos, MIB);
  SDValue Op(Node, 0);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}

/// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
///
void InstrEmitter::EmitRegSequence(SDNode *Node, VRBaseMapType &VRBaseMap,
                                   bool IsClone, bool IsCloned) {
  unsigned DstRCIdx = Node->getConstantOperandVal(0);
  const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
  Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
  const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
  MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
  unsigned NumOps = Node->getNumOperands();
  // If the input pattern has a chain, then the root of the corresponding
  // output pattern will get a chain as well. This can happen to be a
  // REG_SEQUENCE (which is not "guarded" by countOperands/CountResults).
  if (NumOps && Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    --NumOps; // Ignore chain if it exists.

  assert((NumOps & 1) == 1 &&
         "REG_SEQUENCE must have an odd number of operands!");
  for (unsigned i = 1; i != NumOps; ++i) {
    SDValue Op = Node->getOperand(i);
    if ((i & 1) == 0) {
      RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
      // Skip physical registers as they don't have a vreg to get and we'll
      // insert copies for them in TwoAddressInstructionPass anyway.
      if (!R || !R->getReg().isPhysical()) {
        unsigned SubIdx = Op->getAsZExtVal();
        Register SubReg = getVR(Node->getOperand(i - 1), VRBaseMap);
        const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
        const TargetRegisterClass *SRC =
            TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
        if (SRC && SRC != RC) {
          MRI->setRegClass(NewVReg, SRC);
          RC = SRC;
        }
      }
    }
    AddOperand(MIB, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
               IsClone, IsCloned);
  }

  MBB->insert(InsertPos, MIB);
  SDValue Op(Node, 0);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}

/// EmitDbgValue - Generate machine instruction for a dbg_value node.
///
MachineInstr *
InstrEmitter::EmitDbgValue(SDDbgValue *SD,
                           VRBaseMapType &VRBaseMap) {
  DebugLoc DL = SD->getDebugLoc();
  assert(cast<DILocalVariable>(SD->getVariable())
             ->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");

  SD->setIsEmitted();

  assert(!SD->getLocationOps().empty() &&
         "dbg_value with no location operands?");

  if (SD->isInvalidated())
    return EmitDbgNoLocation(SD);

  // Attempt to produce a DBG_INSTR_REF if we've been asked to.
  if (EmitDebugInstrRefs)
    if (auto *InstrRef = EmitDbgInstrRef(SD, VRBaseMap))
      return InstrRef;

  // Emit variadic dbg_value nodes as DBG_VALUE_LIST if they have not been
  // emitted as instruction references.
  if (SD->isVariadic())
    return EmitDbgValueList(SD, VRBaseMap);

  // Emit single-location dbg_value nodes as DBG_VALUE if they have not been
  // emitted as instruction references.
  return EmitDbgValueFromSingleOp(SD, VRBaseMap);
}

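/// GetMOForConstDbgOp - Create the MachineOperand for a constant debug
/// location operand: an immediate for integers and null pointers, an FP
/// immediate for floating-point constants, and an undef register operand
/// otherwise.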
MachineOperand GetMOForConstDbgOp(const SDDbgOperand &Op) {
  const Value *V = Op.getConst();
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getBitWidth() > 64)
      return MachineOperand::CreateCImm(CI);
    return MachineOperand::CreateImm(CI->getSExtValue());
  }
  if (const ConstantFP *CF = dyn_cast<ConstantFP>(V))
    return MachineOperand::CreateFPImm(CF);
  // Note: This assumes that all nullptr constants are zero-valued.
  if (isa<ConstantPointerNull>(V))
    return MachineOperand::CreateImm(0);
  // Undef or unhandled value type, so return an undef operand.
  return MachineOperand::CreateReg(
      /* Reg */ 0U, /* isDef */ false, /* isImp */ false,
      /* isKill */ false, /* isDead */ false,
      /* isUndef */ false, /* isEarlyClobber */ false,
      /* SubReg */ 0, /* isDebug */ true);
}

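/// AddDbgValueLocationOps - Append one machine operand to MIB for each debug
/// location operand (frame index, vreg, SDNode result, or constant) in
/// LocationOps.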
void InstrEmitter::AddDbgValueLocationOps(
    MachineInstrBuilder &MIB, const MCInstrDesc &DbgValDesc,
    ArrayRef<SDDbgOperand> LocationOps,
    VRBaseMapType &VRBaseMap) {
  for (const SDDbgOperand &Op : LocationOps) {
    switch (Op.getKind()) {
    case SDDbgOperand::FRAMEIX:
      MIB.addFrameIndex(Op.getFrameIx());
      break;
    case SDDbgOperand::VREG:
      MIB.addReg(Op.getVReg());
      break;
    case SDDbgOperand::SDNODE: {
      SDValue V = SDValue(Op.getSDNode(), Op.getResNo());
      // It's possible we replaced this SDNode with other(s) and therefore
      // didn't generate code for it. It's better to catch these cases where
      // they happen and transfer the debug info, but trying to guarantee that
      // in all cases would be very fragile; this is a safeguard for any
      // that were missed.
      if (VRBaseMap.count(V) == 0)
        MIB.addReg(0U); // undef
      else
        AddOperand(MIB, V, (*MIB).getNumOperands(), &DbgValDesc, VRBaseMap,
                   /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
    } break;
    case SDDbgOperand::CONST:
      MIB.add(GetMOForConstDbgOp(Op));
      break;
    }
  }
}

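/// EmitDbgInstrRef - Try to emit a dbg_value node as a DBG_INSTR_REF pointing
/// at the instructions (or, until they are known, the vregs) that define its
/// location operands; fall back to the DBG_VALUE forms when that is not
/// possible.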
MachineInstr *
InstrEmitter::EmitDbgInstrRef(SDDbgValue *SD,
                              VRBaseMapType &VRBaseMap) {
  MDNode *Var = SD->getVariable();
  const DIExpression *Expr = (DIExpression *)SD->getExpression();
  DebugLoc DL = SD->getDebugLoc();
  const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_INSTR_REF);

  // Returns true if the given operand is not a legal debug operand for a
  // DBG_INSTR_REF.
  auto IsInvalidOp = [](SDDbgOperand DbgOp) {
    return DbgOp.getKind() == SDDbgOperand::FRAMEIX;
  };
  // Returns true if the given operand is not itself an instruction reference
  // but is a legal debug operand for a DBG_INSTR_REF.
  auto IsNonInstrRefOp = [](SDDbgOperand DbgOp) {
    return DbgOp.getKind() == SDDbgOperand::CONST;
  };

  // If this variable location does not depend on any instructions or contains
  // any stack locations, produce it as a standard debug value instead.
  if (any_of(SD->getLocationOps(), IsInvalidOp) ||
      all_of(SD->getLocationOps(), IsNonInstrRefOp)) {
    if (SD->isVariadic())
      return EmitDbgValueList(SD, VRBaseMap);
    return EmitDbgValueFromSingleOp(SD, VRBaseMap);
  }

  // Immediately fold any indirectness from the LLVM-IR intrinsic into the
  // expression:
  if (SD->isIndirect())
    Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
  // If this is not already a variadic expression, it must be modified to become
  // one.
  if (!SD->isVariadic())
    Expr = DIExpression::convertToVariadicExpression(Expr);

  SmallVector<MachineOperand> MOs;

  // It may not be immediately possible to identify the MachineInstr that
  // defines a VReg, it can depend for example on the order blocks are
  // emitted in. When this happens, or when further analysis is needed later,
  // produce an instruction like this:
  //
  //    DBG_INSTR_REF !123, !456, %0:gr64
  //
  // i.e., point the instruction at the vreg, and patch it up later in
  // MachineFunction::finalizeDebugInstrRefs.
  auto AddVRegOp = [&](Register VReg) {
    MOs.push_back(MachineOperand::CreateReg(
        /* Reg */ VReg, /* isDef */ false, /* isImp */ false,
        /* isKill */ false, /* isDead */ false,
        /* isUndef */ false, /* isEarlyClobber */ false,
        /* SubReg */ 0, /* isDebug */ true));
  };
  unsigned OpCount = SD->getLocationOps().size();
  for (unsigned OpIdx = 0; OpIdx < OpCount; ++OpIdx) {
    SDDbgOperand DbgOperand = SD->getLocationOps()[OpIdx];

    // Try to find both the defined register and the instruction defining it.
    MachineInstr *DefMI = nullptr;
    Register VReg;

    if (DbgOperand.getKind() == SDDbgOperand::VREG) {
      VReg = DbgOperand.getVReg();

      // No definition means that block hasn't been emitted yet. Leave a vreg
      // reference to be fixed later.
      if (!MRI->hasOneDef(VReg)) {
        AddVRegOp(VReg);
        continue;
      }

      DefMI = &*MRI->def_instr_begin(VReg);
    } else if (DbgOperand.getKind() == SDDbgOperand::SDNODE) {
      // Look up the corresponding VReg for the given SDNode, if any.
      SDNode *Node = DbgOperand.getSDNode();
      SDValue Op = SDValue(Node, DbgOperand.getResNo());
      VRBaseMapType::iterator I = VRBaseMap.find(Op);
      // No VReg -> produce a DBG_VALUE $noreg instead.
      if (I == VRBaseMap.end())
        break;

      // Try to pick out a defining instruction at this point.
      VReg = getVR(Op, VRBaseMap);

      // Again, if there's no instruction defining the VReg right now, fix it up
      // later.
      if (!MRI->hasOneDef(VReg)) {
        AddVRegOp(VReg);
        continue;
      }

      DefMI = &*MRI->def_instr_begin(VReg);
    } else {
      assert(DbgOperand.getKind() == SDDbgOperand::CONST);
      MOs.push_back(GetMOForConstDbgOp(DbgOperand));
      continue;
    }

    // Avoid copy like instructions: they don't define values, only move them.
    // Leave a virtual-register reference until it can be fixed up later, to
    // find the underlying value definition.
    if (DefMI->isCopyLike() || TII->isCopyInstr(*DefMI)) {
      AddVRegOp(VReg);
      continue;
    }

    // Find the operand number which defines the specified VReg.
    unsigned OperandIdx = 0;
    for (const auto &MO : DefMI->operands()) {
      if (MO.isReg() && MO.isDef() && MO.getReg() == VReg)
        break;
      ++OperandIdx;
    }
    assert(OperandIdx < DefMI->getNumOperands());

    // Make the DBG_INSTR_REF refer to that instruction, and that operand.
    unsigned InstrNum = DefMI->getDebugInstrNum();
    MOs.push_back(MachineOperand::CreateDbgInstrRef(InstrNum, OperandIdx));
  }

  // If we haven't created a valid MachineOperand for every DbgOp, abort and
  // produce an undef DBG_VALUE.
  if (MOs.size() != OpCount)
    return EmitDbgNoLocation(SD);

  return BuildMI(*MF, DL, RefII, false, MOs, Var, Expr);
}

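/// EmitDbgNoLocation - Emit an undef DBG_VALUE for a dbg_value node whose
/// value is no longer available.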
MachineInstr *InstrEmitter::EmitDbgNoLocation(SDDbgValue *SD) {
  // An invalidated SDNode must generate an undef DBG_VALUE: although the
  // original value is no longer computed, earlier DBG_VALUEs' live ranges
  // must not leak into later code.
  DIVariable *Var = SD->getVariable();
  const DIExpression *Expr =
      DIExpression::convertToUndefExpression(SD->getExpression());
  DebugLoc DL = SD->getDebugLoc();
  const MCInstrDesc &Desc = TII->get(TargetOpcode::DBG_VALUE);
  return BuildMI(*MF, DL, Desc, false, 0U, Var, Expr);
}

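/// EmitDbgValueList - Emit a variadic dbg_value node as a DBG_VALUE_LIST.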
MachineInstr *
InstrEmitter::EmitDbgValueList(SDDbgValue *SD,
                               VRBaseMapType &VRBaseMap) {
  MDNode *Var = SD->getVariable();
  DIExpression *Expr = SD->getExpression();
  DebugLoc DL = SD->getDebugLoc();
  // DBG_VALUE_LIST := "DBG_VALUE_LIST" var, expression, loc (, loc)*
  const MCInstrDesc &DbgValDesc = TII->get(TargetOpcode::DBG_VALUE_LIST);
  // Build the DBG_VALUE_LIST instruction base.
  auto MIB = BuildMI(*MF, DL, DbgValDesc);
  MIB.addMetadata(Var);
  MIB.addMetadata(Expr);
  AddDbgValueLocationOps(MIB, DbgValDesc, SD->getLocationOps(), VRBaseMap);
  return &*MIB;
}

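/// EmitDbgValueFromSingleOp - Emit a single-location dbg_value node as a
/// DBG_VALUE, constant-folding the expression first when the operand is a
/// constant integer.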
MachineInstr *
InstrEmitter::EmitDbgValueFromSingleOp(SDDbgValue *SD,
                                       VRBaseMapType &VRBaseMap) {
  MDNode *Var = SD->getVariable();
  DIExpression *Expr = SD->getExpression();
  DebugLoc DL = SD->getDebugLoc();
  const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);

  assert(SD->getLocationOps().size() == 1 &&
         "Non variadic dbg_value should have only one location op");

  // See about constant-folding the expression.
  // Copy the location operand in case we replace it.
  SmallVector<SDDbgOperand, 1> LocationOps(1, SD->getLocationOps()[0]);
  if (Expr && LocationOps[0].getKind() == SDDbgOperand::CONST) {
    const Value *V = LocationOps[0].getConst();
    if (auto *C = dyn_cast<ConstantInt>(V)) {
      std::tie(Expr, C) = Expr->constantFold(C);
      LocationOps[0] = SDDbgOperand::fromConst(C);
    }
  }

  // Emit non-variadic dbg_value nodes as DBG_VALUE.
  // DBG_VALUE := "DBG_VALUE" loc, isIndirect, var, expr
  auto MIB = BuildMI(*MF, DL, II);
  AddDbgValueLocationOps(MIB, II, LocationOps, VRBaseMap);

  if (SD->isIndirect())
    MIB.addImm(0U);
  else
    MIB.addReg(0U);

  return MIB.addMetadata(Var).addMetadata(Expr);
}

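/// EmitDbgLabel - Emit a DBG_LABEL instruction for a dbg_label node.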
| 976 | MachineInstr * |
| 977 | InstrEmitter::EmitDbgLabel(SDDbgLabel *SD) { |
| 978 | MDNode *Label = SD->getLabel(); |
| 979 | DebugLoc DL = SD->getDebugLoc(); |
| 980 | assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && |
| 981 | "Expected inlined-at fields to agree" ); |
| 982 | |
| 983 | const MCInstrDesc &II = TII->get(Opcode: TargetOpcode::DBG_LABEL); |
| 984 | MachineInstrBuilder MIB = BuildMI(MF&: *MF, MIMD: DL, MCID: II); |
| 985 | MIB.addMetadata(MD: Label); |
| 986 | |
| 987 | return &*MIB; |
| 988 | } |
| 989 | |
| 990 | /// EmitMachineNode - Generate machine code for a target-specific node and |
| 991 | /// needed dependencies. |
| 992 | /// |
| 993 | void InstrEmitter:: |
| 994 | EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned, |
| 995 | VRBaseMapType &VRBaseMap) { |
| 996 | unsigned Opc = Node->getMachineOpcode(); |
| 997 | |
| 998 | // Handle subreg insert/extract specially |
| 999 | if (Opc == TargetOpcode::EXTRACT_SUBREG || |
| 1000 | Opc == TargetOpcode::INSERT_SUBREG || |
| 1001 | Opc == TargetOpcode::SUBREG_TO_REG) { |
| 1002 | EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned); |
| 1003 | return; |
| 1004 | } |
| 1005 | |
| 1006 | // Handle COPY_TO_REGCLASS specially. |
| 1007 | if (Opc == TargetOpcode::COPY_TO_REGCLASS) { |
| 1008 | EmitCopyToRegClassNode(Node, VRBaseMap); |
| 1009 | return; |
| 1010 | } |
| 1011 | |
| 1012 | // Handle REG_SEQUENCE specially. |
| 1013 | if (Opc == TargetOpcode::REG_SEQUENCE) { |
| 1014 | EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned); |
| 1015 | return; |
| 1016 | } |
| 1017 | |
| 1018 | if (Opc == TargetOpcode::IMPLICIT_DEF) |
| 1019 | // We want a unique VR for each IMPLICIT_DEF use. |
| 1020 | return; |
| 1021 | |
| 1022 | const MCInstrDesc &II = TII->get(Opcode: Opc); |
| 1023 | unsigned NumResults = CountResults(Node); |
| 1024 | unsigned NumDefs = II.getNumDefs(); |
| 1025 | const MCPhysReg *ScratchRegs = nullptr; |
| 1026 | |
| 1027 | // Handle STACKMAP and PATCHPOINT specially and then use the generic code. |
| 1028 | if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) { |
| 1029 | // Stackmaps do not have arguments and do not preserve their calling |
| 1030 | // convention. However, to simplify runtime support, they clobber the same |
| 1031 | // scratch registers as AnyRegCC. |
| 1032 | unsigned CC = CallingConv::AnyReg; |
| 1033 | if (Opc == TargetOpcode::PATCHPOINT) { |
| 1034 | CC = Node->getConstantOperandVal(Num: PatchPointOpers::CCPos); |
| 1035 | NumDefs = NumResults; |
| 1036 | } |
| 1037 | ScratchRegs = TLI->getScratchRegisters(CC: (CallingConv::ID) CC); |
| 1038 | } else if (Opc == TargetOpcode::STATEPOINT) { |
| 1039 | NumDefs = NumResults; |
| 1040 | } |
| 1041 | |
| 1042 | unsigned NumImpUses = 0; |
| 1043 | unsigned NodeOperands = |
| 1044 | countOperands(Node, NumExpUses: II.getNumOperands() - NumDefs, NumImpUses); |
| 1045 | bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() && |
| 1046 | II.isVariadic() && II.variadicOpsAreDefs(); |
| 1047 | bool HasPhysRegOuts = NumResults > NumDefs && !II.implicit_defs().empty() && |
| 1048 | !HasVRegVariadicDefs; |
| 1049 | #ifndef NDEBUG |
| 1050 | unsigned NumMIOperands = NodeOperands + NumResults; |
| 1051 | if (II.isVariadic()) |
| 1052 | assert(NumMIOperands >= II.getNumOperands() && |
| 1053 | "Too few operands for a variadic node!" ); |
| 1054 | else |
| 1055 | assert(NumMIOperands >= II.getNumOperands() && |
| 1056 | NumMIOperands <= |
| 1057 | II.getNumOperands() + II.implicit_defs().size() + NumImpUses && |
| 1058 | "#operands for dag node doesn't match .td file!" ); |
| 1059 | #endif |
| 1060 | |
| 1061 | // Create the new machine instruction. |
| 1062 | MachineInstrBuilder MIB = BuildMI(MF&: *MF, MIMD: Node->getDebugLoc(), MCID: II); |
| 1063 | |
| 1064 | // Transfer IR flags from the SDNode to the MachineInstr |
| 1065 | MachineInstr *MI = MIB.getInstr(); |
| 1066 | const SDNodeFlags Flags = Node->getFlags(); |
| 1067 | if (Flags.hasUnpredictable()) |
| 1068 | MI->setFlag(MachineInstr::MIFlag::Unpredictable); |
| 1069 | |
| 1070 | // Add result register values for things that are defined by this |
| 1071 | // instruction. |
| 1072 | if (NumResults) { |
| 1073 | CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap); |
| 1074 | |
| 1075 | if (Flags.hasNoSignedZeros()) |
| 1076 | MI->setFlag(MachineInstr::MIFlag::FmNsz); |
| 1077 | |
| 1078 | if (Flags.hasAllowReciprocal()) |
| 1079 | MI->setFlag(MachineInstr::MIFlag::FmArcp); |
| 1080 | |
| 1081 | if (Flags.hasNoNaNs()) |
| 1082 | MI->setFlag(MachineInstr::MIFlag::FmNoNans); |
| 1083 | |
| 1084 | if (Flags.hasNoInfs()) |
| 1085 | MI->setFlag(MachineInstr::MIFlag::FmNoInfs); |
| 1086 | |
| 1087 | if (Flags.hasAllowContract()) |
| 1088 | MI->setFlag(MachineInstr::MIFlag::FmContract); |
| 1089 | |
| 1090 | if (Flags.hasApproximateFuncs()) |
| 1091 | MI->setFlag(MachineInstr::MIFlag::FmAfn); |
| 1092 | |
| 1093 | if (Flags.hasAllowReassociation()) |
| 1094 | MI->setFlag(MachineInstr::MIFlag::FmReassoc); |
| 1095 | |
| 1096 | if (Flags.hasNoUnsignedWrap()) |
| 1097 | MI->setFlag(MachineInstr::MIFlag::NoUWrap); |
| 1098 | |
| 1099 | if (Flags.hasNoSignedWrap()) |
| 1100 | MI->setFlag(MachineInstr::MIFlag::NoSWrap); |
| 1101 | |
| 1102 | if (Flags.hasExact()) |
| 1103 | MI->setFlag(MachineInstr::MIFlag::IsExact); |
| 1104 | |
| 1105 | if (Flags.hasNoFPExcept()) |
| 1106 | MI->setFlag(MachineInstr::MIFlag::NoFPExcept); |
| 1107 | |
| 1108 | if (Flags.hasDisjoint()) |
| 1109 | MI->setFlag(MachineInstr::MIFlag::Disjoint); |
| 1110 | |
| 1111 | if (Flags.hasSameSign()) |
| 1112 | MI->setFlag(MachineInstr::MIFlag::SameSign); |
| 1113 | } |
| 1114 | |
| 1115 | // Emit all of the actual operands of this instruction, adding them to the |
| 1116 | // instruction as appropriate. |
| 1117 | bool HasOptPRefs = NumDefs > NumResults; |
| 1118 | assert((!HasOptPRefs || !HasPhysRegOuts) && |
| 1119 | "Unable to cope with optional defs and phys regs defs!" ); |
| 1120 | unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0; |
| 1121 | for (unsigned i = NumSkip; i != NodeOperands; ++i) |
| 1122 | AddOperand(MIB, Op: Node->getOperand(Num: i), IIOpNum: i-NumSkip+NumDefs, II: &II, |
| 1123 | VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned); |
| 1124 | |
| 1125 | // Add scratch registers as implicit def and early clobber |
| 1126 | if (ScratchRegs) |
| 1127 | for (unsigned i = 0; ScratchRegs[i]; ++i) |
| 1128 | MIB.addReg(RegNo: ScratchRegs[i], flags: RegState::ImplicitDefine | |
| 1129 | RegState::EarlyClobber); |
| 1130 | |
| 1131 | // Set the memory reference descriptions of this instruction now that it is |
| 1132 | // part of the function. |
| 1133 | MIB.setMemRefs(cast<MachineSDNode>(Val: Node)->memoperands()); |
| 1134 | |
| 1135 | // Set the CFI type. |
| 1136 | MIB->setCFIType(MF&: *MF, Type: Node->getCFIType()); |
| 1137 | |
| 1138 | // Insert the instruction into position in the block. This needs to |
| 1139 | // happen before any custom inserter hook is called so that the |
| 1140 | // hook knows where in the block to insert the replacement code. |
| 1141 | MBB->insert(I: InsertPos, MI: MIB); |
| 1142 | |
| 1143 | // The MachineInstr may also define physregs instead of virtregs. These |
| 1144 | // physreg values can reach other instructions in different ways: |
| 1145 | // |
| 1146 | // 1. When there is a use of a Node value beyond the explicitly defined |
| 1147 | // virtual registers, we emit a CopyFromReg for one of the implicitly |
| 1148 | // defined physregs. This only happens when HasPhysRegOuts is true. |
| 1149 | // |
| 1150 | // 2. A CopyFromReg reading a physreg may be glued to this instruction. |
| 1151 | // |
| 1152 | // 3. A glued instruction may implicitly use a physreg. |
| 1153 | // |
| 1154 | // 4. A glued instruction may use a RegisterSDNode operand. |
| 1155 | // |
| 1156 | // Collect all the used physreg defs, and make sure that any unused physreg |
| 1157 | // defs are marked as dead. |
| 1158 | SmallVector<Register, 8> UsedRegs; |
| 1159 | |
| 1160 | // Additional results must be physical register defs. |
| 1161 | if (HasPhysRegOuts) { |
| 1162 | for (unsigned i = NumDefs; i < NumResults; ++i) { |
| 1163 | Register Reg = II.implicit_defs()[i - NumDefs]; |
| 1164 | if (!Node->hasAnyUseOfValue(Value: i)) |
| 1165 | continue; |
| 1166 | // This implicitly defined physreg has a use. |
| 1167 | UsedRegs.push_back(Elt: Reg); |
| 1168 | EmitCopyFromReg(Node, ResNo: i, IsClone, SrcReg: Reg, VRBaseMap); |
| 1169 | } |
| 1170 | } |
| 1171 | |
| 1172 | // Scan the glue chain for any used physregs. |
| 1173 | if (Node->getValueType(ResNo: Node->getNumValues()-1) == MVT::Glue) { |
| 1174 | for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) { |
| 1175 | if (F->getOpcode() == ISD::CopyFromReg) { |
| 1176 | UsedRegs.push_back(Elt: cast<RegisterSDNode>(Val: F->getOperand(Num: 1))->getReg()); |
| 1177 | continue; |
| 1178 | } else if (F->getOpcode() == ISD::CopyToReg) { |
| 1179 | // Skip CopyToReg nodes that are internal to the glue chain. |
| 1180 | continue; |
| 1181 | } |
| 1182 | // Collect declared implicit uses. |
| 1183 | const MCInstrDesc &MCID = TII->get(Opcode: F->getMachineOpcode()); |
| 1184 | append_range(C&: UsedRegs, R: MCID.implicit_uses()); |
| 1185 | // In addition to declared implicit uses, we must also check for |
| 1186 | // direct RegisterSDNode operands. |
| 1187 | for (const SDValue &Op : F->op_values()) |
| 1188 | if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Val: Op)) { |
| 1189 | Register Reg = R->getReg(); |
| 1190 | if (Reg.isPhysical()) |
| 1191 | UsedRegs.push_back(Elt: Reg); |
| 1192 | } |
| 1193 | } |
| 1194 | } |
| 1195 | |
| 1196 | // Add rounding control registers as implicit def for function call. |
| 1197 | if (II.isCall() && MF->getFunction().hasFnAttribute(Kind: Attribute::StrictFP)) { |
| 1198 | ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters(); |
| 1199 | llvm::append_range(C&: UsedRegs, R&: RCRegs); |
| 1200 | } |
| 1201 | |
| 1202 | // Finally mark unused registers as dead. |
| 1203 | if (!UsedRegs.empty() || !II.implicit_defs().empty() || II.hasOptionalDef()) |
| 1204 | MIB->setPhysRegsDeadExcept(UsedRegs, TRI: *TRI); |
| 1205 | |
| 1206 | // STATEPOINT is too 'dynamic' to have meaningful machine description. |
| 1207 | // We have to manually tie operands. |
| 1208 | if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) { |
| 1209 | assert(!HasPhysRegOuts && "STATEPOINT mishandled" ); |
| 1210 | MachineInstr *MI = MIB; |
| 1211 | unsigned Def = 0; |
| 1212 | int First = StatepointOpers(MI).getFirstGCPtrIdx(); |
| 1213 | assert(First > 0 && "Statepoint has Defs but no GC ptr list" ); |
| 1214 | unsigned Use = (unsigned)First; |
| 1215 | while (Def < NumDefs) { |
| 1216 | if (MI->getOperand(i: Use).isReg()) |
| 1217 | MI->tieOperands(DefIdx: Def++, UseIdx: Use); |
| 1218 | Use = StackMaps::getNextMetaArgIdx(MI, CurIdx: Use); |
| 1219 | } |
| 1220 | } |
| 1221 | |
| 1222 | if (SDNode *GluedNode = Node->getGluedNode()) { |
| 1223 | // FIXME: Possibly iterate over multiple glue nodes? |
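| | // Machine opcodes are stored bit-complemented in the SDNode opcode field
| | // (see SDNode::getMachineOpcode), hence the '~' in the comparison below.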
| 1224 | if (GluedNode->getOpcode() == |
| 1225 | ~(unsigned)TargetOpcode::CONVERGENCECTRL_GLUE) { |
| 1226 | Register VReg = getVR(Op: GluedNode->getOperand(Num: 0), VRBaseMap); |
| 1227 | MachineOperand MO = MachineOperand::CreateReg(Reg: VReg, /*isDef=*/false, |
| 1228 | /*isImp=*/true); |
| 1229 | MIB->addOperand(Op: MO); |
| 1230 | } |
| 1231 | } |
| 1232 | |
| 1233 | // Run post-isel target hook to adjust this instruction if needed. |
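| | // (Targets opt in per instruction via the hasPostISelHook bit in their
| | // TableGen definitions.)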
| 1234 | if (II.hasPostISelHook()) |
| 1235 | TLI->AdjustInstrPostInstrSelection(MI&: *MIB, Node); |
| 1236 | } |
| 1237 | |
| 1238 | /// EmitSpecialNode - Generate machine code for a target-independent node and |
| 1239 | /// needed dependencies. |
| 1240 | void InstrEmitter:: |
| 1241 | EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, |
| 1242 | VRBaseMapType &VRBaseMap) { |
| 1243 | switch (Node->getOpcode()) { |
| 1244 | default: |
| 1245 | #ifndef NDEBUG |
| 1246 | Node->dump(); |
| 1247 | #endif |
| 1248 | llvm_unreachable("This target-independent node should have been selected!");
| 1249 | case ISD::EntryToken: |
| 1250 | case ISD::MERGE_VALUES: |
| 1251 | case ISD::TokenFactor: // fall thru |
| 1252 | break; |
| 1253 | case ISD::CopyToReg: { |
| 1254 | Register DestReg = cast<RegisterSDNode>(Val: Node->getOperand(Num: 1))->getReg(); |
| 1255 | SDValue SrcVal = Node->getOperand(Num: 2); |
| 1256 | if (DestReg.isVirtual() && SrcVal.isMachineOpcode() && |
| 1257 | SrcVal.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) { |
| 1258 | // Instead of building a COPY to that vreg destination, build an
| 1259 | // IMPLICIT_DEF instruction.
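| | // (Copying from an IMPLICIT_DEF would only yield another undefined value,
| | // so defining the destination directly avoids a useless COPY.)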
| 1260 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), |
| 1261 | MCID: TII->get(Opcode: TargetOpcode::IMPLICIT_DEF), DestReg); |
| 1262 | break; |
| 1263 | } |
| 1264 | Register SrcReg; |
| 1265 | if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Val&: SrcVal)) |
| 1266 | SrcReg = R->getReg(); |
| 1267 | else |
| 1268 | SrcReg = getVR(Op: SrcVal, VRBaseMap); |
| 1269 | |
| 1270 | if (SrcReg == DestReg) // Coalesced away the copy? Ignore. |
| 1271 | break; |
| 1272 | |
| 1273 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), MCID: TII->get(Opcode: TargetOpcode::COPY), |
| 1274 | DestReg).addReg(RegNo: SrcReg); |
| 1275 | break; |
| 1276 | } |
| 1277 | case ISD::CopyFromReg: { |
| 1278 | Register SrcReg = cast<RegisterSDNode>(Val: Node->getOperand(Num: 1))->getReg(); |
| 1279 | EmitCopyFromReg(Node, ResNo: 0, IsClone, SrcReg, VRBaseMap); |
| 1280 | break; |
| 1281 | } |
| 1282 | case ISD::EH_LABEL: |
| 1283 | case ISD::ANNOTATION_LABEL: { |
| 1284 | unsigned Opc = (Node->getOpcode() == ISD::EH_LABEL) |
| 1285 | ? TargetOpcode::EH_LABEL |
| 1286 | : TargetOpcode::ANNOTATION_LABEL; |
| 1287 | MCSymbol *S = cast<LabelSDNode>(Val: Node)->getLabel(); |
| 1288 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), |
| 1289 | MCID: TII->get(Opcode: Opc)).addSym(Sym: S); |
| 1290 | break; |
| 1291 | } |
| 1292 | |
| 1293 | case ISD::LIFETIME_START: |
| 1294 | case ISD::LIFETIME_END: { |
| 1295 | unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START) |
| 1296 | ? TargetOpcode::LIFETIME_START |
| 1297 | : TargetOpcode::LIFETIME_END; |
| 1298 | auto *FI = cast<FrameIndexSDNode>(Val: Node->getOperand(Num: 1)); |
| 1299 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), MCID: TII->get(Opcode: TarOp)) |
| 1300 | .addFrameIndex(Idx: FI->getIndex()); |
| 1301 | break; |
| 1302 | } |
| 1303 | |
| 1304 | case ISD::PSEUDO_PROBE: { |
| 1305 | unsigned TarOp = TargetOpcode::PSEUDO_PROBE; |
| 1306 | auto Guid = cast<PseudoProbeSDNode>(Val: Node)->getGuid(); |
| 1307 | auto Index = cast<PseudoProbeSDNode>(Val: Node)->getIndex(); |
| 1308 | auto Attr = cast<PseudoProbeSDNode>(Val: Node)->getAttributes(); |
| 1309 | |
| 1310 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), MCID: TII->get(Opcode: TarOp)) |
| 1311 | .addImm(Val: Guid) |
| 1312 | .addImm(Val: Index) |
| 1313 | .addImm(Val: (uint8_t)PseudoProbeType::Block) |
| 1314 | .addImm(Val: Attr); |
| 1315 | break; |
| 1316 | } |
| 1317 | |
| 1318 | case ISD::INLINEASM: |
| 1319 | case ISD::INLINEASM_BR: { |
| 1320 | unsigned NumOps = Node->getNumOperands(); |
| 1321 | if (Node->getOperand(Num: NumOps-1).getValueType() == MVT::Glue) |
| 1322 | --NumOps; // Ignore the glue operand. |
| 1323 | |
| 1324 | // Create the inline asm machine instruction. |
| 1325 | unsigned TgtOpc = Node->getOpcode() == ISD::INLINEASM_BR |
| 1326 | ? TargetOpcode::INLINEASM_BR |
| 1327 | : TargetOpcode::INLINEASM; |
| 1328 | MachineInstrBuilder MIB = |
| 1329 | BuildMI(MF&: *MF, MIMD: Node->getDebugLoc(), MCID: TII->get(Opcode: TgtOpc)); |
| 1330 | |
| 1331 | // Add the asm string as an external symbol operand. |
| 1332 | SDValue AsmStrV = Node->getOperand(Num: InlineAsm::Op_AsmString); |
| 1333 | const char *AsmStr = cast<ExternalSymbolSDNode>(Val&: AsmStrV)->getSymbol(); |
| 1334 | MIB.addExternalSymbol(FnName: AsmStr); |
| 1335 | |
| 1336 | // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore |
| 1337 | // bits. |
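| | // (These are packed into a single immediate operand; the individual bits
| | // are the InlineAsm::Extra_* flags defined in llvm/IR/InlineAsm.h.)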
| 1338 | int64_t ExtraInfo =
| 1339 | cast<ConstantSDNode>(Val: Node->getOperand(Num: InlineAsm::Op_ExtraInfo))-> |
| 1340 | getZExtValue(); |
| 1341 | MIB.addImm(Val: ExtraInfo); |
| 1342 | |
| 1343 | // Remember the operand index of the group flags.
| 1344 | SmallVector<unsigned, 8> GroupIdx; |
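| | // Each operand group of an INLINEASM MachineInstr is a flag-word immediate
| | // (kind plus operand count, see InlineAsm::Flag) followed by that many
| | // operands; GroupIdx records where each flag word lands so that tied uses
| | // can locate their def group below.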
| 1345 | |
| 1346 | // Remember registers that are part of early-clobber defs. |
| 1347 | SmallVector<Register, 8> ECRegs; |
| 1348 | |
| 1349 | // Add all of the operand registers to the instruction. |
| 1350 | for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { |
| 1351 | unsigned Flags = Node->getConstantOperandVal(Num: i); |
| 1352 | const InlineAsm::Flag F(Flags); |
| 1353 | const unsigned NumVals = F.getNumOperandRegisters(); |
| 1354 | |
| 1355 | GroupIdx.push_back(Elt: MIB->getNumOperands()); |
| 1356 | MIB.addImm(Val: Flags); |
| 1357 | ++i; // Skip the ID value. |
| 1358 | |
| 1359 | switch (F.getKind()) { |
| 1360 | case InlineAsm::Kind::RegDef: |
| 1361 | for (unsigned j = 0; j != NumVals; ++j, ++i) { |
| 1362 | Register Reg = cast<RegisterSDNode>(Val: Node->getOperand(Num: i))->getReg(); |
| 1363 | // FIXME: Add dead flags for physical and virtual registers defined. |
| 1364 | // For now, mark physical register defs as implicit to help fast |
| 1365 | // regalloc. This makes inline asm look a lot like calls. |
| 1366 | MIB.addReg(RegNo: Reg, flags: RegState::Define | getImplRegState(B: Reg.isPhysical())); |
| 1367 | } |
| 1368 | break; |
| 1369 | case InlineAsm::Kind::RegDefEarlyClobber: |
| 1370 | case InlineAsm::Kind::Clobber: |
| 1371 | for (unsigned j = 0; j != NumVals; ++j, ++i) { |
| 1372 | Register Reg = cast<RegisterSDNode>(Val: Node->getOperand(Num: i))->getReg(); |
| 1373 | MIB.addReg(RegNo: Reg, flags: RegState::Define | RegState::EarlyClobber | |
| 1374 | getImplRegState(B: Reg.isPhysical())); |
| 1375 | ECRegs.push_back(Elt: Reg); |
| 1376 | } |
| 1377 | break; |
| 1378 | case InlineAsm::Kind::RegUse: // Use of register. |
| 1379 | case InlineAsm::Kind::Imm: // Immediate. |
| 1380 | case InlineAsm::Kind::Mem: // Non-function addressing mode. |
| 1381 | // The addressing mode has been selected; just add all of the
| 1382 | // operands to the machine instruction.
| 1383 | for (unsigned j = 0; j != NumVals; ++j, ++i) |
| 1384 | AddOperand(MIB, Op: Node->getOperand(Num: i), IIOpNum: 0, II: nullptr, VRBaseMap, |
| 1385 | /*IsDebug=*/false, IsClone, IsCloned); |
| 1386 | |
| 1387 | // Manually set isTied bits. |
| 1388 | if (F.isRegUseKind()) { |
| 1389 | unsigned DefGroup; |
| 1390 | if (F.isUseOperandTiedToDef(Idx&: DefGroup)) { |
| 1391 | unsigned DefIdx = GroupIdx[DefGroup] + 1; |
| 1392 | unsigned UseIdx = GroupIdx.back() + 1; |
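| | // The '+ 1' skips the flag-word immediate at the start of each group, so
| | // operand j of the use group is tied to operand j of its def group.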
| 1393 | for (unsigned j = 0; j != NumVals; ++j) |
| 1394 | MIB->tieOperands(DefIdx: DefIdx + j, UseIdx: UseIdx + j); |
| 1395 | } |
| 1396 | } |
| 1397 | break; |
| 1398 | case InlineAsm::Kind::Func: // Function addressing mode. |
| 1399 | for (unsigned j = 0; j != NumVals; ++j, ++i) { |
| 1400 | SDValue Op = Node->getOperand(Num: i); |
| 1401 | AddOperand(MIB, Op, IIOpNum: 0, II: nullptr, VRBaseMap, |
| 1402 | /*IsDebug=*/false, IsClone, IsCloned); |
| 1403 | |
| 1404 | // Adjust target flags for the function reference.
| 1405 | if (auto *TGA = dyn_cast<GlobalAddressSDNode>(Val&: Op)) { |
| 1406 | unsigned NewFlags = |
| 1407 | MF->getSubtarget().classifyGlobalFunctionReference( |
| 1408 | GV: TGA->getGlobal()); |
| 1409 | unsigned LastIdx = MIB.getInstr()->getNumOperands() - 1; |
| 1410 | MIB.getInstr()->getOperand(i: LastIdx).setTargetFlags(NewFlags); |
| 1411 | } |
| 1412 | } |
| 1413 | } |
| 1414 | } |
| 1415 | |
| 1416 | // Add rounding control registers as implicit defs for inline asm.
| 1417 | if (MF->getFunction().hasFnAttribute(Kind: Attribute::StrictFP)) { |
| 1418 | ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters(); |
| 1419 | for (MCPhysReg Reg : RCRegs) |
| 1420 | MIB.addReg(RegNo: Reg, flags: RegState::ImplicitDefine); |
| 1421 | } |
| 1422 | |
| 1423 | // GCC inline assembly allows input operands to also be early-clobber |
| 1424 | // output operands (so long as the operand is written only after it's |
| 1425 | // used), but this does not match the semantics of our early-clobber flag. |
| 1426 | // If an early-clobber operand register is also an input operand register, |
| 1427 | // then remove the early-clobber flag. |
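| | // For example (illustrative, assuming GCC-style matching constraints):
| | //   asm("add %1, %0" : "=&r"(x) : "0"(x));
| | // forces the input into the early-clobber output register, so the
| | // early-clobber flag on that def must be cleared here.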
| 1428 | for (Register Reg : ECRegs) { |
| 1429 | if (MIB->readsRegister(Reg, TRI)) { |
| 1430 | MachineOperand *MO = |
| 1431 | MIB->findRegisterDefOperand(Reg, TRI, isDead: false, Overlap: false); |
| 1432 | assert(MO && "No def operand for clobbered register?");
| 1433 | MO->setIsEarlyClobber(false); |
| 1434 | } |
| 1435 | } |
| 1436 | |
| 1437 | // Get the mdnode from the asm if it exists and add it to the instruction. |
| 1438 | SDValue MDV = Node->getOperand(Num: InlineAsm::Op_MDNode); |
| 1439 | const MDNode *MD = cast<MDNodeSDNode>(Val&: MDV)->getMD(); |
| 1440 | if (MD) |
| 1441 | MIB.addMetadata(MD); |
| 1442 | |
| 1443 | MBB->insert(I: InsertPos, MI: MIB); |
| 1444 | break; |
| 1445 | } |
| 1446 | } |
| 1447 | } |
| 1448 | |
| 1449 | /// InstrEmitter - Construct an InstrEmitter and set it to start inserting |
| 1450 | /// at the given position in the given block. |
| 1451 | InstrEmitter::InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb, |
| 1452 | MachineBasicBlock::iterator insertpos) |
| 1453 | : MF(mbb->getParent()), MRI(&MF->getRegInfo()), |
| 1454 | TII(MF->getSubtarget().getInstrInfo()), |
| 1455 | TRI(MF->getSubtarget().getRegisterInfo()), |
| 1456 | TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb), |
| 1457 | InsertPos(insertpos) { |
| 1458 | EmitDebugInstrRefs = mbb->getParent()->useDebugInstrRef(); |
| 1459 | } |
| 1460 | |