| 1 | //==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This implements the Emit routines for the SelectionDAG class, which creates |
| 10 | // MachineInstrs based on the decisions of the SelectionDAG instruction |
| 11 | // selection. |
| 12 | // |
| 13 | //===----------------------------------------------------------------------===// |
| 14 | |
| 15 | #include "InstrEmitter.h" |
| 16 | #include "SDNodeDbgValue.h" |
| 17 | #include "llvm/BinaryFormat/Dwarf.h" |
| 18 | #include "llvm/CodeGen/ISDOpcodes.h" |
| 19 | #include "llvm/CodeGen/MachineConstantPool.h" |
| 20 | #include "llvm/CodeGen/MachineFunction.h" |
| 21 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
| 22 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
| 23 | #include "llvm/CodeGen/SelectionDAGNodes.h" |
| 24 | #include "llvm/CodeGen/StackMaps.h" |
| 25 | #include "llvm/CodeGen/TargetInstrInfo.h" |
| 26 | #include "llvm/CodeGen/TargetLowering.h" |
| 27 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
| 28 | #include "llvm/IR/DebugInfoMetadata.h" |
| 29 | #include "llvm/IR/PseudoProbe.h" |
| 30 | #include "llvm/Support/ErrorHandling.h" |
| 31 | #include "llvm/Target/TargetMachine.h" |
| 32 | using namespace llvm; |
| 33 | |
| 34 | #define DEBUG_TYPE "instr-emitter" |
| 35 | |
| 36 | /// MinRCSize - Smallest register class we allow when constraining virtual |
| 37 | /// registers. If satisfying all register class constraints would require |
| 38 | /// using a smaller register class, emit a COPY to a new virtual register |
| 39 | /// instead. |
| 40 | const unsigned MinRCSize = 4; |
| 41 | |
| 42 | /// CountResults - The results of target nodes have register or immediate |
| 43 | /// values first, then an optional chain, and optional glue results (which do |
| 44 | /// not go into the resulting MachineInstr). |
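| | /// For example, a node producing (i32, i32, ch, glue) counts as two results; |
| | /// the trailing chain and glue values are skipped. |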
| 45 | unsigned InstrEmitter::CountResults(SDNode *Node) { |
| 46 | unsigned N = Node->getNumValues(); |
| 47 | while (N && Node->getValueType(ResNo: N - 1) == MVT::Glue) |
| 48 | --N; |
| 49 | if (N && Node->getValueType(ResNo: N - 1) == MVT::Other) |
| 50 | --N; // Skip over chain result. |
| 51 | return N; |
| 52 | } |
| 53 | |
| 54 | /// countOperands - The inputs to target nodes have any actual inputs first, |
| 55 | /// followed by an optional chain operand, then an optional glue operand. |
| 56 | /// Compute the number of actual operands that will go into the resulting |
| 57 | /// MachineInstr. |
| 58 | /// |
| 59 | /// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding |
| 60 | /// the chain and glue. These operands may be implicit on the machine instr. |
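| | /// For example, a call-like node's operands are laid out roughly as: actual |
| | /// arguments, then physreg RegisterSDNode copies and a RegisterMaskSDNode, |
| | /// then the chain and glue. Only the leading actual arguments become explicit |
| | /// MachineInstr operands; the trailing register/regmask operands are counted |
| | /// in NumImpUses. |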
| 61 | static unsigned countOperands(SDNode *Node, unsigned NumExpUses, |
| 62 | unsigned &NumImpUses) { |
| 63 | unsigned N = Node->getNumOperands(); |
| 64 | while (N && Node->getOperand(Num: N - 1).getValueType() == MVT::Glue) |
| 65 | --N; |
| 66 | if (N && Node->getOperand(Num: N - 1).getOpcode() == ISD::DEACTIVATION_SYMBOL) |
| 67 | --N; // Ignore deactivation symbol if it exists. |
| 68 | if (N && Node->getOperand(Num: N - 1).getValueType() == MVT::Other) |
| 69 | --N; // Ignore chain if it exists. |
| 70 | |
| 71 | // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses. |
| 72 | NumImpUses = N - NumExpUses; |
| 73 | for (unsigned I = N; I > NumExpUses; --I) { |
| 74 | if (isa<RegisterMaskSDNode>(Val: Node->getOperand(Num: I - 1))) |
| 75 | continue; |
| 76 | if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Val: Node->getOperand(Num: I - 1))) |
| 77 | if (RN->getReg().isPhysical()) |
| 78 | continue; |
| 79 | NumImpUses = N - I; |
| 80 | break; |
| 81 | } |
| 82 | |
| 83 | return N; |
| 84 | } |
| 85 | |
| 86 | /// EmitCopyFromReg - Generate machine code for a CopyFromReg node or an |
| 87 | /// implicit physical register output. |
| 88 | void InstrEmitter::EmitCopyFromReg(SDValue Op, bool IsClone, Register SrcReg, |
| 89 | VRBaseMapType &VRBaseMap) { |
| 90 | Register VRBase; |
| 91 | if (SrcReg.isVirtual()) { |
| 92 | // Just use the input register directly! |
| 93 | if (IsClone) |
| 94 | VRBaseMap.erase(Val: Op); |
| 95 | bool isNew = VRBaseMap.insert(KV: std::make_pair(x&: Op, y&: SrcReg)).second; |
| 96 | (void)isNew; // Silence compiler warning. |
| 97 | assert(isNew && "Node emitted out of order - early" ); |
| 98 | return; |
| 99 | } |
| 100 | |
| 101 | // If the node is only used by a CopyToReg and the dest reg is a vreg, use |
| 102 | // the CopyToReg'd destination register instead of creating a new vreg. |
| 103 | bool MatchReg = true; |
| 104 | const TargetRegisterClass *UseRC = nullptr; |
| 105 | MVT VT = Op.getSimpleValueType(); |
| 106 | |
| 107 | // Stick to the preferred register classes for legal types. |
| 108 | if (TLI->isTypeLegal(VT)) |
| 109 | UseRC = TLI->getRegClassFor(VT, isDivergent: Op->isDivergent()); |
| 110 | |
| 111 | for (SDNode *User : Op->users()) { |
| 112 | bool Match = true; |
| 113 | if (User->getOpcode() == ISD::CopyToReg && User->getOperand(Num: 2) == Op) { |
| 114 | Register DestReg = cast<RegisterSDNode>(Val: User->getOperand(Num: 1))->getReg(); |
| 115 | if (DestReg.isVirtual()) { |
| 116 | VRBase = DestReg; |
| 117 | Match = false; |
| 118 | } else if (DestReg != SrcReg) |
| 119 | Match = false; |
| 120 | } else { |
| 121 | for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { |
| 122 | if (User->getOperand(Num: i) != Op) |
| 123 | continue; |
| 124 | if (VT == MVT::Other || VT == MVT::Glue) |
| 125 | continue; |
| 126 | Match = false; |
| 127 | if (User->isMachineOpcode()) { |
| 128 | const MCInstrDesc &II = TII->get(Opcode: User->getMachineOpcode()); |
| 129 | const TargetRegisterClass *RC = nullptr; |
| 130 | if (i + II.getNumDefs() < II.getNumOperands()) { |
| 131 | RC = TRI->getAllocatableClass( |
| 132 | RC: TII->getRegClass(MCID: II, OpNum: i + II.getNumDefs())); |
| 133 | } |
| 134 | if (!UseRC) |
| 135 | UseRC = RC; |
| 136 | else if (RC) { |
| 137 | const TargetRegisterClass *ComRC = |
| 138 | TRI->getCommonSubClass(A: UseRC, B: RC); |
| 139 | // If multiple uses expect disjoint register classes, we emit |
| 140 | // copies in AddRegisterOperand. |
| 141 | if (ComRC) |
| 142 | UseRC = ComRC; |
| 143 | } |
| 144 | } |
| 145 | } |
| 146 | } |
| 147 | MatchReg &= Match; |
| 148 | if (VRBase) |
| 149 | break; |
| 150 | } |
| 151 | |
| 152 | const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr; |
| 153 | SrcRC = TRI->getMinimalPhysRegClass(Reg: SrcReg, VT); |
| 154 | |
| 155 | // Figure out the register class to create for the destreg. |
| 156 | if (VRBase) { |
| 157 | DstRC = MRI->getRegClass(Reg: VRBase); |
| 158 | } else if (UseRC) { |
| 159 | assert(TRI->isTypeLegalForClass(*UseRC, VT) && |
| 160 | "Incompatible phys register def and uses!" ); |
| 161 | DstRC = UseRC; |
| 162 | } else |
| 163 | DstRC = SrcRC; |
| 164 | |
| 165 | // If all uses are reading from the src physical register and copying the |
| 166 | // register is either impossible or very expensive, then don't create a copy. |
| 167 | if (MatchReg && SrcRC->expensiveOrImpossibleToCopy()) { |
| 168 | VRBase = SrcReg; |
| 169 | } else { |
| 170 | // Create the reg, emit the copy. |
| 171 | VRBase = MRI->createVirtualRegister(RegClass: DstRC); |
| 172 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Op.getDebugLoc(), MCID: TII->get(Opcode: TargetOpcode::COPY), |
| 173 | DestReg: VRBase) |
| 174 | .addReg(RegNo: SrcReg); |
| 175 | } |
| 176 | |
| 177 | if (IsClone) |
| 178 | VRBaseMap.erase(Val: Op); |
| 179 | bool isNew = VRBaseMap.insert(KV: std::make_pair(x&: Op, y&: VRBase)).second; |
| 180 | (void)isNew; // Silence compiler warning. |
| 181 | assert(isNew && "Node emitted out of order - early" ); |
| 182 | } |
| 183 | |
| 184 | void InstrEmitter::CreateVirtualRegisters(SDNode *Node, |
| 185 | MachineInstrBuilder &MIB, |
| 186 | const MCInstrDesc &II, |
| 187 | bool IsClone, bool IsCloned, |
| 188 | VRBaseMapType &VRBaseMap) { |
| 189 | assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF && |
| 190 | "IMPLICIT_DEF should have been handled as a special case elsewhere!" ); |
| 191 | |
| 192 | unsigned NumResults = CountResults(Node); |
| 193 | bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() && |
| 194 | II.isVariadic() && II.variadicOpsAreDefs(); |
| 195 | unsigned NumVRegs = HasVRegVariadicDefs ? NumResults : II.getNumDefs(); |
| 196 | if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT) |
| 197 | NumVRegs = NumResults; |
| 198 | for (unsigned i = 0; i < NumVRegs; ++i) { |
| 199 | // If the specific node value is only used by a CopyToReg and the dest reg |
| 200 | // is a vreg in the same register class, use the CopyToReg'd destination |
| 201 | // register instead of creating a new vreg. |
| 202 | Register VRBase; |
| 203 | const TargetRegisterClass *RC = |
| 204 | TRI->getAllocatableClass(RC: TII->getRegClass(MCID: II, OpNum: i)); |
| 205 | // Always let the value type influence the used register class. The |
| 206 | // constraints on the instruction may be too lax to represent the value |
| 207 | // type correctly. For example, a 64-bit float (X86::FR64) can't live in |
| 208 | // the 32-bit float super-class (X86::FR32). |
| 209 | if (i < NumResults && TLI->isTypeLegal(VT: Node->getSimpleValueType(ResNo: i))) { |
| 210 | const TargetRegisterClass *VTRC = TLI->getRegClassFor( |
| 211 | VT: Node->getSimpleValueType(ResNo: i), |
| 212 | isDivergent: (Node->isDivergent() || (RC && TRI->isDivergentRegClass(RC)))); |
| 213 | if (RC) |
| 214 | VTRC = TRI->getCommonSubClass(A: RC, B: VTRC); |
| 215 | if (VTRC) |
| 216 | RC = VTRC; |
| 217 | } |
| 218 | |
| 219 | if (!II.operands().empty() && II.operands()[i].isOptionalDef()) { |
| 220 | // Optional def must be a physical register. |
| 221 | VRBase = cast<RegisterSDNode>(Val: Node->getOperand(Num: i-NumResults))->getReg(); |
| 222 | assert(VRBase.isPhysical()); |
| 223 | MIB.addReg(RegNo: VRBase, Flags: RegState::Define); |
| 224 | } |
| 225 | |
| 226 | if (!VRBase && !IsClone && !IsCloned) |
| 227 | for (SDNode *User : Node->users()) { |
| 228 | if (User->getOpcode() == ISD::CopyToReg && |
| 229 | User->getOperand(Num: 2).getNode() == Node && |
| 230 | User->getOperand(Num: 2).getResNo() == i) { |
| 231 | Register Reg = cast<RegisterSDNode>(Val: User->getOperand(Num: 1))->getReg(); |
| 232 | if (Reg.isVirtual()) { |
| 233 | const TargetRegisterClass *RegRC = MRI->getRegClass(Reg); |
| 234 | if (RegRC == RC) { |
| 235 | VRBase = Reg; |
| 236 | MIB.addReg(RegNo: VRBase, Flags: RegState::Define); |
| 237 | break; |
| 238 | } |
| 239 | } |
| 240 | } |
| 241 | } |
| 242 | |
| 243 | // Create the result registers for this node and add the result regs to |
| 244 | // the machine instruction. |
| 245 | if (!VRBase) { |
| 246 | assert(RC && "Isn't a register operand!" ); |
| 247 | VRBase = MRI->createVirtualRegister(RegClass: RC); |
| 248 | MIB.addReg(RegNo: VRBase, Flags: RegState::Define); |
| 249 | } |
| 250 | |
| 251 | // If this def corresponds to a result of the SDNode insert the VRBase into |
| 252 | // the lookup map. |
| 253 | if (i < NumResults) { |
| 254 | SDValue Op(Node, i); |
| 255 | if (IsClone) |
| 256 | VRBaseMap.erase(Val: Op); |
| 257 | bool isNew = VRBaseMap.insert(KV: std::make_pair(x&: Op, y&: VRBase)).second; |
| 258 | (void)isNew; // Silence compiler warning. |
| 259 | assert(isNew && "Node emitted out of order - early" ); |
| 260 | } |
| 261 | } |
| 262 | } |
| 263 | |
| 264 | /// getVR - Return the virtual register corresponding to the specified result |
| 265 | /// of the specified node. |
| 266 | Register InstrEmitter::getVR(SDValue Op, VRBaseMapType &VRBaseMap) { |
| 267 | if (Op.isMachineOpcode() && |
| 268 | Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) { |
| 269 | // Add an IMPLICIT_DEF instruction before every use. |
| 270 | // IMPLICIT_DEF can produce any type of result so its MCInstrDesc |
| 271 | // does not include operand register class info. |
| 272 | const TargetRegisterClass *RC = TLI->getRegClassFor( |
| 273 | VT: Op.getSimpleValueType(), isDivergent: Op.getNode()->isDivergent()); |
| 274 | Register VReg = MRI->createVirtualRegister(RegClass: RC); |
| 275 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Op.getDebugLoc(), |
| 276 | MCID: TII->get(Opcode: TargetOpcode::IMPLICIT_DEF), DestReg: VReg); |
| 277 | return VReg; |
| 278 | } |
| 279 | |
| 280 | VRBaseMapType::iterator I = VRBaseMap.find(Val: Op); |
| 281 | assert(I != VRBaseMap.end() && "Node emitted out of order - late" ); |
| 282 | return I->second; |
| 283 | } |
| 284 | |
| 285 | static bool isConvergenceCtrlMachineOp(SDValue Op) { |
| 286 | if (Op->isMachineOpcode()) { |
| 287 | switch (Op->getMachineOpcode()) { |
| 288 | case TargetOpcode::CONVERGENCECTRL_ANCHOR: |
| 289 | case TargetOpcode::CONVERGENCECTRL_ENTRY: |
| 290 | case TargetOpcode::CONVERGENCECTRL_LOOP: |
| 291 | case TargetOpcode::CONVERGENCECTRL_GLUE: |
| 292 | return true; |
| 293 | } |
| 294 | return false; |
| 295 | } |
| 296 | |
| 297 | // We can reach here when a CopyFromReg is encountered. Rather than making a |
| 298 | // special case for that, we just make sure we don't reach here in some |
| 299 | // surprising way. |
| 300 | switch (Op->getOpcode()) { |
| 301 | case ISD::CONVERGENCECTRL_ANCHOR: |
| 302 | case ISD::CONVERGENCECTRL_ENTRY: |
| 303 | case ISD::CONVERGENCECTRL_LOOP: |
| 304 | case ISD::CONVERGENCECTRL_GLUE: |
| 305 | llvm_unreachable("Convergence control should have been selected by now." ); |
| 306 | } |
| 307 | return false; |
| 308 | } |
| 309 | |
| 310 | /// AddRegisterOperand - Add the specified register as an operand to the |
| 311 | /// specified machine instr. Insert register copies if the register is |
| 312 | /// not in the required register class. |
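| | /// For example (illustrative): if the operand lives in a GR32 vreg but the |
| | /// instruction requires GR32_NOSP, the vreg is constrained in place; if the |
| | /// classes are incompatible, a COPY into a fresh vreg of the required class |
| | /// is emitted instead. |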
| 313 | void |
| 314 | InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB, |
| 315 | SDValue Op, |
| 316 | unsigned IIOpNum, |
| 317 | const MCInstrDesc *II, |
| 318 | VRBaseMapType &VRBaseMap, |
| 319 | bool IsDebug, bool IsClone, bool IsCloned) { |
| 320 | assert(Op.getValueType() != MVT::Other && |
| 321 | Op.getValueType() != MVT::Glue && |
| 322 | "Chain and glue operands should occur at end of operand list!" ); |
| 323 | // Get/emit the operand. |
| 324 | Register VReg = getVR(Op, VRBaseMap); |
| 325 | |
| 326 | const MCInstrDesc &MCID = MIB->getDesc(); |
| 327 | bool isOptDef = IIOpNum < MCID.getNumOperands() && |
| 328 | MCID.operands()[IIOpNum].isOptionalDef(); |
| 329 | |
| 330 | // If the instruction requires a register in a different class, create |
| 331 | // a new virtual register and copy the value into it, but first attempt to |
| 332 | // shrink VReg's register class within reason. For example, if VReg == GR32 |
| 333 | // and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP. |
| 334 | if (II) { |
| 335 | const TargetRegisterClass *OpRC = nullptr; |
| 336 | if (IIOpNum < II->getNumOperands()) |
| 337 | OpRC = TII->getRegClass(MCID: *II, OpNum: IIOpNum); |
| 338 | |
| 339 | if (OpRC) { |
| 340 | unsigned MinNumRegs = MinRCSize; |
| 341 | // Don't apply any RC size limit for IMPLICIT_DEF. Each use has a unique |
| 342 | // virtual register. |
| 343 | if (Op.isMachineOpcode() && |
| 344 | Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) |
| 345 | MinNumRegs = 0; |
| 346 | |
| 347 | const TargetRegisterClass *ConstrainedRC |
| 348 | = MRI->constrainRegClass(Reg: VReg, RC: OpRC, MinNumRegs); |
| 349 | if (!ConstrainedRC) { |
| 350 | OpRC = TRI->getAllocatableClass(RC: OpRC); |
| 351 | assert(OpRC && "Constraints cannot be fulfilled for allocation" ); |
| 352 | Register NewVReg = MRI->createVirtualRegister(RegClass: OpRC); |
| 353 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: MIB->getDebugLoc(), |
| 354 | MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: NewVReg) |
| 355 | .addReg(RegNo: VReg); |
| 356 | VReg = NewVReg; |
| 357 | } else { |
| 358 | assert(ConstrainedRC->isAllocatable() && |
| 359 | "Constraining an allocatable VReg produced an unallocatable class?" ); |
| 360 | } |
| 361 | } |
| 362 | } |
| 363 | |
| 364 | // If this value has only one use, that use is a kill. This is a |
| 365 | // conservative approximation. InstrEmitter does trivial coalescing |
| 366 | // with CopyFromReg nodes, so don't emit kill flags for them. |
| 367 | // Avoid kill flags on scheduler-cloned nodes, since there will be |
| 368 | // multiple uses. |
| 369 | // Tied operands are never killed, so we need to check that. And that |
| 370 | // means we need to determine the index of the operand. |
| 371 | // Don't kill convergence control tokens. Initially they are only used in glue |
| 372 | // nodes, and the InstrEmitter later adds implicit uses on the users of the |
| 373 | // glue node. This can sometimes make it seem like there is only one use, |
| 374 | // which is the glue node itself. |
| 375 | bool isKill = Op.hasOneUse() && !isConvergenceCtrlMachineOp(Op) && |
| 376 | Op.getNode()->getOpcode() != ISD::CopyFromReg && !IsDebug && |
| 377 | !(IsClone || IsCloned); |
| 378 | if (isKill) { |
| 379 | unsigned Idx = MIB->getNumOperands(); |
| 380 | while (Idx > 0 && |
| 381 | MIB->getOperand(i: Idx-1).isReg() && |
| 382 | MIB->getOperand(i: Idx-1).isImplicit()) |
| 383 | --Idx; |
| 384 | bool isTied = MCID.getOperandConstraint(OpNum: Idx, Constraint: MCOI::TIED_TO) != -1; |
| 385 | if (isTied) |
| 386 | isKill = false; |
| 387 | } |
| 388 | |
| 389 | MIB.addReg(RegNo: VReg, Flags: getDefRegState(B: isOptDef) | getKillRegState(B: isKill) | |
| 390 | getDebugRegState(B: IsDebug)); |
| 391 | } |
| 392 | |
| 393 | /// AddOperand - Add the specified operand to the specified machine instr. II |
| 394 | /// specifies the instruction information for the node, and IIOpNum is the |
| 395 | /// operand number (in the II) that we are adding. |
| 396 | void InstrEmitter::AddOperand(MachineInstrBuilder &MIB, SDValue Op, |
| 397 | unsigned IIOpNum, const MCInstrDesc *II, |
| 398 | VRBaseMapType &VRBaseMap, bool IsDebug, |
| 399 | bool IsClone, bool IsCloned) { |
| 400 | if (Op.isMachineOpcode()) { |
| 401 | AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap, |
| 402 | IsDebug, IsClone, IsCloned); |
| 403 | } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: Op)) { |
| 404 | if (C->getAPIntValue().getSignificantBits() <= 64) { |
| 405 | MIB.addImm(Val: C->getSExtValue()); |
| 406 | } else { |
| 407 | MIB.addCImm( |
| 408 | Val: ConstantInt::get(Context&: MF->getFunction().getContext(), V: C->getAPIntValue())); |
| 409 | } |
| 410 | } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Val&: Op)) { |
| 411 | MIB.addFPImm(Val: F->getConstantFPValue()); |
| 412 | } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Val&: Op)) { |
| 413 | Register VReg = R->getReg(); |
| 414 | MVT OpVT = Op.getSimpleValueType(); |
| 415 | const TargetRegisterClass *IIRC = |
| 416 | II ? TRI->getAllocatableClass(RC: TII->getRegClass(MCID: *II, OpNum: IIOpNum)) : nullptr; |
| 417 | const TargetRegisterClass *OpRC = |
| 418 | TLI->isTypeLegal(VT: OpVT) |
| 419 | ? TLI->getRegClassFor(VT: OpVT, |
| 420 | isDivergent: Op.getNode()->isDivergent() || |
| 421 | (IIRC && TRI->isDivergentRegClass(RC: IIRC))) |
| 422 | : nullptr; |
| 423 | |
| 424 | if (OpRC && IIRC && OpRC != IIRC && VReg.isVirtual()) { |
| 425 | Register NewVReg = MRI->createVirtualRegister(RegClass: IIRC); |
| 426 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Op.getNode()->getDebugLoc(), |
| 427 | MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: NewVReg).addReg(RegNo: VReg); |
| 428 | VReg = NewVReg; |
| 429 | } |
| 430 | // Turn additional physreg operands into implicit uses on non-variadic |
| 431 | // instructions. This is used by call and return instructions passing |
| 432 | // arguments in registers. |
| 433 | bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic()); |
| 434 | MIB.addReg(RegNo: VReg, Flags: getImplRegState(B: Imp)); |
| 435 | } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Val&: Op)) { |
| 436 | MIB.addRegMask(Mask: RM->getRegMask()); |
| 437 | } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Val&: Op)) { |
| 438 | MIB.addGlobalAddress(GV: TGA->getGlobal(), Offset: TGA->getOffset(), |
| 439 | TargetFlags: TGA->getTargetFlags()); |
| 440 | } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Val&: Op)) { |
| 441 | MIB.addMBB(MBB: BBNode->getBasicBlock()); |
| 442 | } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Val&: Op)) { |
| 443 | MIB.addFrameIndex(Idx: FI->getIndex()); |
| 444 | } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Val&: Op)) { |
| 445 | MIB.addJumpTableIndex(Idx: JT->getIndex(), TargetFlags: JT->getTargetFlags()); |
| 446 | } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Val&: Op)) { |
| 447 | int Offset = CP->getOffset(); |
| 448 | Align Alignment = CP->getAlign(); |
| 449 | |
| 450 | unsigned Idx; |
| 451 | MachineConstantPool *MCP = MF->getConstantPool(); |
| 452 | if (CP->isMachineConstantPoolEntry()) |
| 453 | Idx = MCP->getConstantPoolIndex(V: CP->getMachineCPVal(), Alignment); |
| 454 | else |
| 455 | Idx = MCP->getConstantPoolIndex(C: CP->getConstVal(), Alignment); |
| 456 | MIB.addConstantPoolIndex(Idx, Offset, TargetFlags: CP->getTargetFlags()); |
| 457 | } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Val&: Op)) { |
| 458 | MIB.addExternalSymbol(FnName: ES->getSymbol(), TargetFlags: ES->getTargetFlags()); |
| 459 | } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Val&: Op)) { |
| 460 | MIB.addSym(Sym: SymNode->getMCSymbol()); |
| 461 | } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Val&: Op)) { |
| 462 | MIB.addBlockAddress(BA: BA->getBlockAddress(), |
| 463 | Offset: BA->getOffset(), |
| 464 | TargetFlags: BA->getTargetFlags()); |
| 465 | } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Val&: Op)) { |
| 466 | MIB.addTargetIndex(Idx: TI->getIndex(), Offset: TI->getOffset(), TargetFlags: TI->getTargetFlags()); |
| 467 | } else { |
| 468 | assert(Op.getValueType() != MVT::Other && |
| 469 | Op.getValueType() != MVT::Glue && |
| 470 | "Chain and glue operands should occur at end of operand list!" ); |
| 471 | AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap, |
| 472 | IsDebug, IsClone, IsCloned); |
| 473 | } |
| 474 | } |
| 475 | |
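| | /// ConstrainForSubReg - Constrain VReg to a register class that supports |
| | /// SubIdx sub-registers, within reason (MinRCSize). If that is not possible, |
| | /// emit a COPY into a new virtual register of a suitable class and return |
| | /// that register; otherwise return VReg itself. |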
| 476 | Register InstrEmitter::ConstrainForSubReg(Register VReg, unsigned SubIdx, |
| 477 | MVT VT, bool isDivergent, const DebugLoc &DL) { |
| 478 | const TargetRegisterClass *VRC = MRI->getRegClass(Reg: VReg); |
| 479 | const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(RC: VRC, Idx: SubIdx); |
| 480 | |
| 481 | // RC is a sub-class of VRC that supports SubIdx. Try to constrain VReg |
| 482 | // within reason. |
| 483 | if (RC && RC != VRC) |
| 484 | RC = MRI->constrainRegClass(Reg: VReg, RC, MinNumRegs: MinRCSize); |
| 485 | |
| 486 | // VReg has been adjusted. It can be used with SubIdx operands now. |
| 487 | if (RC) |
| 488 | return VReg; |
| 489 | |
| 490 | // VReg couldn't be reasonably constrained. Emit a COPY to a new virtual |
| 491 | // register instead. |
| 492 | RC = TRI->getSubClassWithSubReg(RC: TLI->getRegClassFor(VT, isDivergent), Idx: SubIdx); |
| 493 | assert(RC && "No legal register class for VT supports that SubIdx" ); |
| 494 | Register NewReg = MRI->createVirtualRegister(RegClass: RC); |
| 495 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: DL, MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: NewReg) |
| 496 | .addReg(RegNo: VReg); |
| 497 | return NewReg; |
| 498 | } |
| 499 | |
| 500 | /// EmitSubregNode - Generate machine code for subreg nodes. |
| 501 | /// |
| 502 | void InstrEmitter::EmitSubregNode(SDNode *Node, VRBaseMapType &VRBaseMap, |
| 503 | bool IsClone, bool IsCloned) { |
| 504 | Register VRBase; |
| 505 | unsigned Opc = Node->getMachineOpcode(); |
| 506 | |
| 507 | // If the node is only used by a CopyToReg and the dest reg is a vreg, use |
| 508 | // the CopyToReg'd destination register instead of creating a new vreg. |
| 509 | for (SDNode *User : Node->users()) { |
| 510 | if (User->getOpcode() == ISD::CopyToReg && |
| 511 | User->getOperand(Num: 2).getNode() == Node) { |
| 512 | Register DestReg = cast<RegisterSDNode>(Val: User->getOperand(Num: 1))->getReg(); |
| 513 | if (DestReg.isVirtual()) { |
| 514 | VRBase = DestReg; |
| 515 | break; |
| 516 | } |
| 517 | } |
| 518 | } |
| 519 | |
| 520 | if (Opc == TargetOpcode::EXTRACT_SUBREG) { |
| 521 | // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no |
| 522 | // constraints on the %dst register; COPY can target all legal register |
| 523 | // classes. |
| 524 | unsigned SubIdx = Node->getConstantOperandVal(Num: 1); |
| 525 | const TargetRegisterClass *TRC = |
| 526 | TLI->getRegClassFor(VT: Node->getSimpleValueType(ResNo: 0), isDivergent: Node->isDivergent()); |
| 527 | |
| 528 | Register Reg; |
| 529 | MachineInstr *DefMI; |
| 530 | RegisterSDNode *R = dyn_cast<RegisterSDNode>(Val: Node->getOperand(Num: 0)); |
| 531 | if (R && R->getReg().isPhysical()) { |
| 532 | Reg = R->getReg(); |
| 533 | DefMI = nullptr; |
| 534 | } else { |
| 535 | Reg = R ? R->getReg() : getVR(Op: Node->getOperand(Num: 0), VRBaseMap); |
| 536 | DefMI = MRI->getVRegDef(Reg); |
| 537 | } |
| 538 | |
| 539 | Register SrcReg, DstReg; |
| 540 | unsigned DefSubIdx; |
| 541 | if (DefMI && |
| 542 | TII->isCoalescableExtInstr(MI: *DefMI, SrcReg, DstReg, SubIdx&: DefSubIdx) && |
| 543 | SubIdx == DefSubIdx && |
| 544 | TRC == MRI->getRegClass(Reg: SrcReg)) { |
| 545 | // Optimize these: |
| 546 | // r1025 = s/zext r1024, 4 |
| 547 | // r1026 = extract_subreg r1025, 4 |
| 548 | // to a copy |
| 549 | // r1026 = copy r1024 |
| 550 | VRBase = MRI->createVirtualRegister(RegClass: TRC); |
| 551 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), |
| 552 | MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: VRBase).addReg(RegNo: SrcReg); |
| 553 | MRI->clearKillFlags(Reg: SrcReg); |
| 554 | } else { |
| 555 | // Reg may not support a SubIdx sub-register, and we may need to |
| 556 | // constrain its register class or issue a COPY to a compatible register |
| 557 | // class. |
| 558 | if (Reg.isVirtual()) |
| 559 | Reg = ConstrainForSubReg(VReg: Reg, SubIdx, |
| 560 | VT: Node->getOperand(Num: 0).getSimpleValueType(), |
| 561 | isDivergent: Node->isDivergent(), DL: Node->getDebugLoc()); |
| 562 | // Create the destreg if it is missing. |
| 563 | if (!VRBase) |
| 564 | VRBase = MRI->createVirtualRegister(RegClass: TRC); |
| 565 | |
| 566 | // Create the extract_subreg machine instruction. |
| 567 | MachineInstrBuilder CopyMI = |
| 568 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), |
| 569 | MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: VRBase); |
| 570 | if (Reg.isVirtual()) |
| 571 | CopyMI.addReg(RegNo: Reg, Flags: {}, SubReg: SubIdx); |
| 572 | else |
| 573 | CopyMI.addReg(RegNo: TRI->getSubReg(Reg, Idx: SubIdx)); |
| 574 | } |
| 575 | } else if (Opc == TargetOpcode::INSERT_SUBREG || |
| 576 | Opc == TargetOpcode::SUBREG_TO_REG) { |
| 577 | SDValue N0 = Node->getOperand(Num: 0); |
| 578 | SDValue N1 = Node->getOperand(Num: 1); |
| 579 | SDValue N2 = Node->getOperand(Num: 2); |
| 580 | unsigned SubIdx = N2->getAsZExtVal(); |
| 581 | |
| 582 | // Figure out the register class to create for the destreg. It should be |
| 583 | // the largest legal register class supporting SubIdx sub-registers. |
| 584 | // RegisterCoalescer will constrain it further if it decides to eliminate |
| 585 | // the INSERT_SUBREG instruction. |
| 586 | // |
| 587 | // %dst = INSERT_SUBREG %src, %sub, SubIdx |
| 588 | // |
| 589 | // is lowered by TwoAddressInstructionPass to: |
| 590 | // |
| 591 | // %dst = COPY %src |
| 592 | // %dst:SubIdx = COPY %sub |
| 593 | // |
| 594 | // There is no constraint on the %src register class. |
| 595 | // |
| 596 | const TargetRegisterClass *SRC = |
| 597 | TLI->getRegClassFor(VT: Node->getSimpleValueType(ResNo: 0), isDivergent: Node->isDivergent()); |
| 598 | SRC = TRI->getSubClassWithSubReg(RC: SRC, Idx: SubIdx); |
| 599 | assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG" ); |
| 600 | |
| 601 | if (VRBase == 0 || !SRC->hasSubClassEq(RC: MRI->getRegClass(Reg: VRBase))) |
| 602 | VRBase = MRI->createVirtualRegister(RegClass: SRC); |
| 603 | |
| 604 | // Create the insert_subreg or subreg_to_reg machine instruction. |
| 605 | MachineInstrBuilder MIB = |
| 606 | BuildMI(MF&: *MF, MIMD: Node->getDebugLoc(), MCID: TII->get(Opcode: Opc), DestReg: VRBase); |
| 607 | |
| 608 | // If creating a subreg_to_reg, then the first input operand |
| 609 | // is an immediate giving the implicit value; otherwise it's a register. |
| 610 | if (Opc == TargetOpcode::SUBREG_TO_REG) { |
| 611 | const ConstantSDNode *SD = cast<ConstantSDNode>(Val&: N0); |
| 612 | MIB.addImm(Val: SD->getZExtValue()); |
| 613 | } else |
| 614 | AddOperand(MIB, Op: N0, IIOpNum: 0, II: nullptr, VRBaseMap, /*IsDebug=*/false, |
| 615 | IsClone, IsCloned); |
| 616 | // Add the subregister being inserted |
| 617 | AddOperand(MIB, Op: N1, IIOpNum: 0, II: nullptr, VRBaseMap, /*IsDebug=*/false, |
| 618 | IsClone, IsCloned); |
| 619 | MIB.addImm(Val: SubIdx); |
| 620 | MBB->insert(I: InsertPos, MI: MIB); |
| 621 | } else |
| 622 | llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg" ); |
| 623 | |
| 624 | SDValue Op(Node, 0); |
| 625 | bool isNew = VRBaseMap.insert(KV: std::make_pair(x&: Op, y&: VRBase)).second; |
| 626 | (void)isNew; // Silence compiler warning. |
| 627 | assert(isNew && "Node emitted out of order - early" ); |
| 628 | } |
| 629 | |
| 630 | /// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes. |
| 631 | /// COPY_TO_REGCLASS is just a normal copy, except that the destination |
| 632 | /// register is constrained to be in a particular register class. |
| 633 | /// |
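| | /// For example (illustrative), (COPY_TO_REGCLASS %x, GR32RegClassID) is |
| | /// emitted as roughly: |
| | /// |
| | /// %new:gr32 = COPY %x |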
| 634 | void |
| 635 | InstrEmitter::EmitCopyToRegClassNode(SDNode *Node, |
| 636 | VRBaseMapType &VRBaseMap) { |
| 637 | // Create the new VReg in the destination class and emit a copy. |
| 638 | unsigned DstRCIdx = Node->getConstantOperandVal(Num: 1); |
| 639 | const TargetRegisterClass *DstRC = |
| 640 | TRI->getAllocatableClass(RC: TRI->getRegClass(i: DstRCIdx)); |
| 641 | Register NewVReg = MRI->createVirtualRegister(RegClass: DstRC); |
| 642 | const MCInstrDesc &II = TII->get(Opcode: TargetOpcode::COPY); |
| 643 | MachineInstrBuilder MIB = BuildMI(MF&: *MF, MIMD: Node->getDebugLoc(), MCID: II, DestReg: NewVReg); |
| 644 | AddOperand(MIB, Op: Node->getOperand(Num: 0), IIOpNum: 1, II: &II, VRBaseMap, /*IsDebug=*/false, |
| 645 | /*IsClone=*/false, /*IsCloned*/ false); |
| 646 | |
| 647 | MBB->insert(I: InsertPos, MI: MIB); |
| 648 | SDValue Op(Node, 0); |
| 649 | bool isNew = VRBaseMap.insert(KV: std::make_pair(x&: Op, y&: NewVReg)).second; |
| 650 | (void)isNew; // Silence compiler warning. |
| 651 | assert(isNew && "Node emitted out of order - early" ); |
| 652 | } |
| 653 | |
| 654 | /// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes. |
| 655 | /// |
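| | /// The node's operand 0 is the destination register-class ID, followed by |
| | /// (value, sub-register index) pairs. The result is roughly: |
| | /// |
| | /// %dst = REG_SEQUENCE %v0, subidx0, %v1, subidx1, ... |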
| 656 | void InstrEmitter::EmitRegSequence(SDNode *Node, VRBaseMapType &VRBaseMap, |
| 657 | bool IsClone, bool IsCloned) { |
| 658 | unsigned DstRCIdx = Node->getConstantOperandVal(Num: 0); |
| 659 | const TargetRegisterClass *RC = TRI->getRegClass(i: DstRCIdx); |
| 660 | Register NewVReg = MRI->createVirtualRegister(RegClass: TRI->getAllocatableClass(RC)); |
| 661 | const MCInstrDesc &II = TII->get(Opcode: TargetOpcode::REG_SEQUENCE); |
| 662 | MachineInstrBuilder MIB = BuildMI(MF&: *MF, MIMD: Node->getDebugLoc(), MCID: II, DestReg: NewVReg); |
| 663 | unsigned NumOps = Node->getNumOperands(); |
| 664 | // If the input pattern has a chain, then the root of the corresponding |
| 665 | // output pattern will get a chain as well. That root can happen to be a |
| 666 | // REG_SEQUENCE (which is not "guarded" by countOperands/CountResults). |
| 667 | if (NumOps && Node->getOperand(Num: NumOps-1).getValueType() == MVT::Other) |
| 668 | --NumOps; // Ignore chain if it exists. |
| 669 | |
| 670 | assert((NumOps & 1) == 1 && |
| 671 | "REG_SEQUENCE must have an odd number of operands!" ); |
| 672 | for (unsigned i = 1; i != NumOps; ++i) { |
| 673 | SDValue Op = Node->getOperand(Num: i); |
| 674 | if ((i & 1) == 0) { |
| 675 | RegisterSDNode *R = dyn_cast<RegisterSDNode>(Val: Node->getOperand(Num: i-1)); |
| 676 | // Skip physical registers as they don't have a vreg to get and we'll |
| 677 | // insert copies for them in TwoAddressInstructionPass anyway. |
| 678 | if (!R || !R->getReg().isPhysical()) { |
| 679 | unsigned SubIdx = Op->getAsZExtVal(); |
| 680 | Register SubReg = getVR(Op: Node->getOperand(Num: i - 1), VRBaseMap); |
| 681 | const TargetRegisterClass *TRC = MRI->getRegClass(Reg: SubReg); |
| 682 | const TargetRegisterClass *SRC = |
| 683 | TRI->getMatchingSuperRegClass(A: RC, B: TRC, Idx: SubIdx); |
| 684 | if (SRC && SRC != RC) { |
| 685 | MRI->setRegClass(Reg: NewVReg, RC: SRC); |
| 686 | RC = SRC; |
| 687 | } |
| 688 | } |
| 689 | } |
| 690 | AddOperand(MIB, Op, IIOpNum: i+1, II: &II, VRBaseMap, /*IsDebug=*/false, |
| 691 | IsClone, IsCloned); |
| 692 | } |
| 693 | |
| 694 | MBB->insert(I: InsertPos, MI: MIB); |
| 695 | SDValue Op(Node, 0); |
| 696 | bool isNew = VRBaseMap.insert(KV: std::make_pair(x&: Op, y&: NewVReg)).second; |
| 697 | (void)isNew; // Silence compiler warning. |
| 698 | assert(isNew && "Node emitted out of order - early" ); |
| 699 | } |
| 700 | |
| 701 | /// EmitDbgValue - Generate machine instruction for a dbg_value node. |
| 702 | /// |
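| | /// Depending on the mode and the node, this produces a DBG_INSTR_REF, a |
| | /// DBG_VALUE_LIST (for variadic locations), or a plain DBG_VALUE. |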
| 703 | MachineInstr * |
| 704 | InstrEmitter::EmitDbgValue(SDDbgValue *SD, |
| 705 | VRBaseMapType &VRBaseMap) { |
| 706 | DebugLoc DL = SD->getDebugLoc(); |
| 707 | assert(cast<DILocalVariable>(SD->getVariable()) |
| 708 | ->isValidLocationForIntrinsic(DL) && |
| 709 | "Expected inlined-at fields to agree" ); |
| 710 | |
| 711 | SD->setIsEmitted(); |
| 712 | |
| 713 | assert(!SD->getLocationOps().empty() && |
| 714 | "dbg_value with no location operands?" ); |
| 715 | |
| 716 | if (SD->isInvalidated()) |
| 717 | return EmitDbgNoLocation(SD); |
| 718 | |
| 719 | // Attempt to produce a DBG_INSTR_REF if we've been asked to. |
| 720 | if (EmitDebugInstrRefs) |
| 721 | if (auto *InstrRef = EmitDbgInstrRef(SD, VRBaseMap)) |
| 722 | return InstrRef; |
| 723 | |
| 724 | // Emit variadic dbg_value nodes as DBG_VALUE_LIST if they have not been |
| 725 | // emitted as instruction references. |
| 726 | if (SD->isVariadic()) |
| 727 | return EmitDbgValueList(SD, VRBaseMap); |
| 728 | |
| 729 | // Emit single-location dbg_value nodes as DBG_VALUE if they have not been |
| 730 | // emitted as instruction references. |
| 731 | return EmitDbgValueFromSingleOp(SD, VRBaseMap); |
| 732 | } |
| 733 | |
| 734 | static MachineOperand GetMOForConstDbgOp(const SDDbgOperand &Op) { |
| 735 | const Value *V = Op.getConst(); |
| 736 | if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val: V)) { |
| 737 | if (CI->getBitWidth() > 64) |
| 738 | return MachineOperand::CreateCImm(CI); |
| 739 | if (CI->getBitWidth() == 1) |
| 740 | return MachineOperand::CreateImm(Val: CI->getZExtValue()); |
| 741 | return MachineOperand::CreateImm(Val: CI->getSExtValue()); |
| 742 | } |
| 743 | if (const ConstantFP *CF = dyn_cast<ConstantFP>(Val: V)) |
| 744 | return MachineOperand::CreateFPImm(CFP: CF); |
| 745 | // Note: This assumes that all nullptr constants are zero-valued. |
| 746 | if (isa<ConstantPointerNull>(Val: V)) |
| 747 | return MachineOperand::CreateImm(Val: 0); |
| 748 | // Undef or unhandled value type, so return an undef operand. |
| 749 | return MachineOperand::CreateReg( |
| 750 | /* Reg */ 0U, /* isDef */ false, /* isImp */ false, |
| 751 | /* isKill */ false, /* isDead */ false, |
| 752 | /* isUndef */ false, /* isEarlyClobber */ false, |
| 753 | /* SubReg */ 0, /* isDebug */ true); |
| 754 | } |
| 755 | |
| 756 | void InstrEmitter::AddDbgValueLocationOps( |
| 757 | MachineInstrBuilder &MIB, const MCInstrDesc &DbgValDesc, |
| 758 | ArrayRef<SDDbgOperand> LocationOps, |
| 759 | VRBaseMapType &VRBaseMap) { |
| 760 | for (const SDDbgOperand &Op : LocationOps) { |
| 761 | switch (Op.getKind()) { |
| 762 | case SDDbgOperand::FRAMEIX: |
| 763 | MIB.addFrameIndex(Idx: Op.getFrameIx()); |
| 764 | break; |
| 765 | case SDDbgOperand::VREG: |
| 766 | MIB.addReg(RegNo: Op.getVReg()); |
| 767 | break; |
| 768 | case SDDbgOperand::SDNODE: { |
| 769 | SDValue V = SDValue(Op.getSDNode(), Op.getResNo()); |
| 770 | // It's possible we replaced this SDNode with other(s) and therefore |
| 771 | // didn't generate code for it. It's better to catch these cases where |
| 772 | // they happen and transfer the debug info, but trying to guarantee that |
| 773 | // in all cases would be very fragile; this is a safeguard for any |
| 774 | // that were missed. |
| 775 | if (VRBaseMap.count(Val: V) == 0) |
| 776 | MIB.addReg(RegNo: 0U); // undef |
| 777 | else |
| 778 | AddOperand(MIB, Op: V, IIOpNum: (*MIB).getNumOperands(), II: &DbgValDesc, VRBaseMap, |
| 779 | /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false); |
| 780 | } break; |
| 781 | case SDDbgOperand::CONST: |
| 782 | MIB.add(MO: GetMOForConstDbgOp(Op)); |
| 783 | break; |
| 784 | } |
| 785 | } |
| 786 | } |
| 787 | |
| 788 | MachineInstr * |
| 789 | InstrEmitter::EmitDbgInstrRef(SDDbgValue *SD, |
| 790 | VRBaseMapType &VRBaseMap) { |
| 791 | MDNode *Var = SD->getVariable(); |
| 792 | const DIExpression *Expr = SD->getExpression(); |
| 793 | DebugLoc DL = SD->getDebugLoc(); |
| 794 | const MCInstrDesc &RefII = TII->get(Opcode: TargetOpcode::DBG_INSTR_REF); |
| 795 | |
| 796 | // Returns true if the given operand is not a legal debug operand for a |
| 797 | // DBG_INSTR_REF. |
| 798 | auto IsInvalidOp = [](SDDbgOperand DbgOp) { |
| 799 | return DbgOp.getKind() == SDDbgOperand::FRAMEIX; |
| 800 | }; |
| 801 | // Returns true if the given operand is not itself an instruction reference |
| 802 | // but is a legal debug operand for a DBG_INSTR_REF. |
| 803 | auto IsNonInstrRefOp = [](SDDbgOperand DbgOp) { |
| 804 | return DbgOp.getKind() == SDDbgOperand::CONST; |
| 805 | }; |
| 806 | |
| 807 | // If this variable location does not depend on any instructions, or if it |
| 808 | // contains any stack locations, produce it as a standard debug value instead. |
| 809 | if (any_of(Range: SD->getLocationOps(), P: IsInvalidOp) || |
| 810 | all_of(Range: SD->getLocationOps(), P: IsNonInstrRefOp)) { |
| 811 | if (SD->isVariadic()) |
| 812 | return EmitDbgValueList(SD, VRBaseMap); |
| 813 | return EmitDbgValueFromSingleOp(SD, VRBaseMap); |
| 814 | } |
| 815 | |
| 816 | // Immediately fold any indirectness from the LLVM-IR intrinsic into the |
| 817 | // expression: |
| 818 | if (SD->isIndirect()) |
| 819 | Expr = DIExpression::append(Expr, Ops: dwarf::DW_OP_deref); |
| 820 | // If this is not already a variadic expression, it must be modified to become |
| 821 | // one. |
| 822 | if (!SD->isVariadic()) |
| 823 | Expr = DIExpression::convertToVariadicExpression(Expr); |
| 824 | |
| 825 | SmallVector<MachineOperand> MOs; |
| 826 | |
| 827 | // It may not be immediately possible to identify the MachineInstr that |
| 828 | // defines a VReg; it can depend, for example, on the order in which blocks |
| 829 | // are emitted. When this happens, or when further analysis is needed later, |
| 830 | // produce an instruction like this: |
| 831 | // |
| 832 | // DBG_INSTR_REF !123, !456, %0:gr64 |
| 833 | // |
| 834 | // i.e., point the instruction at the vreg, and patch it up later in |
| 835 | // MachineFunction::finalizeDebugInstrRefs. |
| 836 | auto AddVRegOp = [&](Register VReg) { |
| 837 | MOs.push_back(Elt: MachineOperand::CreateReg( |
| 838 | /* Reg */ VReg, /* isDef */ false, /* isImp */ false, |
| 839 | /* isKill */ false, /* isDead */ false, |
| 840 | /* isUndef */ false, /* isEarlyClobber */ false, |
| 841 | /* SubReg */ 0, /* isDebug */ true)); |
| 842 | }; |
| 843 | unsigned OpCount = SD->getLocationOps().size(); |
| 844 | for (unsigned OpIdx = 0; OpIdx < OpCount; ++OpIdx) { |
| 845 | SDDbgOperand DbgOperand = SD->getLocationOps()[OpIdx]; |
| 846 | |
| 847 | // Try to find both the defined register and the instruction defining it. |
| 848 | MachineInstr *DefMI = nullptr; |
| 849 | Register VReg; |
| 850 | |
| 851 | if (DbgOperand.getKind() == SDDbgOperand::VREG) { |
| 852 | VReg = DbgOperand.getVReg(); |
| 853 | |
| 854 | // No definition means that block hasn't been emitted yet. Leave a vreg |
| 855 | // reference to be fixed later. |
| 856 | if (!MRI->hasOneDef(RegNo: VReg)) { |
| 857 | AddVRegOp(VReg); |
| 858 | continue; |
| 859 | } |
| 860 | |
| 861 | DefMI = &*MRI->def_instr_begin(RegNo: VReg); |
| 862 | } else if (DbgOperand.getKind() == SDDbgOperand::SDNODE) { |
| 863 | // Look up the corresponding VReg for the given SDNode, if any. |
| 864 | SDNode *Node = DbgOperand.getSDNode(); |
| 865 | SDValue Op = SDValue(Node, DbgOperand.getResNo()); |
| 866 | VRBaseMapType::iterator I = VRBaseMap.find(Val: Op); |
| 867 | // No VReg -> produce a DBG_VALUE $noreg instead. |
| 868 | if (I == VRBaseMap.end()) |
| 869 | break; |
| 870 | |
| 871 | // Try to pick out a defining instruction at this point. |
| 872 | VReg = getVR(Op, VRBaseMap); |
| 873 | |
| 874 | // Again, if there's no instruction defining the VReg right now, fix it up |
| 875 | // later. |
| 876 | if (!MRI->hasOneDef(RegNo: VReg)) { |
| 877 | AddVRegOp(VReg); |
| 878 | continue; |
| 879 | } |
| 880 | |
| 881 | DefMI = &*MRI->def_instr_begin(RegNo: VReg); |
| 882 | } else { |
| 883 | assert(DbgOperand.getKind() == SDDbgOperand::CONST); |
| 884 | MOs.push_back(Elt: GetMOForConstDbgOp(Op: DbgOperand)); |
| 885 | continue; |
| 886 | } |
| 887 | |
| 888 | // Avoid copy-like instructions: they don't define values, only move them. |
| 889 | // Leave a virtual-register reference until it can be fixed up later, to |
| 890 | // find the underlying value definition. |
| 891 | if (DefMI->isCopyLike() || TII->isCopyInstr(MI: *DefMI)) { |
| 892 | AddVRegOp(VReg); |
| 893 | continue; |
| 894 | } |
| 895 | |
| 896 | // Find the operand number which defines the specified VReg. |
| 897 | unsigned OperandIdx = 0; |
| 898 | for (const auto &MO : DefMI->operands()) { |
| 899 | if (MO.isReg() && MO.isDef() && MO.getReg() == VReg) |
| 900 | break; |
| 901 | ++OperandIdx; |
| 902 | } |
| 903 | assert(OperandIdx < DefMI->getNumOperands()); |
| 904 | |
| 905 | // Make the DBG_INSTR_REF refer to that instruction, and that operand. |
| 906 | unsigned InstrNum = DefMI->getDebugInstrNum(); |
| 907 | MOs.push_back(Elt: MachineOperand::CreateDbgInstrRef(InstrIdx: InstrNum, OpIdx: OperandIdx)); |
| 908 | } |
| 909 | |
| 910 | // If we haven't created a valid MachineOperand for every DbgOp, abort and |
| 911 | // produce an undef DBG_VALUE. |
| 912 | if (MOs.size() != OpCount) |
| 913 | return EmitDbgNoLocation(SD); |
| 914 | |
| 915 | return BuildMI(MF&: *MF, DL, MCID: RefII, IsIndirect: false, MOs, Variable: Var, Expr); |
| 916 | } |
| 917 | |
| 918 | MachineInstr *InstrEmitter::EmitDbgNoLocation(SDDbgValue *SD) { |
| 919 | // An invalidated SDNode must generate an undef DBG_VALUE: although the |
| 920 | // original value is no longer computed, earlier DBG_VALUEs' live ranges |
| 921 | // must not leak into later code. |
| 922 | DIVariable *Var = SD->getVariable(); |
| 923 | const DIExpression *Expr = |
| 924 | DIExpression::convertToUndefExpression(Expr: SD->getExpression()); |
| 925 | DebugLoc DL = SD->getDebugLoc(); |
| 926 | const MCInstrDesc &Desc = TII->get(Opcode: TargetOpcode::DBG_VALUE); |
| 927 | return BuildMI(MF&: *MF, DL, MCID: Desc, IsIndirect: false, Reg: 0U, Variable: Var, Expr); |
| 928 | } |
| 929 | |
| 930 | MachineInstr * |
| 931 | InstrEmitter::EmitDbgValueList(SDDbgValue *SD, |
| 932 | VRBaseMapType &VRBaseMap) { |
| 933 | MDNode *Var = SD->getVariable(); |
| 934 | DIExpression *Expr = SD->getExpression(); |
| 935 | DebugLoc DL = SD->getDebugLoc(); |
| 936 | // DBG_VALUE_LIST := "DBG_VALUE_LIST" var, expression, loc (, loc)* |
| 937 | const MCInstrDesc &DbgValDesc = TII->get(Opcode: TargetOpcode::DBG_VALUE_LIST); |
| 938 | // Build the DBG_VALUE_LIST instruction base. |
| 939 | auto MIB = BuildMI(MF&: *MF, MIMD: DL, MCID: DbgValDesc); |
| 940 | MIB.addMetadata(MD: Var); |
| 941 | MIB.addMetadata(MD: Expr); |
| 942 | AddDbgValueLocationOps(MIB, DbgValDesc, LocationOps: SD->getLocationOps(), VRBaseMap); |
| 943 | return &*MIB; |
| 944 | } |
| 945 | |
| 946 | MachineInstr * |
| 947 | InstrEmitter::EmitDbgValueFromSingleOp(SDDbgValue *SD, |
| 948 | VRBaseMapType &VRBaseMap) { |
| 949 | MDNode *Var = SD->getVariable(); |
| 950 | DIExpression *Expr = SD->getExpression(); |
| 951 | DebugLoc DL = SD->getDebugLoc(); |
| 952 | const MCInstrDesc &II = TII->get(Opcode: TargetOpcode::DBG_VALUE); |
| 953 | |
| 954 | assert(SD->getLocationOps().size() == 1 && |
| 955 | "Non variadic dbg_value should have only one location op" ); |
| 956 | |
| 957 | // See about constant-folding the expression. |
| 958 | // Copy the location operand in case we replace it. |
| 959 | SmallVector<SDDbgOperand, 1> LocationOps(1, SD->getLocationOps()[0]); |
| 960 | if (Expr && LocationOps[0].getKind() == SDDbgOperand::CONST) { |
| 961 | const Value *V = LocationOps[0].getConst(); |
| 962 | if (auto *C = dyn_cast<ConstantInt>(Val: V)) { |
| 963 | std::tie(args&: Expr, args&: C) = Expr->constantFold(CI: C); |
| 964 | LocationOps[0] = SDDbgOperand::fromConst(Const: C); |
| 965 | } |
| 966 | } |
| 967 | |
| 968 | // Emit non-variadic dbg_value nodes as DBG_VALUE. |
| 969 | // DBG_VALUE := "DBG_VALUE" loc, isIndirect, var, expr |
| 970 | auto MIB = BuildMI(MF&: *MF, MIMD: DL, MCID: II); |
| 971 | AddDbgValueLocationOps(MIB, DbgValDesc: II, LocationOps, VRBaseMap); |
| 972 | |
| 973 | if (SD->isIndirect()) |
| 974 | MIB.addImm(Val: 0U); |
| 975 | else |
| 976 | MIB.addReg(RegNo: 0U); |
| 977 | |
| 978 | return MIB.addMetadata(MD: Var).addMetadata(MD: Expr); |
| 979 | } |
| 980 | |
| 981 | MachineInstr * |
| 982 | InstrEmitter::EmitDbgLabel(SDDbgLabel *SD) { |
| 983 | MDNode *Label = SD->getLabel(); |
| 984 | DebugLoc DL = SD->getDebugLoc(); |
| 985 | assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && |
| 986 | "Expected inlined-at fields to agree" ); |
| 987 | |
| 988 | const MCInstrDesc &II = TII->get(Opcode: TargetOpcode::DBG_LABEL); |
| 989 | MachineInstrBuilder MIB = BuildMI(MF&: *MF, MIMD: DL, MCID: II); |
| 990 | MIB.addMetadata(MD: Label); |
| 991 | |
| 992 | return &*MIB; |
| 993 | } |
| 994 | |
| 995 | /// EmitMachineNode - Generate machine code for a target-specific node and |
| 996 | /// needed dependencies. |
| 997 | /// |
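| | /// In outline: create virtual registers for the explicit results, add the |
| | /// node's operands, attach memory operands and IR flags, insert the |
| | /// instruction, and then wire up any implicitly defined physregs that have |
| | /// uses via EmitCopyFromReg. |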
| 998 | void InstrEmitter:: |
| 999 | EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned, |
| 1000 | VRBaseMapType &VRBaseMap) { |
| 1001 | unsigned Opc = Node->getMachineOpcode(); |
| 1002 | |
| 1003 | // Handle subreg insert/extract specially. |
| 1004 | if (Opc == TargetOpcode::EXTRACT_SUBREG || |
| 1005 | Opc == TargetOpcode::INSERT_SUBREG || |
| 1006 | Opc == TargetOpcode::SUBREG_TO_REG) { |
| 1007 | EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned); |
| 1008 | return; |
| 1009 | } |
| 1010 | |
| 1011 | // Handle COPY_TO_REGCLASS specially. |
| 1012 | if (Opc == TargetOpcode::COPY_TO_REGCLASS) { |
| 1013 | EmitCopyToRegClassNode(Node, VRBaseMap); |
| 1014 | return; |
| 1015 | } |
| 1016 | |
| 1017 | // Handle REG_SEQUENCE specially. |
| 1018 | if (Opc == TargetOpcode::REG_SEQUENCE) { |
| 1019 | EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned); |
| 1020 | return; |
| 1021 | } |
| 1022 | |
| 1023 | if (Opc == TargetOpcode::IMPLICIT_DEF) |
| 1024 | // We want a unique VR for each IMPLICIT_DEF use. |
| 1025 | return; |
| 1026 | |
| 1027 | const MCInstrDesc &II = TII->get(Opcode: Opc); |
| 1028 | unsigned NumResults = CountResults(Node); |
| 1029 | unsigned NumDefs = II.getNumDefs(); |
| 1030 | const MCPhysReg *ScratchRegs = nullptr; |
| 1031 | |
| 1032 | // Handle STACKMAP and PATCHPOINT specially and then use the generic code. |
| 1033 | if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) { |
| 1034 | // Stackmaps do not have arguments and do not preserve their calling |
| 1035 | // convention. However, to simplify runtime support, they clobber the same |
| 1036 | // scratch registers as AnyRegCC. |
| 1037 | unsigned CC = CallingConv::AnyReg; |
| 1038 | if (Opc == TargetOpcode::PATCHPOINT) { |
| 1039 | CC = Node->getConstantOperandVal(Num: PatchPointOpers::CCPos); |
| 1040 | NumDefs = NumResults; |
| 1041 | } |
| 1042 | ScratchRegs = TLI->getScratchRegisters(CC: (CallingConv::ID) CC); |
| 1043 | } else if (Opc == TargetOpcode::STATEPOINT) { |
| 1044 | NumDefs = NumResults; |
| 1045 | } |
| 1046 | |
| 1047 | unsigned NumImpUses = 0; |
| 1048 | unsigned NodeOperands = |
| 1049 | countOperands(Node, NumExpUses: II.getNumOperands() - NumDefs, NumImpUses); |
| 1050 | bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() && |
| 1051 | II.isVariadic() && II.variadicOpsAreDefs(); |
| 1052 | bool HasPhysRegOuts = NumResults > NumDefs && !II.implicit_defs().empty() && |
| 1053 | !HasVRegVariadicDefs; |
| 1054 | #ifndef NDEBUG |
| 1055 | unsigned NumMIOperands = NodeOperands + NumResults; |
| 1056 | if (II.isVariadic()) |
| 1057 | assert(NumMIOperands >= II.getNumOperands() && |
| 1058 | "Too few operands for a variadic node!" ); |
| 1059 | else |
| 1060 | assert(NumMIOperands >= II.getNumOperands() && |
| 1061 | NumMIOperands <= |
| 1062 | II.getNumOperands() + II.implicit_defs().size() + NumImpUses && |
| 1063 | "#operands for dag node doesn't match .td file!" ); |
| 1064 | #endif |
| 1065 | |
| 1066 | // Create the new machine instruction. |
| 1067 | MachineInstrBuilder MIB = BuildMI(MF&: *MF, MIMD: Node->getDebugLoc(), MCID: II); |
| 1068 | |
| 1069 | // Transfer IR flags from the SDNode to the MachineInstr |
| 1070 | MachineInstr *MI = MIB.getInstr(); |
| 1071 | const SDNodeFlags Flags = Node->getFlags(); |
| 1072 | if (Flags.hasUnpredictable()) |
| 1073 | MI->setFlag(MachineInstr::MIFlag::Unpredictable); |
| 1074 | |
| 1075 | // Add result register values for things that are defined by this |
| 1076 | // instruction. |
| 1077 | if (NumResults) { |
| 1078 | CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap); |
| 1079 | |
| 1080 | if (Flags.hasNoSignedZeros()) |
| 1081 | MI->setFlag(MachineInstr::MIFlag::FmNsz); |
| 1082 | |
| 1083 | if (Flags.hasAllowReciprocal()) |
| 1084 | MI->setFlag(MachineInstr::MIFlag::FmArcp); |
| 1085 | |
| 1086 | if (Flags.hasNoNaNs()) |
| 1087 | MI->setFlag(MachineInstr::MIFlag::FmNoNans); |
| 1088 | |
| 1089 | if (Flags.hasNoInfs()) |
| 1090 | MI->setFlag(MachineInstr::MIFlag::FmNoInfs); |
| 1091 | |
| 1092 | if (Flags.hasAllowContract()) |
| 1093 | MI->setFlag(MachineInstr::MIFlag::FmContract); |
| 1094 | |
| 1095 | if (Flags.hasApproximateFuncs()) |
| 1096 | MI->setFlag(MachineInstr::MIFlag::FmAfn); |
| 1097 | |
| 1098 | if (Flags.hasAllowReassociation()) |
| 1099 | MI->setFlag(MachineInstr::MIFlag::FmReassoc); |
| 1100 | |
| 1101 | if (Flags.hasNoUnsignedWrap()) |
| 1102 | MI->setFlag(MachineInstr::MIFlag::NoUWrap); |
| 1103 | |
| 1104 | if (Flags.hasNoSignedWrap()) |
| 1105 | MI->setFlag(MachineInstr::MIFlag::NoSWrap); |
| 1106 | |
| 1107 | if (Flags.hasExact()) |
| 1108 | MI->setFlag(MachineInstr::MIFlag::IsExact); |
| 1109 | |
| 1110 | if (Flags.hasNoFPExcept()) |
| 1111 | MI->setFlag(MachineInstr::MIFlag::NoFPExcept); |
| 1112 | |
| 1113 | if (Flags.hasDisjoint()) |
| 1114 | MI->setFlag(MachineInstr::MIFlag::Disjoint); |
| 1115 | |
| 1116 | if (Flags.hasSameSign()) |
| 1117 | MI->setFlag(MachineInstr::MIFlag::SameSign); |
| 1118 | } |
| 1119 | |
| 1120 | // Emit all of the actual operands of this instruction, adding them to the |
| 1121 | // instruction as appropriate. |
| 1122 | bool HasOptPRefs = NumDefs > NumResults; |
| 1123 | assert((!HasOptPRefs || !HasPhysRegOuts) && |
| 1124 | "Unable to cope with optional defs and phys regs defs!" ); |
| 1125 | unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0; |
| 1126 | for (unsigned i = NumSkip; i != NodeOperands; ++i) |
| 1127 | AddOperand(MIB, Op: Node->getOperand(Num: i), IIOpNum: i-NumSkip+NumDefs, II: &II, |
| 1128 | VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned); |
| 1129 | |
| 1130 | // Add scratch registers as implicit def and early clobber |
| 1131 | if (ScratchRegs) |
| 1132 | for (unsigned i = 0; ScratchRegs[i]; ++i) |
| 1133 | MIB.addReg(RegNo: ScratchRegs[i], Flags: RegState::ImplicitDefine | |
| 1134 | RegState::EarlyClobber); |
| 1135 | |
| 1136 | // Set the memory reference descriptions of this instruction now that it is |
| 1137 | // part of the function. |
| 1138 | MIB.setMemRefs(cast<MachineSDNode>(Val: Node)->memoperands()); |
| 1139 | |
| 1140 | // Set the CFI type. |
| 1141 | MIB->setCFIType(MF&: *MF, Type: Node->getCFIType()); |
| 1142 | |
| 1143 | // Insert the instruction into position in the block. This needs to |
| 1144 | // happen before any custom inserter hook is called so that the |
| 1145 | // hook knows where in the block to insert the replacement code. |
| 1146 | MBB->insert(I: InsertPos, MI: MIB); |
| 1147 | |
| 1148 | // The MachineInstr may also define physregs instead of virtregs. These |
| 1149 | // physreg values can reach other instructions in different ways: |
| 1150 | // |
| 1151 | // 1. When there is a use of a Node value beyond the explicitly defined |
| 1152 | // virtual registers, we emit a CopyFromReg for one of the implicitly |
| 1153 | // defined physregs. This only happens when HasPhysRegOuts is true. |
| 1154 | // |
| 1155 | // 2. A CopyFromReg reading a physreg may be glued to this instruction. |
| 1156 | // |
| 1157 | // 3. A glued instruction may implicitly use a physreg. |
| 1158 | // |
| 1159 | // 4. A glued instruction may use a RegisterSDNode operand. |
| 1160 | // |
| 1161 | // Collect all the used physreg defs, and make sure that any unused physreg |
| 1162 | // defs are marked as dead. |
| 1163 | SmallVector<Register, 8> UsedRegs; |
| 1164 | |
| 1165 | // Additional results must be physical register defs. |
| 1166 | if (HasPhysRegOuts) { |
| 1167 | for (unsigned i = NumDefs; i < NumResults; ++i) { |
| 1168 | Register Reg = II.implicit_defs()[i - NumDefs]; |
| 1169 | if (!Node->hasAnyUseOfValue(Value: i)) |
| 1170 | continue; |
| 1171 | // This implicitly defined physreg has a use. |
| 1172 | UsedRegs.push_back(Elt: Reg); |
| 1173 | EmitCopyFromReg(Op: SDValue(Node, i), IsClone, SrcReg: Reg, VRBaseMap); |
| 1174 | } |
| 1175 | } |
| 1176 | |
| 1177 | // Scan the glue chain for any used physregs. |
| 1178 | if (Node->getValueType(ResNo: Node->getNumValues()-1) == MVT::Glue) { |
| 1179 | for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) { |
| 1180 | if (F->getOpcode() == ISD::CopyFromReg) { |
| 1181 | Register Reg = cast<RegisterSDNode>(Val: F->getOperand(Num: 1))->getReg(); |
| 1182 | if (Reg.isPhysical()) |
| 1183 | UsedRegs.push_back(Elt: Reg); |
| 1184 | continue; |
| 1185 | } else if (F->getOpcode() == ISD::CopyToReg) { |
| 1186 | // Skip CopyToReg nodes that are internal to the glue chain. |
| 1187 | continue; |
| 1188 | } |
| 1189 | // Collect declared implicit uses. |
| 1190 | const MCInstrDesc &MCID = TII->get(Opcode: F->getMachineOpcode()); |
| 1191 | append_range(C&: UsedRegs, R: MCID.implicit_uses()); |
| 1192 | // In addition to declared implicit uses, we must also check for |
| 1193 | // direct RegisterSDNode operands. |
| 1194 | for (const SDValue &Op : F->op_values()) |
| 1195 | if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Val: Op)) { |
| 1196 | Register Reg = R->getReg(); |
| 1197 | if (Reg.isPhysical()) |
| 1198 | UsedRegs.push_back(Elt: Reg); |
| 1199 | } |
| 1200 | } |
| 1201 | } |
| 1202 | |
| 1203 | // Add rounding control registers as implicit def for function call. |
| 1204 | if (II.isCall() && MF->getFunction().hasFnAttribute(Kind: Attribute::StrictFP)) { |
| 1205 | ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters(); |
| 1206 | llvm::append_range(C&: UsedRegs, R&: RCRegs); |
| 1207 | } |
| 1208 | |
| 1209 | // Finally mark unused registers as dead. |
| 1210 | if (!UsedRegs.empty() || !II.implicit_defs().empty() || II.hasOptionalDef()) |
| 1211 | MIB->setPhysRegsDeadExcept(UsedRegs, TRI: *TRI); |
| 1212 | |
| 1213 | // STATEPOINT is too 'dynamic' to have a meaningful machine description. |
| 1214 | // We have to manually tie operands. |
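| | // e.g., roughly in MIR syntax: %y = STATEPOINT ..., %x(tied-def 0), ... |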
| 1215 | if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) { |
| 1216 | assert(!HasPhysRegOuts && "STATEPOINT mishandled" ); |
| 1217 | MachineInstr *MI = MIB; |
| 1218 | unsigned Def = 0; |
| 1219 | int First = StatepointOpers(MI).getFirstGCPtrIdx(); |
| 1220 | assert(First > 0 && "Statepoint has Defs but no GC ptr list");
| 1221 | unsigned Use = (unsigned)First; |
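| | // Each tied pair constrains a relocated def and its GC pointer use to the
| | // same register across the statepoint.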
| 1222 | while (Def < NumDefs) { |
| 1223 | if (MI->getOperand(i: Use).isReg()) |
| 1224 | MI->tieOperands(DefIdx: Def++, UseIdx: Use); |
| 1225 | Use = StackMaps::getNextMetaArgIdx(MI, CurIdx: Use); |
| 1226 | } |
| 1227 | } |
| 1228 | |
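| | // A convergence control token, if present, is the last remaining operand.
| | // It has already been selected, so its SDNode opcode is the bitwise
| | // complement of the target opcode; add the token's defining value as an
| | // implicit register use.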
| 1229 | unsigned Op = Node->getNumOperands(); |
| 1230 | if (Op != 0 && Node->getOperand(Num: Op - 1)->getOpcode() == |
| 1231 | ~(unsigned)TargetOpcode::CONVERGENCECTRL_GLUE) { |
| 1232 | Register VReg = getVR(Op: Node->getOperand(Num: Op - 1)->getOperand(Num: 0), VRBaseMap); |
| 1233 | MachineOperand MO = MachineOperand::CreateReg(Reg: VReg, /*isDef=*/false, |
| 1234 | /*isImp=*/true); |
| 1235 | MIB->addOperand(Op: MO); |
| 1236 | Op--; |
| 1237 | } |
| 1238 | |
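| | // A deactivation symbol operand, if present, is recorded directly on the
| | // MachineInstr rather than emitted as a normal operand.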
| 1239 | if (Op != 0 && |
| 1240 | Node->getOperand(Num: Op - 1)->getOpcode() == ISD::DEACTIVATION_SYMBOL) { |
| 1241 | MIB->setDeactivationSymbol(
| 1242 | MF&: *MF, DS: const_cast<GlobalValue *>( |
| 1243 | cast<DeactivationSymbolSDNode>(Val: Node->getOperand(Num: Op - 1)) |
| 1244 | ->getGlobal())); |
| 1245 | Op--; |
| 1246 | } |
| 1247 | |
| 1248 | // Run post-isel target hook to adjust this instruction if needed. |
| 1249 | if (II.hasPostISelHook()) |
| 1250 | TLI->AdjustInstrPostInstrSelection(MI&: *MIB, Node); |
| 1251 | } |
| 1252 | |
| 1253 | /// EmitSpecialNode - Generate machine code for a target-independent node and |
| 1254 | /// needed dependencies. |
| 1255 | void InstrEmitter:: |
| 1256 | EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, |
| 1257 | VRBaseMapType &VRBaseMap) { |
| 1258 | switch (Node->getOpcode()) { |
| 1259 | default: |
| 1260 | #ifndef NDEBUG |
| 1261 | Node->dump(); |
| 1262 | #endif |
| 1263 | llvm_unreachable("This target-independent node should have been selected!");
| 1264 | case ISD::EntryToken: |
| 1265 | case ISD::MERGE_VALUES: |
| 1266 | case ISD::TokenFactor: |
| 1267 | case ISD::DEACTIVATION_SYMBOL: |
| 1268 | break; |
| 1269 | case ISD::CopyToReg: { |
| 1270 | Register DestReg = cast<RegisterSDNode>(Val: Node->getOperand(Num: 1))->getReg(); |
| 1271 | SDValue SrcVal = Node->getOperand(Num: 2); |
| 1272 | if (DestReg.isVirtual() && SrcVal.isMachineOpcode() && |
| 1273 | SrcVal.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) { |
| 1274 | // Instead of building a COPY to that vreg destination, build an
| 1275 | // IMPLICIT_DEF instruction.
| 1276 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), |
| 1277 | MCID: TII->get(Opcode: TargetOpcode::IMPLICIT_DEF), DestReg); |
| 1278 | break; |
| 1279 | } |
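| | // The copy source is either a physical register named by a RegisterSDNode
| | // or an already-emitted value whose virtual register is in VRBaseMap.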
| 1280 | Register SrcReg; |
| 1281 | if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Val&: SrcVal)) |
| 1282 | SrcReg = R->getReg(); |
| 1283 | else |
| 1284 | SrcReg = getVR(Op: SrcVal, VRBaseMap); |
| 1285 | |
| 1286 | if (SrcReg == DestReg) // Coalesced away the copy? Ignore. |
| 1287 | break; |
| 1288 | |
| 1289 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), MCID: TII->get(Opcode: TargetOpcode::COPY), |
| 1290 | DestReg).addReg(RegNo: SrcReg); |
| 1291 | break; |
| 1292 | } |
| 1293 | case ISD::CopyFromReg: { |
| 1294 | Register SrcReg = cast<RegisterSDNode>(Val: Node->getOperand(Num: 1))->getReg(); |
| 1295 | EmitCopyFromReg(Op: SDValue(Node, 0), IsClone, SrcReg, VRBaseMap); |
| 1296 | break; |
| 1297 | } |
| 1298 | case ISD::EH_LABEL: |
| 1299 | case ISD::ANNOTATION_LABEL: { |
| 1300 | unsigned Opc = (Node->getOpcode() == ISD::EH_LABEL) |
| 1301 | ? TargetOpcode::EH_LABEL |
| 1302 | : TargetOpcode::ANNOTATION_LABEL; |
| 1303 | MCSymbol *S = cast<LabelSDNode>(Val: Node)->getLabel(); |
| 1304 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), |
| 1305 | MCID: TII->get(Opcode: Opc)).addSym(Sym: S); |
| 1306 | break; |
| 1307 | } |
| 1308 | |
| 1309 | case ISD::LIFETIME_START: |
| 1310 | case ISD::LIFETIME_END: { |
| 1311 | unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START) |
| 1312 | ? TargetOpcode::LIFETIME_START |
| 1313 | : TargetOpcode::LIFETIME_END; |
| 1314 | auto *FI = cast<FrameIndexSDNode>(Val: Node->getOperand(Num: 1)); |
| 1315 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), MCID: TII->get(Opcode: TarOp)) |
| 1316 | .addFrameIndex(Idx: FI->getIndex()); |
| 1317 | break; |
| 1318 | } |
| 1319 | |
| 1320 | case ISD::PSEUDO_PROBE: { |
| 1321 | unsigned TarOp = TargetOpcode::PSEUDO_PROBE; |
| 1322 | auto Guid = cast<PseudoProbeSDNode>(Val: Node)->getGuid(); |
| 1323 | auto Index = cast<PseudoProbeSDNode>(Val: Node)->getIndex(); |
| 1324 | auto Attr = cast<PseudoProbeSDNode>(Val: Node)->getAttributes(); |
| 1325 | |
| 1326 | BuildMI(BB&: *MBB, I: InsertPos, MIMD: Node->getDebugLoc(), MCID: TII->get(Opcode: TarOp)) |
| 1327 | .addImm(Val: Guid) |
| 1328 | .addImm(Val: Index) |
| 1329 | .addImm(Val: (uint8_t)PseudoProbeType::Block) |
| 1330 | .addImm(Val: Attr); |
| 1331 | break; |
| 1332 | } |
| 1333 | |
| 1334 | case ISD::INLINEASM: |
| 1335 | case ISD::INLINEASM_BR: { |
| 1336 | unsigned NumOps = Node->getNumOperands(); |
| 1337 | if (Node->getOperand(Num: NumOps-1).getValueType() == MVT::Glue) |
| 1338 | --NumOps; // Ignore the glue operand. |
| 1339 | |
| 1340 | // Create the inline asm machine instruction. |
| 1341 | unsigned TgtOpc = Node->getOpcode() == ISD::INLINEASM_BR |
| 1342 | ? TargetOpcode::INLINEASM_BR |
| 1343 | : TargetOpcode::INLINEASM; |
| 1344 | MachineInstrBuilder MIB = |
| 1345 | BuildMI(MF&: *MF, MIMD: Node->getDebugLoc(), MCID: TII->get(Opcode: TgtOpc)); |
| 1346 | |
| 1347 | // Add the asm string as an external symbol operand. |
| 1348 | SDValue AsmStrV = Node->getOperand(Num: InlineAsm::Op_AsmString); |
| 1349 | const char *AsmStr = cast<ExternalSymbolSDNode>(Val&: AsmStrV)->getSymbol(); |
| 1350 | MIB.addExternalSymbol(FnName: AsmStr); |
| 1351 | |
| 1352 | // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore |
| 1353 | // bits. |
| 1354 | int64_t ExtraInfo =
| 1355 | cast<ConstantSDNode>(Val: Node->getOperand(Num: InlineAsm::Op_ExtraInfo))-> |
| 1356 | getZExtValue(); |
| 1357 | MIB.addImm(Val: ExtraInfo); |
| 1358 | |
| 1359 | // Remember the operand index of the group flags.
| 1360 | SmallVector<unsigned, 8> GroupIdx; |
| 1361 | |
| 1362 | // Remember registers that are part of early-clobber defs. |
| 1363 | SmallVector<Register, 8> ECRegs; |
| 1364 | |
| 1365 | // Add all of the operand registers to the instruction. |
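| | // Operands are grouped: each group begins with an immediate flag word
| | // (InlineAsm::Flag) encoding the operand kind and how many operands
| | // follow it.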
| 1366 | for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { |
| 1367 | unsigned Flags = Node->getConstantOperandVal(Num: i); |
| 1368 | const InlineAsm::Flag F(Flags); |
| 1369 | const unsigned NumVals = F.getNumOperandRegisters(); |
| 1370 | |
| 1371 | GroupIdx.push_back(Elt: MIB->getNumOperands()); |
| 1372 | MIB.addImm(Val: Flags); |
| 1373 | ++i; // Skip the ID value. |
| 1374 | |
| 1375 | switch (F.getKind()) { |
| 1376 | case InlineAsm::Kind::RegDef: |
| 1377 | for (unsigned j = 0; j != NumVals; ++j, ++i) { |
| 1378 | Register Reg = cast<RegisterSDNode>(Val: Node->getOperand(Num: i))->getReg(); |
| 1379 | // FIXME: Add dead flags for physical and virtual registers defined. |
| 1380 | // For now, mark physical register defs as implicit to help fast |
| 1381 | // regalloc. This makes inline asm look a lot like calls. |
| 1382 | MIB.addReg(RegNo: Reg, Flags: RegState::Define | getImplRegState(B: Reg.isPhysical())); |
| 1383 | } |
| 1384 | break; |
| 1385 | case InlineAsm::Kind::RegDefEarlyClobber: |
| 1386 | case InlineAsm::Kind::Clobber: |
| 1387 | for (unsigned j = 0; j != NumVals; ++j, ++i) { |
| 1388 | Register Reg = cast<RegisterSDNode>(Val: Node->getOperand(Num: i))->getReg(); |
| 1389 | MIB.addReg(RegNo: Reg, Flags: RegState::Define | RegState::EarlyClobber | |
| 1390 | getImplRegState(B: Reg.isPhysical())); |
| 1391 | ECRegs.push_back(Elt: Reg); |
| 1392 | } |
| 1393 | break; |
| 1394 | case InlineAsm::Kind::RegUse: // Use of register. |
| 1395 | case InlineAsm::Kind::Imm: // Immediate. |
| 1396 | case InlineAsm::Kind::Mem: // Non-function addressing mode. |
| 1397 | // The addressing mode has been selected, just add all of the |
| 1398 | // operands to the machine instruction. |
| 1399 | for (unsigned j = 0; j != NumVals; ++j, ++i) |
| 1400 | AddOperand(MIB, Op: Node->getOperand(Num: i), IIOpNum: 0, II: nullptr, VRBaseMap, |
| 1401 | /*IsDebug=*/false, IsClone, IsCloned); |
| 1402 | |
| 1403 | // Manually set isTied bits. |
| 1404 | if (F.isRegUseKind()) { |
| 1405 | unsigned DefGroup; |
| 1406 | if (F.isUseOperandTiedToDef(Idx&: DefGroup)) { |
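| | // GroupIdx recorded the index of each group's flag word; the group's
| | // registers begin right after it, hence the +1 offsets.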
| 1407 | unsigned DefIdx = GroupIdx[DefGroup] + 1; |
| 1408 | unsigned UseIdx = GroupIdx.back() + 1; |
| 1409 | for (unsigned j = 0; j != NumVals; ++j) |
| 1410 | MIB->tieOperands(DefIdx: DefIdx + j, UseIdx: UseIdx + j); |
| 1411 | } |
| 1412 | } |
| 1413 | break; |
| 1414 | case InlineAsm::Kind::Func: // Function addressing mode. |
| 1415 | for (unsigned j = 0; j != NumVals; ++j, ++i) { |
| 1416 | SDValue Op = Node->getOperand(Num: i); |
| 1417 | AddOperand(MIB, Op, IIOpNum: 0, II: nullptr, VRBaseMap, |
| 1418 | /*IsDebug=*/false, IsClone, IsCloned); |
| 1419 | |
| 1420 | // Adjust Target Flags for function reference. |
| 1421 | if (auto *TGA = dyn_cast<GlobalAddressSDNode>(Val&: Op)) { |
| 1422 | unsigned NewFlags = |
| 1423 | MF->getSubtarget().classifyGlobalFunctionReference( |
| 1424 | GV: TGA->getGlobal()); |
| 1425 | unsigned LastIdx = MIB.getInstr()->getNumOperands() - 1; |
| 1426 | MIB.getInstr()->getOperand(i: LastIdx).setTargetFlags(NewFlags); |
| 1427 | } |
| 1428 | } |
| 1429 | } |
| 1430 | } |
| 1431 | |
| 1432 | // GCC inline assembly allows input operands to also be early-clobber |
| 1433 | // output operands (so long as the operand is written only after it's |
| 1434 | // used), but this does not match the semantics of our early-clobber flag. |
| 1435 | // If an early-clobber operand register is also an input operand register, |
| 1436 | // then remove the early-clobber flag. |
| 1437 | for (Register Reg : ECRegs) { |
| 1438 | if (MIB->readsRegister(Reg, TRI)) { |
| 1439 | MachineOperand *MO = |
| 1440 | MIB->findRegisterDefOperand(Reg, TRI, isDead: false, Overlap: false); |
| 1441 | assert(MO && "No def operand for clobbered register?");
| 1442 | MO->setIsEarlyClobber(false); |
| 1443 | } |
| 1444 | } |
| 1445 | |
| 1446 | // Get the mdnode from the asm if it exists and add it to the instruction. |
| 1447 | SDValue MDV = Node->getOperand(Num: InlineAsm::Op_MDNode); |
| 1448 | const MDNode *MD = cast<MDNodeSDNode>(Val&: MDV)->getMD(); |
| 1449 | if (MD) |
| 1450 | MIB.addMetadata(MD); |
| 1451 | |
| 1452 | // Add rounding control registers as implicit defs for inline asm.
| 1453 | if (MF->getFunction().hasFnAttribute(Kind: Attribute::StrictFP)) { |
| 1454 | ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters(); |
| 1455 | for (MCPhysReg Reg : RCRegs) |
| 1456 | MIB.addReg(RegNo: Reg, Flags: RegState::ImplicitDefine); |
| 1457 | } |
| 1458 | |
| 1459 | MBB->insert(I: InsertPos, MI: MIB); |
| 1460 | break; |
| 1461 | } |
| 1462 | } |
| 1463 | } |
| 1464 | |
| 1465 | /// InstrEmitter - Construct an InstrEmitter and set it to start inserting |
| 1466 | /// at the given position in the given block. |
| 1467 | InstrEmitter::InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb, |
| 1468 | MachineBasicBlock::iterator insertpos) |
| 1469 | : MF(mbb->getParent()), MRI(&MF->getRegInfo()), |
| 1470 | TII(MF->getSubtarget().getInstrInfo()), |
| 1471 | TRI(MF->getSubtarget().getRegisterInfo()), |
| 1472 | TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb), |
| 1473 | InsertPos(insertpos) { |
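| | // Record whether this function uses instruction referencing for debug
| | // values; this determines which debug instruction form the emitter uses.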
| 1474 | EmitDebugInstrRefs = mbb->getParent()->useDebugInstrRef(); |
| 1475 | } |
| 1476 | |