//===-- CodeGenCommonISel.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines common utilities that are shared between the SelectionDAG
// and GlobalISel frameworks.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugInfoMetadata.h"

#define DEBUG_TYPE "codegen-common"

using namespace llvm;

/// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
/// if SuccMBB is null.
MachineBasicBlock *
StackProtectorDescriptor::addSuccessorMBB(const BasicBlock *BB,
                                          MachineBasicBlock *ParentMBB,
                                          bool IsLikely,
                                          MachineBasicBlock *SuccMBB) {
  // If SuccBB has not been created yet, create it.
  if (!SuccMBB) {
    MachineFunction *MF = ParentMBB->getParent();
    MachineFunction::iterator BBI(ParentMBB);
    SuccMBB = MF->CreateMachineBasicBlock(BB);
    MF->insert(++BBI, SuccMBB);
  }
  // Add it as a successor of ParentMBB.
  ParentMBB->addSuccessor(
      SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
  return SuccMBB;
}

/// Given that the input MI is before a partial terminator sequence TSeq,
/// return true if MI + TSeq is also a partial terminator sequence.
///
/// A terminator sequence is a sequence of MachineInstrs which at this point in
/// lowering copy vregs into physical registers, which are then passed into
/// terminator instructions so we can satisfy ABI constraints. A partial
/// terminator sequence is an improper subset of a terminator sequence (i.e. it
/// may be the whole terminator sequence).
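///
/// For illustration, a typical terminator sequence looks roughly like the
/// following (conceptual MIR; the vreg names, the ABI register and the return
/// opcode are purely illustrative and not tied to any particular target):
///
///   %retval:_(s32) = ...            ; value computed earlier in the block
///   $w0 = COPY %retval              ; copy the vreg into the ABI register
///   RET_ReallyLR implicit $w0       ; terminator consuming the physreg
///
/// Everything from the first COPY onward forms the terminator sequence.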
static bool MIIsInTerminatorSequence(const MachineInstr &MI) {
  // If MI is neither a copy nor an implicit def, it can only belong to the
  // sequence if it is a debug instruction or one of the GlobalISel
  // extension/merge opcodes handled below.
  if (!MI.isCopy() && !MI.isImplicitDef()) {
    // Sometimes DBG_VALUE MIs sneak in between the copies from the vregs to
    // the physical registers if there is debug info associated with the
    // terminator of our mbb. We want to include said debug info in our
    // terminator sequence, so we return true in that case.
    if (MI.isDebugInstr())
      return true;

    // For GlobalISel, we may have extension instructions for arguments within
    // copy sequences. Allow these.
    switch (MI.getOpcode()) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_MERGE_VALUES:
    case TargetOpcode::G_UNMERGE_VALUES:
    case TargetOpcode::G_CONCAT_VECTORS:
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_EXTRACT:
      return true;
    default:
      return false;
    }
  }

  // We have left the terminator sequence if we are not doing one of the
  // following:
  //
  // 1. Copying a vreg into a physical register.
  // 2. Copying a vreg into a vreg.
  // 3. Defining a register via an implicit def.

  // OPI should always be a register definition...
  MachineInstr::const_mop_iterator OPI = MI.operands_begin();
  if (!OPI->isReg() || !OPI->isDef())
    return false;

  // Defining any register via an implicit def is always ok.
  if (MI.isImplicitDef())
    return true;

  // Grab the copy source...
  MachineInstr::const_mop_iterator OPI2 = OPI;
  ++OPI2;
  assert(OPI2 != MI.operands_end() &&
         "Should have a copy implying we should have 2 arguments.");

  // Make sure that the copy dest is not a vreg when the copy source is a
  // physical register.
  if (!OPI2->isReg() ||
      (!OPI->getReg().isPhysical() && OPI2->getReg().isPhysical()))
    return false;

  return true;
}

/// Find the split point at which to splice the end of BB into its successor
/// stack protector check machine basic block.
///
/// On many platforms, due to ABI constraints, terminators, even before
/// register allocation, use physical registers. This creates an issue for us
/// since physical registers at this point cannot travel across basic blocks.
/// Luckily, SelectionDAG always moves physical registers into vregs when they
/// enter functions and moves them through a sequence of copies back into the
/// physical registers right before the terminator, creating a ``Terminator
/// Sequence''. This function searches for the beginning of the terminator
/// sequence so that we can ensure that we splice off not just the terminator,
/// but additionally the copies that move the vregs into the physical
/// registers.
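///
/// For example, for a block ending in a plain return (conceptual MIR; the
/// opcodes and registers are illustrative only), the returned iterator points
/// at the first COPY of the sequence rather than at the terminator itself:
///
///   %retval:_(s32) = ...
///   <- split point returned by this function
///   $w0 = COPY %retval
///   RET_ReallyLR implicit $w0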
MachineBasicBlock::iterator
llvm::findSplitPointForStackProtector(MachineBasicBlock *BB,
                                      const TargetInstrInfo &TII) {
  MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator();
  if (SplitPoint == BB->begin())
    return SplitPoint;

  MachineBasicBlock::iterator Start = BB->begin();
  MachineBasicBlock::iterator Previous = SplitPoint;
  do {
    --Previous;
  } while (Previous != Start && Previous->isDebugInstr());

  if (TII.isTailCall(*SplitPoint) &&
      Previous->getOpcode() == TII.getCallFrameDestroyOpcode()) {
    // Call frames cannot be nested, so if this frame is describing the tail
    // call itself, then we must insert before the sequence even starts. For
    // example:
    //   <split point>
    //   ADJCALLSTACKDOWN ...
    //   <Moves>
    //   ADJCALLSTACKUP ...
    //   TAILJMP somewhere
    // On the other hand, it could be an unrelated call in which case this tail
    // call has no register moves of its own and should be the split point. For
    // example:
    //   ADJCALLSTACKDOWN
    //   CALL something_else
    //   ADJCALLSTACKUP
    //   <split point>
    //   TAILJMP somewhere
    do {
      --Previous;
      if (Previous->isCall())
        return SplitPoint;
    } while (Previous->getOpcode() != TII.getCallFrameSetupOpcode());

    return Previous;
  }

  while (MIIsInTerminatorSequence(*Previous)) {
    SplitPoint = Previous;
    if (Previous == Start)
      break;
    --Previous;
  }

  return SplitPoint;
}

FPClassTest llvm::invertFPClassTestIfSimpler(FPClassTest Test, bool UseFCmp) {
  FPClassTest InvertedTest = ~Test;

  // Pick the direction with fewer tests.
  // TODO: Handle more combinations of cases that can be handled together.
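  //
  // For example, if Test covers every class except NaN (~fcNan), then
  // InvertedTest is fcNan, which needs only a single class check and is
  // therefore simpler to emit than the original mask. (Illustrative; the
  // profitable cases are exactly the ones enumerated below.)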
  switch (static_cast<unsigned>(InvertedTest)) {
  case fcNan:
  case fcSNan:
  case fcQNan:
  case fcInf:
  case fcPosInf:
  case fcNegInf:
  case fcNormal:
  case fcPosNormal:
  case fcNegNormal:
  case fcSubnormal:
  case fcPosSubnormal:
  case fcNegSubnormal:
  case fcZero:
  case fcPosZero:
  case fcNegZero:
  case fcFinite:
  case fcPosFinite:
  case fcNegFinite:
  case fcZero | fcNan:
  case fcSubnormal | fcZero:
  case fcSubnormal | fcZero | fcNan:
    return InvertedTest;
  case fcInf | fcNan:
  case fcPosInf | fcNan:
  case fcNegInf | fcNan:
    // If we're trying to use fcmp, we can take advantage of the nan check
    // behavior of the compare (but this is more instructions in the integer
    // expansion).
    return UseFCmp ? InvertedTest : fcNone;
  default:
    return fcNone;
  }

  llvm_unreachable("covered FPClassTest");
}

static MachineOperand *getSalvageOpsForCopy(const MachineRegisterInfo &MRI,
                                            MachineInstr &Copy) {
  assert(Copy.getOpcode() == TargetOpcode::COPY && "Must be a COPY");

  return &Copy.getOperand(1);
}

static MachineOperand *getSalvageOpsForTrunc(const MachineRegisterInfo &MRI,
                                             MachineInstr &Trunc,
                                             SmallVectorImpl<uint64_t> &Ops) {
  assert(Trunc.getOpcode() == TargetOpcode::G_TRUNC && "Must be a G_TRUNC");

  const auto FromLLT = MRI.getType(Trunc.getOperand(1).getReg());
  const auto ToLLT = MRI.getType(Trunc.defs().begin()->getReg());

  // TODO: Support non-scalar types.
  if (!FromLLT.isScalar())
    return nullptr;

  auto ExtOps = DIExpression::getExtOps(FromLLT.getSizeInBits(),
                                        ToLLT.getSizeInBits(),
                                        /*Signed=*/false);
  Ops.append(ExtOps.begin(), ExtOps.end());
  return &Trunc.getOperand(1);
}

static MachineOperand *salvageDebugInfoImpl(const MachineRegisterInfo &MRI,
                                            MachineInstr &MI,
                                            SmallVectorImpl<uint64_t> &Ops) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_TRUNC:
    return getSalvageOpsForTrunc(MRI, MI, Ops);
  case TargetOpcode::COPY:
    return getSalvageOpsForCopy(MRI, MI);
  default:
    return nullptr;
  }
}

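// Conceptual illustration of what salvaging achieves for a G_TRUNC (the MIR
// below is purely illustrative; the value numbers and metadata are made up):
//
//   %1:_(s32) = G_TRUNC %0:_(s64)
//   DBG_VALUE %1(s32), $noreg, !var, !DIExpression()
//
// If %1's definition is about to disappear, the DBG_VALUE can instead refer
// to the wider source %0, with the truncation folded into the expression:
//
//   DBG_VALUE %0(s64), $noreg, !var,
//             !DIExpression(DW_OP_LLVM_convert, 64, DW_ATE_unsigned,
//                           DW_OP_LLVM_convert, 32, DW_ATE_unsigned,
//                           DW_OP_stack_value)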
void llvm::salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI,
                                       MachineInstr &MI,
                                       ArrayRef<MachineOperand *> DbgUsers) {
  // This is an arbitrarily chosen limit on the maximum size of a debug
  // expression we will salvage up to, used for performance reasons.
  const unsigned MaxExpressionSize = 128;

  for (auto *DefMO : DbgUsers) {
    MachineInstr *DbgMI = DefMO->getParent();
    if (DbgMI->isIndirectDebugValue())
      continue;

    int UseMOIdx =
        DbgMI->findRegisterUseOperandIdx(DefMO->getReg(), /*TRI=*/nullptr);
    assert(UseMOIdx != -1 && DbgMI->hasDebugOperandForReg(DefMO->getReg()) &&
           "Must use salvaged instruction as its location");

    // TODO: Support DBG_VALUE_LIST.
    if (DbgMI->getOpcode() != TargetOpcode::DBG_VALUE) {
      assert(DbgMI->getOpcode() == TargetOpcode::DBG_VALUE_LIST &&
             "Must be either DBG_VALUE or DBG_VALUE_LIST");
      continue;
    }

    const DIExpression *SalvagedExpr = DbgMI->getDebugExpression();

    SmallVector<uint64_t, 16> Ops;
    auto Op0 = salvageDebugInfoImpl(MRI, MI, Ops);
    if (!Op0)
      continue;
    SalvagedExpr = DIExpression::appendOpsToArg(SalvagedExpr, Ops, /*ArgNo=*/0,
                                                /*StackValue=*/true);

    bool IsValidSalvageExpr =
        SalvagedExpr->getNumElements() <= MaxExpressionSize;
    if (IsValidSalvageExpr) {
      auto &UseMO = DbgMI->getOperand(UseMOIdx);
      UseMO.setReg(Op0->getReg());
      UseMO.setSubReg(Op0->getSubReg());
      DbgMI->getDebugExpressionOp().setMetadata(SalvagedExpr);

      LLVM_DEBUG(dbgs() << "SALVAGE: " << *DbgMI << '\n');
    }
  }
}