//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemoryOpRemark.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

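/// Mark the function as having failed instruction selection and report the
/// failure, either as a fatal error (when GlobalISel abort is enabled) or as
/// the optimization remark \p R.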
static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().setFailedISel();

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(Twine(R.getMsg()));
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator(CodeGenOptLevel optlevel)
    : MachineFunctionPass(ID), OptLevel(optlevel) {}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have no debug loc because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
            (MI.isDebugInstr())) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG


void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  if (OptLevel != CodeGenOptLevel::None) {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
  }
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<TargetLibraryInfoWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

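/// Reserve an (initially invalid) vreg entry for each LLT that \p Val splits
/// into, recording per-part offsets; the actual registers are created later,
/// on demand.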
IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  if (!Val.getType()->isTokenTy())
    assert(Val.getType()->isSized() &&
           "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::append_range(*VRegs, EltRegs);
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

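/// Return the frame index backing \p AI, creating a stack object of at least
/// one byte for it the first time the alloca is seen.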
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto [MapEntry, Inserted] = FrameIndices.try_emplace(&AI);
  if (!Inserted)
    return MapEntry->second;

  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = MapEntry->second;
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
  return FI;
}

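/// Return the alignment of the memory operation \p I; for instruction kinds
/// we don't handle, report a translation error and return a 1-byte alignment.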
Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    return AI->getAlign();
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
    return AI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *TPC, *ORE, R);
  return Align(1);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

static bool containsBF16Type(const User &U) {
  // BF16 cannot currently be represented by LLT, to avoid miscompiles we
  // prevent any instructions using them. FIXME: This can be removed once LLT
  // supports bfloat.
  return U.getType()->getScalarType()->isBFloatTy() ||
         any_of(U.operands(), [](Value *V) {
           return V->getType()->getScalarType()->isBFloatTy();
         });
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  if (containsBF16Type(U))
    return false;

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  if (containsBF16Type(U))
    return false;

  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  if (containsBF16Type(U))
    return false;

  auto *CI = cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred = CI->getPredicate();
  uint32_t Flags = MachineInstr::copyFlagsFromInstruction(*CI);
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1, Flags);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may mess up with the insertion point, but
  // this is not important as a return is the last instruction
  // of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
}

void IRTranslator::emitBranchForMergedCondition(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    CmpInst::Predicate Condition;
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    } else {
      const FCmpInst *FC = cast<FCmpInst>(Cond);
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    }

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
    return;
  }

  // Create a CaseBlock record representing this branch.
  CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  SwitchCG::CaseBlock CB(
      Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

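// Return true if V is defined in (or otherwise available to) BB;
// non-instruction values such as arguments and constants are considered
// available in every block.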
static bool isValInBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

void IRTranslator::findMergedConditions(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    Instruction::BinaryOps Opc, BranchProbability TProb,
    BranchProbability FProb, bool InvertCond) {
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
  // Skip over not part of the tree and remember to invert op and operands at
  // next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      isValInBlock(NotCond, CurBB->getBasicBlock())) {
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  const Value *BOpOp0, *BOpOp1;
  // Compute the effective opcode for Cond, taking into account whether it
  // needs to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
  if (BOp) {
    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
               ? Instruction::And
               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
                      ? Instruction::Or
                      : (Instruction::BinaryOps)0);
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have same opcode.
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
      !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
      !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
                                 InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineBasicBlock *TmpBB =
      MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //   = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.
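    // For example, with A = 3/4 and B = 1/4, BB1 branches to TBB with 3/8 and
    // to TmpBB with 5/8, and TmpBB branches to TBB with 3/5 and to FBB with
    // 2/5; the combined probability of reaching TBB is then
    // 3/8 + (5/8)*(3/5) = 3/4 = A, as required.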

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //   = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.
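    // For example, with A = 3/4 and B = 1/4, BB1 branches to TmpBB with 7/8
    // and to FBB with 1/8, and TmpBB branches to TBB with 6/7 and to FBB with
    // 1/7; the combined probability of reaching FBB is then
    // 1/8 + (7/8)*(1/7) = 1/4 = B, as required.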

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  }
}

bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // For multiple cases, it's better to emit as branches.
  if (Cases.size() != 2)
    return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
        Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
        Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  if (BrInst.isUnconditional()) {
    // If the unconditional target is the layout successor, fallthrough.
    if (OptLevel == CodeGenOptLevel::None ||
        !CurMBB.isLayoutSuccessor(Succ0MBB))
      MIRBuilder.buildBr(*Succ0MBB);

    // Link successors.
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));
    return true;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = BrInst.getCondition();
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);
  if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
    Value *Vec;
    const Value *BOp0, *BOp1;
    if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
                    match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
                           /*InvertCond=*/false);
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // Emit the branch for this block.
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return true;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
                         ConstantInt::getTrue(MF->getFunction().getContext()),
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  // Use emitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
  return true;
}

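/// Add \p Dst as a successor of \p Src. Without BPI the edge is added without
/// a probability; otherwise an unknown \p Prob is filled in from the analysis
/// before being attached to the edge.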
void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N
    // is the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}

bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();

    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
    // For optimized builds, lower large range as a balanced binary tree.
    if (NumClusters > 3 &&
        MF->getTarget().getOptLevel() != CodeGenOptLevel::None &&
        !DefaultMBB->getParent()->getFunction().hasMinSize()) {
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
      continue;
    }

    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}

void IRTranslator::splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                                 const SwitchCG::SwitchWorkListItem &W,
                                 Value *Cond, MachineBasicBlock *SwitchMBB,
                                 MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");
  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  auto [LastLeft, FirstRight, LeftProb, RightProb] =
      SL->computeSplitWorkItemInfo(W);

  // Use the first element on the right as pivot since we will make less-than
  // comparisons against it.
  CaseClusterIt PivotCluster = FirstRight;
  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  CaseClusterIt FirstLeft = W.FirstCluster;
  CaseClusterIt LastRight = W.LastCluster;

  const ConstantInt *Pivot = PivotCluster->Low;

  // New blocks will be inserted immediately after the current one.
  MachineFunction::iterator BBI(W.MBB);
  ++BBI;

  // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
  // we can branch to its destination directly if it's squeezed exactly in
  // between the known lower bound and Pivot - 1.
  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
  } else {
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
    WorkList.push_back(
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
  }

  // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
  // single cluster, RHS.Low == Pivot, and we can branch to its destination
  // directly if RHS.High equals the current upper bound.
  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
      (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
  } else {
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
    WorkList.push_back(
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
  }

  // Create the CaseBlock record that will be used to lower the branch.
  CaseBlock CB(ICmpInst::Predicate::ICMP_SLT, false, Cond, Pivot, nullptr,
               LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,
               RightProb);

  if (W.MBB == SwitchMBB)
    emitSwitchCase(CB, SwitchMBB, MIB);
  else
    SL->SwitchCases.push_back(CB);
}

void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
  assert(JT.Reg && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}

bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncating.
  auto *PtrIRTy = PointerType::getUnqual(SValue.getContext());
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.FallthroughUnreachable) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}

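/// Emit the comparison and conditional branches described by the case block
/// \p CB into CB.ThisBB, updating successor probabilities and machine CFG
/// predecessor information along the way.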
void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
    // For conditional branch lowering, we might try to do something silly like
    // emit a G_ICMP to compare an existing G_ICMP i1 result with true. If so,
    // just re-use the existing condition vreg.
    if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
        CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
      Cond = CondLHS;
    } else {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
        Cond =
            MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
      else
        Cond =
            MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
    }
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

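/// Emit the header of a bit-test switch lowering: subtract the minimum case
/// value, pick a mask type wide enough for every case mask, range-check the
/// adjusted value against the default block, and record the result in B.Reg
/// for the per-case bit tests that follow.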
void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
                                     MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  // Subtract the minimum value.
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);

  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  LLT MaskTy = SwitchOpTy;
  if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
      !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
    MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  else {
    // Ensure that the type will fit the mask value.
    for (const SwitchCG::BitTestCase &Case : B.Cases) {
      if (!isUIntN(SwitchOpTy.getSizeInBits(), Case.Mask)) {
        // Switch table case ranges are encoded into a series of masks.
        // Just use pointer type, it's guaranteed to fit.
        MaskTy = LLT::scalar(PtrTy.getSizeInBits());
        break;
      }
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.FallthroughUnreachable) {
    // Conditional branch to the default block.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}

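/// Emit one bit test and its conditional branches: single-bit masks compare
/// the shift amount directly, masks covering all but one value in the range
/// test for that value, and general masks shift a 1 into position and AND it
/// against the mask.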
void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
                                   MachineBasicBlock *NextMBB,
                                   BranchProbability BranchProbToNext,
                                   Register Reg, SwitchCG::BitTestCase &B,
                                   MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  LLT SwitchTy = getLLTForMVT(BB.RegVT);
  Register Cmp;
  unsigned PopCount = llvm::popcount(B.Mask);
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
    Cmp =
        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
            .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
              .getReg(0);
  } else {
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  // Record the fact that the IR edge from the header to the bit test target
  // will go through our new block. Needed for PHIs to have nodes added.
  addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
                    SwitchBB);

  MIB.buildBrCond(Cmp, *B.TargetBB);

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}

bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two
  // successors of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  // If we're in the right place, emit the bit test header right now.
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  }
  return true;
}

| 1252 | bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, |
| 1253 | Value *Cond, |
| 1254 | MachineBasicBlock *SwitchMBB, |
| 1255 | MachineBasicBlock *DefaultMBB, |
| 1256 | MachineIRBuilder &MIB) { |
| 1257 | using namespace SwitchCG; |
| 1258 | MachineFunction *CurMF = FuncInfo.MF; |
| 1259 | MachineBasicBlock *NextMBB = nullptr; |
| 1260 | MachineFunction::iterator BBI(W.MBB); |
| 1261 | if (++BBI != FuncInfo.MF->end()) |
| 1262 | NextMBB = &*BBI; |
| 1263 | |
| 1264 | if (EnableOpts) { |
// Order the cases by probability so the most likely case is checked first.
// Two clusters can have the same probability, in which case their relative
// ordering is non-deterministic; use Low as a tie-breaker, since clusters
// are guaranteed never to overlap.
| 1269 | llvm::sort(Start: W.FirstCluster, End: W.LastCluster + 1, |
| 1270 | Comp: [](const CaseCluster &a, const CaseCluster &b) { |
| 1271 | return a.Prob != b.Prob |
| 1272 | ? a.Prob > b.Prob |
| 1273 | : a.Low->getValue().slt(RHS: b.Low->getValue()); |
| 1274 | }); |
| 1275 | |
| 1276 | // Rearrange the case blocks so that the last one falls through if possible |
| 1277 | // without changing the order of probabilities. |
| 1278 | for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) { |
| 1279 | --I; |
| 1280 | if (I->Prob > W.LastCluster->Prob) |
| 1281 | break; |
| 1282 | if (I->Kind == CC_Range && I->MBB == NextMBB) { |
| 1283 | std::swap(a&: *I, b&: *W.LastCluster); |
| 1284 | break; |
| 1285 | } |
| 1286 | } |
| 1287 | } |
| 1288 | |
| 1289 | // Compute total probability. |
| 1290 | BranchProbability DefaultProb = W.DefaultProb; |
| 1291 | BranchProbability UnhandledProbs = DefaultProb; |
| 1292 | for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) |
| 1293 | UnhandledProbs += I->Prob; |
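// UnhandledProbs now holds the probability mass of everything this work item
// may still branch to; each cluster's share is peeled off as it is lowered
// below.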
| 1294 | |
| 1295 | MachineBasicBlock *CurMBB = W.MBB; |
| 1296 | for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) { |
| 1297 | bool FallthroughUnreachable = false; |
| 1298 | MachineBasicBlock *Fallthrough; |
| 1299 | if (I == W.LastCluster) { |
| 1300 | // For the last cluster, fall through to the default destination. |
| 1301 | Fallthrough = DefaultMBB; |
| 1302 | FallthroughUnreachable = isa<UnreachableInst>( |
| 1303 | Val: DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg()); |
| 1304 | } else { |
| 1305 | Fallthrough = CurMF->CreateMachineBasicBlock(BB: CurMBB->getBasicBlock()); |
| 1306 | CurMF->insert(MBBI: BBI, MBB: Fallthrough); |
| 1307 | } |
| 1308 | UnhandledProbs -= I->Prob; |
| 1309 | |
| 1310 | switch (I->Kind) { |
| 1311 | case CC_BitTests: { |
| 1312 | if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, |
| 1313 | DefaultProb, UnhandledProbs, I, Fallthrough, |
| 1314 | FallthroughUnreachable)) { |
| 1315 | LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch" ); |
| 1316 | return false; |
| 1317 | } |
| 1318 | break; |
| 1319 | } |
| 1320 | |
| 1321 | case CC_JumpTable: { |
| 1322 | if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, |
| 1323 | UnhandledProbs, I, Fallthrough, |
| 1324 | FallthroughUnreachable)) { |
| 1325 | LLVM_DEBUG(dbgs() << "Failed to lower jump table" ); |
| 1326 | return false; |
| 1327 | } |
| 1328 | break; |
| 1329 | } |
| 1330 | case CC_Range: { |
| 1331 | if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough, |
| 1332 | FallthroughUnreachable, UnhandledProbs, |
| 1333 | CurMBB, MIB, SwitchMBB)) { |
| 1334 | LLVM_DEBUG(dbgs() << "Failed to lower switch range" ); |
| 1335 | return false; |
| 1336 | } |
| 1337 | break; |
| 1338 | } |
| 1339 | } |
| 1340 | CurMBB = Fallthrough; |
| 1341 | } |
| 1342 | |
| 1343 | return true; |
| 1344 | } |
| 1345 | |
| 1346 | bool IRTranslator::translateIndirectBr(const User &U, |
| 1347 | MachineIRBuilder &MIRBuilder) { |
| 1348 | const IndirectBrInst &BrInst = cast<IndirectBrInst>(Val: U); |
| 1349 | |
| 1350 | const Register Tgt = getOrCreateVReg(Val: *BrInst.getAddress()); |
| 1351 | MIRBuilder.buildBrIndirect(Tgt); |
| 1352 | |
| 1353 | // Link successors. |
| 1354 | SmallPtrSet<const BasicBlock *, 32> AddedSuccessors; |
| 1355 | MachineBasicBlock &CurBB = MIRBuilder.getMBB(); |
| 1356 | for (const BasicBlock *Succ : successors(I: &BrInst)) { |
| 1357 | // It's legal for indirectbr instructions to have duplicate blocks in the |
| 1358 | // destination list. We don't allow this in MIR. Skip anything that's |
| 1359 | // already a successor. |
| 1360 | if (!AddedSuccessors.insert(Ptr: Succ).second) |
| 1361 | continue; |
| 1362 | CurBB.addSuccessor(Succ: &getMBB(BB: *Succ)); |
| 1363 | } |
| 1364 | |
| 1365 | return true; |
| 1366 | } |
| 1367 | |
| 1368 | static bool isSwiftError(const Value *V) { |
| 1369 | if (auto Arg = dyn_cast<Argument>(Val: V)) |
| 1370 | return Arg->hasSwiftErrorAttr(); |
| 1371 | if (auto AI = dyn_cast<AllocaInst>(Val: V)) |
| 1372 | return AI->isSwiftError(); |
| 1373 | return false; |
| 1374 | } |
| 1375 | |
| 1376 | bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { |
| 1377 | const LoadInst &LI = cast<LoadInst>(Val: U); |
| 1378 | TypeSize StoreSize = DL->getTypeStoreSize(Ty: LI.getType()); |
| 1379 | if (StoreSize.isZero()) |
| 1380 | return true; |
| 1381 | |
| 1382 | ArrayRef<Register> Regs = getOrCreateVRegs(Val: LI); |
| 1383 | ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V: LI); |
| 1384 | Register Base = getOrCreateVReg(Val: *LI.getPointerOperand()); |
| 1385 | AAMDNodes AAInfo = LI.getAAMetadata(); |
| 1386 | |
| 1387 | const Value *Ptr = LI.getPointerOperand(); |
| 1388 | Type *OffsetIRTy = DL->getIndexType(PtrTy: Ptr->getType()); |
| 1389 | LLT OffsetTy = getLLTForType(Ty&: *OffsetIRTy, DL: *DL); |
| 1390 | |
| 1391 | if (CLI->supportSwiftError() && isSwiftError(V: Ptr)) { |
assert(Regs.size() == 1 && "swifterror should be single pointer");
| 1393 | Register VReg = |
| 1394 | SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr); |
| 1395 | MIRBuilder.buildCopy(Res: Regs[0], Op: VReg); |
| 1396 | return true; |
| 1397 | } |
| 1398 | |
| 1399 | MachineMemOperand::Flags Flags = |
| 1400 | TLI->getLoadMemOperandFlags(LI, DL: *DL, AC, LibInfo); |
| 1401 | if (AA && !(Flags & MachineMemOperand::MOInvariant)) { |
| 1402 | if (AA->pointsToConstantMemory( |
| 1403 | Loc: MemoryLocation(Ptr, LocationSize::precise(Value: StoreSize), AAInfo))) { |
| 1404 | Flags |= MachineMemOperand::MOInvariant; |
| 1405 | } |
| 1406 | } |
| 1407 | |
| 1408 | const MDNode *Ranges = |
| 1409 | Regs.size() == 1 ? LI.getMetadata(KindID: LLVMContext::MD_range) : nullptr; |
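// Aggregate loads are split into one G_LOAD per leaf value. Offsets are
// tracked in bits (hence the /8 below); e.g. loading a {i32, i64} with a
// typical DataLayout emits two loads at byte offsets 0 and 8.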
| 1410 | for (unsigned i = 0; i < Regs.size(); ++i) { |
| 1411 | Register Addr; |
| 1412 | MIRBuilder.materializePtrAdd(Res&: Addr, Op0: Base, ValueTy: OffsetTy, Value: Offsets[i] / 8); |
| 1413 | |
| 1414 | MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); |
| 1415 | Align BaseAlign = getMemOpAlign(I: LI); |
| 1416 | auto MMO = MF->getMachineMemOperand( |
| 1417 | PtrInfo: Ptr, f: Flags, MemTy: MRI->getType(Reg: Regs[i]), |
| 1418 | base_alignment: commonAlignment(A: BaseAlign, Offset: Offsets[i] / 8), AAInfo, Ranges, |
| 1419 | SSID: LI.getSyncScopeID(), Ordering: LI.getOrdering()); |
| 1420 | MIRBuilder.buildLoad(Res: Regs[i], Addr, MMO&: *MMO); |
| 1421 | } |
| 1422 | |
| 1423 | return true; |
| 1424 | } |
| 1425 | |
| 1426 | bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { |
| 1427 | const StoreInst &SI = cast<StoreInst>(Val: U); |
| 1428 | if (DL->getTypeStoreSize(Ty: SI.getValueOperand()->getType()).isZero()) |
| 1429 | return true; |
| 1430 | |
| 1431 | ArrayRef<Register> Vals = getOrCreateVRegs(Val: *SI.getValueOperand()); |
| 1432 | ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V: *SI.getValueOperand()); |
| 1433 | Register Base = getOrCreateVReg(Val: *SI.getPointerOperand()); |
| 1434 | |
| 1435 | Type *OffsetIRTy = DL->getIndexType(PtrTy: SI.getPointerOperandType()); |
| 1436 | LLT OffsetTy = getLLTForType(Ty&: *OffsetIRTy, DL: *DL); |
| 1437 | |
| 1438 | if (CLI->supportSwiftError() && isSwiftError(V: SI.getPointerOperand())) { |
assert(Vals.size() == 1 && "swifterror should be single pointer");
| 1440 | |
| 1441 | Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(), |
| 1442 | SI.getPointerOperand()); |
| 1443 | MIRBuilder.buildCopy(Res: VReg, Op: Vals[0]); |
| 1444 | return true; |
| 1445 | } |
| 1446 | |
| 1447 | MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, DL: *DL); |
| 1448 | |
| 1449 | for (unsigned i = 0; i < Vals.size(); ++i) { |
| 1450 | Register Addr; |
| 1451 | MIRBuilder.materializePtrAdd(Res&: Addr, Op0: Base, ValueTy: OffsetTy, Value: Offsets[i] / 8); |
| 1452 | |
| 1453 | MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); |
| 1454 | Align BaseAlign = getMemOpAlign(I: SI); |
| 1455 | auto MMO = MF->getMachineMemOperand( |
| 1456 | PtrInfo: Ptr, f: Flags, MemTy: MRI->getType(Reg: Vals[i]), |
| 1457 | base_alignment: commonAlignment(A: BaseAlign, Offset: Offsets[i] / 8), AAInfo: SI.getAAMetadata(), Ranges: nullptr, |
| 1458 | SSID: SI.getSyncScopeID(), Ordering: SI.getOrdering()); |
| 1459 | MIRBuilder.buildStore(Val: Vals[i], Addr, MMO&: *MMO); |
| 1460 | } |
| 1461 | return true; |
| 1462 | } |
| 1463 | |
| 1464 | static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) { |
| 1465 | const Value *Src = U.getOperand(i: 0); |
| 1466 | Type *Int32Ty = Type::getInt32Ty(C&: U.getContext()); |
| 1467 | |
// getIndexedOffsetInType is designed for GEPs, where the first index steps
// over the pointed-to type rather than into the aggregate, so prepend a
// zero index to address the aggregate itself.
| 1470 | SmallVector<Value *, 1> Indices; |
| 1471 | Indices.push_back(Elt: ConstantInt::get(Ty: Int32Ty, V: 0)); |
| 1472 | |
| 1473 | if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Val: &U)) { |
| 1474 | for (auto Idx : EVI->indices()) |
| 1475 | Indices.push_back(Elt: ConstantInt::get(Ty: Int32Ty, V: Idx)); |
| 1476 | } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Val: &U)) { |
| 1477 | for (auto Idx : IVI->indices()) |
| 1478 | Indices.push_back(Elt: ConstantInt::get(Ty: Int32Ty, V: Idx)); |
| 1479 | } else { |
| 1480 | llvm::append_range(C&: Indices, R: drop_begin(RangeOrContainer: U.operands())); |
| 1481 | } |
| 1482 | |
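// The result is a bit offset; e.g. extractvalue on {i32, i32} with index 1
// typically yields byte offset 4, returned here as 32.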
| 1483 | return 8 * static_cast<uint64_t>( |
| 1484 | DL.getIndexedOffsetInType(ElemTy: Src->getType(), Indices)); |
| 1485 | } |
| 1486 | |
bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
| 1489 | const Value *Src = U.getOperand(i: 0); |
| 1490 | uint64_t Offset = getOffsetFromIndices(U, DL: *DL); |
| 1491 | ArrayRef<Register> SrcRegs = getOrCreateVRegs(Val: *Src); |
| 1492 | ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V: *Src); |
| 1493 | unsigned Idx = llvm::lower_bound(Range&: Offsets, Value&: Offset) - Offsets.begin(); |
| 1494 | auto &DstRegs = allocateVRegs(Val: U); |
| 1495 | |
| 1496 | for (unsigned i = 0; i < DstRegs.size(); ++i) |
| 1497 | DstRegs[i] = SrcRegs[Idx++]; |
| 1498 | |
| 1499 | return true; |
| 1500 | } |
| 1501 | |
| 1502 | bool IRTranslator::translateInsertValue(const User &U, |
| 1503 | MachineIRBuilder &MIRBuilder) { |
| 1504 | const Value *Src = U.getOperand(i: 0); |
| 1505 | uint64_t Offset = getOffsetFromIndices(U, DL: *DL); |
| 1506 | auto &DstRegs = allocateVRegs(Val: U); |
| 1507 | ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(V: U); |
| 1508 | ArrayRef<Register> SrcRegs = getOrCreateVRegs(Val: *Src); |
| 1509 | ArrayRef<Register> InsertedRegs = getOrCreateVRegs(Val: *U.getOperand(i: 1)); |
| 1510 | auto *InsertedIt = InsertedRegs.begin(); |
| 1511 | |
| 1512 | for (unsigned i = 0; i < DstRegs.size(); ++i) { |
| 1513 | if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end()) |
| 1514 | DstRegs[i] = *InsertedIt++; |
| 1515 | else |
| 1516 | DstRegs[i] = SrcRegs[i]; |
| 1517 | } |
| 1518 | |
| 1519 | return true; |
| 1520 | } |
| 1521 | |
| 1522 | bool IRTranslator::translateSelect(const User &U, |
| 1523 | MachineIRBuilder &MIRBuilder) { |
| 1524 | Register Tst = getOrCreateVReg(Val: *U.getOperand(i: 0)); |
| 1525 | ArrayRef<Register> ResRegs = getOrCreateVRegs(Val: U); |
| 1526 | ArrayRef<Register> Op0Regs = getOrCreateVRegs(Val: *U.getOperand(i: 1)); |
| 1527 | ArrayRef<Register> Op1Regs = getOrCreateVRegs(Val: *U.getOperand(i: 2)); |
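// Aggregate selects are split into one G_SELECT per leaf value, all sharing
// the same condition.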
| 1528 | |
| 1529 | uint32_t Flags = 0; |
| 1530 | if (const SelectInst *SI = dyn_cast<SelectInst>(Val: &U)) |
| 1531 | Flags = MachineInstr::copyFlagsFromInstruction(I: *SI); |
| 1532 | |
| 1533 | for (unsigned i = 0; i < ResRegs.size(); ++i) { |
| 1534 | MIRBuilder.buildSelect(Res: ResRegs[i], Tst, Op0: Op0Regs[i], Op1: Op1Regs[i], Flags); |
| 1535 | } |
| 1536 | |
| 1537 | return true; |
| 1538 | } |
| 1539 | |
| 1540 | bool IRTranslator::translateCopy(const User &U, const Value &V, |
| 1541 | MachineIRBuilder &MIRBuilder) { |
| 1542 | Register Src = getOrCreateVReg(Val: V); |
| 1543 | auto &Regs = *VMap.getVRegs(V: U); |
| 1544 | if (Regs.empty()) { |
| 1545 | Regs.push_back(Elt: Src); |
| 1546 | VMap.getOffsets(V: U)->push_back(Elt: 0); |
| 1547 | } else { |
| 1548 | // If we already assigned a vreg for this instruction, we can't change that. |
| 1549 | // Emit a copy to satisfy the users we already emitted. |
| 1550 | MIRBuilder.buildCopy(Res: Regs[0], Op: Src); |
| 1551 | } |
| 1552 | return true; |
| 1553 | } |
| 1554 | |
| 1555 | bool IRTranslator::translateBitCast(const User &U, |
| 1556 | MachineIRBuilder &MIRBuilder) { |
| 1557 | // If we're bitcasting to the source type, we can reuse the source vreg. |
| 1558 | if (getLLTForType(Ty&: *U.getOperand(i: 0)->getType(), DL: *DL) == |
| 1559 | getLLTForType(Ty&: *U.getType(), DL: *DL)) { |
| 1560 | // If the source is a ConstantInt then it was probably created by |
| 1561 | // ConstantHoisting and we should leave it alone. |
| 1562 | if (isa<ConstantInt>(Val: U.getOperand(i: 0))) |
| 1563 | return translateCast(Opcode: TargetOpcode::G_CONSTANT_FOLD_BARRIER, U, |
| 1564 | MIRBuilder); |
| 1565 | return translateCopy(U, V: *U.getOperand(i: 0), MIRBuilder); |
| 1566 | } |
| 1567 | |
| 1568 | return translateCast(Opcode: TargetOpcode::G_BITCAST, U, MIRBuilder); |
| 1569 | } |
| 1570 | |
| 1571 | bool IRTranslator::translateCast(unsigned Opcode, const User &U, |
| 1572 | MachineIRBuilder &MIRBuilder) { |
| 1573 | if (containsBF16Type(U)) |
| 1574 | return false; |
| 1575 | |
| 1576 | uint32_t Flags = 0; |
| 1577 | if (const Instruction *I = dyn_cast<Instruction>(Val: &U)) |
| 1578 | Flags = MachineInstr::copyFlagsFromInstruction(I: *I); |
| 1579 | |
| 1580 | Register Op = getOrCreateVReg(Val: *U.getOperand(i: 0)); |
| 1581 | Register Res = getOrCreateVReg(Val: U); |
| 1582 | MIRBuilder.buildInstr(Opc: Opcode, DstOps: {Res}, SrcOps: {Op}, Flags); |
| 1583 | return true; |
| 1584 | } |
| 1585 | |
| 1586 | bool IRTranslator::translateGetElementPtr(const User &U, |
| 1587 | MachineIRBuilder &MIRBuilder) { |
| 1588 | Value &Op0 = *U.getOperand(i: 0); |
| 1589 | Register BaseReg = getOrCreateVReg(Val: Op0); |
| 1590 | Type *PtrIRTy = Op0.getType(); |
| 1591 | LLT PtrTy = getLLTForType(Ty&: *PtrIRTy, DL: *DL); |
| 1592 | Type *OffsetIRTy = DL->getIndexType(PtrTy: PtrIRTy); |
| 1593 | LLT OffsetTy = getLLTForType(Ty&: *OffsetIRTy, DL: *DL); |
| 1594 | |
| 1595 | uint32_t Flags = 0; |
| 1596 | if (const Instruction *I = dyn_cast<Instruction>(Val: &U)) |
| 1597 | Flags = MachineInstr::copyFlagsFromInstruction(I: *I); |
| 1598 | |
// Normalize vector GEPs: all scalar operands should be converted to splat
// vectors.
| 1601 | unsigned VectorWidth = 0; |
| 1602 | |
| 1603 | // True if we should use a splat vector; using VectorWidth alone is not |
| 1604 | // sufficient. |
| 1605 | bool WantSplatVector = false; |
| 1606 | if (auto *VT = dyn_cast<VectorType>(Val: U.getType())) { |
| 1607 | VectorWidth = cast<FixedVectorType>(Val: VT)->getNumElements(); |
| 1608 | // We don't produce 1 x N vectors; those are treated as scalars. |
| 1609 | WantSplatVector = VectorWidth > 1; |
| 1610 | } |
| 1611 | |
| 1612 | // We might need to splat the base pointer into a vector if the offsets |
| 1613 | // are vectors. |
| 1614 | if (WantSplatVector && !PtrTy.isVector()) { |
| 1615 | BaseReg = MIRBuilder |
| 1616 | .buildSplatBuildVector(Res: LLT::fixed_vector(NumElements: VectorWidth, ScalarTy: PtrTy), |
| 1617 | Src: BaseReg) |
| 1618 | .getReg(Idx: 0); |
| 1619 | PtrIRTy = FixedVectorType::get(ElementType: PtrIRTy, NumElts: VectorWidth); |
| 1620 | PtrTy = getLLTForType(Ty&: *PtrIRTy, DL: *DL); |
| 1621 | OffsetIRTy = DL->getIndexType(PtrTy: PtrIRTy); |
| 1622 | OffsetTy = getLLTForType(Ty&: *OffsetIRTy, DL: *DL); |
| 1623 | } |
| 1624 | |
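// Walk the indices, accumulating constant offsets and emitting G_MUL and
// G_PTR_ADD for dynamic ones. Schematically, a GEP such as
//   getelementptr %struct.S, ptr %p, i64 %i, i32 1
// becomes:
//   %scaled = G_MUL %i, sizeof(%struct.S)
//   %mid    = G_PTR_ADD %p, %scaled
//   %res    = G_PTR_ADD %mid, offsetof(%struct.S, 1)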
| 1625 | int64_t Offset = 0; |
| 1626 | for (gep_type_iterator GTI = gep_type_begin(GEP: &U), E = gep_type_end(GEP: &U); |
| 1627 | GTI != E; ++GTI) { |
| 1628 | const Value *Idx = GTI.getOperand(); |
| 1629 | if (StructType *StTy = GTI.getStructTypeOrNull()) { |
| 1630 | unsigned Field = cast<Constant>(Val: Idx)->getUniqueInteger().getZExtValue(); |
| 1631 | Offset += DL->getStructLayout(Ty: StTy)->getElementOffset(Idx: Field); |
| 1632 | continue; |
| 1633 | } else { |
| 1634 | uint64_t ElementSize = GTI.getSequentialElementStride(DL: *DL); |
| 1635 | |
| 1636 | // If this is a scalar constant or a splat vector of constants, |
| 1637 | // handle it quickly. |
| 1638 | if (const auto *CI = dyn_cast<ConstantInt>(Val: Idx)) { |
| 1639 | if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) { |
| 1640 | Offset += ElementSize * *Val; |
| 1641 | continue; |
| 1642 | } |
| 1643 | } |
| 1644 | |
| 1645 | if (Offset != 0) { |
| 1646 | auto OffsetMIB = MIRBuilder.buildConstant(Res: {OffsetTy}, Val: Offset); |
| 1647 | BaseReg = MIRBuilder.buildPtrAdd(Res: PtrTy, Op0: BaseReg, Op1: OffsetMIB.getReg(Idx: 0)) |
| 1648 | .getReg(Idx: 0); |
| 1649 | Offset = 0; |
| 1650 | } |
| 1651 | |
| 1652 | Register IdxReg = getOrCreateVReg(Val: *Idx); |
| 1653 | LLT IdxTy = MRI->getType(Reg: IdxReg); |
| 1654 | if (IdxTy != OffsetTy) { |
| 1655 | if (!IdxTy.isVector() && WantSplatVector) { |
| 1656 | IdxReg = MIRBuilder |
| 1657 | .buildSplatBuildVector(Res: OffsetTy.changeElementType(NewEltTy: IdxTy), |
| 1658 | Src: IdxReg) |
| 1659 | .getReg(Idx: 0); |
| 1660 | } |
| 1661 | |
| 1662 | IdxReg = MIRBuilder.buildSExtOrTrunc(Res: OffsetTy, Op: IdxReg).getReg(Idx: 0); |
| 1663 | } |
| 1664 | |
| 1665 | // N = N + Idx * ElementSize; |
| 1666 | // Avoid doing it for ElementSize of 1. |
| 1667 | Register GepOffsetReg; |
| 1668 | if (ElementSize != 1) { |
| 1669 | auto ElementSizeMIB = MIRBuilder.buildConstant( |
| 1670 | Res: getLLTForType(Ty&: *OffsetIRTy, DL: *DL), Val: ElementSize); |
| 1671 | GepOffsetReg = |
| 1672 | MIRBuilder.buildMul(Dst: OffsetTy, Src0: IdxReg, Src1: ElementSizeMIB).getReg(Idx: 0); |
| 1673 | } else |
| 1674 | GepOffsetReg = IdxReg; |
| 1675 | |
| 1676 | BaseReg = MIRBuilder.buildPtrAdd(Res: PtrTy, Op0: BaseReg, Op1: GepOffsetReg).getReg(Idx: 0); |
| 1677 | } |
| 1678 | } |
| 1679 | |
| 1680 | if (Offset != 0) { |
| 1681 | auto OffsetMIB = |
| 1682 | MIRBuilder.buildConstant(Res: OffsetTy, Val: Offset); |
| 1683 | |
| 1684 | if (int64_t(Offset) >= 0 && cast<GEPOperator>(Val: U).isInBounds()) |
| 1685 | Flags |= MachineInstr::MIFlag::NoUWrap; |
| 1686 | |
| 1687 | MIRBuilder.buildPtrAdd(Res: getOrCreateVReg(Val: U), Op0: BaseReg, Op1: OffsetMIB.getReg(Idx: 0), |
| 1688 | Flags); |
| 1689 | return true; |
| 1690 | } |
| 1691 | |
| 1692 | MIRBuilder.buildCopy(Res: getOrCreateVReg(Val: U), Op: BaseReg); |
| 1693 | return true; |
| 1694 | } |
| 1695 | |
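// Lower llvm.memcpy, llvm.memmove and llvm.memset (plus memcpy.inline) to
// their generic opcodes. Pointer and size arguments become vreg uses;
// alignment and volatility are carried on attached memory operands rather
// than as explicit operands.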
| 1696 | bool IRTranslator::translateMemFunc(const CallInst &CI, |
| 1697 | MachineIRBuilder &MIRBuilder, |
| 1698 | unsigned Opcode) { |
| 1699 | const Value *SrcPtr = CI.getArgOperand(i: 1); |
| 1700 | // If the source is undef, then just emit a nop. |
| 1701 | if (isa<UndefValue>(Val: SrcPtr)) |
| 1702 | return true; |
| 1703 | |
| 1704 | SmallVector<Register, 3> SrcRegs; |
| 1705 | |
| 1706 | unsigned MinPtrSize = UINT_MAX; |
| 1707 | for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(x: AI) != AE; ++AI) { |
| 1708 | Register SrcReg = getOrCreateVReg(Val: **AI); |
| 1709 | LLT SrcTy = MRI->getType(Reg: SrcReg); |
| 1710 | if (SrcTy.isPointer()) |
| 1711 | MinPtrSize = std::min<unsigned>(a: SrcTy.getSizeInBits(), b: MinPtrSize); |
| 1712 | SrcRegs.push_back(Elt: SrcReg); |
| 1713 | } |
| 1714 | |
| 1715 | LLT SizeTy = LLT::scalar(SizeInBits: MinPtrSize); |
| 1716 | |
| 1717 | // The size operand should be the minimum of the pointer sizes. |
| 1718 | Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1]; |
| 1719 | if (MRI->getType(Reg: SizeOpReg) != SizeTy) |
| 1720 | SizeOpReg = MIRBuilder.buildZExtOrTrunc(Res: SizeTy, Op: SizeOpReg).getReg(Idx: 0); |
| 1721 | |
| 1722 | auto ICall = MIRBuilder.buildInstr(Opcode); |
| 1723 | for (Register SrcReg : SrcRegs) |
| 1724 | ICall.addUse(RegNo: SrcReg); |
| 1725 | |
| 1726 | Align DstAlign; |
| 1727 | Align SrcAlign; |
| 1728 | unsigned IsVol = |
| 1729 | cast<ConstantInt>(Val: CI.getArgOperand(i: CI.arg_size() - 1))->getZExtValue(); |
| 1730 | |
| 1731 | ConstantInt *CopySize = nullptr; |
| 1732 | |
| 1733 | if (auto *MCI = dyn_cast<MemCpyInst>(Val: &CI)) { |
| 1734 | DstAlign = MCI->getDestAlign().valueOrOne(); |
| 1735 | SrcAlign = MCI->getSourceAlign().valueOrOne(); |
| 1736 | CopySize = dyn_cast<ConstantInt>(Val: MCI->getArgOperand(i: 2)); |
| 1737 | } else if (auto *MMI = dyn_cast<MemMoveInst>(Val: &CI)) { |
| 1738 | DstAlign = MMI->getDestAlign().valueOrOne(); |
| 1739 | SrcAlign = MMI->getSourceAlign().valueOrOne(); |
| 1740 | CopySize = dyn_cast<ConstantInt>(Val: MMI->getArgOperand(i: 2)); |
| 1741 | } else { |
| 1742 | auto *MSI = cast<MemSetInst>(Val: &CI); |
| 1743 | DstAlign = MSI->getDestAlign().valueOrOne(); |
| 1744 | } |
| 1745 | |
| 1746 | if (Opcode != TargetOpcode::G_MEMCPY_INLINE) { |
| 1747 | // We need to propagate the tail call flag from the IR inst as an argument. |
| 1748 | // Otherwise, we have to pessimize and assume later that we cannot tail call |
| 1749 | // any memory intrinsics. |
| 1750 | ICall.addImm(Val: CI.isTailCall() ? 1 : 0); |
| 1751 | } |
| 1752 | |
| 1753 | // Create mem operands to store the alignment and volatile info. |
| 1754 | MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad; |
| 1755 | MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore; |
| 1756 | if (IsVol) { |
| 1757 | LoadFlags |= MachineMemOperand::MOVolatile; |
| 1758 | StoreFlags |= MachineMemOperand::MOVolatile; |
| 1759 | } |
| 1760 | |
| 1761 | AAMDNodes AAInfo = CI.getAAMetadata(); |
| 1762 | if (AA && CopySize && |
| 1763 | AA->pointsToConstantMemory(Loc: MemoryLocation( |
| 1764 | SrcPtr, LocationSize::precise(Value: CopySize->getZExtValue()), AAInfo))) { |
| 1765 | LoadFlags |= MachineMemOperand::MOInvariant; |
| 1766 | |
| 1767 | // FIXME: pointsToConstantMemory probably does not imply dereferenceable, |
| 1768 | // but the previous usage implied it did. Probably should check |
| 1769 | // isDereferenceableAndAlignedPointer. |
| 1770 | LoadFlags |= MachineMemOperand::MODereferenceable; |
| 1771 | } |
| 1772 | |
| 1773 | ICall.addMemOperand( |
| 1774 | MMO: MF->getMachineMemOperand(PtrInfo: MachinePointerInfo(CI.getArgOperand(i: 0)), |
| 1775 | F: StoreFlags, Size: 1, BaseAlignment: DstAlign, AAInfo)); |
| 1776 | if (Opcode != TargetOpcode::G_MEMSET) |
| 1777 | ICall.addMemOperand(MMO: MF->getMachineMemOperand( |
| 1778 | PtrInfo: MachinePointerInfo(SrcPtr), F: LoadFlags, Size: 1, BaseAlignment: SrcAlign, AAInfo)); |
| 1779 | |
| 1780 | return true; |
| 1781 | } |
| 1782 | |
| 1783 | bool IRTranslator::translateTrap(const CallInst &CI, |
| 1784 | MachineIRBuilder &MIRBuilder, |
| 1785 | unsigned Opcode) { |
| 1786 | StringRef TrapFuncName = |
CI.getAttributes().getFnAttr(Kind: "trap-func-name").getValueAsString();
| 1788 | if (TrapFuncName.empty()) { |
| 1789 | if (Opcode == TargetOpcode::G_UBSANTRAP) { |
| 1790 | uint64_t Code = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 0))->getZExtValue(); |
| 1791 | MIRBuilder.buildInstr(Opc: Opcode, DstOps: {}, SrcOps: ArrayRef<llvm::SrcOp>{Code}); |
| 1792 | } else { |
| 1793 | MIRBuilder.buildInstr(Opcode); |
| 1794 | } |
| 1795 | return true; |
| 1796 | } |
| 1797 | |
| 1798 | CallLowering::CallLoweringInfo Info; |
| 1799 | if (Opcode == TargetOpcode::G_UBSANTRAP) |
| 1800 | Info.OrigArgs.push_back(Elt: {getOrCreateVRegs(Val: *CI.getArgOperand(i: 0)), |
| 1801 | CI.getArgOperand(i: 0)->getType(), 0}); |
| 1802 | |
| 1803 | Info.Callee = MachineOperand::CreateES(SymName: TrapFuncName.data()); |
| 1804 | Info.CB = &CI; |
| 1805 | Info.OrigRet = {Register(), Type::getVoidTy(C&: CI.getContext()), 0}; |
| 1806 | return CLI->lowerCall(MIRBuilder, Info); |
| 1807 | } |
| 1808 | |
| 1809 | bool IRTranslator::translateVectorInterleave2Intrinsic( |
| 1810 | const CallInst &CI, MachineIRBuilder &MIRBuilder) { |
| 1811 | assert(CI.getIntrinsicID() == Intrinsic::vector_interleave2 && |
| 1812 | "This function can only be called on the interleave2 intrinsic!" ); |
| 1813 | // Canonicalize interleave2 to G_SHUFFLE_VECTOR (similar to SelectionDAG). |
| 1814 | Register Op0 = getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0)); |
| 1815 | Register Op1 = getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 1)); |
| 1816 | Register Res = getOrCreateVReg(Val: CI); |
| 1817 | |
| 1818 | LLT OpTy = MRI->getType(Reg: Op0); |
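// E.g. for two <4 x i32> operands this builds a G_SHUFFLE_VECTOR with mask
// <0, 4, 1, 5, 2, 6, 3, 7>.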
| 1819 | MIRBuilder.buildShuffleVector(Res, Src1: Op0, Src2: Op1, |
| 1820 | Mask: createInterleaveMask(VF: OpTy.getNumElements(), NumVecs: 2)); |
| 1821 | |
| 1822 | return true; |
| 1823 | } |
| 1824 | |
| 1825 | bool IRTranslator::translateVectorDeinterleave2Intrinsic( |
| 1826 | const CallInst &CI, MachineIRBuilder &MIRBuilder) { |
| 1827 | assert(CI.getIntrinsicID() == Intrinsic::vector_deinterleave2 && |
| 1828 | "This function can only be called on the deinterleave2 intrinsic!" ); |
| 1829 | // Canonicalize deinterleave2 to shuffles that extract sub-vectors (similar to |
| 1830 | // SelectionDAG). |
| 1831 | Register Op = getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0)); |
| 1832 | auto Undef = MIRBuilder.buildUndef(Res: MRI->getType(Reg: Op)); |
| 1833 | ArrayRef<Register> Res = getOrCreateVRegs(Val: CI); |
| 1834 | |
| 1835 | LLT ResTy = MRI->getType(Reg: Res[0]); |
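// E.g. an <8 x i32> input is split into the even and odd lanes with the
// stride masks <0, 2, 4, 6> and <1, 3, 5, 7>.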
| 1836 | MIRBuilder.buildShuffleVector(Res: Res[0], Src1: Op, Src2: Undef, |
| 1837 | Mask: createStrideMask(Start: 0, Stride: 2, VF: ResTy.getNumElements())); |
| 1838 | MIRBuilder.buildShuffleVector(Res: Res[1], Src1: Op, Src2: Undef, |
| 1839 | Mask: createStrideMask(Start: 1, Stride: 2, VF: ResTy.getNumElements())); |
| 1840 | |
| 1841 | return true; |
| 1842 | } |
| 1843 | |
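// Emit a LOAD_STACK_GUARD pseudo into DstReg; when the target exposes a
// stack-guard global, also attach an invariant, dereferenceable memory
// operand describing it.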
| 1844 | void IRTranslator::getStackGuard(Register DstReg, |
| 1845 | MachineIRBuilder &MIRBuilder) { |
| 1846 | const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); |
| 1847 | MRI->setRegClass(Reg: DstReg, RC: TRI->getPointerRegClass(MF: *MF)); |
| 1848 | auto MIB = |
| 1849 | MIRBuilder.buildInstr(Opc: TargetOpcode::LOAD_STACK_GUARD, DstOps: {DstReg}, SrcOps: {}); |
| 1850 | |
| 1851 | Value *Global = TLI->getSDagStackGuard(M: *MF->getFunction().getParent()); |
| 1852 | if (!Global) |
| 1853 | return; |
| 1854 | |
| 1855 | unsigned AddrSpace = Global->getType()->getPointerAddressSpace(); |
| 1856 | LLT PtrTy = LLT::pointer(AddressSpace: AddrSpace, SizeInBits: DL->getPointerSizeInBits(AS: AddrSpace)); |
| 1857 | |
| 1858 | MachinePointerInfo MPInfo(Global); |
| 1859 | auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | |
| 1860 | MachineMemOperand::MODereferenceable; |
| 1861 | MachineMemOperand *MemRef = MF->getMachineMemOperand( |
| 1862 | PtrInfo: MPInfo, f: Flags, MemTy: PtrTy, base_alignment: DL->getPointerABIAlignment(AS: AddrSpace)); |
| 1863 | MIB.setMemRefs({MemRef}); |
| 1864 | } |
| 1865 | |
| 1866 | bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, |
| 1867 | MachineIRBuilder &MIRBuilder) { |
| 1868 | ArrayRef<Register> ResRegs = getOrCreateVRegs(Val: CI); |
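// Overflow intrinsics produce a {result, i1 overflow} pair: ResRegs[0]
// receives the arithmetic result and ResRegs[1] the overflow flag.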
| 1869 | MIRBuilder.buildInstr( |
| 1870 | Opc: Op, DstOps: {ResRegs[0], ResRegs[1]}, |
| 1871 | SrcOps: {getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0)), getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 1))}); |
| 1872 | |
| 1873 | return true; |
| 1874 | } |
| 1875 | |
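// Lower the fixed-point arithmetic intrinsics (smul.fix and friends). The
// third operand is the scale, i.e. the number of fractional bits; e.g. with
// a scale of 31, i32 operands are treated as Q1.31 values.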
| 1876 | bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI, |
| 1877 | MachineIRBuilder &MIRBuilder) { |
| 1878 | Register Dst = getOrCreateVReg(Val: CI); |
| 1879 | Register Src0 = getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0)); |
| 1880 | Register Src1 = getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 1)); |
| 1881 | uint64_t Scale = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 2))->getZExtValue(); |
| 1882 | MIRBuilder.buildInstr(Opc: Op, DstOps: {Dst}, SrcOps: { Src0, Src1, Scale }); |
| 1883 | return true; |
| 1884 | } |
| 1885 | |
| 1886 | unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) { |
| 1887 | switch (ID) { |
| 1888 | default: |
| 1889 | break; |
| 1890 | case Intrinsic::acos: |
| 1891 | return TargetOpcode::G_FACOS; |
| 1892 | case Intrinsic::asin: |
| 1893 | return TargetOpcode::G_FASIN; |
| 1894 | case Intrinsic::atan: |
| 1895 | return TargetOpcode::G_FATAN; |
| 1896 | case Intrinsic::atan2: |
| 1897 | return TargetOpcode::G_FATAN2; |
| 1898 | case Intrinsic::bswap: |
| 1899 | return TargetOpcode::G_BSWAP; |
| 1900 | case Intrinsic::bitreverse: |
| 1901 | return TargetOpcode::G_BITREVERSE; |
| 1902 | case Intrinsic::fshl: |
| 1903 | return TargetOpcode::G_FSHL; |
| 1904 | case Intrinsic::fshr: |
| 1905 | return TargetOpcode::G_FSHR; |
| 1906 | case Intrinsic::ceil: |
| 1907 | return TargetOpcode::G_FCEIL; |
| 1908 | case Intrinsic::cos: |
| 1909 | return TargetOpcode::G_FCOS; |
| 1910 | case Intrinsic::cosh: |
| 1911 | return TargetOpcode::G_FCOSH; |
| 1912 | case Intrinsic::ctpop: |
| 1913 | return TargetOpcode::G_CTPOP; |
| 1914 | case Intrinsic::exp: |
| 1915 | return TargetOpcode::G_FEXP; |
| 1916 | case Intrinsic::exp2: |
| 1917 | return TargetOpcode::G_FEXP2; |
| 1918 | case Intrinsic::exp10: |
| 1919 | return TargetOpcode::G_FEXP10; |
| 1920 | case Intrinsic::fabs: |
| 1921 | return TargetOpcode::G_FABS; |
| 1922 | case Intrinsic::copysign: |
| 1923 | return TargetOpcode::G_FCOPYSIGN; |
| 1924 | case Intrinsic::minnum: |
| 1925 | return TargetOpcode::G_FMINNUM; |
| 1926 | case Intrinsic::maxnum: |
| 1927 | return TargetOpcode::G_FMAXNUM; |
| 1928 | case Intrinsic::minimum: |
| 1929 | return TargetOpcode::G_FMINIMUM; |
| 1930 | case Intrinsic::maximum: |
| 1931 | return TargetOpcode::G_FMAXIMUM; |
| 1932 | case Intrinsic::minimumnum: |
| 1933 | return TargetOpcode::G_FMINIMUMNUM; |
| 1934 | case Intrinsic::maximumnum: |
| 1935 | return TargetOpcode::G_FMAXIMUMNUM; |
| 1936 | case Intrinsic::canonicalize: |
| 1937 | return TargetOpcode::G_FCANONICALIZE; |
| 1938 | case Intrinsic::floor: |
| 1939 | return TargetOpcode::G_FFLOOR; |
| 1940 | case Intrinsic::fma: |
| 1941 | return TargetOpcode::G_FMA; |
| 1942 | case Intrinsic::log: |
| 1943 | return TargetOpcode::G_FLOG; |
| 1944 | case Intrinsic::log2: |
| 1945 | return TargetOpcode::G_FLOG2; |
| 1946 | case Intrinsic::log10: |
| 1947 | return TargetOpcode::G_FLOG10; |
| 1948 | case Intrinsic::ldexp: |
| 1949 | return TargetOpcode::G_FLDEXP; |
| 1950 | case Intrinsic::nearbyint: |
| 1951 | return TargetOpcode::G_FNEARBYINT; |
| 1952 | case Intrinsic::pow: |
| 1953 | return TargetOpcode::G_FPOW; |
| 1954 | case Intrinsic::powi: |
| 1955 | return TargetOpcode::G_FPOWI; |
| 1956 | case Intrinsic::rint: |
| 1957 | return TargetOpcode::G_FRINT; |
| 1958 | case Intrinsic::round: |
| 1959 | return TargetOpcode::G_INTRINSIC_ROUND; |
| 1960 | case Intrinsic::roundeven: |
| 1961 | return TargetOpcode::G_INTRINSIC_ROUNDEVEN; |
| 1962 | case Intrinsic::sin: |
| 1963 | return TargetOpcode::G_FSIN; |
| 1964 | case Intrinsic::sinh: |
| 1965 | return TargetOpcode::G_FSINH; |
| 1966 | case Intrinsic::sqrt: |
| 1967 | return TargetOpcode::G_FSQRT; |
| 1968 | case Intrinsic::tan: |
| 1969 | return TargetOpcode::G_FTAN; |
| 1970 | case Intrinsic::tanh: |
| 1971 | return TargetOpcode::G_FTANH; |
| 1972 | case Intrinsic::trunc: |
| 1973 | return TargetOpcode::G_INTRINSIC_TRUNC; |
| 1974 | case Intrinsic::readcyclecounter: |
| 1975 | return TargetOpcode::G_READCYCLECOUNTER; |
| 1976 | case Intrinsic::readsteadycounter: |
| 1977 | return TargetOpcode::G_READSTEADYCOUNTER; |
| 1978 | case Intrinsic::ptrmask: |
| 1979 | return TargetOpcode::G_PTRMASK; |
| 1980 | case Intrinsic::lrint: |
| 1981 | return TargetOpcode::G_INTRINSIC_LRINT; |
| 1982 | case Intrinsic::llrint: |
| 1983 | return TargetOpcode::G_INTRINSIC_LLRINT; |
| 1984 | // FADD/FMUL require checking the FMF, so are handled elsewhere. |
| 1985 | case Intrinsic::vector_reduce_fmin: |
| 1986 | return TargetOpcode::G_VECREDUCE_FMIN; |
| 1987 | case Intrinsic::vector_reduce_fmax: |
| 1988 | return TargetOpcode::G_VECREDUCE_FMAX; |
| 1989 | case Intrinsic::vector_reduce_fminimum: |
| 1990 | return TargetOpcode::G_VECREDUCE_FMINIMUM; |
| 1991 | case Intrinsic::vector_reduce_fmaximum: |
| 1992 | return TargetOpcode::G_VECREDUCE_FMAXIMUM; |
| 1993 | case Intrinsic::vector_reduce_add: |
| 1994 | return TargetOpcode::G_VECREDUCE_ADD; |
| 1995 | case Intrinsic::vector_reduce_mul: |
| 1996 | return TargetOpcode::G_VECREDUCE_MUL; |
| 1997 | case Intrinsic::vector_reduce_and: |
| 1998 | return TargetOpcode::G_VECREDUCE_AND; |
| 1999 | case Intrinsic::vector_reduce_or: |
| 2000 | return TargetOpcode::G_VECREDUCE_OR; |
| 2001 | case Intrinsic::vector_reduce_xor: |
| 2002 | return TargetOpcode::G_VECREDUCE_XOR; |
| 2003 | case Intrinsic::vector_reduce_smax: |
| 2004 | return TargetOpcode::G_VECREDUCE_SMAX; |
| 2005 | case Intrinsic::vector_reduce_smin: |
| 2006 | return TargetOpcode::G_VECREDUCE_SMIN; |
| 2007 | case Intrinsic::vector_reduce_umax: |
| 2008 | return TargetOpcode::G_VECREDUCE_UMAX; |
| 2009 | case Intrinsic::vector_reduce_umin: |
| 2010 | return TargetOpcode::G_VECREDUCE_UMIN; |
| 2011 | case Intrinsic::experimental_vector_compress: |
| 2012 | return TargetOpcode::G_VECTOR_COMPRESS; |
| 2013 | case Intrinsic::lround: |
| 2014 | return TargetOpcode::G_LROUND; |
| 2015 | case Intrinsic::llround: |
| 2016 | return TargetOpcode::G_LLROUND; |
| 2017 | case Intrinsic::get_fpenv: |
| 2018 | return TargetOpcode::G_GET_FPENV; |
| 2019 | case Intrinsic::get_fpmode: |
| 2020 | return TargetOpcode::G_GET_FPMODE; |
| 2021 | } |
| 2022 | return Intrinsic::not_intrinsic; |
| 2023 | } |
| 2024 | |
| 2025 | bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI, |
| 2026 | Intrinsic::ID ID, |
| 2027 | MachineIRBuilder &MIRBuilder) { |
| 2028 | |
| 2029 | unsigned Op = getSimpleIntrinsicOpcode(ID); |
| 2030 | |
| 2031 | // Is this a simple intrinsic? |
| 2032 | if (Op == Intrinsic::not_intrinsic) |
| 2033 | return false; |
| 2034 | |
| 2035 | // Yes. Let's translate it. |
| 2036 | SmallVector<llvm::SrcOp, 4> VRegs; |
| 2037 | for (const auto &Arg : CI.args()) |
| 2038 | VRegs.push_back(Elt: getOrCreateVReg(Val: *Arg)); |
| 2039 | |
| 2040 | MIRBuilder.buildInstr(Opc: Op, DstOps: {getOrCreateVReg(Val: CI)}, SrcOps: VRegs, |
| 2041 | Flags: MachineInstr::copyFlagsFromInstruction(I: CI)); |
| 2042 | return true; |
| 2043 | } |
| 2044 | |
// TODO: Include ConstrainedOps.def when all strict instructions are defined.
| 2046 | static unsigned getConstrainedOpcode(Intrinsic::ID ID) { |
| 2047 | switch (ID) { |
| 2048 | case Intrinsic::experimental_constrained_fadd: |
| 2049 | return TargetOpcode::G_STRICT_FADD; |
| 2050 | case Intrinsic::experimental_constrained_fsub: |
| 2051 | return TargetOpcode::G_STRICT_FSUB; |
| 2052 | case Intrinsic::experimental_constrained_fmul: |
| 2053 | return TargetOpcode::G_STRICT_FMUL; |
| 2054 | case Intrinsic::experimental_constrained_fdiv: |
| 2055 | return TargetOpcode::G_STRICT_FDIV; |
| 2056 | case Intrinsic::experimental_constrained_frem: |
| 2057 | return TargetOpcode::G_STRICT_FREM; |
| 2058 | case Intrinsic::experimental_constrained_fma: |
| 2059 | return TargetOpcode::G_STRICT_FMA; |
| 2060 | case Intrinsic::experimental_constrained_sqrt: |
| 2061 | return TargetOpcode::G_STRICT_FSQRT; |
| 2062 | case Intrinsic::experimental_constrained_ldexp: |
| 2063 | return TargetOpcode::G_STRICT_FLDEXP; |
| 2064 | default: |
| 2065 | return 0; |
| 2066 | } |
| 2067 | } |
| 2068 | |
| 2069 | bool IRTranslator::translateConstrainedFPIntrinsic( |
| 2070 | const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) { |
| 2071 | fp::ExceptionBehavior EB = *FPI.getExceptionBehavior(); |
| 2072 | |
| 2073 | unsigned Opcode = getConstrainedOpcode(ID: FPI.getIntrinsicID()); |
| 2074 | if (!Opcode) |
| 2075 | return false; |
| 2076 | |
| 2077 | uint32_t Flags = MachineInstr::copyFlagsFromInstruction(I: FPI); |
| 2078 | if (EB == fp::ExceptionBehavior::ebIgnore) |
| 2079 | Flags |= MachineInstr::NoFPExcept; |
| 2080 | |
| 2081 | SmallVector<llvm::SrcOp, 4> VRegs; |
| 2082 | for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I) |
| 2083 | VRegs.push_back(Elt: getOrCreateVReg(Val: *FPI.getArgOperand(i: I))); |
| 2084 | |
| 2085 | MIRBuilder.buildInstr(Opc: Opcode, DstOps: {getOrCreateVReg(Val: FPI)}, SrcOps: VRegs, Flags); |
| 2086 | return true; |
| 2087 | } |
| 2088 | |
| 2089 | std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) { |
| 2090 | auto VRegs = getOrCreateVRegs(Val: Arg); |
| 2091 | if (VRegs.size() != 1) |
| 2092 | return std::nullopt; |
| 2093 | |
| 2094 | // Arguments are lowered as a copy of a livein physical register. |
| 2095 | auto *VRegDef = MF->getRegInfo().getVRegDef(Reg: VRegs[0]); |
| 2096 | if (!VRegDef || !VRegDef->isCopy()) |
| 2097 | return std::nullopt; |
| 2098 | return VRegDef->getOperand(i: 1).getReg().asMCReg(); |
| 2099 | } |
| 2100 | |
| 2101 | bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val, |
| 2102 | const DILocalVariable *Var, |
| 2103 | const DIExpression *Expr, |
| 2104 | const DebugLoc &DL, |
| 2105 | MachineIRBuilder &MIRBuilder) { |
| 2106 | auto *Arg = dyn_cast<Argument>(Val); |
| 2107 | if (!Arg) |
| 2108 | return false; |
| 2109 | |
| 2110 | if (!Expr->isEntryValue()) |
| 2111 | return false; |
| 2112 | |
| 2113 | std::optional<MCRegister> PhysReg = getArgPhysReg(Arg&: *Arg); |
| 2114 | if (!PhysReg) { |
| 2115 | LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value" ) |
| 2116 | << ": expression is entry_value but " |
| 2117 | << "couldn't find a physical register\n" ); |
| 2118 | LLVM_DEBUG(dbgs() << *Var << "\n" ); |
| 2119 | return true; |
| 2120 | } |
| 2121 | |
| 2122 | if (isDeclare) { |
| 2123 | // Append an op deref to account for the fact that this is a dbg_declare. |
| 2124 | Expr = DIExpression::append(Expr, Ops: dwarf::DW_OP_deref); |
| 2125 | MF->setVariableDbgInfo(Var, Expr, Reg: *PhysReg, Loc: DL); |
| 2126 | } else { |
| 2127 | MIRBuilder.buildDirectDbgValue(Reg: *PhysReg, Variable: Var, Expr); |
| 2128 | } |
| 2129 | |
| 2130 | return true; |
| 2131 | } |
| 2132 | |
| 2133 | static unsigned getConvOpcode(Intrinsic::ID ID) { |
| 2134 | switch (ID) { |
| 2135 | default: |
| 2136 | llvm_unreachable("Unexpected intrinsic" ); |
| 2137 | case Intrinsic::experimental_convergence_anchor: |
| 2138 | return TargetOpcode::CONVERGENCECTRL_ANCHOR; |
| 2139 | case Intrinsic::experimental_convergence_entry: |
| 2140 | return TargetOpcode::CONVERGENCECTRL_ENTRY; |
| 2141 | case Intrinsic::experimental_convergence_loop: |
| 2142 | return TargetOpcode::CONVERGENCECTRL_LOOP; |
| 2143 | } |
| 2144 | } |
| 2145 | |
| 2146 | bool IRTranslator::translateConvergenceControlIntrinsic( |
| 2147 | const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) { |
| 2148 | MachineInstrBuilder MIB = MIRBuilder.buildInstr(Opcode: getConvOpcode(ID)); |
| 2149 | Register OutputReg = getOrCreateConvergenceTokenVReg(Token: CI); |
| 2150 | MIB.addDef(RegNo: OutputReg); |
| 2151 | |
| 2152 | if (ID == Intrinsic::experimental_convergence_loop) { |
| 2153 | auto Bundle = CI.getOperandBundle(ID: LLVMContext::OB_convergencectrl); |
assert(Bundle && "Expected a convergence control token.");
| 2155 | Register InputReg = |
| 2156 | getOrCreateConvergenceTokenVReg(Token: *Bundle->Inputs[0].get()); |
| 2157 | MIB.addUse(RegNo: InputReg); |
| 2158 | } |
| 2159 | |
| 2160 | return true; |
| 2161 | } |
| 2162 | |
| 2163 | bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, |
| 2164 | MachineIRBuilder &MIRBuilder) { |
| 2165 | if (auto *MI = dyn_cast<AnyMemIntrinsic>(Val: &CI)) { |
| 2166 | if (ORE->enabled()) { |
| 2167 | if (MemoryOpRemark::canHandle(I: MI, TLI: *LibInfo)) { |
MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
| 2169 | R.visit(I: MI); |
| 2170 | } |
| 2171 | } |
| 2172 | } |
| 2173 | |
// If this is a simple intrinsic (that is, we just need to add a def of
// a vreg, and uses for each arg operand), then translate it.
| 2176 | if (translateSimpleIntrinsic(CI, ID, MIRBuilder)) |
| 2177 | return true; |
| 2178 | |
| 2179 | switch (ID) { |
| 2180 | default: |
| 2181 | break; |
| 2182 | case Intrinsic::lifetime_start: |
| 2183 | case Intrinsic::lifetime_end: { |
| 2184 | // No stack colouring in O0, discard region information. |
| 2185 | if (MF->getTarget().getOptLevel() == CodeGenOptLevel::None || |
| 2186 | MF->getFunction().hasOptNone()) |
| 2187 | return true; |
| 2188 | |
| 2189 | unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START |
| 2190 | : TargetOpcode::LIFETIME_END; |
| 2191 | |
| 2192 | // Get the underlying objects for the location passed on the lifetime |
| 2193 | // marker. |
| 2194 | SmallVector<const Value *, 4> Allocas; |
| 2195 | getUnderlyingObjects(V: CI.getArgOperand(i: 1), Objects&: Allocas); |
| 2196 | |
| 2197 | // Iterate over each underlying object, creating lifetime markers for each |
| 2198 | // static alloca. Quit if we find a non-static alloca. |
| 2199 | for (const Value *V : Allocas) { |
| 2200 | const AllocaInst *AI = dyn_cast<AllocaInst>(Val: V); |
| 2201 | if (!AI) |
| 2202 | continue; |
| 2203 | |
| 2204 | if (!AI->isStaticAlloca()) |
| 2205 | return true; |
| 2206 | |
| 2207 | MIRBuilder.buildInstr(Opcode: Op).addFrameIndex(Idx: getOrCreateFrameIndex(AI: *AI)); |
| 2208 | } |
| 2209 | return true; |
| 2210 | } |
| 2211 | case Intrinsic::fake_use: { |
| 2212 | SmallVector<llvm::SrcOp, 4> VRegs; |
| 2213 | for (const auto &Arg : CI.args()) |
| 2214 | llvm::append_range(C&: VRegs, R: getOrCreateVRegs(Val: *Arg)); |
| 2215 | MIRBuilder.buildInstr(Opc: TargetOpcode::FAKE_USE, DstOps: {}, SrcOps: VRegs); |
| 2216 | MF->setHasFakeUses(true); |
| 2217 | return true; |
| 2218 | } |
| 2219 | case Intrinsic::dbg_declare: { |
| 2220 | const DbgDeclareInst &DI = cast<DbgDeclareInst>(Val: CI); |
assert(DI.getVariable() && "Missing variable");
| 2222 | translateDbgDeclareRecord(Address: DI.getAddress(), HasArgList: DI.hasArgList(), Variable: DI.getVariable(), |
| 2223 | Expression: DI.getExpression(), DL: DI.getDebugLoc(), MIRBuilder); |
| 2224 | return true; |
| 2225 | } |
| 2226 | case Intrinsic::dbg_label: { |
| 2227 | const DbgLabelInst &DI = cast<DbgLabelInst>(Val: CI); |
assert(DI.getLabel() && "Missing label");
| 2229 | |
| 2230 | assert(DI.getLabel()->isValidLocationForIntrinsic( |
| 2231 | MIRBuilder.getDebugLoc()) && |
| 2232 | "Expected inlined-at fields to agree" ); |
| 2233 | |
| 2234 | MIRBuilder.buildDbgLabel(Label: DI.getLabel()); |
| 2235 | return true; |
| 2236 | } |
| 2237 | case Intrinsic::vaend: |
| 2238 | // No target I know of cares about va_end. Certainly no in-tree target |
| 2239 | // does. Simplest intrinsic ever! |
| 2240 | return true; |
| 2241 | case Intrinsic::vastart: { |
| 2242 | Value *Ptr = CI.getArgOperand(i: 0); |
| 2243 | unsigned ListSize = TLI->getVaListSizeInBits(DL: *DL) / 8; |
| 2244 | Align Alignment = getKnownAlignment(V: Ptr, DL: *DL); |
| 2245 | |
| 2246 | MIRBuilder.buildInstr(Opc: TargetOpcode::G_VASTART, DstOps: {}, SrcOps: {getOrCreateVReg(Val: *Ptr)}) |
| 2247 | .addMemOperand(MMO: MF->getMachineMemOperand(PtrInfo: MachinePointerInfo(Ptr), |
| 2248 | F: MachineMemOperand::MOStore, |
| 2249 | Size: ListSize, BaseAlignment: Alignment)); |
| 2250 | return true; |
| 2251 | } |
| 2252 | case Intrinsic::dbg_assign: |
| 2253 | // A dbg.assign is a dbg.value with more information about stack locations, |
| 2254 | // typically produced during optimisation of variables with leaked |
| 2255 | // addresses. We can treat it like a normal dbg_value intrinsic here; to |
| 2256 | // benefit from the full analysis of stack/SSA locations, GlobalISel would |
| 2257 | // need to register for and use the AssignmentTrackingAnalysis pass. |
| 2258 | [[fallthrough]]; |
| 2259 | case Intrinsic::dbg_value: { |
| 2260 | // This form of DBG_VALUE is target-independent. |
| 2261 | const DbgValueInst &DI = cast<DbgValueInst>(Val: CI); |
| 2262 | translateDbgValueRecord(V: DI.getValue(), HasArgList: DI.hasArgList(), Variable: DI.getVariable(), |
| 2263 | Expression: DI.getExpression(), DL: DI.getDebugLoc(), MIRBuilder); |
| 2264 | return true; |
| 2265 | } |
| 2266 | case Intrinsic::uadd_with_overflow: |
| 2267 | return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_UADDO, MIRBuilder); |
| 2268 | case Intrinsic::sadd_with_overflow: |
| 2269 | return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_SADDO, MIRBuilder); |
| 2270 | case Intrinsic::usub_with_overflow: |
| 2271 | return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_USUBO, MIRBuilder); |
| 2272 | case Intrinsic::ssub_with_overflow: |
| 2273 | return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_SSUBO, MIRBuilder); |
| 2274 | case Intrinsic::umul_with_overflow: |
| 2275 | return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_UMULO, MIRBuilder); |
| 2276 | case Intrinsic::smul_with_overflow: |
| 2277 | return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_SMULO, MIRBuilder); |
| 2278 | case Intrinsic::uadd_sat: |
| 2279 | return translateBinaryOp(Opcode: TargetOpcode::G_UADDSAT, U: CI, MIRBuilder); |
| 2280 | case Intrinsic::sadd_sat: |
| 2281 | return translateBinaryOp(Opcode: TargetOpcode::G_SADDSAT, U: CI, MIRBuilder); |
| 2282 | case Intrinsic::usub_sat: |
| 2283 | return translateBinaryOp(Opcode: TargetOpcode::G_USUBSAT, U: CI, MIRBuilder); |
| 2284 | case Intrinsic::ssub_sat: |
| 2285 | return translateBinaryOp(Opcode: TargetOpcode::G_SSUBSAT, U: CI, MIRBuilder); |
| 2286 | case Intrinsic::ushl_sat: |
| 2287 | return translateBinaryOp(Opcode: TargetOpcode::G_USHLSAT, U: CI, MIRBuilder); |
| 2288 | case Intrinsic::sshl_sat: |
| 2289 | return translateBinaryOp(Opcode: TargetOpcode::G_SSHLSAT, U: CI, MIRBuilder); |
| 2290 | case Intrinsic::umin: |
| 2291 | return translateBinaryOp(Opcode: TargetOpcode::G_UMIN, U: CI, MIRBuilder); |
| 2292 | case Intrinsic::umax: |
| 2293 | return translateBinaryOp(Opcode: TargetOpcode::G_UMAX, U: CI, MIRBuilder); |
| 2294 | case Intrinsic::smin: |
| 2295 | return translateBinaryOp(Opcode: TargetOpcode::G_SMIN, U: CI, MIRBuilder); |
| 2296 | case Intrinsic::smax: |
| 2297 | return translateBinaryOp(Opcode: TargetOpcode::G_SMAX, U: CI, MIRBuilder); |
| 2298 | case Intrinsic::abs: |
| 2299 | // TODO: Preserve "int min is poison" arg in GMIR? |
| 2300 | return translateUnaryOp(Opcode: TargetOpcode::G_ABS, U: CI, MIRBuilder); |
| 2301 | case Intrinsic::smul_fix: |
| 2302 | return translateFixedPointIntrinsic(Op: TargetOpcode::G_SMULFIX, CI, MIRBuilder); |
| 2303 | case Intrinsic::umul_fix: |
| 2304 | return translateFixedPointIntrinsic(Op: TargetOpcode::G_UMULFIX, CI, MIRBuilder); |
| 2305 | case Intrinsic::smul_fix_sat: |
| 2306 | return translateFixedPointIntrinsic(Op: TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder); |
| 2307 | case Intrinsic::umul_fix_sat: |
| 2308 | return translateFixedPointIntrinsic(Op: TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder); |
| 2309 | case Intrinsic::sdiv_fix: |
| 2310 | return translateFixedPointIntrinsic(Op: TargetOpcode::G_SDIVFIX, CI, MIRBuilder); |
| 2311 | case Intrinsic::udiv_fix: |
| 2312 | return translateFixedPointIntrinsic(Op: TargetOpcode::G_UDIVFIX, CI, MIRBuilder); |
| 2313 | case Intrinsic::sdiv_fix_sat: |
| 2314 | return translateFixedPointIntrinsic(Op: TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder); |
| 2315 | case Intrinsic::udiv_fix_sat: |
| 2316 | return translateFixedPointIntrinsic(Op: TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder); |
| 2317 | case Intrinsic::fmuladd: { |
| 2318 | const TargetMachine &TM = MF->getTarget(); |
| 2319 | Register Dst = getOrCreateVReg(Val: CI); |
| 2320 | Register Op0 = getOrCreateVReg(Val: *CI.getArgOperand(i: 0)); |
| 2321 | Register Op1 = getOrCreateVReg(Val: *CI.getArgOperand(i: 1)); |
| 2322 | Register Op2 = getOrCreateVReg(Val: *CI.getArgOperand(i: 2)); |
| 2323 | if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && |
| 2324 | TLI->isFMAFasterThanFMulAndFAdd(MF: *MF, |
| 2325 | TLI->getValueType(DL: *DL, Ty: CI.getType()))) { |
| 2326 | // TODO: Revisit this to see if we should move this part of the |
| 2327 | // lowering to the combiner. |
| 2328 | MIRBuilder.buildFMA(Dst, Src0: Op0, Src1: Op1, Src2: Op2, |
| 2329 | Flags: MachineInstr::copyFlagsFromInstruction(I: CI)); |
| 2330 | } else { |
| 2331 | LLT Ty = getLLTForType(Ty&: *CI.getType(), DL: *DL); |
| 2332 | auto FMul = MIRBuilder.buildFMul( |
| 2333 | Dst: Ty, Src0: Op0, Src1: Op1, Flags: MachineInstr::copyFlagsFromInstruction(I: CI)); |
| 2334 | MIRBuilder.buildFAdd(Dst, Src0: FMul, Src1: Op2, |
| 2335 | Flags: MachineInstr::copyFlagsFromInstruction(I: CI)); |
| 2336 | } |
| 2337 | return true; |
| 2338 | } |
| 2339 | case Intrinsic::convert_from_fp16: |
| 2340 | // FIXME: This intrinsic should probably be removed from the IR. |
| 2341 | MIRBuilder.buildFPExt(Res: getOrCreateVReg(Val: CI), |
| 2342 | Op: getOrCreateVReg(Val: *CI.getArgOperand(i: 0)), |
| 2343 | Flags: MachineInstr::copyFlagsFromInstruction(I: CI)); |
| 2344 | return true; |
| 2345 | case Intrinsic::convert_to_fp16: |
| 2346 | // FIXME: This intrinsic should probably be removed from the IR. |
| 2347 | MIRBuilder.buildFPTrunc(Res: getOrCreateVReg(Val: CI), |
| 2348 | Op: getOrCreateVReg(Val: *CI.getArgOperand(i: 0)), |
| 2349 | Flags: MachineInstr::copyFlagsFromInstruction(I: CI)); |
| 2350 | return true; |
| 2351 | case Intrinsic::frexp: { |
| 2352 | ArrayRef<Register> VRegs = getOrCreateVRegs(Val: CI); |
| 2353 | MIRBuilder.buildFFrexp(Fract: VRegs[0], Exp: VRegs[1], |
| 2354 | Src: getOrCreateVReg(Val: *CI.getArgOperand(i: 0)), |
| 2355 | Flags: MachineInstr::copyFlagsFromInstruction(I: CI)); |
| 2356 | return true; |
| 2357 | } |
| 2358 | case Intrinsic::sincos: { |
| 2359 | ArrayRef<Register> VRegs = getOrCreateVRegs(Val: CI); |
| 2360 | MIRBuilder.buildFSincos(Sin: VRegs[0], Cos: VRegs[1], |
| 2361 | Src: getOrCreateVReg(Val: *CI.getArgOperand(i: 0)), |
| 2362 | Flags: MachineInstr::copyFlagsFromInstruction(I: CI)); |
| 2363 | return true; |
| 2364 | } |
| 2365 | case Intrinsic::fptosi_sat: |
| 2366 | MIRBuilder.buildFPTOSI_SAT(Dst: getOrCreateVReg(Val: CI), |
| 2367 | Src0: getOrCreateVReg(Val: *CI.getArgOperand(i: 0))); |
| 2368 | return true; |
| 2369 | case Intrinsic::fptoui_sat: |
| 2370 | MIRBuilder.buildFPTOUI_SAT(Dst: getOrCreateVReg(Val: CI), |
| 2371 | Src0: getOrCreateVReg(Val: *CI.getArgOperand(i: 0))); |
| 2372 | return true; |
| 2373 | case Intrinsic::memcpy_inline: |
| 2374 | return translateMemFunc(CI, MIRBuilder, Opcode: TargetOpcode::G_MEMCPY_INLINE); |
| 2375 | case Intrinsic::memcpy: |
| 2376 | return translateMemFunc(CI, MIRBuilder, Opcode: TargetOpcode::G_MEMCPY); |
| 2377 | case Intrinsic::memmove: |
| 2378 | return translateMemFunc(CI, MIRBuilder, Opcode: TargetOpcode::G_MEMMOVE); |
| 2379 | case Intrinsic::memset: |
| 2380 | return translateMemFunc(CI, MIRBuilder, Opcode: TargetOpcode::G_MEMSET); |
| 2381 | case Intrinsic::eh_typeid_for: { |
| 2382 | GlobalValue *GV = ExtractTypeInfo(V: CI.getArgOperand(i: 0)); |
| 2383 | Register Reg = getOrCreateVReg(Val: CI); |
| 2384 | unsigned TypeID = MF->getTypeIDFor(TI: GV); |
| 2385 | MIRBuilder.buildConstant(Res: Reg, Val: TypeID); |
| 2386 | return true; |
| 2387 | } |
| 2388 | case Intrinsic::objectsize: |
| 2389 | llvm_unreachable("llvm.objectsize.* should have been lowered already" ); |
| 2390 | |
| 2391 | case Intrinsic::is_constant: |
| 2392 | llvm_unreachable("llvm.is.constant.* should have been lowered already" ); |
| 2393 | |
| 2394 | case Intrinsic::stackguard: |
| 2395 | getStackGuard(DstReg: getOrCreateVReg(Val: CI), MIRBuilder); |
| 2396 | return true; |
| 2397 | case Intrinsic::stackprotector: { |
| 2398 | LLT PtrTy = getLLTForType(Ty&: *CI.getArgOperand(i: 0)->getType(), DL: *DL); |
| 2399 | Register GuardVal; |
| 2400 | if (TLI->useLoadStackGuardNode(M: *CI.getModule())) { |
| 2401 | GuardVal = MRI->createGenericVirtualRegister(Ty: PtrTy); |
| 2402 | getStackGuard(DstReg: GuardVal, MIRBuilder); |
| 2403 | } else |
| 2404 | GuardVal = getOrCreateVReg(Val: *CI.getArgOperand(i: 0)); // The guard's value. |
| 2405 | |
| 2406 | AllocaInst *Slot = cast<AllocaInst>(Val: CI.getArgOperand(i: 1)); |
| 2407 | int FI = getOrCreateFrameIndex(AI: *Slot); |
| 2408 | MF->getFrameInfo().setStackProtectorIndex(FI); |
| 2409 | |
| 2410 | MIRBuilder.buildStore( |
| 2411 | Val: GuardVal, Addr: getOrCreateVReg(Val: *Slot), |
| 2412 | MMO&: *MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), |
| 2413 | f: MachineMemOperand::MOStore | |
| 2414 | MachineMemOperand::MOVolatile, |
| 2415 | MemTy: PtrTy, base_alignment: Align(8))); |
| 2416 | return true; |
| 2417 | } |
| 2418 | case Intrinsic::stacksave: { |
| 2419 | MIRBuilder.buildInstr(Opc: TargetOpcode::G_STACKSAVE, DstOps: {getOrCreateVReg(Val: CI)}, SrcOps: {}); |
| 2420 | return true; |
| 2421 | } |
| 2422 | case Intrinsic::stackrestore: { |
| 2423 | MIRBuilder.buildInstr(Opc: TargetOpcode::G_STACKRESTORE, DstOps: {}, |
| 2424 | SrcOps: {getOrCreateVReg(Val: *CI.getArgOperand(i: 0))}); |
| 2425 | return true; |
| 2426 | } |
| 2427 | case Intrinsic::cttz: |
| 2428 | case Intrinsic::ctlz: { |
| 2429 | ConstantInt *Cst = cast<ConstantInt>(Val: CI.getArgOperand(i: 1)); |
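// The second argument is the 'is zero poison' flag; when it is set we may
// use the ZERO_UNDEF variants, which leave the result for a zero input
// undefined.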
| 2430 | bool isTrailing = ID == Intrinsic::cttz; |
| 2431 | unsigned Opcode = isTrailing |
| 2432 | ? Cst->isZero() ? TargetOpcode::G_CTTZ |
| 2433 | : TargetOpcode::G_CTTZ_ZERO_UNDEF |
| 2434 | : Cst->isZero() ? TargetOpcode::G_CTLZ |
| 2435 | : TargetOpcode::G_CTLZ_ZERO_UNDEF; |
| 2436 | MIRBuilder.buildInstr(Opc: Opcode, DstOps: {getOrCreateVReg(Val: CI)}, |
| 2437 | SrcOps: {getOrCreateVReg(Val: *CI.getArgOperand(i: 0))}); |
| 2438 | return true; |
| 2439 | } |
| 2440 | case Intrinsic::invariant_start: { |
| 2441 | MIRBuilder.buildUndef(Res: getOrCreateVReg(Val: CI)); |
| 2442 | return true; |
| 2443 | } |
| 2444 | case Intrinsic::invariant_end: |
| 2445 | return true; |
| 2446 | case Intrinsic::expect: |
| 2447 | case Intrinsic::expect_with_probability: |
| 2448 | case Intrinsic::annotation: |
| 2449 | case Intrinsic::ptr_annotation: |
| 2450 | case Intrinsic::launder_invariant_group: |
| 2451 | case Intrinsic::strip_invariant_group: { |
| 2452 | // Drop the intrinsic, but forward the value. |
| 2453 | MIRBuilder.buildCopy(Res: getOrCreateVReg(Val: CI), |
| 2454 | Op: getOrCreateVReg(Val: *CI.getArgOperand(i: 0))); |
| 2455 | return true; |
| 2456 | } |
| 2457 | case Intrinsic::assume: |
| 2458 | case Intrinsic::experimental_noalias_scope_decl: |
| 2459 | case Intrinsic::var_annotation: |
| 2460 | case Intrinsic::sideeffect: |
| 2461 | // Discard annotate attributes, assumptions, and artificial side-effects. |
| 2462 | return true; |
| 2463 | case Intrinsic::read_volatile_register: |
| 2464 | case Intrinsic::read_register: { |
| 2465 | Value *Arg = CI.getArgOperand(i: 0); |
| 2466 | MIRBuilder |
| 2467 | .buildInstr(Opc: TargetOpcode::G_READ_REGISTER, DstOps: {getOrCreateVReg(Val: CI)}, SrcOps: {}) |
| 2468 | .addMetadata(MD: cast<MDNode>(Val: cast<MetadataAsValue>(Val: Arg)->getMetadata())); |
| 2469 | return true; |
| 2470 | } |
| 2471 | case Intrinsic::write_register: { |
| 2472 | Value *Arg = CI.getArgOperand(i: 0); |
| 2473 | MIRBuilder.buildInstr(Opcode: TargetOpcode::G_WRITE_REGISTER) |
| 2474 | .addMetadata(MD: cast<MDNode>(Val: cast<MetadataAsValue>(Val: Arg)->getMetadata())) |
| 2475 | .addUse(RegNo: getOrCreateVReg(Val: *CI.getArgOperand(i: 1))); |
| 2476 | return true; |
| 2477 | } |
| 2478 | case Intrinsic::localescape: { |
| 2479 | MachineBasicBlock &EntryMBB = MF->front(); |
| 2480 | StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(Name: MF->getName()); |
| 2481 | |
| 2482 | // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission |
| 2483 | // is the same on all targets. |
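// Each escaped alloca is published under a label of the form
// <function>$frame_escape_<Idx>, which llvm.localrecover uses to locate the
// corresponding frame slot.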
| 2484 | for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) { |
| 2485 | Value *Arg = CI.getArgOperand(i: Idx)->stripPointerCasts(); |
| 2486 | if (isa<ConstantPointerNull>(Val: Arg)) |
| 2487 | continue; // Skip null pointers. They represent a hole in index space. |
| 2488 | |
| 2489 | int FI = getOrCreateFrameIndex(AI: *cast<AllocaInst>(Val: Arg)); |
| 2490 | MCSymbol *FrameAllocSym = |
| 2491 | MF->getContext().getOrCreateFrameAllocSymbol(FuncName: EscapedName, Idx); |
| 2492 | |
| 2493 | // This should be inserted at the start of the entry block. |
| 2494 | auto LocalEscape = |
| 2495 | MIRBuilder.buildInstrNoInsert(Opcode: TargetOpcode::LOCAL_ESCAPE) |
| 2496 | .addSym(Sym: FrameAllocSym) |
| 2497 | .addFrameIndex(Idx: FI); |
| 2498 | |
| 2499 | EntryMBB.insert(I: EntryMBB.begin(), MI: LocalEscape); |
| 2500 | } |
| 2501 | |
| 2502 | return true; |
| 2503 | } |
| 2504 | case Intrinsic::vector_reduce_fadd: |
| 2505 | case Intrinsic::vector_reduce_fmul: { |
| 2506 | // Need to check for the reassoc flag to decide whether we want a |
| 2507 | // sequential reduction opcode or not. |
| 2508 | Register Dst = getOrCreateVReg(Val: CI); |
| 2509 | Register ScalarSrc = getOrCreateVReg(Val: *CI.getArgOperand(i: 0)); |
| 2510 | Register VecSrc = getOrCreateVReg(Val: *CI.getArgOperand(i: 1)); |
| 2511 | unsigned Opc = 0; |
| 2512 | if (!CI.hasAllowReassoc()) { |
| 2513 | // The sequential ordering case. |
| 2514 | Opc = ID == Intrinsic::vector_reduce_fadd |
| 2515 | ? TargetOpcode::G_VECREDUCE_SEQ_FADD |
| 2516 | : TargetOpcode::G_VECREDUCE_SEQ_FMUL; |
| 2517 | MIRBuilder.buildInstr(Opc, DstOps: {Dst}, SrcOps: {ScalarSrc, VecSrc}, |
| 2518 | Flags: MachineInstr::copyFlagsFromInstruction(I: CI)); |
| 2519 | return true; |
| 2520 | } |
| 2521 | // We split the operation into a separate G_FADD/G_FMUL + the reduce, |
| 2522 | // since the associativity doesn't matter. |
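// E.g. a reassoc reduction such as
//   %r = call reassoc float @llvm.vector.reduce.fadd(float %acc, <4 x float> %v)
// becomes
//   %rdx = G_VECREDUCE_FADD %v
//   %r   = G_FADD %acc, %rdx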
| 2523 | unsigned ScalarOpc; |
| 2524 | if (ID == Intrinsic::vector_reduce_fadd) { |
| 2525 | Opc = TargetOpcode::G_VECREDUCE_FADD; |
| 2526 | ScalarOpc = TargetOpcode::G_FADD; |
| 2527 | } else { |
| 2528 | Opc = TargetOpcode::G_VECREDUCE_FMUL; |
| 2529 | ScalarOpc = TargetOpcode::G_FMUL; |
| 2530 | } |
| 2531 | LLT DstTy = MRI->getType(Reg: Dst); |
| 2532 | auto Rdx = MIRBuilder.buildInstr( |
| 2533 | Opc, DstOps: {DstTy}, SrcOps: {VecSrc}, Flags: MachineInstr::copyFlagsFromInstruction(I: CI)); |
| 2534 | MIRBuilder.buildInstr(Opc: ScalarOpc, DstOps: {Dst}, SrcOps: {ScalarSrc, Rdx}, |
| 2535 | Flags: MachineInstr::copyFlagsFromInstruction(I: CI)); |
| 2536 | |
| 2537 | return true; |
| 2538 | } |
| 2539 | case Intrinsic::trap: |
| 2540 | return translateTrap(CI, MIRBuilder, Opcode: TargetOpcode::G_TRAP); |
| 2541 | case Intrinsic::debugtrap: |
| 2542 | return translateTrap(CI, MIRBuilder, Opcode: TargetOpcode::G_DEBUGTRAP); |
| 2543 | case Intrinsic::ubsantrap: |
| 2544 | return translateTrap(CI, MIRBuilder, Opcode: TargetOpcode::G_UBSANTRAP); |
| 2545 | case Intrinsic::allow_runtime_check: |
| 2546 | case Intrinsic::allow_ubsan_check: |
| 2547 | MIRBuilder.buildCopy(Res: getOrCreateVReg(Val: CI), |
| 2548 | Op: getOrCreateVReg(Val: *ConstantInt::getTrue(Ty: CI.getType()))); |
| 2549 | return true; |
| 2550 | case Intrinsic::amdgcn_cs_chain: |
| 2551 | return translateCallBase(CB: CI, MIRBuilder); |
| 2552 | case Intrinsic::fptrunc_round: { |
| 2553 | uint32_t Flags = MachineInstr::copyFlagsFromInstruction(I: CI); |
| 2554 | |
| 2555 | // Convert the metadata argument to a constant integer |
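// (e.g. metadata !"round.towardzero" maps to RoundingMode::TowardZero).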
| 2556 | Metadata *MD = cast<MetadataAsValue>(Val: CI.getArgOperand(i: 1))->getMetadata(); |
| 2557 | std::optional<RoundingMode> RoundMode = |
| 2558 | convertStrToRoundingMode(cast<MDString>(Val: MD)->getString()); |
| 2559 | |
// Add the rounding mode as an immediate operand.
| 2561 | MIRBuilder |
| 2562 | .buildInstr(Opc: TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND, |
| 2563 | DstOps: {getOrCreateVReg(Val: CI)}, |
| 2564 | SrcOps: {getOrCreateVReg(Val: *CI.getArgOperand(i: 0))}, Flags) |
| 2565 | .addImm(Val: (int)*RoundMode); |
| 2566 | |
| 2567 | return true; |
| 2568 | } |
| 2569 | case Intrinsic::is_fpclass: { |
| 2570 | Value *FpValue = CI.getOperand(i_nocapture: 0); |
| 2571 | ConstantInt *TestMaskValue = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 1)); |
| 2572 | |
| 2573 | MIRBuilder |
| 2574 | .buildInstr(Opc: TargetOpcode::G_IS_FPCLASS, DstOps: {getOrCreateVReg(Val: CI)}, |
| 2575 | SrcOps: {getOrCreateVReg(Val: *FpValue)}) |
| 2576 | .addImm(Val: TestMaskValue->getZExtValue()); |
| 2577 | |
| 2578 | return true; |
| 2579 | } |
| 2580 | case Intrinsic::set_fpenv: { |
| 2581 | Value *FPEnv = CI.getOperand(i_nocapture: 0); |
| 2582 | MIRBuilder.buildSetFPEnv(Src: getOrCreateVReg(Val: *FPEnv)); |
| 2583 | return true; |
| 2584 | } |
| 2585 | case Intrinsic::reset_fpenv: |
| 2586 | MIRBuilder.buildResetFPEnv(); |
| 2587 | return true; |
| 2588 | case Intrinsic::set_fpmode: { |
| 2589 | Value *FPState = CI.getOperand(i_nocapture: 0); |
| 2590 | MIRBuilder.buildSetFPMode(Src: getOrCreateVReg(Val: *FPState)); |
| 2591 | return true; |
| 2592 | } |
| 2593 | case Intrinsic::reset_fpmode: |
| 2594 | MIRBuilder.buildResetFPMode(); |
| 2595 | return true; |
| 2596 | case Intrinsic::vscale: { |
| 2597 | MIRBuilder.buildVScale(Res: getOrCreateVReg(Val: CI), MinElts: 1); |
| 2598 | return true; |
| 2599 | } |
| 2600 | case Intrinsic::scmp: |
| 2601 | MIRBuilder.buildSCmp(Res: getOrCreateVReg(Val: CI), |
| 2602 | Op0: getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0)), |
| 2603 | Op1: getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 1))); |
| 2604 | return true; |
| 2605 | case Intrinsic::ucmp: |
| 2606 | MIRBuilder.buildUCmp(Res: getOrCreateVReg(Val: CI), |
| 2607 | Op0: getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0)), |
| 2608 | Op1: getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 1))); |
| 2609 | return true; |
| 2610 | case Intrinsic::vector_extract: |
| 2611 | return translateExtractVector(U: CI, MIRBuilder); |
| 2612 | case Intrinsic::vector_insert: |
| 2613 | return translateInsertVector(U: CI, MIRBuilder); |
| 2614 | case Intrinsic::stepvector: { |
| 2615 | MIRBuilder.buildStepVector(Res: getOrCreateVReg(Val: CI), Step: 1); |
| 2616 | return true; |
| 2617 | } |
| 2618 | case Intrinsic::prefetch: { |
| 2619 | Value *Addr = CI.getOperand(i_nocapture: 0); |
| 2620 | unsigned RW = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 1))->getZExtValue(); |
| 2621 | unsigned Locality = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 2))->getZExtValue(); |
| 2622 | unsigned CacheType = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 3))->getZExtValue(); |
| 2623 | |
| 2624 | auto Flags = RW ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad; |
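// The memory operand is built with an unsized LLT and default alignment: a
// prefetch hints at a cache line rather than accessing a specific memory
// width.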
| 2625 | auto &MMO = *MF->getMachineMemOperand(PtrInfo: MachinePointerInfo(Addr), f: Flags, |
| 2626 | MemTy: LLT(), base_alignment: Align()); |
| 2627 | |
| 2628 | MIRBuilder.buildPrefetch(Addr: getOrCreateVReg(Val: *Addr), RW, Locality, CacheType, |
| 2629 | MMO); |
| 2630 | |
| 2631 | return true; |
| 2632 | } |
| 2633 | |
| 2634 | case Intrinsic::vector_interleave2: |
| 2635 | case Intrinsic::vector_deinterleave2: { |
| 2636 | // Both intrinsics have at least one operand. |
| 2637 | Value *Op0 = CI.getOperand(i_nocapture: 0); |
| 2638 | LLT ResTy = getLLTForType(Ty&: *Op0->getType(), DL: MIRBuilder.getDataLayout()); |
| 2639 | if (!ResTy.isFixedVector()) |
| 2640 | return false; |
| 2641 | |
| 2642 | if (CI.getIntrinsicID() == Intrinsic::vector_interleave2) |
| 2643 | return translateVectorInterleave2Intrinsic(CI, MIRBuilder); |
| 2644 | |
| 2645 | return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder); |
| 2646 | } |
| 2647 | |
| 2648 | #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \ |
| 2649 | case Intrinsic::INTRINSIC: |
| 2650 | #include "llvm/IR/ConstrainedOps.def" |
| 2651 | return translateConstrainedFPIntrinsic(FPI: cast<ConstrainedFPIntrinsic>(Val: CI), |
| 2652 | MIRBuilder); |
| 2653 | case Intrinsic::experimental_convergence_anchor: |
| 2654 | case Intrinsic::experimental_convergence_entry: |
| 2655 | case Intrinsic::experimental_convergence_loop: |
| 2656 | return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder); |
| 2657 | } |
| 2658 | return false; |
| 2659 | } |
| 2660 | |
| 2661 | bool IRTranslator::translateInlineAsm(const CallBase &CB, |
| 2662 | MachineIRBuilder &MIRBuilder) { |
| 2663 | if (containsBF16Type(U: CB)) |
| 2664 | return false; |
| 2665 | |
| 2666 | const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering(); |
| 2667 | |
| 2668 | if (!ALI) { |
| 2669 | LLVM_DEBUG( |
| 2670 | dbgs() << "Inline asm lowering is not supported for this target yet\n" ); |
| 2671 | return false; |
| 2672 | } |
| 2673 | |
| 2674 | return ALI->lowerInlineAsm( |
| 2675 | MIRBuilder, CB, GetOrCreateVRegs: [&](const Value &Val) { return getOrCreateVRegs(Val); }); |
| 2676 | } |
| 2677 | |
| 2678 | bool IRTranslator::translateCallBase(const CallBase &CB, |
| 2679 | MachineIRBuilder &MIRBuilder) { |
| 2680 | ArrayRef<Register> Res = getOrCreateVRegs(Val: CB); |
| 2681 | |
| 2682 | SmallVector<ArrayRef<Register>, 8> Args; |
| 2683 | Register SwiftInVReg = 0; |
| 2684 | Register SwiftErrorVReg = 0; |
| 2685 | for (const auto &Arg : CB.args()) { |
| 2686 | if (CLI->supportSwiftError() && isSwiftError(V: Arg)) { |
assert(SwiftInVReg == 0 && "Expected only one swift error argument");
| 2688 | LLT Ty = getLLTForType(Ty&: *Arg->getType(), DL: *DL); |
| 2689 | SwiftInVReg = MRI->createGenericVirtualRegister(Ty); |
| 2690 | MIRBuilder.buildCopy(Res: SwiftInVReg, Op: SwiftError.getOrCreateVRegUseAt( |
| 2691 | &CB, &MIRBuilder.getMBB(), Arg)); |
| 2692 | Args.emplace_back(Args: ArrayRef(SwiftInVReg)); |
| 2693 | SwiftErrorVReg = |
| 2694 | SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg); |
| 2695 | continue; |
| 2696 | } |
| 2697 | Args.push_back(Elt: getOrCreateVRegs(Val: *Arg)); |
| 2698 | } |
| 2699 | |
| 2700 | if (auto *CI = dyn_cast<CallInst>(Val: &CB)) { |
| 2701 | if (ORE->enabled()) { |
| 2702 | if (MemoryOpRemark::canHandle(I: CI, TLI: *LibInfo)) { |
| 2703 | MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize" , *DL, *LibInfo); |
| 2704 | R.visit(I: CI); |
| 2705 | } |
| 2706 | } |
| 2707 | } |
| 2708 | |
| 2709 | std::optional<CallLowering::PtrAuthInfo> PAI; |
| 2710 | if (auto Bundle = CB.getOperandBundle(ID: LLVMContext::OB_ptrauth)) { |
| 2711 | // Functions should never be ptrauth-called directly. |
assert(!CB.getCalledFunction() && "invalid direct ptrauth call");
| 2713 | |
| 2714 | const Value *Key = Bundle->Inputs[0]; |
| 2715 | const Value *Discriminator = Bundle->Inputs[1]; |
| 2716 | |
| 2717 | // Look through ptrauth constants to try to eliminate the matching bundle |
| 2718 | // and turn this into a direct call with no ptrauth. |
| 2719 | // CallLowering will use the raw pointer if it doesn't find the PAI. |
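// E.g. a call to a ConstantPtrAuth wrapping @f whose key and discriminator
// match the bundle can be lowered as a plain direct call to @f.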
| 2720 | const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(Val: CB.getCalledOperand()); |
| 2721 | if (!CalleeCPA || !isa<Function>(Val: CalleeCPA->getPointer()) || |
| 2722 | !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, DL: *DL)) { |
| 2723 | // If we can't make it direct, package the bundle into PAI. |
| 2724 | Register DiscReg = getOrCreateVReg(Val: *Discriminator); |
| 2725 | PAI = CallLowering::PtrAuthInfo{.Key: cast<ConstantInt>(Val: Key)->getZExtValue(), |
| 2726 | .Discriminator: DiscReg}; |
| 2727 | } |
| 2728 | } |
| 2729 | |
| 2730 | Register ConvergenceCtrlToken = 0; |
| 2731 | if (auto Bundle = CB.getOperandBundle(ID: LLVMContext::OB_convergencectrl)) { |
| 2732 | const auto &Token = *Bundle->Inputs[0].get(); |
| 2733 | ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token); |
| 2734 | } |
| 2735 | |
| 2736 | // We don't set HasCalls on MFI here yet because call lowering may decide to |
| 2737 | // optimize into tail calls. Instead, we defer that to selection where a final |
| 2738 | // scan is done to check if any instructions are calls. |
| 2739 | bool Success = CLI->lowerCall( |
| 2740 | MIRBuilder, Call: CB, ResRegs: Res, ArgRegs: Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken, |
| 2741 | GetCalleeReg: [&]() { return getOrCreateVReg(Val: *CB.getCalledOperand()); }); |
| 2742 | |
| 2743 | // Check if we just inserted a tail call. |
| 2744 | if (Success) { |
assert(!HasTailCall && "Can't tail call return twice from block?");
| 2746 | const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); |
| 2747 | HasTailCall = TII->isTailCall(Inst: *std::prev(x: MIRBuilder.getInsertPt())); |
| 2748 | } |
| 2749 | |
| 2750 | return Success; |
| 2751 | } |
| 2752 | |
| 2753 | bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { |
| 2754 | if (containsBF16Type(U)) |
| 2755 | return false; |
| 2756 | |
| 2757 | const CallInst &CI = cast<CallInst>(Val: U); |
| 2758 | const Function *F = CI.getCalledFunction(); |
| 2759 | |
| 2760 | // FIXME: support Windows dllimport function calls and calls through |
| 2761 | // weak symbols. |
| 2762 | if (F && (F->hasDLLImportStorageClass() || |
| 2763 | (MF->getTarget().getTargetTriple().isOSWindows() && |
| 2764 | F->hasExternalWeakLinkage()))) |
| 2765 | return false; |
| 2766 | |
| 2767 | // FIXME: support control flow guard targets. |
| 2768 | if (CI.countOperandBundlesOfType(ID: LLVMContext::OB_cfguardtarget)) |
| 2769 | return false; |
| 2770 | |
| 2771 | // FIXME: support statepoints and related. |
| 2772 | if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(Val: U)) |
| 2773 | return false; |
| 2774 | |
| 2775 | if (CI.isInlineAsm()) |
| 2776 | return translateInlineAsm(CB: CI, MIRBuilder); |
| 2777 | |
| 2778 | diagnoseDontCall(CI); |
| 2779 | |
| 2780 | Intrinsic::ID ID = F ? F->getIntrinsicID() : Intrinsic::not_intrinsic; |
| 2781 | if (!F || ID == Intrinsic::not_intrinsic) |
| 2782 | return translateCallBase(CB: CI, MIRBuilder); |
| 2783 | |
assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
| 2785 | |
| 2786 | if (translateKnownIntrinsic(CI, ID, MIRBuilder)) |
| 2787 | return true; |
| 2788 | |
| 2789 | ArrayRef<Register> ResultRegs; |
| 2790 | if (!CI.getType()->isVoidTy()) |
| 2791 | ResultRegs = getOrCreateVRegs(Val: CI); |
| 2792 | |
| 2793 | // Ignore the callsite attributes. Backend code is most likely not expecting |
| 2794 | // an intrinsic to sometimes have side effects and sometimes not. |
| 2795 | MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, Res: ResultRegs); |
| 2796 | if (isa<FPMathOperator>(Val: CI)) |
| 2797 | MIB->copyIRFlags(I: CI); |
| 2798 | |
| 2799 | for (const auto &Arg : enumerate(First: CI.args())) { |
| 2800 | // If this is required to be an immediate, don't materialize it in a |
| 2801 | // register. |
| 2802 | if (CI.paramHasAttr(ArgNo: Arg.index(), Kind: Attribute::ImmArg)) { |
| 2803 | if (ConstantInt *CI = dyn_cast<ConstantInt>(Val: Arg.value())) { |
| 2804 | // imm arguments are more convenient than cimm (and realistically |
| 2805 | // probably sufficient), so use them. |
| 2806 | assert(CI->getBitWidth() <= 64 && |
| 2807 | "large intrinsic immediates not handled" ); |
| 2808 | MIB.addImm(Val: CI->getSExtValue()); |
| 2809 | } else { |
| 2810 | MIB.addFPImm(Val: cast<ConstantFP>(Val: Arg.value())); |
| 2811 | } |
| 2812 | } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Val: Arg.value())) { |
| 2813 | auto *MD = MDVal->getMetadata(); |
| 2814 | auto *MDN = dyn_cast<MDNode>(Val: MD); |
| 2815 | if (!MDN) { |
| 2816 | if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(Val: MD)) |
| 2817 | MDN = MDNode::get(Context&: MF->getFunction().getContext(), MDs: ConstMD); |
| 2818 | else // This was probably an MDString. |
| 2819 | return false; |
| 2820 | } |
| 2821 | MIB.addMetadata(MD: MDN); |
| 2822 | } else { |
| 2823 | ArrayRef<Register> VRegs = getOrCreateVRegs(Val: *Arg.value()); |
| 2824 | if (VRegs.size() > 1) |
| 2825 | return false; |
| 2826 | MIB.addUse(RegNo: VRegs[0]); |
| 2827 | } |
| 2828 | } |
| 2829 | |
| 2830 | // Add a MachineMemOperand if it is a target mem intrinsic. |
| 2831 | TargetLowering::IntrinsicInfo Info; |
| 2832 | // TODO: Add a GlobalISel version of getTgtMemIntrinsic. |
| 2833 | if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) { |
| 2834 | Align Alignment = Info.align.value_or( |
| 2835 | u: DL->getABITypeAlign(Ty: Info.memVT.getTypeForEVT(Context&: F->getContext()))); |
| 2836 | LLT MemTy = Info.memVT.isSimple() |
| 2837 | ? getLLTForMVT(Ty: Info.memVT.getSimpleVT()) |
| 2838 | : LLT::scalar(SizeInBits: Info.memVT.getStoreSizeInBits()); |
| 2839 | |
| 2840 | // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic |
| 2841 | // didn't yield anything useful. |
| 2842 | MachinePointerInfo MPI; |
| 2843 | if (Info.ptrVal) |
| 2844 | MPI = MachinePointerInfo(Info.ptrVal, Info.offset); |
| 2845 | else if (Info.fallbackAddressSpace) |
| 2846 | MPI = MachinePointerInfo(*Info.fallbackAddressSpace); |
| 2847 | MIB.addMemOperand(MMO: MF->getMachineMemOperand( |
| 2848 | PtrInfo: MPI, f: Info.flags, MemTy, base_alignment: Alignment, AAInfo: CI.getAAMetadata(), |
| 2849 | /*Ranges=*/nullptr, SSID: Info.ssid, Ordering: Info.order, FailureOrdering: Info.failureOrder)); |
| 2850 | } |
| 2851 | |
| 2852 | if (CI.isConvergent()) { |
| 2853 | if (auto Bundle = CI.getOperandBundle(ID: LLVMContext::OB_convergencectrl)) { |
| 2854 | auto *Token = Bundle->Inputs[0].get(); |
| 2855 | Register TokenReg = getOrCreateVReg(Val: *Token); |
| 2856 | MIB.addUse(RegNo: TokenReg, Flags: RegState::Implicit); |
| 2857 | } |
| 2858 | } |
| 2859 | |
| 2860 | return true; |
| 2861 | } |
| 2862 | |
| 2863 | bool IRTranslator::findUnwindDestinations( |
| 2864 | const BasicBlock *EHPadBB, |
| 2865 | BranchProbability Prob, |
| 2866 | SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>> |
| 2867 | &UnwindDests) { |
| 2868 | EHPersonality Personality = classifyEHPersonality( |
| 2869 | Pers: EHPadBB->getParent()->getFunction().getPersonalityFn()); |
| 2870 | bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX; |
| 2871 | bool IsCoreCLR = Personality == EHPersonality::CoreCLR; |
| 2872 | bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX; |
| 2873 | bool IsSEH = isAsynchronousEHPersonality(Pers: Personality); |
| 2874 | |
| 2875 | if (IsWasmCXX) { |
| 2876 | // Ignore this for now. |
| 2877 | return false; |
| 2878 | } |
| 2879 | |
| 2880 | while (EHPadBB) { |
| 2881 | BasicBlock::const_iterator Pad = EHPadBB->getFirstNonPHIIt(); |
| 2882 | BasicBlock *NewEHPadBB = nullptr; |
| 2883 | if (isa<LandingPadInst>(Val: Pad)) { |
| 2884 | // Stop on landingpads. They are not funclets. |
| 2885 | UnwindDests.emplace_back(Args: &getMBB(BB: *EHPadBB), Args&: Prob); |
| 2886 | break; |
| 2887 | } |
| 2888 | if (isa<CleanupPadInst>(Val: Pad)) { |
| 2889 | // Stop on cleanup pads. Cleanups are always funclet entries for all known |
| 2890 | // personalities. |
| 2891 | UnwindDests.emplace_back(Args: &getMBB(BB: *EHPadBB), Args&: Prob); |
| 2892 | UnwindDests.back().first->setIsEHScopeEntry(); |
| 2893 | UnwindDests.back().first->setIsEHFuncletEntry(); |
| 2894 | break; |
| 2895 | } |
| 2896 | if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Val&: Pad)) { |
| 2897 | // Add the catchpad handlers to the possible destinations. |
| 2898 | for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) { |
| 2899 | UnwindDests.emplace_back(Args: &getMBB(BB: *CatchPadBB), Args&: Prob); |
| 2900 | // For MSVC++ and the CLR, catchblocks are funclets and need prologues. |
| 2901 | if (IsMSVCCXX || IsCoreCLR) |
| 2902 | UnwindDests.back().first->setIsEHFuncletEntry(); |
| 2903 | if (!IsSEH) |
| 2904 | UnwindDests.back().first->setIsEHScopeEntry(); |
| 2905 | } |
| 2906 | NewEHPadBB = CatchSwitch->getUnwindDest(); |
| 2907 | } else { |
| 2908 | continue; |
| 2909 | } |
| 2910 | |
| 2911 | BranchProbabilityInfo *BPI = FuncInfo.BPI; |
| 2912 | if (BPI && NewEHPadBB) |
| 2913 | Prob *= BPI->getEdgeProbability(Src: EHPadBB, Dst: NewEHPadBB); |
| 2914 | EHPadBB = NewEHPadBB; |
| 2915 | } |
| 2916 | return true; |
| 2917 | } |
| 2918 | |
| 2919 | bool IRTranslator::translateInvoke(const User &U, |
| 2920 | MachineIRBuilder &MIRBuilder) { |
| 2921 | const InvokeInst &I = cast<InvokeInst>(Val: U); |
| 2922 | MCContext &Context = MF->getContext(); |
| 2923 | |
| 2924 | const BasicBlock *ReturnBB = I.getSuccessor(i: 0); |
| 2925 | const BasicBlock *EHPadBB = I.getSuccessor(i: 1); |
| 2926 | |
| 2927 | const Function *Fn = I.getCalledFunction(); |
| 2928 | |
| 2929 | // FIXME: support invoking patchpoint and statepoint intrinsics. |
| 2930 | if (Fn && Fn->isIntrinsic()) |
| 2931 | return false; |
| 2932 | |
| 2933 | // FIXME: support whatever these are. |
| 2934 | if (I.hasDeoptState()) |
| 2935 | return false; |
| 2936 | |
| 2937 | // FIXME: support control flow guard targets. |
| 2938 | if (I.countOperandBundlesOfType(ID: LLVMContext::OB_cfguardtarget)) |
| 2939 | return false; |
| 2940 | |
| 2941 | // FIXME: support Windows exception handling. |
| 2942 | if (!isa<LandingPadInst>(Val: EHPadBB->getFirstNonPHIIt())) |
| 2943 | return false; |
| 2944 | |
| 2945 | // FIXME: support Windows dllimport function calls and calls through |
| 2946 | // weak symbols. |
| 2947 | if (Fn && (Fn->hasDLLImportStorageClass() || |
| 2948 | (MF->getTarget().getTargetTriple().isOSWindows() && |
| 2949 | Fn->hasExternalWeakLinkage()))) |
| 2950 | return false; |
| 2951 | |
| 2952 | bool LowerInlineAsm = I.isInlineAsm(); |
| 2953 | bool NeedEHLabel = true; |
| 2954 | |
| 2955 | // Emit the actual call, bracketed by EH_LABELs so that the MF knows about |
| 2956 | // the region covered by the try. |
| 2957 | MCSymbol *BeginSymbol = nullptr; |
| 2958 | if (NeedEHLabel) { |
| 2959 | MIRBuilder.buildInstr(Opcode: TargetOpcode::G_INVOKE_REGION_START); |
| 2960 | BeginSymbol = Context.createTempSymbol(); |
| 2961 | MIRBuilder.buildInstr(Opcode: TargetOpcode::EH_LABEL).addSym(Sym: BeginSymbol); |
| 2962 | } |
| 2963 | |
| 2964 | if (LowerInlineAsm) { |
| 2965 | if (!translateInlineAsm(CB: I, MIRBuilder)) |
| 2966 | return false; |
| 2967 | } else if (!translateCallBase(CB: I, MIRBuilder)) |
| 2968 | return false; |
| 2969 | |
| 2970 | MCSymbol *EndSymbol = nullptr; |
| 2971 | if (NeedEHLabel) { |
| 2972 | EndSymbol = Context.createTempSymbol(); |
| 2973 | MIRBuilder.buildInstr(Opcode: TargetOpcode::EH_LABEL).addSym(Sym: EndSymbol); |
| 2974 | } |
| 2975 | |
| 2976 | SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests; |
| 2977 | BranchProbabilityInfo *BPI = FuncInfo.BPI; |
| 2978 | MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB(); |
| 2979 | BranchProbability EHPadBBProb = |
| 2980 | BPI ? BPI->getEdgeProbability(Src: InvokeMBB->getBasicBlock(), Dst: EHPadBB) |
| 2981 | : BranchProbability::getZero(); |
| 2982 | |
| 2983 | if (!findUnwindDestinations(EHPadBB, Prob: EHPadBBProb, UnwindDests)) |
| 2984 | return false; |
| 2985 | |
| 2986 | MachineBasicBlock &EHPadMBB = getMBB(BB: *EHPadBB), |
| 2987 | &ReturnMBB = getMBB(BB: *ReturnBB); |
| 2988 | // Update successor info. |
| 2989 | addSuccessorWithProb(Src: InvokeMBB, Dst: &ReturnMBB); |
| 2990 | for (auto &UnwindDest : UnwindDests) { |
| 2991 | UnwindDest.first->setIsEHPad(); |
| 2992 | addSuccessorWithProb(Src: InvokeMBB, Dst: UnwindDest.first, Prob: UnwindDest.second); |
| 2993 | } |
| 2994 | InvokeMBB->normalizeSuccProbs(); |
| 2995 | |
| 2996 | if (NeedEHLabel) { |
| 2997 | assert(BeginSymbol && "Expected a begin symbol!" ); |
| 2998 | assert(EndSymbol && "Expected an end symbol!" ); |
| 2999 | MF->addInvoke(LandingPad: &EHPadMBB, BeginLabel: BeginSymbol, EndLabel: EndSymbol); |
| 3000 | } |
| 3001 | |
| 3002 | MIRBuilder.buildBr(Dest&: ReturnMBB); |
| 3003 | return true; |
| 3004 | } |
| 3005 | |
| 3006 | bool IRTranslator::translateCallBr(const User &U, |
| 3007 | MachineIRBuilder &MIRBuilder) { |
| 3008 | // FIXME: Implement this. |
| 3009 | return false; |
| 3010 | } |
| 3011 | |
| 3012 | bool IRTranslator::translateLandingPad(const User &U, |
| 3013 | MachineIRBuilder &MIRBuilder) { |
| 3014 | const LandingPadInst &LP = cast<LandingPadInst>(Val: U); |
| 3015 | |
| 3016 | MachineBasicBlock &MBB = MIRBuilder.getMBB(); |
| 3017 | |
| 3018 | MBB.setIsEHPad(); |
| 3019 | |
| 3020 | // If there aren't registers to copy the values into (e.g., during SjLj |
| 3021 | // exceptions), then don't bother. |
| 3022 | const Constant *PersonalityFn = MF->getFunction().getPersonalityFn(); |
| 3023 | if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 && |
| 3024 | TLI->getExceptionSelectorRegister(PersonalityFn) == 0) |
| 3025 | return true; |
| 3026 | |
// If the landingpad's return type is token type, we don't create virtual
// registers for its exception pointer and selector values. The extraction of
// an exception pointer or selector value from a token-typed landingpad is
// not currently supported.
| 3031 | if (LP.getType()->isTokenTy()) |
| 3032 | return true; |
| 3033 | |
| 3034 | // Add a label to mark the beginning of the landing pad. Deletion of the |
| 3035 | // landing pad can thus be detected via the MachineModuleInfo. |
| 3036 | MIRBuilder.buildInstr(Opcode: TargetOpcode::EH_LABEL) |
| 3037 | .addSym(Sym: MF->addLandingPad(LandingPad: &MBB)); |
| 3038 | |
| 3039 | // If the unwinder does not preserve all registers, ensure that the |
| 3040 | // function marks the clobbered registers as used. |
| 3041 | const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo(); |
| 3042 | if (auto *RegMask = TRI.getCustomEHPadPreservedMask(MF: *MF)) |
| 3043 | MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask); |
| 3044 | |
| 3045 | LLT Ty = getLLTForType(Ty&: *LP.getType(), DL: *DL); |
| 3046 | Register Undef = MRI->createGenericVirtualRegister(Ty); |
| 3047 | MIRBuilder.buildUndef(Res: Undef); |
| 3048 | |
| 3049 | SmallVector<LLT, 2> Tys; |
| 3050 | for (Type *Ty : cast<StructType>(Val: LP.getType())->elements()) |
| 3051 | Tys.push_back(Elt: getLLTForType(Ty&: *Ty, DL: *DL)); |
assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
| 3053 | |
| 3054 | // Mark exception register as live in. |
| 3055 | Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn); |
| 3056 | if (!ExceptionReg) |
| 3057 | return false; |
| 3058 | |
| 3059 | MBB.addLiveIn(PhysReg: ExceptionReg); |
| 3060 | ArrayRef<Register> ResRegs = getOrCreateVRegs(Val: LP); |
| 3061 | MIRBuilder.buildCopy(Res: ResRegs[0], Op: ExceptionReg); |
| 3062 | |
| 3063 | Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn); |
| 3064 | if (!SelectorReg) |
| 3065 | return false; |
| 3066 | |
| 3067 | MBB.addLiveIn(PhysReg: SelectorReg); |
| 3068 | Register PtrVReg = MRI->createGenericVirtualRegister(Ty: Tys[0]); |
| 3069 | MIRBuilder.buildCopy(Res: PtrVReg, Op: SelectorReg); |
| 3070 | MIRBuilder.buildCast(Dst: ResRegs[1], Src: PtrVReg); |
| 3071 | |
| 3072 | return true; |
| 3073 | } |
| 3074 | |
| 3075 | bool IRTranslator::translateAlloca(const User &U, |
| 3076 | MachineIRBuilder &MIRBuilder) { |
| 3077 | auto &AI = cast<AllocaInst>(Val: U); |
| 3078 | |
| 3079 | if (AI.isSwiftError()) |
| 3080 | return true; |
| 3081 | |
| 3082 | if (AI.isStaticAlloca()) { |
| 3083 | Register Res = getOrCreateVReg(Val: AI); |
| 3084 | int FI = getOrCreateFrameIndex(AI); |
| 3085 | MIRBuilder.buildFrameIndex(Res, Idx: FI); |
| 3086 | return true; |
| 3087 | } |
| 3088 | |
| 3089 | // FIXME: support stack probing for Windows. |
| 3090 | if (MF->getTarget().getTargetTriple().isOSWindows()) |
| 3091 | return false; |
| 3092 | |
| 3093 | // Now we're in the harder dynamic case. |
| 3094 | Register NumElts = getOrCreateVReg(Val: *AI.getArraySize()); |
| 3095 | Type *IntPtrIRTy = DL->getIntPtrType(AI.getType()); |
| 3096 | LLT IntPtrTy = getLLTForType(Ty&: *IntPtrIRTy, DL: *DL); |
| 3097 | if (MRI->getType(Reg: NumElts) != IntPtrTy) { |
| 3098 | Register ExtElts = MRI->createGenericVirtualRegister(Ty: IntPtrTy); |
| 3099 | MIRBuilder.buildZExtOrTrunc(Res: ExtElts, Op: NumElts); |
| 3100 | NumElts = ExtElts; |
| 3101 | } |
| 3102 | |
| 3103 | Type *Ty = AI.getAllocatedType(); |
| 3104 | |
| 3105 | Register AllocSize = MRI->createGenericVirtualRegister(Ty: IntPtrTy); |
| 3106 | Register TySize = |
| 3107 | getOrCreateVReg(Val: *ConstantInt::get(Ty: IntPtrIRTy, V: DL->getTypeAllocSize(Ty))); |
| 3108 | MIRBuilder.buildMul(Dst: AllocSize, Src0: NumElts, Src1: TySize); |
| 3109 | |
// Round the size of the allocation up to the stack alignment size
// by adding SA-1 to the size. This doesn't overflow because we're computing
// an address inside an alloca.
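// E.g. with a 16-byte stack alignment:
//   AllocAdd     = AllocSize + 15
//   AlignedAlloc = AllocAdd & ~15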
| 3113 | Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign(); |
| 3114 | auto SAMinusOne = MIRBuilder.buildConstant(Res: IntPtrTy, Val: StackAlign.value() - 1); |
| 3115 | auto AllocAdd = MIRBuilder.buildAdd(Dst: IntPtrTy, Src0: AllocSize, Src1: SAMinusOne, |
| 3116 | Flags: MachineInstr::NoUWrap); |
| 3117 | auto AlignCst = |
| 3118 | MIRBuilder.buildConstant(Res: IntPtrTy, Val: ~(uint64_t)(StackAlign.value() - 1)); |
| 3119 | auto AlignedAlloc = MIRBuilder.buildAnd(Dst: IntPtrTy, Src0: AllocAdd, Src1: AlignCst); |
| 3120 | |
| 3121 | Align Alignment = std::max(a: AI.getAlign(), b: DL->getPrefTypeAlign(Ty)); |
| 3122 | if (Alignment <= StackAlign) |
| 3123 | Alignment = Align(1); |
| 3124 | MIRBuilder.buildDynStackAlloc(Res: getOrCreateVReg(Val: AI), Size: AlignedAlloc, Alignment); |
| 3125 | |
| 3126 | MF->getFrameInfo().CreateVariableSizedObject(Alignment, Alloca: &AI); |
| 3127 | assert(MF->getFrameInfo().hasVarSizedObjects()); |
| 3128 | return true; |
| 3129 | } |
| 3130 | |
| 3131 | bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) { |
| 3132 | // FIXME: We may need more info about the type. Because of how LLT works, |
| 3133 | // we're completely discarding the i64/double distinction here (amongst |
| 3134 | // others). Fortunately the ABIs I know of where that matters don't use va_arg |
| 3135 | // anyway but that's not guaranteed. |
| 3136 | MIRBuilder.buildInstr(Opc: TargetOpcode::G_VAARG, DstOps: {getOrCreateVReg(Val: U)}, |
| 3137 | SrcOps: {getOrCreateVReg(Val: *U.getOperand(i: 0)), |
| 3138 | DL->getABITypeAlign(Ty: U.getType()).value()}); |
| 3139 | return true; |
| 3140 | } |
| 3141 | |
| 3142 | bool IRTranslator::translateUnreachable(const User &U, |
| 3143 | MachineIRBuilder &MIRBuilder) { |
| 3144 | auto &UI = cast<UnreachableInst>(Val: U); |
| 3145 | if (!UI.shouldLowerToTrap(TrapUnreachable: MF->getTarget().Options.TrapUnreachable, |
| 3146 | NoTrapAfterNoreturn: MF->getTarget().Options.NoTrapAfterNoreturn)) |
| 3147 | return true; |
| 3148 | |
| 3149 | MIRBuilder.buildTrap(); |
| 3150 | return true; |
| 3151 | } |
| 3152 | |
| 3153 | bool IRTranslator::translateInsertElement(const User &U, |
| 3154 | MachineIRBuilder &MIRBuilder) { |
| 3155 | // If it is a <1 x Ty> vector, use the scalar as it is |
| 3156 | // not a legal vector type in LLT. |
| 3157 | if (auto *FVT = dyn_cast<FixedVectorType>(Val: U.getType()); |
| 3158 | FVT && FVT->getNumElements() == 1) |
| 3159 | return translateCopy(U, V: *U.getOperand(i: 1), MIRBuilder); |
| 3160 | |
| 3161 | Register Res = getOrCreateVReg(Val: U); |
| 3162 | Register Val = getOrCreateVReg(Val: *U.getOperand(i: 0)); |
| 3163 | Register Elt = getOrCreateVReg(Val: *U.getOperand(i: 1)); |
| 3164 | unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(DL: *DL); |
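// Normalize the index operand to the target's preferred vector index width,
// e.g. rebuild a constant i32 index as an s64 constant on targets that
// prefer 64-bit vector indices; non-constant indices are zero-extended or
// truncated below.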
| 3165 | Register Idx; |
| 3166 | if (auto *CI = dyn_cast<ConstantInt>(Val: U.getOperand(i: 2))) { |
| 3167 | if (CI->getBitWidth() != PreferredVecIdxWidth) { |
| 3168 | APInt NewIdx = CI->getValue().zextOrTrunc(width: PreferredVecIdxWidth); |
| 3169 | auto *NewIdxCI = ConstantInt::get(Context&: CI->getContext(), V: NewIdx); |
| 3170 | Idx = getOrCreateVReg(Val: *NewIdxCI); |
| 3171 | } |
| 3172 | } |
| 3173 | if (!Idx) |
| 3174 | Idx = getOrCreateVReg(Val: *U.getOperand(i: 2)); |
| 3175 | if (MRI->getType(Reg: Idx).getSizeInBits() != PreferredVecIdxWidth) { |
| 3176 | const LLT VecIdxTy = LLT::scalar(SizeInBits: PreferredVecIdxWidth); |
| 3177 | Idx = MIRBuilder.buildZExtOrTrunc(Res: VecIdxTy, Op: Idx).getReg(Idx: 0); |
| 3178 | } |
| 3179 | MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); |
| 3180 | return true; |
| 3181 | } |
| 3182 | |
| 3183 | bool IRTranslator::translateInsertVector(const User &U, |
| 3184 | MachineIRBuilder &MIRBuilder) { |
| 3185 | Register Dst = getOrCreateVReg(Val: U); |
| 3186 | Register Vec = getOrCreateVReg(Val: *U.getOperand(i: 0)); |
| 3187 | Register Elt = getOrCreateVReg(Val: *U.getOperand(i: 1)); |
| 3188 | |
| 3189 | ConstantInt *CI = cast<ConstantInt>(Val: U.getOperand(i: 2)); |
| 3190 | unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(DL: *DL); |
| 3191 | |
| 3192 | // Resize Index to preferred index width. |
| 3193 | if (CI->getBitWidth() != PreferredVecIdxWidth) { |
| 3194 | APInt NewIdx = CI->getValue().zextOrTrunc(width: PreferredVecIdxWidth); |
| 3195 | CI = ConstantInt::get(Context&: CI->getContext(), V: NewIdx); |
| 3196 | } |
| 3197 | |
| 3198 | // If it is a <1 x Ty> vector, we have to use other means. |
| 3199 | if (auto *ResultType = dyn_cast<FixedVectorType>(Val: U.getOperand(i: 1)->getType()); |
| 3200 | ResultType && ResultType->getNumElements() == 1) { |
| 3201 | if (auto *InputType = dyn_cast<FixedVectorType>(Val: U.getOperand(i: 0)->getType()); |
| 3202 | InputType && InputType->getNumElements() == 1) { |
| 3203 | // We are inserting an illegal fixed vector into an illegal |
| 3204 | // fixed vector, use the scalar as it is not a legal vector type |
| 3205 | // in LLT. |
| 3206 | return translateCopy(U, V: *U.getOperand(i: 0), MIRBuilder); |
| 3207 | } |
| 3208 | if (isa<FixedVectorType>(Val: U.getOperand(i: 0)->getType())) { |
| 3209 | // We are inserting an illegal fixed vector into a legal fixed |
| 3210 | // vector, use the scalar as it is not a legal vector type in |
| 3211 | // LLT. |
| 3212 | Register Idx = getOrCreateVReg(Val: *CI); |
| 3213 | MIRBuilder.buildInsertVectorElement(Res: Dst, Val: Vec, Elt, Idx); |
| 3214 | return true; |
| 3215 | } |
| 3216 | if (isa<ScalableVectorType>(Val: U.getOperand(i: 0)->getType())) { |
| 3217 | // We are inserting an illegal fixed vector into a scalable |
| 3218 | // vector, use a scalar element insert. |
| 3219 | LLT VecIdxTy = LLT::scalar(SizeInBits: PreferredVecIdxWidth); |
| 3220 | Register Idx = getOrCreateVReg(Val: *CI); |
| 3221 | auto ScaledIndex = MIRBuilder.buildMul( |
| 3222 | Dst: VecIdxTy, Src0: MIRBuilder.buildVScale(Res: VecIdxTy, MinElts: 1), Src1: Idx); |
| 3223 | MIRBuilder.buildInsertVectorElement(Res: Dst, Val: Vec, Elt, Idx: ScaledIndex); |
| 3224 | return true; |
| 3225 | } |
| 3226 | } |
| 3227 | |
| 3228 | MIRBuilder.buildInsertSubvector( |
| 3229 | Res: getOrCreateVReg(Val: U), Src0: getOrCreateVReg(Val: *U.getOperand(i: 0)), |
| 3230 | Src1: getOrCreateVReg(Val: *U.getOperand(i: 1)), Index: CI->getZExtValue()); |
| 3231 | return true; |
| 3232 | } |
| 3233 | |
bool IRTranslator::translateExtractElement(const User &U,
| 3235 | MachineIRBuilder &MIRBuilder) { |
| 3236 | // If it is a <1 x Ty> vector, use the scalar as it is |
| 3237 | // not a legal vector type in LLT. |
| 3238 | if (const FixedVectorType *FVT = |
| 3239 | dyn_cast<FixedVectorType>(Val: U.getOperand(i: 0)->getType())) |
| 3240 | if (FVT->getNumElements() == 1) |
| 3241 | return translateCopy(U, V: *U.getOperand(i: 0), MIRBuilder); |
| 3242 | |
| 3243 | Register Res = getOrCreateVReg(Val: U); |
| 3244 | Register Val = getOrCreateVReg(Val: *U.getOperand(i: 0)); |
| 3245 | unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(DL: *DL); |
| 3246 | Register Idx; |
| 3247 | if (auto *CI = dyn_cast<ConstantInt>(Val: U.getOperand(i: 1))) { |
| 3248 | if (CI->getBitWidth() != PreferredVecIdxWidth) { |
| 3249 | APInt NewIdx = CI->getValue().zextOrTrunc(width: PreferredVecIdxWidth); |
| 3250 | auto *NewIdxCI = ConstantInt::get(Context&: CI->getContext(), V: NewIdx); |
| 3251 | Idx = getOrCreateVReg(Val: *NewIdxCI); |
| 3252 | } |
| 3253 | } |
| 3254 | if (!Idx) |
| 3255 | Idx = getOrCreateVReg(Val: *U.getOperand(i: 1)); |
| 3256 | if (MRI->getType(Reg: Idx).getSizeInBits() != PreferredVecIdxWidth) { |
| 3257 | const LLT VecIdxTy = LLT::scalar(SizeInBits: PreferredVecIdxWidth); |
| 3258 | Idx = MIRBuilder.buildZExtOrTrunc(Res: VecIdxTy, Op: Idx).getReg(Idx: 0); |
| 3259 | } |
| 3260 | MIRBuilder.buildExtractVectorElement(Res, Val, Idx); |
| 3261 | return true; |
| 3262 | } |
| 3263 | |
bool IRTranslator::translateExtractVector(const User &U,
| 3265 | MachineIRBuilder &MIRBuilder) { |
| 3266 | Register Res = getOrCreateVReg(Val: U); |
| 3267 | Register Vec = getOrCreateVReg(Val: *U.getOperand(i: 0)); |
| 3268 | ConstantInt *CI = cast<ConstantInt>(Val: U.getOperand(i: 1)); |
| 3269 | unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(DL: *DL); |
| 3270 | |
| 3271 | // Resize Index to preferred index width. |
| 3272 | if (CI->getBitWidth() != PreferredVecIdxWidth) { |
| 3273 | APInt NewIdx = CI->getValue().zextOrTrunc(width: PreferredVecIdxWidth); |
| 3274 | CI = ConstantInt::get(Context&: CI->getContext(), V: NewIdx); |
| 3275 | } |
| 3276 | |
| 3277 | // If it is a <1 x Ty> vector, we have to use other means. |
| 3278 | if (auto *ResultType = dyn_cast<FixedVectorType>(Val: U.getType()); |
| 3279 | ResultType && ResultType->getNumElements() == 1) { |
| 3280 | if (auto *InputType = dyn_cast<FixedVectorType>(Val: U.getOperand(i: 0)->getType()); |
| 3281 | InputType && InputType->getNumElements() == 1) { |
| 3282 | // We are extracting an illegal fixed vector from an illegal fixed vector, |
| 3283 | // use the scalar as it is not a legal vector type in LLT. |
| 3284 | return translateCopy(U, V: *U.getOperand(i: 0), MIRBuilder); |
| 3285 | } |
| 3286 | if (isa<FixedVectorType>(Val: U.getOperand(i: 0)->getType())) { |
| 3287 | // We are extracting an illegal fixed vector from a legal fixed |
| 3288 | // vector, use the scalar as it is not a legal vector type in |
| 3289 | // LLT. |
| 3290 | Register Idx = getOrCreateVReg(Val: *CI); |
| 3291 | MIRBuilder.buildExtractVectorElement(Res, Val: Vec, Idx); |
| 3292 | return true; |
| 3293 | } |
| 3294 | if (isa<ScalableVectorType>(Val: U.getOperand(i: 0)->getType())) { |
| 3295 | // We are extracting an illegal fixed vector from a scalable |
| 3296 | // vector, use a scalar element extract. |
| 3297 | LLT VecIdxTy = LLT::scalar(SizeInBits: PreferredVecIdxWidth); |
| 3298 | Register Idx = getOrCreateVReg(Val: *CI); |
| 3299 | auto ScaledIndex = MIRBuilder.buildMul( |
| 3300 | Dst: VecIdxTy, Src0: MIRBuilder.buildVScale(Res: VecIdxTy, MinElts: 1), Src1: Idx); |
| 3301 | MIRBuilder.buildExtractVectorElement(Res, Val: Vec, Idx: ScaledIndex); |
| 3302 | return true; |
| 3303 | } |
| 3304 | } |
| 3305 | |
| 3306 | MIRBuilder.buildExtractSubvector(Res: getOrCreateVReg(Val: U), |
| 3307 | Src: getOrCreateVReg(Val: *U.getOperand(i: 0)), |
| 3308 | Index: CI->getZExtValue()); |
| 3309 | return true; |
| 3310 | } |
| 3311 | |
| 3312 | bool IRTranslator::translateShuffleVector(const User &U, |
| 3313 | MachineIRBuilder &MIRBuilder) { |
// A shufflevector that operates on scalable vectors is lowered to a splat of
// the 0th element of the first operand, since the index mask operand must be
// zeroinitializer (undef and poison are treated as zeroinitializer here).
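// E.g.
//   %s = shufflevector <vscale x 4 x i32> %v, <vscale x 4 x i32> poison,
//                      <vscale x 4 x i32> zeroinitializer
// becomes a G_SPLAT_VECTOR of element 0 of %v.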
| 3318 | if (U.getOperand(i: 0)->getType()->isScalableTy()) { |
| 3319 | Register Val = getOrCreateVReg(Val: *U.getOperand(i: 0)); |
| 3320 | auto SplatVal = MIRBuilder.buildExtractVectorElementConstant( |
| 3321 | Res: MRI->getType(Reg: Val).getElementType(), Val, Idx: 0); |
| 3322 | MIRBuilder.buildSplatVector(Res: getOrCreateVReg(Val: U), Val: SplatVal); |
| 3323 | return true; |
| 3324 | } |
| 3325 | |
| 3326 | ArrayRef<int> Mask; |
| 3327 | if (auto *SVI = dyn_cast<ShuffleVectorInst>(Val: &U)) |
| 3328 | Mask = SVI->getShuffleMask(); |
| 3329 | else |
| 3330 | Mask = cast<ConstantExpr>(Val: U).getShuffleMask(); |
| 3331 | ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask); |
| 3332 | MIRBuilder |
| 3333 | .buildInstr(Opc: TargetOpcode::G_SHUFFLE_VECTOR, DstOps: {getOrCreateVReg(Val: U)}, |
| 3334 | SrcOps: {getOrCreateVReg(Val: *U.getOperand(i: 0)), |
| 3335 | getOrCreateVReg(Val: *U.getOperand(i: 1))}) |
| 3336 | .addShuffleMask(Val: MaskAlloc); |
| 3337 | return true; |
| 3338 | } |
| 3339 | |
| 3340 | bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { |
| 3341 | const PHINode &PI = cast<PHINode>(Val: U); |
| 3342 | |
| 3343 | SmallVector<MachineInstr *, 4> Insts; |
| 3344 | for (auto Reg : getOrCreateVRegs(Val: PI)) { |
| 3345 | auto MIB = MIRBuilder.buildInstr(Opc: TargetOpcode::G_PHI, DstOps: {Reg}, SrcOps: {}); |
| 3346 | Insts.push_back(Elt: MIB.getInstr()); |
| 3347 | } |
| 3348 | |
| 3349 | PendingPHIs.emplace_back(Args: &PI, Args: std::move(Insts)); |
| 3350 | return true; |
| 3351 | } |
| 3352 | |
| 3353 | bool IRTranslator::translateAtomicCmpXchg(const User &U, |
| 3354 | MachineIRBuilder &MIRBuilder) { |
| 3355 | const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(Val: U); |
| 3356 | |
| 3357 | auto Flags = TLI->getAtomicMemOperandFlags(AI: I, DL: *DL); |
| 3358 | |
| 3359 | auto Res = getOrCreateVRegs(Val: I); |
| 3360 | Register OldValRes = Res[0]; |
| 3361 | Register SuccessRes = Res[1]; |
| 3362 | Register Addr = getOrCreateVReg(Val: *I.getPointerOperand()); |
| 3363 | Register Cmp = getOrCreateVReg(Val: *I.getCompareOperand()); |
| 3364 | Register NewVal = getOrCreateVReg(Val: *I.getNewValOperand()); |
| 3365 | |
| 3366 | MIRBuilder.buildAtomicCmpXchgWithSuccess( |
| 3367 | OldValRes, SuccessRes, Addr, CmpVal: Cmp, NewVal, |
| 3368 | MMO&: *MF->getMachineMemOperand( |
| 3369 | PtrInfo: MachinePointerInfo(I.getPointerOperand()), f: Flags, MemTy: MRI->getType(Reg: Cmp), |
| 3370 | base_alignment: getMemOpAlign(I), AAInfo: I.getAAMetadata(), Ranges: nullptr, SSID: I.getSyncScopeID(), |
| 3371 | Ordering: I.getSuccessOrdering(), FailureOrdering: I.getFailureOrdering())); |
| 3372 | return true; |
| 3373 | } |
| 3374 | |
| 3375 | bool IRTranslator::translateAtomicRMW(const User &U, |
| 3376 | MachineIRBuilder &MIRBuilder) { |
| 3377 | if (containsBF16Type(U)) |
| 3378 | return false; |
| 3379 | |
| 3380 | const AtomicRMWInst &I = cast<AtomicRMWInst>(Val: U); |
| 3381 | auto Flags = TLI->getAtomicMemOperandFlags(AI: I, DL: *DL); |
| 3382 | |
| 3383 | Register Res = getOrCreateVReg(Val: I); |
| 3384 | Register Addr = getOrCreateVReg(Val: *I.getPointerOperand()); |
| 3385 | Register Val = getOrCreateVReg(Val: *I.getValOperand()); |
| 3386 | |
| 3387 | unsigned Opcode = 0; |
| 3388 | switch (I.getOperation()) { |
| 3389 | default: |
| 3390 | return false; |
| 3391 | case AtomicRMWInst::Xchg: |
| 3392 | Opcode = TargetOpcode::G_ATOMICRMW_XCHG; |
| 3393 | break; |
| 3394 | case AtomicRMWInst::Add: |
| 3395 | Opcode = TargetOpcode::G_ATOMICRMW_ADD; |
| 3396 | break; |
| 3397 | case AtomicRMWInst::Sub: |
| 3398 | Opcode = TargetOpcode::G_ATOMICRMW_SUB; |
| 3399 | break; |
| 3400 | case AtomicRMWInst::And: |
| 3401 | Opcode = TargetOpcode::G_ATOMICRMW_AND; |
| 3402 | break; |
| 3403 | case AtomicRMWInst::Nand: |
| 3404 | Opcode = TargetOpcode::G_ATOMICRMW_NAND; |
| 3405 | break; |
| 3406 | case AtomicRMWInst::Or: |
| 3407 | Opcode = TargetOpcode::G_ATOMICRMW_OR; |
| 3408 | break; |
| 3409 | case AtomicRMWInst::Xor: |
| 3410 | Opcode = TargetOpcode::G_ATOMICRMW_XOR; |
| 3411 | break; |
| 3412 | case AtomicRMWInst::Max: |
| 3413 | Opcode = TargetOpcode::G_ATOMICRMW_MAX; |
| 3414 | break; |
| 3415 | case AtomicRMWInst::Min: |
| 3416 | Opcode = TargetOpcode::G_ATOMICRMW_MIN; |
| 3417 | break; |
| 3418 | case AtomicRMWInst::UMax: |
| 3419 | Opcode = TargetOpcode::G_ATOMICRMW_UMAX; |
| 3420 | break; |
| 3421 | case AtomicRMWInst::UMin: |
| 3422 | Opcode = TargetOpcode::G_ATOMICRMW_UMIN; |
| 3423 | break; |
| 3424 | case AtomicRMWInst::FAdd: |
| 3425 | Opcode = TargetOpcode::G_ATOMICRMW_FADD; |
| 3426 | break; |
| 3427 | case AtomicRMWInst::FSub: |
| 3428 | Opcode = TargetOpcode::G_ATOMICRMW_FSUB; |
| 3429 | break; |
| 3430 | case AtomicRMWInst::FMax: |
| 3431 | Opcode = TargetOpcode::G_ATOMICRMW_FMAX; |
| 3432 | break; |
| 3433 | case AtomicRMWInst::FMin: |
| 3434 | Opcode = TargetOpcode::G_ATOMICRMW_FMIN; |
| 3435 | break; |
| 3436 | case AtomicRMWInst::FMaximum: |
| 3437 | Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM; |
| 3438 | break; |
| 3439 | case AtomicRMWInst::FMinimum: |
| 3440 | Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM; |
| 3441 | break; |
| 3442 | case AtomicRMWInst::UIncWrap: |
| 3443 | Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP; |
| 3444 | break; |
| 3445 | case AtomicRMWInst::UDecWrap: |
| 3446 | Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP; |
| 3447 | break; |
| 3448 | case AtomicRMWInst::USubCond: |
| 3449 | Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND; |
| 3450 | break; |
| 3451 | case AtomicRMWInst::USubSat: |
| 3452 | Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT; |
| 3453 | break; |
| 3454 | } |
| 3455 | |
| 3456 | MIRBuilder.buildAtomicRMW( |
| 3457 | Opcode, OldValRes: Res, Addr, Val, |
| 3458 | MMO&: *MF->getMachineMemOperand(PtrInfo: MachinePointerInfo(I.getPointerOperand()), |
| 3459 | f: Flags, MemTy: MRI->getType(Reg: Val), base_alignment: getMemOpAlign(I), |
| 3460 | AAInfo: I.getAAMetadata(), Ranges: nullptr, SSID: I.getSyncScopeID(), |
| 3461 | Ordering: I.getOrdering())); |
| 3462 | return true; |
| 3463 | } |
| 3464 | |
| 3465 | bool IRTranslator::translateFence(const User &U, |
| 3466 | MachineIRBuilder &MIRBuilder) { |
| 3467 | const FenceInst &Fence = cast<FenceInst>(Val: U); |
| 3468 | MIRBuilder.buildFence(Ordering: static_cast<unsigned>(Fence.getOrdering()), |
| 3469 | Scope: Fence.getSyncScopeID()); |
| 3470 | return true; |
| 3471 | } |
| 3472 | |
| 3473 | bool IRTranslator::translateFreeze(const User &U, |
| 3474 | MachineIRBuilder &MIRBuilder) { |
| 3475 | const ArrayRef<Register> DstRegs = getOrCreateVRegs(Val: U); |
| 3476 | const ArrayRef<Register> SrcRegs = getOrCreateVRegs(Val: *U.getOperand(i: 0)); |
| 3477 | |
| 3478 | assert(DstRegs.size() == SrcRegs.size() && |
| 3479 | "Freeze with different source and destination type?" ); |
| 3480 | |
| 3481 | for (unsigned I = 0; I < DstRegs.size(); ++I) { |
| 3482 | MIRBuilder.buildFreeze(Dst: DstRegs[I], Src: SrcRegs[I]); |
| 3483 | } |
| 3484 | |
| 3485 | return true; |
| 3486 | } |
| 3487 | |
| 3488 | void IRTranslator::finishPendingPhis() { |
| 3489 | #ifndef NDEBUG |
| 3490 | DILocationVerifier Verifier; |
| 3491 | GISelObserverWrapper WrapperObserver(&Verifier); |
| 3492 | RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver); |
| 3493 | #endif // ifndef NDEBUG |
| 3494 | for (auto &Phi : PendingPHIs) { |
| 3495 | const PHINode *PI = Phi.first; |
| 3496 | if (PI->getType()->isEmptyTy()) |
| 3497 | continue; |
| 3498 | ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; |
| 3499 | MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent(); |
| 3500 | EntryBuilder->setDebugLoc(PI->getDebugLoc()); |
| 3501 | #ifndef NDEBUG |
| 3502 | Verifier.setCurrentInst(PI); |
| 3503 | #endif // ifndef NDEBUG |
| 3504 | |
| 3505 | SmallSet<const MachineBasicBlock *, 16> SeenPreds; |
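// A machine basic block can be the predecessor for several IR-level edges
// (e.g. after switch lowering), so guard against adding the same
// (value, pred) pair to a G_PHI twice.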
| 3506 | for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { |
| 3507 | auto IRPred = PI->getIncomingBlock(i); |
| 3508 | ArrayRef<Register> ValRegs = getOrCreateVRegs(Val: *PI->getIncomingValue(i)); |
| 3509 | for (auto *Pred : getMachinePredBBs(Edge: {IRPred, PI->getParent()})) { |
| 3510 | if (SeenPreds.count(Ptr: Pred) || !PhiMBB->isPredecessor(MBB: Pred)) |
| 3511 | continue; |
| 3512 | SeenPreds.insert(Ptr: Pred); |
| 3513 | for (unsigned j = 0; j < ValRegs.size(); ++j) { |
| 3514 | MachineInstrBuilder MIB(*MF, ComponentPHIs[j]); |
| 3515 | MIB.addUse(RegNo: ValRegs[j]); |
| 3516 | MIB.addMBB(MBB: Pred); |
| 3517 | } |
| 3518 | } |
| 3519 | } |
| 3520 | } |
| 3521 | } |
| 3522 | |
| 3523 | void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList, |
| 3524 | const DILocalVariable *Variable, |
| 3525 | const DIExpression *Expression, |
| 3526 | const DebugLoc &DL, |
| 3527 | MachineIRBuilder &MIRBuilder) { |
| 3528 | assert(Variable->isValidLocationForIntrinsic(DL) && |
| 3529 | "Expected inlined-at fields to agree" ); |
| 3530 | // Act as if we're handling a debug intrinsic. |
| 3531 | MIRBuilder.setDebugLoc(DL); |
| 3532 | |
| 3533 | if (!V || HasArgList) { |
| 3534 | // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to |
| 3535 | // terminate any prior location. |
| 3536 | MIRBuilder.buildIndirectDbgValue(Reg: 0, Variable, Expr: Expression); |
| 3537 | return; |
| 3538 | } |
| 3539 | |
| 3540 | if (const auto *CI = dyn_cast<Constant>(Val: V)) { |
| 3541 | MIRBuilder.buildConstDbgValue(C: *CI, Variable, Expr: Expression); |
| 3542 | return; |
| 3543 | } |
| 3544 | |
| 3545 | if (auto *AI = dyn_cast<AllocaInst>(Val: V); |
| 3546 | AI && AI->isStaticAlloca() && Expression->startsWithDeref()) { |
| 3547 | // If the value is an alloca and the expression starts with a |
| 3548 | // dereference, track a stack slot instead of a register, as registers |
| 3549 | // may be clobbered. |
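// E.g. DIExpression(DW_OP_deref, DW_OP_plus_uconst, 4) on a static alloca
// becomes a frame-index DBG_VALUE with DIExpression(DW_OP_plus_uconst, 4).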
| 3550 | auto ExprOperands = Expression->getElements(); |
| 3551 | auto *ExprDerefRemoved = |
| 3552 | DIExpression::get(Context&: AI->getContext(), Elements: ExprOperands.drop_front()); |
| 3553 | MIRBuilder.buildFIDbgValue(FI: getOrCreateFrameIndex(AI: *AI), Variable, |
| 3554 | Expr: ExprDerefRemoved); |
| 3555 | return; |
| 3556 | } |
| 3557 | if (translateIfEntryValueArgument(isDeclare: false, Val: V, Var: Variable, Expr: Expression, DL, |
| 3558 | MIRBuilder)) |
| 3559 | return; |
| 3560 | for (Register Reg : getOrCreateVRegs(Val: *V)) { |
| 3561 | // FIXME: This does not handle register-indirect values at offset 0. The |
| 3562 | // direct/indirect thing shouldn't really be handled by something as |
| 3563 | // implicit as reg+noreg vs reg+imm in the first place, but it seems |
| 3564 | // pretty baked in right now. |
| 3565 | MIRBuilder.buildDirectDbgValue(Reg, Variable, Expr: Expression); |
| 3566 | } |
| 3567 | } |
| 3568 | |
| 3569 | void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList, |
| 3570 | const DILocalVariable *Variable, |
| 3571 | const DIExpression *Expression, |
| 3572 | const DebugLoc &DL, |
| 3573 | MachineIRBuilder &MIRBuilder) { |
| 3574 | if (!Address || isa<UndefValue>(Val: Address)) { |
| 3575 | LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n" ); |
| 3576 | return; |
| 3577 | } |
| 3578 | |
| 3579 | assert(Variable->isValidLocationForIntrinsic(DL) && |
| 3580 | "Expected inlined-at fields to agree" ); |
| 3581 | auto AI = dyn_cast<AllocaInst>(Val: Address); |
| 3582 | if (AI && AI->isStaticAlloca()) { |
| 3583 | // Static allocas are tracked at the MF level, no need for DBG_VALUE |
| 3584 | // instructions (in fact, they get ignored if they *do* exist). |
| 3585 | MF->setVariableDbgInfo(Var: Variable, Expr: Expression, |
| 3586 | Slot: getOrCreateFrameIndex(AI: *AI), Loc: DL); |
| 3587 | return; |
| 3588 | } |
| 3589 | |
| 3590 | if (translateIfEntryValueArgument(isDeclare: true, Val: Address, Var: Variable, |
| 3591 | Expr: Expression, DL, |
| 3592 | MIRBuilder)) |
| 3593 | return; |
| 3594 | |
| 3595 | // A dbg.declare describes the address of a source variable, so lower it |
| 3596 | // into an indirect DBG_VALUE. |
| 3597 | MIRBuilder.setDebugLoc(DL); |
| 3598 | MIRBuilder.buildIndirectDbgValue(Reg: getOrCreateVReg(Val: *Address), Variable, |
| 3599 | Expr: Expression); |
| 3600 | } |
| 3601 | |
| 3602 | void IRTranslator::translateDbgInfo(const Instruction &Inst, |
| 3603 | MachineIRBuilder &MIRBuilder) { |
| 3604 | for (DbgRecord &DR : Inst.getDbgRecordRange()) { |
| 3605 | if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(Val: &DR)) { |
| 3606 | MIRBuilder.setDebugLoc(DLR->getDebugLoc()); |
| 3607 | assert(DLR->getLabel() && "Missing label" ); |
| 3608 | assert(DLR->getLabel()->isValidLocationForIntrinsic( |
| 3609 | MIRBuilder.getDebugLoc()) && |
| 3610 | "Expected inlined-at fields to agree" ); |
| 3611 | MIRBuilder.buildDbgLabel(Label: DLR->getLabel()); |
| 3612 | continue; |
| 3613 | } |
| 3614 | DbgVariableRecord &DVR = cast<DbgVariableRecord>(Val&: DR); |
| 3615 | const DILocalVariable *Variable = DVR.getVariable(); |
| 3616 | const DIExpression *Expression = DVR.getExpression(); |
| 3617 | Value *V = DVR.getVariableLocationOp(OpIdx: 0); |
| 3618 | if (DVR.isDbgDeclare()) |
| 3619 | translateDbgDeclareRecord(Address: V, HasArgList: DVR.hasArgList(), Variable, Expression, |
| 3620 | DL: DVR.getDebugLoc(), MIRBuilder); |
| 3621 | else |
| 3622 | translateDbgValueRecord(V, HasArgList: DVR.hasArgList(), Variable, Expression, |
| 3623 | DL: DVR.getDebugLoc(), MIRBuilder); |
| 3624 | } |
| 3625 | } |
| 3626 | |
| 3627 | bool IRTranslator::translate(const Instruction &Inst) { |
| 3628 | CurBuilder->setDebugLoc(Inst.getDebugLoc()); |
| 3629 | CurBuilder->setPCSections(Inst.getMetadata(KindID: LLVMContext::MD_pcsections)); |
| 3630 | CurBuilder->setMMRAMetadata(Inst.getMetadata(KindID: LLVMContext::MD_mmra)); |
| 3631 | |
| 3632 | if (TLI->fallBackToDAGISel(Inst)) |
| 3633 | return false; |
| 3634 | |
| 3635 | switch (Inst.getOpcode()) { |
| 3636 | #define HANDLE_INST(NUM, OPCODE, CLASS) \ |
| 3637 | case Instruction::OPCODE: \ |
| 3638 | return translate##OPCODE(Inst, *CurBuilder.get()); |
| 3639 | #include "llvm/IR/Instruction.def" |
| 3640 | default: |
| 3641 | return false; |
| 3642 | } |
| 3643 | } |
| 3644 | |
| 3645 | bool IRTranslator::translate(const Constant &C, Register Reg) { |
// We only emit constants into the entry block from here. To prevent jumpy
// debug behaviour, clear the debug location.
| 3648 | if (auto CurrInstDL = CurBuilder->getDL()) |
| 3649 | EntryBuilder->setDebugLoc(DebugLoc()); |
| 3650 | |
| 3651 | if (auto CI = dyn_cast<ConstantInt>(Val: &C)) { |
| 3652 | // buildConstant expects a to-be-splatted scalar ConstantInt. |
| 3653 | if (isa<VectorType>(Val: CI->getType())) |
| 3654 | CI = ConstantInt::get(Context&: CI->getContext(), V: CI->getValue()); |
| 3655 | EntryBuilder->buildConstant(Res: Reg, Val: *CI); |
| 3656 | } else if (auto CF = dyn_cast<ConstantFP>(Val: &C)) { |
| 3657 | // buildFConstant expects a to-be-splatted scalar ConstantFP. |
| 3658 | if (isa<VectorType>(Val: CF->getType())) |
| 3659 | CF = ConstantFP::get(Context&: CF->getContext(), V: CF->getValue()); |
| 3660 | EntryBuilder->buildFConstant(Res: Reg, Val: *CF); |
| 3661 | } else if (isa<UndefValue>(Val: C)) |
| 3662 | EntryBuilder->buildUndef(Res: Reg); |
| 3663 | else if (isa<ConstantPointerNull>(Val: C)) |
| 3664 | EntryBuilder->buildConstant(Res: Reg, Val: 0); |
| 3665 | else if (auto GV = dyn_cast<GlobalValue>(Val: &C)) |
| 3666 | EntryBuilder->buildGlobalValue(Res: Reg, GV); |
| 3667 | else if (auto CPA = dyn_cast<ConstantPtrAuth>(Val: &C)) { |
| 3668 | Register Addr = getOrCreateVReg(Val: *CPA->getPointer()); |
| 3669 | Register AddrDisc = getOrCreateVReg(Val: *CPA->getAddrDiscriminator()); |
| 3670 | EntryBuilder->buildConstantPtrAuth(Res: Reg, CPA, Addr, AddrDisc); |
| 3671 | } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(Val: &C)) { |
| 3672 | Constant &Elt = *CAZ->getElementValue(Idx: 0u); |
| 3673 | if (isa<ScalableVectorType>(Val: CAZ->getType())) { |
| 3674 | EntryBuilder->buildSplatVector(Res: Reg, Val: getOrCreateVReg(Val: Elt)); |
| 3675 | return true; |
| 3676 | } |
| 3677 | // Return the scalar if it is a <1 x Ty> vector. |
| 3678 | unsigned NumElts = CAZ->getElementCount().getFixedValue(); |
| 3679 | if (NumElts == 1) |
| 3680 | return translateCopy(U: C, V: Elt, MIRBuilder&: *EntryBuilder); |
| 3681 | // All elements are zero so we can just use the first one. |
| 3682 | EntryBuilder->buildSplatBuildVector(Res: Reg, Src: getOrCreateVReg(Val: Elt)); |
| 3683 | } else if (auto CV = dyn_cast<ConstantDataVector>(Val: &C)) { |
| 3684 | // Return the scalar if it is a <1 x Ty> vector. |
| 3685 | if (CV->getNumElements() == 1) |
| 3686 | return translateCopy(U: C, V: *CV->getElementAsConstant(i: 0), MIRBuilder&: *EntryBuilder); |
| 3687 | SmallVector<Register, 4> Ops; |
| 3688 | for (unsigned i = 0; i < CV->getNumElements(); ++i) { |
| 3689 | Constant &Elt = *CV->getElementAsConstant(i); |
| 3690 | Ops.push_back(Elt: getOrCreateVReg(Val: Elt)); |
| 3691 | } |
| 3692 | EntryBuilder->buildBuildVector(Res: Reg, Ops); |
| 3693 | } else if (auto CE = dyn_cast<ConstantExpr>(Val: &C)) { |
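    // Constant expressions reuse the per-opcode instruction translators, but
    // with the entry builder so the result is materialized in the entry block.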
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}

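// Lower the deferred switch structures that SwitchLowering collected while
// translating BB (bit-test clusters, jump tables, and compare chains), then
// emit any stack-protector checks the block requires.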
bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
                                      MachineBasicBlock &MBB) {
  for (auto &BTB : SL->BitTestCases) {
    // Emit header first, if it wasn't already emitted.
    if (!BTB.Emitted)
      emitBitTestHeader(BTB, BTB.Parent);

    BranchProbability UnhandledProb = BTB.Prob;
    for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
      UnhandledProb -= BTB.Cases[j].ExtraProb;
      // Set the current basic block to the mbb we wish to insert the code
      // into.
      MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
      // If all cases cover a contiguous range, it is not necessary to jump to
      // the default block after the last bit test fails. This is because the
      // range check during bit test header creation has guaranteed that every
      // case here doesn't go outside the range. In this case, there is no need
      // to perform the last bit test, as it will always be true. Instead, make
      // the second-to-last bit-test fall through to the target of the last bit
      // test, and delete the last bit test.

      MachineBasicBlock *NextMBB;
      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // Second-to-last bit-test with contiguous range: fall through to the
        // target of the final bit test.
        NextMBB = BTB.Cases[j + 1].TargetBB;
      } else if (j + 1 == ej) {
        // For the last bit test, fall through to Default.
        NextMBB = BTB.Default;
      } else {
        // Otherwise, fall through to the next bit test.
        NextMBB = BTB.Cases[j + 1].ThisBB;
      }

      emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);

      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // We need to record the replacement phi edge here that normally
        // happens in emitBitTestCase before we delete the case, otherwise the
        // phi edge will be lost.
        addMachineCFGPred({BTB.Parent->getBasicBlock(),
                           BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
                          MBB);
        // Since we're not going to use the final bit test, remove it.
        BTB.Cases.pop_back();
        break;
      }
    }
    // This is the default BB. There are two jumps to it: from the header BB
    // and from the last case BB, unless the latter was skipped above.
    CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
                                   BTB.Default->getBasicBlock()};
    addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
    if (!BTB.ContiguousRange) {
      addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
    }
  }
  SL->BitTestCases.clear();

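  // Each JTCases entry pairs the header (the range check that guards the
  // table) with the jump table itself.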
  for (auto &JTCase : SL->JTCases) {
    // Emit header first, if it wasn't already emitted.
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  }
  SL->JTCases.clear();

  for (auto &SwCase : SL->SwitchCases)
    emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
  SL->SwitchCases.clear();

  // Check if we need to generate stack-protector guard checks.
  StackProtector &SP = getAnalysis<StackProtector>();
  if (SP.shouldEmitSDCheck(BB)) {
    bool FunctionBasedInstrumentation =
        TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
    SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
  }
  // Handle stack protector.
  if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
    LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
    return false;
  } else if (SPDescriptor.shouldEmitStackProtector()) {
    MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
    MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();

    // Find the split point to split the parent mbb. At the same time copy all
    // physical registers used in the tail of parent mbb into virtual registers
    // before the split point and back into physical registers after the split
    // point. This prevents us needing to deal with Live-ins and many other
    // register allocation issues caused by us splitting the parent mbb. The
    // register allocator will clean up said virtual copies later on.
    MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
        ParentMBB, *MF->getSubtarget().getInstrInfo());

    // Splice the terminator of ParentMBB into SuccessMBB.
    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
                       ParentMBB->end());

    // Add compare/jump on neq/jump to the parent BB.
    if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
      return false;

    // CodeGen Failure MBB if we have not codegened it yet.
    MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
    if (FailureMBB->empty()) {
      if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
        return false;
    }

    // Clear the Per-BB State.
    SPDescriptor.resetPerBBState();
  }
  return true;
}

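// Emit the stack-protector check at the end of ParentBB: reload the canary
// from its stack slot, compare it against the live guard value, and branch
// to the failure block on mismatch, otherwise to the success block.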
bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                          MachineBasicBlock *ParentBB) {
  CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
  // First create the loads to the guard/stack slot for the comparison.
  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));

  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
  int FI = MFI.getStackProtectorIndex();

  Register Guard;
  Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
  const Module &M = *ParentBB->getParent()->getFunction().getParent();
  Align Align = DL->getPrefTypeAlign(PointerType::getUnqual(M.getContext()));

  // Generate code to load the content of the guard slot.
  Register GuardVal =
      CurBuilder
          ->buildLoad(PtrMemTy, StackSlotPtr,
                      MachinePointerInfo::getFixedStack(*MF, FI), Align,
                      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
          .getReg(0);

  if (TLI->useStackGuardXorFP()) {
    LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
    return false;
  }

  // Retrieve guard check function, nullptr if instrumentation is inlined.
  if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
    // This path is currently untestable on GlobalISel, since the only platform
    // that needs this seems to be Windows, and we fall back on that currently.
    // The code still lives here in case that changes.
    // Silence warning about unused variable until the code below that uses
    // 'GuardCheckFn' is enabled.
    (void)GuardCheckFn;
    return false;
#if 0
    // The target provides a guard check function to validate the guard value.
    // Generate a call to that function with the content of the guard slot as
    // argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");
    ISD::ArgFlagsTy Flags;
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
      Flags.setInReg();
    CallLowering::ArgInfo GuardArgInfo(
        {GuardVal, FnTy->getParamType(0), {Flags}});

    CallLowering::CallLoweringInfo Info;
    Info.OrigArgs.push_back(GuardArgInfo);
    Info.CallConv = GuardCheckFn->getCallingConv();
    Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
    Info.OrigRet = {Register(), FnTy->getReturnType()};
    if (!CLI->lowerCall(MIRBuilder, Info)) {
      LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
      return false;
    }
    return true;
#endif
  }

  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
  // Otherwise, emit a volatile load to retrieve the stack guard value.
  if (TLI->useLoadStackGuardNode(*ParentBB->getBasicBlock()->getModule())) {
    Guard =
        MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
    getStackGuard(Guard, *CurBuilder);
  } else {
    // TODO: test using android subtarget when we support @llvm.thread.pointer.
    const Value *IRGuard = TLI->getSDagStackGuard(M);
    Register GuardPtr = getOrCreateVReg(*IRGuard);

    Guard = CurBuilder
                ->buildLoad(PtrMemTy, GuardPtr,
                            MachinePointerInfo::getFixedStack(*MF, FI), Align,
                            MachineMemOperand::MOLoad |
                                MachineMemOperand::MOVolatile)
                .getReg(0);
  }

  // Perform the comparison.
  auto Cmp =
      CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
  // If the guard/stackslot do not equal, branch to failure MBB.
  CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
  // Otherwise branch to success MBB.
  CurBuilder->buildBr(*SPD.getSuccessMBB());
  return true;
}

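// Emit the stack-protector failure block: a call to the stack-protector
// failure libcall (typically __stack_chk_fail), optionally followed by a
// trap.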
bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                                           MachineBasicBlock *FailureBB) {
  CurBuilder->setInsertPt(*FailureBB, FailureBB->end());

  const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
  const char *Name = TLI->getLibcallName(Libcall);

  CallLowering::CallLoweringInfo Info;
  Info.CallConv = TLI->getLibcallCallingConv(Libcall);
  Info.Callee = MachineOperand::CreateES(Name);
  Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
                  0};
  if (!CLI->lowerCall(*CurBuilder, Info)) {
    LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
    return false;
  }

  // Emit a trap instruction if we are required to do so.
  const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
  if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
    CurBuilder->buildInstr(TargetOpcode::G_TRAP);

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder.reset();
  CurBuilder.reset();
  FuncInfo.clear();
  SPDescriptor.resetPerFunctionState();
}

/// Returns true if a BasicBlock \p BB within a variadic function contains a
/// variadic musttail call.
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
  if (!IsVarArg)
    return false;

  // Walk the block backwards, because tail calls usually only appear at the
  // end of a block.
  return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
    const auto *CI = dyn_cast<CallInst>(&I);
    return CI && CI->isMustTailCall();
  });
}

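// Entry point of the pass: translate the LLVM IR of CurMF's Function into
// generic MachineInstrs, creating one machine basic block per IR basic block.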
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();
  TLI = MF->getSubtarget().getTargetLowering();

  if (EnableCSE) {
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }
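  // Two builders are kept: EntryBuilder inserts into the entry block
  // (arguments and constants, so they dominate all uses), while CurBuilder
  // follows whichever IR block is currently being translated.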
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
  const TargetMachine &TM = MF->getTarget();
  TM.resetTargetOptions(F);
  EnableOpts = OptLevel != CodeGenOptLevel::None && !skipFunction(F);
  FuncInfo.MF = MF;
  if (EnableOpts) {
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
  } else {
    AA = nullptr;
    FuncInfo.BPI = nullptr;
  }

  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
      MF->getFunction());
  LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);

  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(*TLI, TM, *DL);

  assert(PendingPHIs.empty() && "stale PHIs");

  // Targets which want to use big endian can enable it using
  // enableBigEndian().
  if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
  for (const BasicBlock &BB : F) {
    auto *&MBB = FuncInfo.MBBMap[BB.getNumber()];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }

  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  if (CLI->fallBackToDAGISel(*MF)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: "
      << ore::NV("Prototype", F.getFunctionType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Lower the actual args into this basic block.
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }
  }

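  // Hand the collected argument vregs to the target's CallLowering, which
  // emits the ABI-specific copies from physical registers and stack slots.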
  if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: "
      << ore::NV("Prototype", F.getFunctionType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
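    // Register the observer with the MachineFunction for the duration of the
    // traversal, so every instruction created below is reported to CSEInfo
    // (and, in debug builds, to the DILocation verifier).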
    RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // If we translated a tail call in the last step, then we know
        // everything after the call is either a return, or something that is
        // handled by the call itself. (E.g. a lifetime marker or assume
        // intrinsic.) In this case, we should stop translating the block and
        // move on.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG

        // Translate any debug-info attached to the instruction.
        translateDbgInfo(Inst, *CurBuilder);

        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStrStorage << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }

      if (!finalizeBasicBlock(*BB, MBB)) {
        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   BB->getTerminator()->getDebugLoc(), BB);
        R << "unable to translate basic block";
        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  SwiftError.propagateVRegs();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->deleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}