//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemoryOpRemark.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(MachineFunction &MF,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().setFailedISel();
  bool IsGlobalISelAbortEnabled =
      MF.getTarget().Options.GlobalISelAbort == GlobalISelAbortMode::Enable;

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsGlobalISelAbortEnabled)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsGlobalISelAbortEnabled)
    report_fatal_error(Twine(R.getMsg()));
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator(CodeGenOptLevel optlevel)
    : MachineFunctionPass(ID), OptLevel(optlevel) {}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() override = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have no debug loc because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
            (MI.isDebugInstr())) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  if (OptLevel != CodeGenOptLevel::None) {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
  }
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<TargetLibraryInfoWrapperPass>();
  AU.addRequired<LibcallLoweringInfoWrapper>();

  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  if (!Val.getType()->isTokenTy())
    assert(Val.getType()->isSized() &&
           "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::append_range(*VRegs, EltRegs);
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto [MapEntry, Inserted] = FrameIndices.try_emplace(&AI);
  if (!Inserted)
    return MapEntry->second;

  TypeSize TySize = AI.getAllocationSize(*DL).value_or(TypeSize::getZero());
  uint64_t Size = TySize.getKnownMinValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = MapEntry->second;
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);

  // Scalable vectors and structures that contain scalable vectors may
  // need a special StackID to distinguish them from other (fixed size)
  // stack objects.
  if (TySize.isScalable()) {
    auto StackID =
        MF->getSubtarget().getFrameLowering()->getStackIDForScalableVectors();
    MF->getFrameInfo().setStackID(FI, StackID);
  }

  return FI;
}

Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    return AI->getAlign();
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
    return AI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *ORE, R);
  return Align(1);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  if (!mayTranslateUserTypes(U))
    return false;

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  if (!mayTranslateUserTypes(U))
    return false;

  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  if (!mayTranslateUserTypes(U))
    return false;

  auto *CI = cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred = CI->getPredicate();
  uint32_t Flags = MachineInstr::copyFlagsFromInstruction(*CI);
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1, Flags);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may mess up with the insertion point, but
  // this is not important as a return is the last instruction
  // of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
}

void IRTranslator::emitBranchForMergedCondition(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    CmpInst::Predicate Condition;
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    } else {
      const FCmpInst *FC = cast<FCmpInst>(Cond);
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    }

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
    return;
  }

  // Create a CaseBlock record representing this branch.
  CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  SwitchCG::CaseBlock CB(
      Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

static bool isValInBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

void IRTranslator::findMergedConditions(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    Instruction::BinaryOps Opc, BranchProbability TProb,
    BranchProbability FProb, bool InvertCond) {
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
  // Skip over nodes that are not part of the tree, and remember to invert the
  // op and operands at the next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      isValInBlock(NotCond, CurBB->getBasicBlock())) {
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  const Value *BOpOp0, *BOpOp1;
  // Compute the effective opcode for Cond, taking into account whether it needs
  // to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
  if (BOp) {
    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
               ? Instruction::And
               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
                      ? Instruction::Or
                      : (Instruction::BinaryOps)0);
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have same opcode.
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
      !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
      !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
                                 InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineBasicBlock *TmpBB =
      MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.
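    // For example, with TProb = 0.6 and FProb = 0.4, BB1 branches to TBB with
    // probability 0.3 and to TmpBB with 0.7, and TmpBB branches to TBB with
    // 0.6/1.4 and to FBB with 0.8/1.4; the overall probability of reaching
    // TBB is then 0.3 + 0.7 * (0.6/1.4) = 0.6, as required.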

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.
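    // For example, with TProb = 0.6 and FProb = 0.4, BB1 branches to TmpBB
    // with probability 0.8 and to FBB with 0.2, and TmpBB branches to TBB
    // with 1.2/1.6 and to FBB with 0.4/1.6; the overall probability of
    // reaching FBB is then 0.2 + 0.8 * (0.4/1.6) = 0.4, as required.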

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  }
}

bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // For multiple cases, it's better to emit as branches.
  if (Cases.size() != 2)
    return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
        Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
        Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}

bool IRTranslator::translateUncondBr(const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  const UncondBrInst &BrInst = cast<UncondBrInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  // If the unconditional target is the layout successor, fallthrough.
  if (OptLevel == CodeGenOptLevel::None || !CurMBB.isLayoutSuccessor(Succ0MBB))
    MIRBuilder.buildBr(*Succ0MBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurMBB.addSuccessor(&getMBB(*Succ));
  return true;
}

bool IRTranslator::translateCondBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const CondBrInst &BrInst = cast<CondBrInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = BrInst.getCondition();
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);
  if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
    Value *Vec;
    const Value *BOp0, *BOp1;
    if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
                    match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
                           /*InvertCond=*/false);
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // Emit the branch for this block.
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return true;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
                         ConstantInt::getTrue(MF->getFunction().getContext()),
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  // Use emitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
  return true;
}

void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N is
    // the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}

bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();

    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
    // For optimized builds, lower large range as a balanced binary tree.
    if (NumClusters > 3 &&
        MF->getTarget().getOptLevel() != CodeGenOptLevel::None &&
        !DefaultMBB->getParent()->getFunction().hasMinSize()) {
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
      continue;
    }

    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}

void IRTranslator::splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                                 const SwitchCG::SwitchWorkListItem &W,
                                 Value *Cond, MachineBasicBlock *SwitchMBB,
                                 MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");
  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  auto [LastLeft, FirstRight, LeftProb, RightProb] =
      SL->computeSplitWorkItemInfo(W);

  // Use the first element on the right as pivot since we will make less-than
  // comparisons against it.
  CaseClusterIt PivotCluster = FirstRight;
  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  CaseClusterIt FirstLeft = W.FirstCluster;
  CaseClusterIt LastRight = W.LastCluster;

  const ConstantInt *Pivot = PivotCluster->Low;

  // New blocks will be inserted immediately after the current one.
  MachineFunction::iterator BBI(W.MBB);
  ++BBI;

  // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
  // we can branch to its destination directly if it's squeezed exactly in
  // between the known lower bound and Pivot - 1.
  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
  } else {
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
    WorkList.push_back(
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
  }

  // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
  // single cluster, RHS.Low == Pivot, and we can branch to its destination
  // directly if RHS.High equals the current upper bound.
  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
      (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
  } else {
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
    WorkList.push_back(
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
  }

  // Create the CaseBlock record that will be used to lower the branch.
  CaseBlock CB(ICmpInst::Predicate::ICMP_SLT, false, Cond, Pivot, nullptr,
               LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,
               RightProb);

  if (W.MBB == SwitchMBB)
    emitSwitchCase(CB, SwitchMBB, MIB);
  else
    SL->SwitchCases.push_back(CB);
}

void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
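  // (a G_JUMP_TABLE materializes the address of the table and a G_BRJT
  // performs the indexed branch on JT.Reg, which emitJumpTableHeader has
  // already populated).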
  assert(JT.Reg && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}

bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncating.
  auto *PtrIRTy = PointerType::getUnqual(SValue.getContext());
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.FallthroughUnreachable) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::integer(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}

void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::integer(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
    // For conditional branch lowering, we might try to do something silly like
    // emit an G_ICMP to compare an existing G_ICMP i1 result with true. If so,
    // just re-use the existing condition vreg.
    if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
        CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
      Cond = CondLHS;
    } else {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
        Cond =
            MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
      else
        Cond =
            MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
    }
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
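      // Fold the two signed comparisons Low <= Val && Val <= High into a
      // single unsigned comparison: (Val - Low) u<= (High - Low).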
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
                                     MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  // Subtract the minimum value.
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);

  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  LLT MaskTy = SwitchOpTy;
  if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
      !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
    MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  else {
    // Ensure that the type will fit the mask value.
    for (const SwitchCG::BitTestCase &Case : B.Cases) {
      if (!isUIntN(SwitchOpTy.getSizeInBits(), Case.Mask)) {
        // Switch table case range are encoded into series of masks.
        // Just use pointer type, it's guaranteed to fit.
        MaskTy = LLT::scalar(PtrTy.getSizeInBits());
        break;
      }
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.FallthroughUnreachable) {
    // Conditional branch to the default block.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::integer(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}

void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
                                   MachineBasicBlock *NextMBB,
                                   BranchProbability BranchProbToNext,
                                   Register Reg, SwitchCG::BitTestCase &B,
                                   MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  LLT SwitchTy = getLLTForMVT(BB.RegVT);
  Register Cmp;
  unsigned PopCount = llvm::popcount(B.Mask);
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
    Cmp = MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::integer(1), Reg,
                        MaskTrailingZeros)
              .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
    Cmp =
        MIB.buildICmp(CmpInst::ICMP_NE, LLT::integer(1), Reg, MaskTrailingOnes)
            .getReg(0);
  } else {
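    // General case: test ((1 << (Val - Low)) & Mask) != 0, where Reg already
    // holds Val - Low as computed in emitBitTestHeader.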
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::integer(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  // Record the fact that the IR edge from the header to the bit test target
  // will go through our new block. Needed for PHIs to have nodes added.
  addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
                    SwitchBB);

  MIB.buildBrCond(Cmp, *B.TargetBB);

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}

bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two
  // successors of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  // If we're in the right place, emit the bit test header right now.
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  if (EnableOpts) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
        return false;
      }
      break;
    }

    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    // It's legal for indirectbr instructions to have duplicate blocks in the
    // destination list. We don't allow this in MIR. Skip anything that's
    // already a successor.
    if (!AddedSuccessors.insert(Succ).second)
      continue;
    CurBB.addSuccessor(&getMBB(*Succ));
  }

  return true;
}

static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);
  TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
  if (StoreSize.isZero())
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());
  AAMDNodes AAInfo = LI.getAAMetadata();

  const Value *Ptr = LI.getPointerOperand();
  Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg =
        SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  MachineMemOperand::Flags Flags =
      TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
  if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
    if (AA->pointsToConstantMemory(
            MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
      Flags |= MachineMemOperand::MOInvariant;
    }
  }

  const MDNode *Ranges =
      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
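  // Emit one G_LOAD per split value, addressing each piece at Base plus its
  // offset within the aggregate.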
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializeObjectPtrOffset(Addr, Base, OffsetTy, Offsets[i]);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i]);
    Align BaseAlign = getMemOpAlign(LI);
    auto MMO =
        MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Regs[i]),
                                 commonAlignment(BaseAlign, Offsets[i]), AAInfo,
                                 Ranges, LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, *DL);

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializeObjectPtrOffset(Addr, Base, OffsetTy, Offsets[i]);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i]);
    Align BaseAlign = getMemOpAlign(SI);
    auto MMO = MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Vals[i]),
                                        commonAlignment(BaseAlign, Offsets[i]),
                                        SI.getAAMetadata(), nullptr,
                                        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

1471static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
1472 const Value *Src = U.getOperand(i: 0);
1473 Type *Int32Ty = Type::getInt32Ty(C&: U.getContext());
1474
1475 // getIndexedOffsetInType is designed for GEPs, where the first index steps
1476 // over the whole aggregate rather than into it, so prepend a constant 0.
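  // For example (sketch): "extractvalue {i32, {i64, i8}} %agg, 1, 0" passes the
  // indices [0, 1, 0] below, and getIndexedOffsetInType then returns the byte
  // offset of the nested i64 within the outer struct (layout dependent).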
1477 SmallVector<Value *, 1> Indices;
1478 Indices.push_back(Elt: ConstantInt::get(Ty: Int32Ty, V: 0));
1479
1480 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Val: &U)) {
1481 for (auto Idx : EVI->indices())
1482 Indices.push_back(Elt: ConstantInt::get(Ty: Int32Ty, V: Idx));
1483 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Val: &U)) {
1484 for (auto Idx : IVI->indices())
1485 Indices.push_back(Elt: ConstantInt::get(Ty: Int32Ty, V: Idx));
1486 } else {
1487 llvm::append_range(C&: Indices, R: drop_begin(RangeOrContainer: U.operands()));
1488 }
1489
1490 return static_cast<uint64_t>(
1491 DL.getIndexedOffsetInType(ElemTy: Src->getType(), Indices));
1492}
1493
1494bool IRTranslator::translateExtractValue(const User &U,
1495 MachineIRBuilder &MIRBuilder) {
1496 const Value *Src = U.getOperand(i: 0);
1497 uint64_t Offset = getOffsetFromIndices(U, DL: *DL);
1498 ArrayRef<Register> SrcRegs = getOrCreateVRegs(Val: *Src);
1499 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V: *Src);
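  // For example (sketch): if the extracted member is the second element of the
  // source aggregate, lower_bound locates its starting offset in Offsets, Idx
  // becomes 1, and the copy loop below starts from that source register.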
1500 unsigned Idx = llvm::lower_bound(Range&: Offsets, Value&: Offset) - Offsets.begin();
1501 auto &DstRegs = allocateVRegs(Val: U);
1502
1503 for (unsigned i = 0; i < DstRegs.size(); ++i)
1504 DstRegs[i] = SrcRegs[Idx++];
1505
1506 return true;
1507}
1508
1509bool IRTranslator::translateInsertValue(const User &U,
1510 MachineIRBuilder &MIRBuilder) {
1511 const Value *Src = U.getOperand(i: 0);
1512 uint64_t Offset = getOffsetFromIndices(U, DL: *DL);
1513 auto &DstRegs = allocateVRegs(Val: U);
1514 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(V: U);
1515 ArrayRef<Register> SrcRegs = getOrCreateVRegs(Val: *Src);
1516 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(Val: *U.getOperand(i: 1));
1517 auto *InsertedIt = InsertedRegs.begin();
1518
1519 for (unsigned i = 0; i < DstRegs.size(); ++i) {
1520 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
1521 DstRegs[i] = *InsertedIt++;
1522 else
1523 DstRegs[i] = SrcRegs[i];
1524 }
1525
1526 return true;
1527}
1528
1529bool IRTranslator::translateSelect(const User &U,
1530 MachineIRBuilder &MIRBuilder) {
1531 Register Tst = getOrCreateVReg(Val: *U.getOperand(i: 0));
1532 ArrayRef<Register> ResRegs = getOrCreateVRegs(Val: U);
1533 ArrayRef<Register> Op0Regs = getOrCreateVRegs(Val: *U.getOperand(i: 1));
1534 ArrayRef<Register> Op1Regs = getOrCreateVRegs(Val: *U.getOperand(i: 2));
1535
1536 uint32_t Flags = 0;
1537 if (const SelectInst *SI = dyn_cast<SelectInst>(Val: &U))
1538 Flags = MachineInstr::copyFlagsFromInstruction(I: *SI);
1539
1540 for (unsigned i = 0; i < ResRegs.size(); ++i) {
1541 MIRBuilder.buildSelect(Res: ResRegs[i], Tst, Op0: Op0Regs[i], Op1: Op1Regs[i], Flags);
1542 }
1543
1544 return true;
1545}
1546
1547bool IRTranslator::translateCopy(const User &U, const Value &V,
1548 MachineIRBuilder &MIRBuilder) {
1549 Register Src = getOrCreateVReg(Val: V);
1550 auto &Regs = *VMap.getVRegs(V: U);
1551 if (Regs.empty()) {
1552 Regs.push_back(Elt: Src);
1553 VMap.getOffsets(V: U)->push_back(Elt: 0);
1554 } else {
1555 // If we already assigned a vreg for this instruction, we can't change that.
1556 // Emit a copy to satisfy the users we already emitted.
1557 MIRBuilder.buildCopy(Res: Regs[0], Op: Src);
1558 }
1559 return true;
1560}
1561
1562bool IRTranslator::translateBitCast(const User &U,
1563 MachineIRBuilder &MIRBuilder) {
1564 // If we're bitcasting to the source type, we can reuse the source vreg.
1565 if (getLLTForType(Ty&: *U.getOperand(i: 0)->getType(), DL: *DL) ==
1566 getLLTForType(Ty&: *U.getType(), DL: *DL)) {
1567 // If the source is a ConstantInt then it was probably created by
1568 // ConstantHoisting and we should leave it alone.
1569 if (isa<ConstantInt>(Val: U.getOperand(i: 0)))
1570 return translateCast(Opcode: TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
1571 MIRBuilder);
1572 return translateCopy(U, V: *U.getOperand(i: 0), MIRBuilder);
1573 }
1574
1575 return translateCast(Opcode: TargetOpcode::G_BITCAST, U, MIRBuilder);
1576}
1577
1578bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1579 MachineIRBuilder &MIRBuilder) {
1580 if (!mayTranslateUserTypes(U))
1581 return false;
1582
1583 uint32_t Flags = 0;
1584 if (const Instruction *I = dyn_cast<Instruction>(Val: &U))
1585 Flags = MachineInstr::copyFlagsFromInstruction(I: *I);
1586
1587 Register Op = getOrCreateVReg(Val: *U.getOperand(i: 0));
1588 Register Res = getOrCreateVReg(Val: U);
1589 MIRBuilder.buildInstr(Opc: Opcode, DstOps: {Res}, SrcOps: {Op}, Flags);
1590 return true;
1591}
1592
1593bool IRTranslator::translateGetElementPtr(const User &U,
1594 MachineIRBuilder &MIRBuilder) {
1595 Value &Op0 = *U.getOperand(i: 0);
1596 Register BaseReg = getOrCreateVReg(Val: Op0);
1597 Type *PtrIRTy = Op0.getType();
1598 LLT PtrTy = getLLTForType(Ty&: *PtrIRTy, DL: *DL);
1599 Type *OffsetIRTy = DL->getIndexType(PtrTy: PtrIRTy);
1600 LLT OffsetTy = getLLTForType(Ty&: *OffsetIRTy, DL: *DL);
1601
1602 uint32_t PtrAddFlags = 0;
1603 // Each PtrAdd generated to implement the GEP inherits the GEP's nuw, nusw,
1604 // and inbounds flags.
1605 if (const Instruction *I = dyn_cast<Instruction>(Val: &U))
1606 PtrAddFlags = MachineInstr::copyFlagsFromInstruction(I: *I);
1607
1608 auto PtrAddFlagsWithConst = [&](int64_t Offset) {
1609 // For a nusw or inbounds GEP with an offset that is nonnegative when
1610 // interpreted as signed, assume there is no unsigned overflow.
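    // For example (sketch): for "getelementptr inbounds i8, ptr %p, i64 4" the
    // folded offset 4 is nonnegative, so the emitted G_PTR_ADD may carry nuw in
    // addition to the flags copied from the GEP.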
1611 if (Offset >= 0 && (PtrAddFlags & MachineInstr::MIFlag::NoUSWrap))
1612 return PtrAddFlags | MachineInstr::MIFlag::NoUWrap;
1613 return PtrAddFlags;
1614 };
1615
1616 // Normalize a vector GEP: all scalar operands should be converted to
1617 // splat vectors.
1618 unsigned VectorWidth = 0;
1619
1620 // True if we should use a splat vector; using VectorWidth alone is not
1621 // sufficient.
1622 bool WantSplatVector = false;
1623 if (auto *VT = dyn_cast<VectorType>(Val: U.getType())) {
1624 VectorWidth = cast<FixedVectorType>(Val: VT)->getNumElements();
1625 // We don't produce 1 x N vectors; those are treated as scalars.
1626 WantSplatVector = VectorWidth > 1;
1627 }
1628
1629 // We might need to splat the base pointer into a vector if the offsets
1630 // are vectors.
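  // For example (sketch): a GEP that produces <4 x ptr> from a scalar base
  // pointer first splats the base into a four-lane build vector, so the
  // per-lane offset arithmetic below stays entirely in vector registers.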
1631 if (WantSplatVector && !PtrTy.isVector()) {
1632 BaseReg = MIRBuilder
1633 .buildSplatBuildVector(Res: LLT::fixed_vector(NumElements: VectorWidth, ScalarTy: PtrTy),
1634 Src: BaseReg)
1635 .getReg(Idx: 0);
1636 PtrIRTy = FixedVectorType::get(ElementType: PtrIRTy, NumElts: VectorWidth);
1637 PtrTy = getLLTForType(Ty&: *PtrIRTy, DL: *DL);
1638 OffsetIRTy = DL->getIndexType(PtrTy: PtrIRTy);
1639 OffsetTy = getLLTForType(Ty&: *OffsetIRTy, DL: *DL);
1640 }
1641
1642 int64_t Offset = 0;
1643 for (gep_type_iterator GTI = gep_type_begin(GEP: &U), E = gep_type_end(GEP: &U);
1644 GTI != E; ++GTI) {
1645 const Value *Idx = GTI.getOperand();
1646 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1647 unsigned Field = cast<Constant>(Val: Idx)->getUniqueInteger().getZExtValue();
1648 Offset += DL->getStructLayout(Ty: StTy)->getElementOffset(Idx: Field);
1649 continue;
1650 } else {
1651 uint64_t ElementSize = GTI.getSequentialElementStride(DL: *DL);
1652
1653 // If this is a scalar constant or a splat vector of constants,
1654 // handle it quickly.
1655 if (const auto *CI = dyn_cast<ConstantInt>(Val: Idx)) {
1656 if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
1657 Offset += ElementSize * *Val;
1658 continue;
1659 }
1660 }
1661
1662 if (Offset != 0) {
1663 auto OffsetMIB = MIRBuilder.buildConstant(Res: {OffsetTy}, Val: Offset);
1664 BaseReg = MIRBuilder
1665 .buildPtrAdd(Res: PtrTy, Op0: BaseReg, Op1: OffsetMIB.getReg(Idx: 0),
1666 Flags: PtrAddFlagsWithConst(Offset))
1667 .getReg(Idx: 0);
1668 Offset = 0;
1669 }
1670
1671 Register IdxReg = getOrCreateVReg(Val: *Idx);
1672 LLT IdxTy = MRI->getType(Reg: IdxReg);
1673 if (IdxTy != OffsetTy) {
1674 if (!IdxTy.isVector() && WantSplatVector) {
1675 IdxReg = MIRBuilder
1676 .buildSplatBuildVector(Res: OffsetTy.changeElementType(NewEltTy: IdxTy),
1677 Src: IdxReg)
1678 .getReg(Idx: 0);
1679 }
1680
1681 IdxReg = MIRBuilder.buildSExtOrTrunc(Res: OffsetTy, Op: IdxReg).getReg(Idx: 0);
1682 }
1683
1684 // N = N + Idx * ElementSize;
1685 // Avoid doing it for ElementSize of 1.
1686 Register GepOffsetReg;
1687 if (ElementSize != 1) {
1688 auto ElementSizeMIB = MIRBuilder.buildConstant(
1689 Res: getLLTForType(Ty&: *OffsetIRTy, DL: *DL), Val: ElementSize);
1690
1691 // The multiplication is NUW if the GEP is NUW and NSW if the GEP is
1692 // NUSW.
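        // For example (sketch): "getelementptr nuw i32, ptr %p, i64 %i" scales
        // %i by 4 with a G_MUL marked nuw; if the GEP is also nusw (e.g.
        // inbounds), the multiply is marked nsw as well.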
1693 uint32_t ScaleFlags = PtrAddFlags & MachineInstr::MIFlag::NoUWrap;
1694 if (PtrAddFlags & MachineInstr::MIFlag::NoUSWrap)
1695 ScaleFlags |= MachineInstr::MIFlag::NoSWrap;
1696
1697 GepOffsetReg =
1698 MIRBuilder.buildMul(Dst: OffsetTy, Src0: IdxReg, Src1: ElementSizeMIB, Flags: ScaleFlags)
1699 .getReg(Idx: 0);
1700 } else {
1701 GepOffsetReg = IdxReg;
1702 }
1703
1704 BaseReg =
1705 MIRBuilder.buildPtrAdd(Res: PtrTy, Op0: BaseReg, Op1: GepOffsetReg, Flags: PtrAddFlags)
1706 .getReg(Idx: 0);
1707 }
1708 }
1709
1710 if (Offset != 0) {
1711 auto OffsetMIB =
1712 MIRBuilder.buildConstant(Res: OffsetTy, Val: Offset);
1713
1714 MIRBuilder.buildPtrAdd(Res: getOrCreateVReg(Val: U), Op0: BaseReg, Op1: OffsetMIB.getReg(Idx: 0),
1715 Flags: PtrAddFlagsWithConst(Offset));
1716 return true;
1717 }
1718
1719 MIRBuilder.buildCopy(Res: getOrCreateVReg(Val: U), Op: BaseReg);
1720 return true;
1721}
1722
1723bool IRTranslator::translateMemFunc(const CallInst &CI,
1724 MachineIRBuilder &MIRBuilder,
1725 unsigned Opcode) {
1726 const Value *SrcPtr = CI.getArgOperand(i: 1);
1727 // If the source is undef, then just emit a nop.
1728 if (isa<UndefValue>(Val: SrcPtr))
1729 return true;
1730
1731 SmallVector<Register, 3> SrcRegs;
1732
1733 unsigned MinPtrSize = UINT_MAX;
1734 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(x: AI) != AE; ++AI) {
1735 Register SrcReg = getOrCreateVReg(Val: **AI);
1736 LLT SrcTy = MRI->getType(Reg: SrcReg);
1737 if (SrcTy.isPointer())
1738 MinPtrSize = std::min<unsigned>(a: SrcTy.getSizeInBits(), b: MinPtrSize);
1739 SrcRegs.push_back(Elt: SrcReg);
1740 }
1741
1742 LLT SizeTy = LLT::integer(SizeInBits: MinPtrSize);
1743
1744 // The size operand should be the minimum of the pointer sizes.
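  // For example (sketch): for a memcpy between a 64-bit and a 32-bit address
  // space, SizeTy is the 32-bit width, so a 64-bit length operand is truncated
  // here before being attached to the generic memory instruction.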
1745 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1746 if (MRI->getType(Reg: SizeOpReg) != SizeTy)
1747 SizeOpReg = MIRBuilder.buildZExtOrTrunc(Res: SizeTy, Op: SizeOpReg).getReg(Idx: 0);
1748
1749 auto ICall = MIRBuilder.buildInstr(Opcode);
1750 for (Register SrcReg : SrcRegs)
1751 ICall.addUse(RegNo: SrcReg);
1752
1753 Align DstAlign;
1754 Align SrcAlign;
1755 unsigned IsVol =
1756 cast<ConstantInt>(Val: CI.getArgOperand(i: CI.arg_size() - 1))->getZExtValue();
1757
1758 ConstantInt *CopySize = nullptr;
1759
1760 if (auto *MCI = dyn_cast<MemCpyInst>(Val: &CI)) {
1761 DstAlign = MCI->getDestAlign().valueOrOne();
1762 SrcAlign = MCI->getSourceAlign().valueOrOne();
1763 CopySize = dyn_cast<ConstantInt>(Val: MCI->getArgOperand(i: 2));
1764 } else if (auto *MMI = dyn_cast<MemMoveInst>(Val: &CI)) {
1765 DstAlign = MMI->getDestAlign().valueOrOne();
1766 SrcAlign = MMI->getSourceAlign().valueOrOne();
1767 CopySize = dyn_cast<ConstantInt>(Val: MMI->getArgOperand(i: 2));
1768 } else {
1769 auto *MSI = cast<MemSetInst>(Val: &CI);
1770 DstAlign = MSI->getDestAlign().valueOrOne();
1771 }
1772
1773 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1774 // We need to propagate the tail call flag from the IR inst as an argument.
1775 // Otherwise, we have to pessimize and assume later that we cannot tail call
1776 // any memory intrinsics.
1777 ICall.addImm(Val: CI.isTailCall() ? 1 : 0);
1778 }
1779
1780 // Create mem operands to store the alignment and volatile info.
1781 MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
1782 MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
1783 if (IsVol) {
1784 LoadFlags |= MachineMemOperand::MOVolatile;
1785 StoreFlags |= MachineMemOperand::MOVolatile;
1786 }
1787
1788 AAMDNodes AAInfo = CI.getAAMetadata();
1789 if (AA && CopySize &&
1790 AA->pointsToConstantMemory(Loc: MemoryLocation(
1791 SrcPtr, LocationSize::precise(Value: CopySize->getZExtValue()), AAInfo))) {
1792 LoadFlags |= MachineMemOperand::MOInvariant;
1793
1794 // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
1795 // but the previous usage implied it did. Probably should check
1796 // isDereferenceableAndAlignedPointer.
1797 LoadFlags |= MachineMemOperand::MODereferenceable;
1798 }
1799
1800 ICall.addMemOperand(
1801 MMO: MF->getMachineMemOperand(PtrInfo: MachinePointerInfo(CI.getArgOperand(i: 0)),
1802 F: StoreFlags, Size: 1, BaseAlignment: DstAlign, AAInfo));
1803 if (Opcode != TargetOpcode::G_MEMSET)
1804 ICall.addMemOperand(MMO: MF->getMachineMemOperand(
1805 PtrInfo: MachinePointerInfo(SrcPtr), F: LoadFlags, Size: 1, BaseAlignment: SrcAlign, AAInfo));
1806
1807 return true;
1808}
1809
1810bool IRTranslator::translateTrap(const CallInst &CI,
1811 MachineIRBuilder &MIRBuilder,
1812 unsigned Opcode) {
1813 StringRef TrapFuncName =
1814 CI.getAttributes().getFnAttr(Kind: "trap-func-name").getValueAsString();
1815 if (TrapFuncName.empty()) {
1816 if (Opcode == TargetOpcode::G_UBSANTRAP) {
1817 uint64_t Code = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 0))->getZExtValue();
1818 MIRBuilder.buildInstr(Opc: Opcode, DstOps: {}, SrcOps: ArrayRef<llvm::SrcOp>{Code});
1819 } else {
1820 MIRBuilder.buildInstr(Opcode);
1821 }
1822 return true;
1823 }
1824
1825 CallLowering::CallLoweringInfo Info;
1826 if (Opcode == TargetOpcode::G_UBSANTRAP)
1827 Info.OrigArgs.push_back(Elt: {getOrCreateVRegs(Val: *CI.getArgOperand(i: 0)),
1828 CI.getArgOperand(i: 0)->getType(), 0});
1829
1830 Info.Callee = MachineOperand::CreateES(SymName: TrapFuncName.data());
1831 Info.CB = &CI;
1832 Info.OrigRet = {Register(), Type::getVoidTy(C&: CI.getContext()), 0};
1833 return CLI->lowerCall(MIRBuilder, Info);
1834}
1835
1836bool IRTranslator::translateVectorInterleave2Intrinsic(
1837 const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1838 assert(CI.getIntrinsicID() == Intrinsic::vector_interleave2 &&
1839 "This function can only be called on the interleave2 intrinsic!");
1840 // Canonicalize interleave2 to G_SHUFFLE_VECTOR (similar to SelectionDAG).
1841 Register Op0 = getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0));
1842 Register Op1 = getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 1));
1843 Register Res = getOrCreateVReg(Val: CI);
1844
1845 LLT OpTy = MRI->getType(Reg: Op0);
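  // For example (sketch): with two <4 x i32> operands the mask from
  // createInterleaveMask(4, 2) is <0, 4, 1, 5, 2, 6, 3, 7>, so lanes of Op0
  // and Op1 alternate in the eight-element result.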
1846 MIRBuilder.buildShuffleVector(Res, Src1: Op0, Src2: Op1,
1847 Mask: createInterleaveMask(VF: OpTy.getNumElements(), NumVecs: 2));
1848
1849 return true;
1850}
1851
1852bool IRTranslator::translateVectorDeinterleave2Intrinsic(
1853 const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1854 assert(CI.getIntrinsicID() == Intrinsic::vector_deinterleave2 &&
1855 "This function can only be called on the deinterleave2 intrinsic!");
1856 // Canonicalize deinterleave2 to shuffles that extract sub-vectors (similar to
1857 // SelectionDAG).
1858 Register Op = getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0));
1859 auto Undef = MIRBuilder.buildUndef(Res: MRI->getType(Reg: Op));
1860 ArrayRef<Register> Res = getOrCreateVRegs(Val: CI);
1861
1862 LLT ResTy = MRI->getType(Reg: Res[0]);
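  // For example (sketch): for an <8 x i32> input the stride masks select the
  // even lanes <0, 2, 4, 6> for Res[0] and the odd lanes <1, 3, 5, 7> for
  // Res[1].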
1863 MIRBuilder.buildShuffleVector(Res: Res[0], Src1: Op, Src2: Undef,
1864 Mask: createStrideMask(Start: 0, Stride: 2, VF: ResTy.getNumElements()));
1865 MIRBuilder.buildShuffleVector(Res: Res[1], Src1: Op, Src2: Undef,
1866 Mask: createStrideMask(Start: 1, Stride: 2, VF: ResTy.getNumElements()));
1867
1868 return true;
1869}
1870
1871void IRTranslator::getStackGuard(Register DstReg,
1872 MachineIRBuilder &MIRBuilder) {
1873 Value *Global =
1874 TLI->getSDagStackGuard(M: *MF->getFunction().getParent(), Libcalls: *Libcalls);
1875 if (!Global) {
1876 LLVMContext &Ctx = MIRBuilder.getContext();
1877 Ctx.diagnose(DI: DiagnosticInfoGeneric("unable to lower stackguard"));
1878 MIRBuilder.buildUndef(Res: DstReg);
1879 return;
1880 }
1881
1882 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1883 MRI->setRegClass(Reg: DstReg, RC: TRI->getPointerRegClass());
1884 auto MIB =
1885 MIRBuilder.buildInstr(Opc: TargetOpcode::LOAD_STACK_GUARD, DstOps: {DstReg}, SrcOps: {});
1886
1887 unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
1888 LLT PtrTy = LLT::pointer(AddressSpace: AddrSpace, SizeInBits: DL->getPointerSizeInBits(AS: AddrSpace));
1889
1890 MachinePointerInfo MPInfo(Global);
1891 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1892 MachineMemOperand::MODereferenceable;
1893 MachineMemOperand *MemRef = MF->getMachineMemOperand(
1894 PtrInfo: MPInfo, f: Flags, MemTy: PtrTy, base_alignment: DL->getPointerABIAlignment(AS: AddrSpace));
1895 MIB.setMemRefs({MemRef});
1896}
1897
1898bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1899 MachineIRBuilder &MIRBuilder) {
1900 ArrayRef<Register> ResRegs = getOrCreateVRegs(Val: CI);
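  // For example (sketch): llvm.uadd.with.overflow.i32 returns an {i32, i1}
  // aggregate, so ResRegs holds two registers here: the 32-bit sum and the
  // overflow flag defined by G_UADDO.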
1901 MIRBuilder.buildInstr(
1902 Opc: Op, DstOps: {ResRegs[0], ResRegs[1]},
1903 SrcOps: {getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0)), getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 1))});
1904
1905 return true;
1906}
1907
1908bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1909 MachineIRBuilder &MIRBuilder) {
1910 Register Dst = getOrCreateVReg(Val: CI);
1911 Register Src0 = getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0));
1912 Register Src1 = getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 1));
1913 uint64_t Scale = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 2))->getZExtValue();
1914 MIRBuilder.buildInstr(Opc: Op, DstOps: {Dst}, SrcOps: { Src0, Src1, Scale });
1915 return true;
1916}
1917
1918unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1919 switch (ID) {
1920 default:
1921 break;
1922 case Intrinsic::acos:
1923 return TargetOpcode::G_FACOS;
1924 case Intrinsic::asin:
1925 return TargetOpcode::G_FASIN;
1926 case Intrinsic::atan:
1927 return TargetOpcode::G_FATAN;
1928 case Intrinsic::atan2:
1929 return TargetOpcode::G_FATAN2;
1930 case Intrinsic::bswap:
1931 return TargetOpcode::G_BSWAP;
1932 case Intrinsic::bitreverse:
1933 return TargetOpcode::G_BITREVERSE;
1934 case Intrinsic::fshl:
1935 return TargetOpcode::G_FSHL;
1936 case Intrinsic::fshr:
1937 return TargetOpcode::G_FSHR;
1938 case Intrinsic::ceil:
1939 return TargetOpcode::G_FCEIL;
1940 case Intrinsic::cos:
1941 return TargetOpcode::G_FCOS;
1942 case Intrinsic::cosh:
1943 return TargetOpcode::G_FCOSH;
1944 case Intrinsic::ctpop:
1945 return TargetOpcode::G_CTPOP;
1946 case Intrinsic::exp:
1947 return TargetOpcode::G_FEXP;
1948 case Intrinsic::exp2:
1949 return TargetOpcode::G_FEXP2;
1950 case Intrinsic::exp10:
1951 return TargetOpcode::G_FEXP10;
1952 case Intrinsic::fabs:
1953 return TargetOpcode::G_FABS;
1954 case Intrinsic::copysign:
1955 return TargetOpcode::G_FCOPYSIGN;
1956 case Intrinsic::minnum:
1957 return TargetOpcode::G_FMINNUM;
1958 case Intrinsic::maxnum:
1959 return TargetOpcode::G_FMAXNUM;
1960 case Intrinsic::minimum:
1961 return TargetOpcode::G_FMINIMUM;
1962 case Intrinsic::maximum:
1963 return TargetOpcode::G_FMAXIMUM;
1964 case Intrinsic::minimumnum:
1965 return TargetOpcode::G_FMINIMUMNUM;
1966 case Intrinsic::maximumnum:
1967 return TargetOpcode::G_FMAXIMUMNUM;
1968 case Intrinsic::canonicalize:
1969 return TargetOpcode::G_FCANONICALIZE;
1970 case Intrinsic::floor:
1971 return TargetOpcode::G_FFLOOR;
1972 case Intrinsic::fma:
1973 return TargetOpcode::G_FMA;
1974 case Intrinsic::log:
1975 return TargetOpcode::G_FLOG;
1976 case Intrinsic::log2:
1977 return TargetOpcode::G_FLOG2;
1978 case Intrinsic::log10:
1979 return TargetOpcode::G_FLOG10;
1980 case Intrinsic::ldexp:
1981 return TargetOpcode::G_FLDEXP;
1982 case Intrinsic::nearbyint:
1983 return TargetOpcode::G_FNEARBYINT;
1984 case Intrinsic::pow:
1985 return TargetOpcode::G_FPOW;
1986 case Intrinsic::powi:
1987 return TargetOpcode::G_FPOWI;
1988 case Intrinsic::rint:
1989 return TargetOpcode::G_FRINT;
1990 case Intrinsic::round:
1991 return TargetOpcode::G_INTRINSIC_ROUND;
1992 case Intrinsic::roundeven:
1993 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1994 case Intrinsic::sin:
1995 return TargetOpcode::G_FSIN;
1996 case Intrinsic::sinh:
1997 return TargetOpcode::G_FSINH;
1998 case Intrinsic::sqrt:
1999 return TargetOpcode::G_FSQRT;
2000 case Intrinsic::tan:
2001 return TargetOpcode::G_FTAN;
2002 case Intrinsic::tanh:
2003 return TargetOpcode::G_FTANH;
2004 case Intrinsic::trunc:
2005 return TargetOpcode::G_INTRINSIC_TRUNC;
2006 case Intrinsic::readcyclecounter:
2007 return TargetOpcode::G_READCYCLECOUNTER;
2008 case Intrinsic::readsteadycounter:
2009 return TargetOpcode::G_READSTEADYCOUNTER;
2010 case Intrinsic::ptrmask:
2011 return TargetOpcode::G_PTRMASK;
2012 case Intrinsic::lrint:
2013 return TargetOpcode::G_INTRINSIC_LRINT;
2014 case Intrinsic::llrint:
2015 return TargetOpcode::G_INTRINSIC_LLRINT;
2016 // FADD/FMUL require checking the FMF, so are handled elsewhere.
2017 case Intrinsic::vector_reduce_fmin:
2018 return TargetOpcode::G_VECREDUCE_FMIN;
2019 case Intrinsic::vector_reduce_fmax:
2020 return TargetOpcode::G_VECREDUCE_FMAX;
2021 case Intrinsic::vector_reduce_fminimum:
2022 return TargetOpcode::G_VECREDUCE_FMINIMUM;
2023 case Intrinsic::vector_reduce_fmaximum:
2024 return TargetOpcode::G_VECREDUCE_FMAXIMUM;
2025 case Intrinsic::vector_reduce_add:
2026 return TargetOpcode::G_VECREDUCE_ADD;
2027 case Intrinsic::vector_reduce_mul:
2028 return TargetOpcode::G_VECREDUCE_MUL;
2029 case Intrinsic::vector_reduce_and:
2030 return TargetOpcode::G_VECREDUCE_AND;
2031 case Intrinsic::vector_reduce_or:
2032 return TargetOpcode::G_VECREDUCE_OR;
2033 case Intrinsic::vector_reduce_xor:
2034 return TargetOpcode::G_VECREDUCE_XOR;
2035 case Intrinsic::vector_reduce_smax:
2036 return TargetOpcode::G_VECREDUCE_SMAX;
2037 case Intrinsic::vector_reduce_smin:
2038 return TargetOpcode::G_VECREDUCE_SMIN;
2039 case Intrinsic::vector_reduce_umax:
2040 return TargetOpcode::G_VECREDUCE_UMAX;
2041 case Intrinsic::vector_reduce_umin:
2042 return TargetOpcode::G_VECREDUCE_UMIN;
2043 case Intrinsic::experimental_vector_compress:
2044 return TargetOpcode::G_VECTOR_COMPRESS;
2045 case Intrinsic::lround:
2046 return TargetOpcode::G_LROUND;
2047 case Intrinsic::llround:
2048 return TargetOpcode::G_LLROUND;
2049 case Intrinsic::get_fpenv:
2050 return TargetOpcode::G_GET_FPENV;
2051 case Intrinsic::get_fpmode:
2052 return TargetOpcode::G_GET_FPMODE;
2053 }
2054 return Intrinsic::not_intrinsic;
2055}
2056
2057bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
2058 Intrinsic::ID ID,
2059 MachineIRBuilder &MIRBuilder) {
2060
2061 unsigned Op = getSimpleIntrinsicOpcode(ID);
2062
2063 // Is this a simple intrinsic?
2064 if (Op == Intrinsic::not_intrinsic)
2065 return false;
2066
2067 // Yes. Let's translate it.
2068 SmallVector<llvm::SrcOp, 4> VRegs;
2069 for (const auto &Arg : CI.args())
2070 VRegs.push_back(Elt: getOrCreateVReg(Val: *Arg));
2071
2072 MIRBuilder.buildInstr(Opc: Op, DstOps: {getOrCreateVReg(Val: CI)}, SrcOps: VRegs,
2073 Flags: MachineInstr::copyFlagsFromInstruction(I: CI));
2074 return true;
2075}
2076
2077 // TODO: Include ConstrainedOps.def when all strict instructions are defined.
2078static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
2079 switch (ID) {
2080 case Intrinsic::experimental_constrained_fadd:
2081 return TargetOpcode::G_STRICT_FADD;
2082 case Intrinsic::experimental_constrained_fsub:
2083 return TargetOpcode::G_STRICT_FSUB;
2084 case Intrinsic::experimental_constrained_fmul:
2085 return TargetOpcode::G_STRICT_FMUL;
2086 case Intrinsic::experimental_constrained_fdiv:
2087 return TargetOpcode::G_STRICT_FDIV;
2088 case Intrinsic::experimental_constrained_frem:
2089 return TargetOpcode::G_STRICT_FREM;
2090 case Intrinsic::experimental_constrained_fma:
2091 return TargetOpcode::G_STRICT_FMA;
2092 case Intrinsic::experimental_constrained_sqrt:
2093 return TargetOpcode::G_STRICT_FSQRT;
2094 case Intrinsic::experimental_constrained_ldexp:
2095 return TargetOpcode::G_STRICT_FLDEXP;
2096 default:
2097 return 0;
2098 }
2099}
2100
2101bool IRTranslator::translateConstrainedFPIntrinsic(
2102 const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
2103 fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
2104
2105 unsigned Opcode = getConstrainedOpcode(ID: FPI.getIntrinsicID());
2106 if (!Opcode)
2107 return false;
2108
2109 uint32_t Flags = MachineInstr::copyFlagsFromInstruction(I: FPI);
2110 if (EB == fp::ExceptionBehavior::ebIgnore)
2111 Flags |= MachineInstr::NoFPExcept;
2112
2113 SmallVector<llvm::SrcOp, 4> VRegs;
2114 for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
2115 VRegs.push_back(Elt: getOrCreateVReg(Val: *FPI.getArgOperand(i: I)));
2116
2117 MIRBuilder.buildInstr(Opc: Opcode, DstOps: {getOrCreateVReg(Val: FPI)}, SrcOps: VRegs, Flags);
2118 return true;
2119}
2120
2121std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
2122 auto VRegs = getOrCreateVRegs(Val: Arg);
2123 if (VRegs.size() != 1)
2124 return std::nullopt;
2125
2126 // Arguments are lowered as a copy of a livein physical register.
2127 auto *VRegDef = MF->getRegInfo().getVRegDef(Reg: VRegs[0]);
2128 if (!VRegDef || !VRegDef->isCopy())
2129 return std::nullopt;
2130 return VRegDef->getOperand(i: 1).getReg().asMCReg();
2131}
2132
2133bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
2134 const DILocalVariable *Var,
2135 const DIExpression *Expr,
2136 const DebugLoc &DL,
2137 MachineIRBuilder &MIRBuilder) {
2138 auto *Arg = dyn_cast<Argument>(Val);
2139 if (!Arg)
2140 return false;
2141
2142 if (!Expr->isEntryValue())
2143 return false;
2144
2145 std::optional<MCRegister> PhysReg = getArgPhysReg(Arg&: *Arg);
2146 if (!PhysReg) {
2147 LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
2148 << ": expression is entry_value but "
2149 << "couldn't find a physical register\n");
2150 LLVM_DEBUG(dbgs() << *Var << "\n");
2151 return true;
2152 }
2153
2154 if (isDeclare) {
2155 // Append an op deref to account for the fact that this is a dbg_declare.
2156 Expr = DIExpression::append(Expr, Ops: dwarf::DW_OP_deref);
2157 MF->setVariableDbgInfo(Var, Expr, Reg: *PhysReg, Loc: DL);
2158 } else {
2159 MIRBuilder.buildDirectDbgValue(Reg: *PhysReg, Variable: Var, Expr);
2160 }
2161
2162 return true;
2163}
2164
2165static unsigned getConvOpcode(Intrinsic::ID ID) {
2166 switch (ID) {
2167 default:
2168 llvm_unreachable("Unexpected intrinsic");
2169 case Intrinsic::experimental_convergence_anchor:
2170 return TargetOpcode::CONVERGENCECTRL_ANCHOR;
2171 case Intrinsic::experimental_convergence_entry:
2172 return TargetOpcode::CONVERGENCECTRL_ENTRY;
2173 case Intrinsic::experimental_convergence_loop:
2174 return TargetOpcode::CONVERGENCECTRL_LOOP;
2175 }
2176}
2177
2178bool IRTranslator::translateConvergenceControlIntrinsic(
2179 const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
2180 MachineInstrBuilder MIB = MIRBuilder.buildInstr(Opcode: getConvOpcode(ID));
2181 Register OutputReg = getOrCreateConvergenceTokenVReg(Token: CI);
2182 MIB.addDef(RegNo: OutputReg);
2183
2184 if (ID == Intrinsic::experimental_convergence_loop) {
2185 auto Bundle = CI.getOperandBundle(ID: LLVMContext::OB_convergencectrl);
2186 assert(Bundle && "Expected a convergence control token.");
2187 Register InputReg =
2188 getOrCreateConvergenceTokenVReg(Token: *Bundle->Inputs[0].get());
2189 MIB.addUse(RegNo: InputReg);
2190 }
2191
2192 return true;
2193}
2194
2195bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
2196 MachineIRBuilder &MIRBuilder) {
2197 if (auto *MI = dyn_cast<AnyMemIntrinsic>(Val: &CI)) {
2198 if (ORE->enabled()) {
2199 if (MemoryOpRemark::canHandle(I: MI, TLI: *LibInfo)) {
2200 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2201 R.visit(I: MI);
2202 }
2203 }
2204 }
2205
2206 // If this is a simple intrinsic (that is, we just need to add a def of a
2207 // vreg and uses for each arg operand), then translate it.
2208 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
2209 return true;
2210
2211 switch (ID) {
2212 default:
2213 break;
2214 case Intrinsic::lifetime_start:
2215 case Intrinsic::lifetime_end: {
2216 // No stack colouring in O0, discard region information.
2217 if (MF->getTarget().getOptLevel() == CodeGenOptLevel::None ||
2218 MF->getFunction().hasOptNone())
2219 return true;
2220
2221 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
2222 : TargetOpcode::LIFETIME_END;
2223
2224 const AllocaInst *AI = dyn_cast<AllocaInst>(Val: CI.getArgOperand(i: 0));
2225 if (!AI || !AI->isStaticAlloca())
2226 return true;
2227
2228 MIRBuilder.buildInstr(Opcode: Op).addFrameIndex(Idx: getOrCreateFrameIndex(AI: *AI));
2229 return true;
2230 }
2231 case Intrinsic::fake_use: {
2232 SmallVector<llvm::SrcOp, 4> VRegs;
2233 for (const auto &Arg : CI.args())
2234 llvm::append_range(C&: VRegs, R: getOrCreateVRegs(Val: *Arg));
2235 MIRBuilder.buildInstr(Opc: TargetOpcode::FAKE_USE, DstOps: {}, SrcOps: VRegs);
2236 MF->setHasFakeUses(true);
2237 return true;
2238 }
2239 case Intrinsic::dbg_declare: {
2240 const DbgDeclareInst &DI = cast<DbgDeclareInst>(Val: CI);
2241 assert(DI.getVariable() && "Missing variable");
2242 translateDbgDeclareRecord(Address: DI.getAddress(), HasArgList: DI.hasArgList(), Variable: DI.getVariable(),
2243 Expression: DI.getExpression(), DL: DI.getDebugLoc(), MIRBuilder);
2244 return true;
2245 }
2246 case Intrinsic::dbg_label: {
2247 const DbgLabelInst &DI = cast<DbgLabelInst>(Val: CI);
2248 assert(DI.getLabel() && "Missing label");
2249
2250 assert(DI.getLabel()->isValidLocationForIntrinsic(
2251 MIRBuilder.getDebugLoc()) &&
2252 "Expected inlined-at fields to agree");
2253
2254 MIRBuilder.buildDbgLabel(Label: DI.getLabel());
2255 return true;
2256 }
2257 case Intrinsic::vaend:
2258 // No target I know of cares about va_end. Certainly no in-tree target
2259 // does. Simplest intrinsic ever!
2260 return true;
2261 case Intrinsic::vastart: {
2262 Value *Ptr = CI.getArgOperand(i: 0);
2263 unsigned ListSize = TLI->getVaListSizeInBits(DL: *DL) / 8;
2264 Align Alignment = getKnownAlignment(V: Ptr, DL: *DL);
2265
2266 MIRBuilder.buildInstr(Opc: TargetOpcode::G_VASTART, DstOps: {}, SrcOps: {getOrCreateVReg(Val: *Ptr)})
2267 .addMemOperand(MMO: MF->getMachineMemOperand(PtrInfo: MachinePointerInfo(Ptr),
2268 F: MachineMemOperand::MOStore,
2269 Size: ListSize, BaseAlignment: Alignment));
2270 return true;
2271 }
2272 case Intrinsic::dbg_assign:
2273 // A dbg.assign is a dbg.value with more information about stack locations,
2274 // typically produced during optimisation of variables with leaked
2275 // addresses. We can treat it like a normal dbg_value intrinsic here; to
2276 // benefit from the full analysis of stack/SSA locations, GlobalISel would
2277 // need to register for and use the AssignmentTrackingAnalysis pass.
2278 [[fallthrough]];
2279 case Intrinsic::dbg_value: {
2280 // This form of DBG_VALUE is target-independent.
2281 const DbgValueInst &DI = cast<DbgValueInst>(Val: CI);
2282 translateDbgValueRecord(V: DI.getValue(), HasArgList: DI.hasArgList(), Variable: DI.getVariable(),
2283 Expression: DI.getExpression(), DL: DI.getDebugLoc(), MIRBuilder);
2284 return true;
2285 }
2286 case Intrinsic::uadd_with_overflow:
2287 return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_UADDO, MIRBuilder);
2288 case Intrinsic::sadd_with_overflow:
2289 return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_SADDO, MIRBuilder);
2290 case Intrinsic::usub_with_overflow:
2291 return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_USUBO, MIRBuilder);
2292 case Intrinsic::ssub_with_overflow:
2293 return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_SSUBO, MIRBuilder);
2294 case Intrinsic::umul_with_overflow:
2295 return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_UMULO, MIRBuilder);
2296 case Intrinsic::smul_with_overflow:
2297 return translateOverflowIntrinsic(CI, Op: TargetOpcode::G_SMULO, MIRBuilder);
2298 case Intrinsic::uadd_sat:
2299 return translateBinaryOp(Opcode: TargetOpcode::G_UADDSAT, U: CI, MIRBuilder);
2300 case Intrinsic::sadd_sat:
2301 return translateBinaryOp(Opcode: TargetOpcode::G_SADDSAT, U: CI, MIRBuilder);
2302 case Intrinsic::usub_sat:
2303 return translateBinaryOp(Opcode: TargetOpcode::G_USUBSAT, U: CI, MIRBuilder);
2304 case Intrinsic::ssub_sat:
2305 return translateBinaryOp(Opcode: TargetOpcode::G_SSUBSAT, U: CI, MIRBuilder);
2306 case Intrinsic::ushl_sat:
2307 return translateBinaryOp(Opcode: TargetOpcode::G_USHLSAT, U: CI, MIRBuilder);
2308 case Intrinsic::sshl_sat:
2309 return translateBinaryOp(Opcode: TargetOpcode::G_SSHLSAT, U: CI, MIRBuilder);
2310 case Intrinsic::umin:
2311 return translateBinaryOp(Opcode: TargetOpcode::G_UMIN, U: CI, MIRBuilder);
2312 case Intrinsic::umax:
2313 return translateBinaryOp(Opcode: TargetOpcode::G_UMAX, U: CI, MIRBuilder);
2314 case Intrinsic::smin:
2315 return translateBinaryOp(Opcode: TargetOpcode::G_SMIN, U: CI, MIRBuilder);
2316 case Intrinsic::smax:
2317 return translateBinaryOp(Opcode: TargetOpcode::G_SMAX, U: CI, MIRBuilder);
2318 case Intrinsic::abs:
2319 // TODO: Preserve "int min is poison" arg in GMIR?
2320 return translateUnaryOp(Opcode: TargetOpcode::G_ABS, U: CI, MIRBuilder);
2321 case Intrinsic::smul_fix:
2322 return translateFixedPointIntrinsic(Op: TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2323 case Intrinsic::umul_fix:
2324 return translateFixedPointIntrinsic(Op: TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2325 case Intrinsic::smul_fix_sat:
2326 return translateFixedPointIntrinsic(Op: TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2327 case Intrinsic::umul_fix_sat:
2328 return translateFixedPointIntrinsic(Op: TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2329 case Intrinsic::sdiv_fix:
2330 return translateFixedPointIntrinsic(Op: TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2331 case Intrinsic::udiv_fix:
2332 return translateFixedPointIntrinsic(Op: TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2333 case Intrinsic::sdiv_fix_sat:
2334 return translateFixedPointIntrinsic(Op: TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2335 case Intrinsic::udiv_fix_sat:
2336 return translateFixedPointIntrinsic(Op: TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2337 case Intrinsic::fmuladd: {
2338 const TargetMachine &TM = MF->getTarget();
2339 Register Dst = getOrCreateVReg(Val: CI);
2340 Register Op0 = getOrCreateVReg(Val: *CI.getArgOperand(i: 0));
2341 Register Op1 = getOrCreateVReg(Val: *CI.getArgOperand(i: 1));
2342 Register Op2 = getOrCreateVReg(Val: *CI.getArgOperand(i: 2));
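    // For example (sketch): when FP operation fusion is allowed and the target
    // reports FMA as profitable, llvm.fmuladd(a, b, c) becomes a single G_FMA;
    // otherwise it is expanded to G_FMUL followed by G_FADD below.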
2343 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
2344 TLI->isFMAFasterThanFMulAndFAdd(MF: *MF,
2345 TLI->getValueType(DL: *DL, Ty: CI.getType()))) {
2346 // TODO: Revisit this to see if we should move this part of the
2347 // lowering to the combiner.
2348 MIRBuilder.buildFMA(Dst, Src0: Op0, Src1: Op1, Src2: Op2,
2349 Flags: MachineInstr::copyFlagsFromInstruction(I: CI));
2350 } else {
2351 LLT Ty = getLLTForType(Ty&: *CI.getType(), DL: *DL);
2352 auto FMul = MIRBuilder.buildFMul(
2353 Dst: Ty, Src0: Op0, Src1: Op1, Flags: MachineInstr::copyFlagsFromInstruction(I: CI));
2354 MIRBuilder.buildFAdd(Dst, Src0: FMul, Src1: Op2,
2355 Flags: MachineInstr::copyFlagsFromInstruction(I: CI));
2356 }
2357 return true;
2358 }
2359 case Intrinsic::frexp: {
2360 ArrayRef<Register> VRegs = getOrCreateVRegs(Val: CI);
2361 MIRBuilder.buildFFrexp(Fract: VRegs[0], Exp: VRegs[1],
2362 Src: getOrCreateVReg(Val: *CI.getArgOperand(i: 0)),
2363 Flags: MachineInstr::copyFlagsFromInstruction(I: CI));
2364 return true;
2365 }
2366 case Intrinsic::modf: {
2367 ArrayRef<Register> VRegs = getOrCreateVRegs(Val: CI);
2368 MIRBuilder.buildModf(Fract: VRegs[0], Int: VRegs[1],
2369 Src: getOrCreateVReg(Val: *CI.getArgOperand(i: 0)),
2370 Flags: MachineInstr::copyFlagsFromInstruction(I: CI));
2371 return true;
2372 }
2373 case Intrinsic::sincos: {
2374 ArrayRef<Register> VRegs = getOrCreateVRegs(Val: CI);
2375 MIRBuilder.buildFSincos(Sin: VRegs[0], Cos: VRegs[1],
2376 Src: getOrCreateVReg(Val: *CI.getArgOperand(i: 0)),
2377 Flags: MachineInstr::copyFlagsFromInstruction(I: CI));
2378 return true;
2379 }
2380 case Intrinsic::fptosi_sat:
2381 MIRBuilder.buildFPTOSI_SAT(Dst: getOrCreateVReg(Val: CI),
2382 Src0: getOrCreateVReg(Val: *CI.getArgOperand(i: 0)));
2383 return true;
2384 case Intrinsic::fptoui_sat:
2385 MIRBuilder.buildFPTOUI_SAT(Dst: getOrCreateVReg(Val: CI),
2386 Src0: getOrCreateVReg(Val: *CI.getArgOperand(i: 0)));
2387 return true;
2388 case Intrinsic::memcpy_inline:
2389 return translateMemFunc(CI, MIRBuilder, Opcode: TargetOpcode::G_MEMCPY_INLINE);
2390 case Intrinsic::memcpy:
2391 return translateMemFunc(CI, MIRBuilder, Opcode: TargetOpcode::G_MEMCPY);
2392 case Intrinsic::memmove:
2393 return translateMemFunc(CI, MIRBuilder, Opcode: TargetOpcode::G_MEMMOVE);
2394 case Intrinsic::memset:
2395 return translateMemFunc(CI, MIRBuilder, Opcode: TargetOpcode::G_MEMSET);
2396 case Intrinsic::eh_typeid_for: {
2397 GlobalValue *GV = ExtractTypeInfo(V: CI.getArgOperand(i: 0));
2398 Register Reg = getOrCreateVReg(Val: CI);
2399 unsigned TypeID = MF->getTypeIDFor(TI: GV);
2400 MIRBuilder.buildConstant(Res: Reg, Val: TypeID);
2401 return true;
2402 }
2403 case Intrinsic::objectsize:
2404 llvm_unreachable("llvm.objectsize.* should have been lowered already");
2405
2406 case Intrinsic::is_constant:
2407 llvm_unreachable("llvm.is.constant.* should have been lowered already");
2408
2409 case Intrinsic::stackguard:
2410 getStackGuard(DstReg: getOrCreateVReg(Val: CI), MIRBuilder);
2411 return true;
2412 case Intrinsic::stackprotector: {
2413 LLT PtrTy = getLLTForType(Ty&: *CI.getArgOperand(i: 0)->getType(), DL: *DL);
2414 Register GuardVal;
2415 if (TLI->useLoadStackGuardNode(M: *CI.getModule())) {
2416 GuardVal = MRI->createGenericVirtualRegister(Ty: PtrTy);
2417 getStackGuard(DstReg: GuardVal, MIRBuilder);
2418 } else
2419 GuardVal = getOrCreateVReg(Val: *CI.getArgOperand(i: 0)); // The guard's value.
2420
2421 AllocaInst *Slot = cast<AllocaInst>(Val: CI.getArgOperand(i: 1));
2422 int FI = getOrCreateFrameIndex(AI: *Slot);
2423 MF->getFrameInfo().setStackProtectorIndex(FI);
2424
2425 MIRBuilder.buildStore(
2426 Val: GuardVal, Addr: getOrCreateVReg(Val: *Slot),
2427 MMO&: *MF->getMachineMemOperand(PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI),
2428 f: MachineMemOperand::MOStore |
2429 MachineMemOperand::MOVolatile,
2430 MemTy: PtrTy, base_alignment: Align(8)));
2431 return true;
2432 }
2433 case Intrinsic::stacksave: {
2434 MIRBuilder.buildInstr(Opc: TargetOpcode::G_STACKSAVE, DstOps: {getOrCreateVReg(Val: CI)}, SrcOps: {});
2435 return true;
2436 }
2437 case Intrinsic::stackrestore: {
2438 MIRBuilder.buildInstr(Opc: TargetOpcode::G_STACKRESTORE, DstOps: {},
2439 SrcOps: {getOrCreateVReg(Val: *CI.getArgOperand(i: 0))});
2440 return true;
2441 }
2442 case Intrinsic::cttz:
2443 case Intrinsic::ctlz: {
2444 ConstantInt *Cst = cast<ConstantInt>(Val: CI.getArgOperand(i: 1));
2445 bool isTrailing = ID == Intrinsic::cttz;
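    // For example (sketch): "llvm.cttz.i32(%x, i1 true)" asserts that %x is
    // nonzero and lowers to G_CTTZ_ZERO_UNDEF, whereas an i1 false second
    // argument selects the fully defined G_CTTZ.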
2446 unsigned Opcode = isTrailing
2447 ? Cst->isZero() ? TargetOpcode::G_CTTZ
2448 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2449 : Cst->isZero() ? TargetOpcode::G_CTLZ
2450 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2451 MIRBuilder.buildInstr(Opc: Opcode, DstOps: {getOrCreateVReg(Val: CI)},
2452 SrcOps: {getOrCreateVReg(Val: *CI.getArgOperand(i: 0))});
2453 return true;
2454 }
2455 case Intrinsic::invariant_start: {
2456 MIRBuilder.buildUndef(Res: getOrCreateVReg(Val: CI));
2457 return true;
2458 }
2459 case Intrinsic::invariant_end:
2460 return true;
2461 case Intrinsic::expect:
2462 case Intrinsic::expect_with_probability:
2463 case Intrinsic::annotation:
2464 case Intrinsic::ptr_annotation:
2465 case Intrinsic::launder_invariant_group:
2466 case Intrinsic::strip_invariant_group: {
2467 // Drop the intrinsic, but forward the value.
2468 MIRBuilder.buildCopy(Res: getOrCreateVReg(Val: CI),
2469 Op: getOrCreateVReg(Val: *CI.getArgOperand(i: 0)));
2470 return true;
2471 }
2472 case Intrinsic::assume:
2473 case Intrinsic::experimental_noalias_scope_decl:
2474 case Intrinsic::var_annotation:
2475 case Intrinsic::sideeffect:
2476 // Discard annotate attributes, assumptions, and artificial side-effects.
2477 return true;
2478 case Intrinsic::read_volatile_register:
2479 case Intrinsic::read_register: {
2480 Value *Arg = CI.getArgOperand(i: 0);
2481 MIRBuilder
2482 .buildInstr(Opc: TargetOpcode::G_READ_REGISTER, DstOps: {getOrCreateVReg(Val: CI)}, SrcOps: {})
2483 .addMetadata(MD: cast<MDNode>(Val: cast<MetadataAsValue>(Val: Arg)->getMetadata()));
2484 return true;
2485 }
2486 case Intrinsic::write_register: {
2487 Value *Arg = CI.getArgOperand(i: 0);
2488 MIRBuilder.buildInstr(Opcode: TargetOpcode::G_WRITE_REGISTER)
2489 .addMetadata(MD: cast<MDNode>(Val: cast<MetadataAsValue>(Val: Arg)->getMetadata()))
2490 .addUse(RegNo: getOrCreateVReg(Val: *CI.getArgOperand(i: 1)));
2491 return true;
2492 }
2493 case Intrinsic::localescape: {
2494 MachineBasicBlock &EntryMBB = MF->front();
2495 StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(Name: MF->getName());
2496
2497 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2498 // is the same on all targets.
2499 for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
2500 Value *Arg = CI.getArgOperand(i: Idx)->stripPointerCasts();
2501 if (isa<ConstantPointerNull>(Val: Arg))
2502 continue; // Skip null pointers. They represent a hole in index space.
2503
2504 int FI = getOrCreateFrameIndex(AI: *cast<AllocaInst>(Val: Arg));
2505 MCSymbol *FrameAllocSym =
2506 MF->getContext().getOrCreateFrameAllocSymbol(FuncName: EscapedName, Idx);
2507
2508 // This should be inserted at the start of the entry block.
2509 auto LocalEscape =
2510 MIRBuilder.buildInstrNoInsert(Opcode: TargetOpcode::LOCAL_ESCAPE)
2511 .addSym(Sym: FrameAllocSym)
2512 .addFrameIndex(Idx: FI);
2513
2514 EntryMBB.insert(I: EntryMBB.begin(), MI: LocalEscape);
2515 }
2516
2517 return true;
2518 }
2519 case Intrinsic::vector_reduce_fadd:
2520 case Intrinsic::vector_reduce_fmul: {
2521 // Need to check for the reassoc flag to decide whether we want a
2522 // sequential reduction opcode or not.
2523 Register Dst = getOrCreateVReg(Val: CI);
2524 Register ScalarSrc = getOrCreateVReg(Val: *CI.getArgOperand(i: 0));
2525 Register VecSrc = getOrCreateVReg(Val: *CI.getArgOperand(i: 1));
2526 unsigned Opc = 0;
2527 if (!CI.hasAllowReassoc()) {
2528 // The sequential ordering case.
2529 Opc = ID == Intrinsic::vector_reduce_fadd
2530 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2531 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2532 if (!MRI->getType(Reg: VecSrc).isVector())
2533 Opc = ID == Intrinsic::vector_reduce_fadd ? TargetOpcode::G_FADD
2534 : TargetOpcode::G_FMUL;
2535 MIRBuilder.buildInstr(Opc, DstOps: {Dst}, SrcOps: {ScalarSrc, VecSrc},
2536 Flags: MachineInstr::copyFlagsFromInstruction(I: CI));
2537 return true;
2538 }
2539 // We split the operation into a separate G_FADD/G_FMUL + the reduce,
2540 // since the associativity doesn't matter.
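    // For example (sketch): a reassoc "llvm.vector.reduce.fadd(%acc, %vec)"
    // becomes a G_VECREDUCE_FADD of %vec followed by a scalar G_FADD with %acc,
    // while the non-reassoc path above uses the strictly ordered
    // G_VECREDUCE_SEQ_FADD.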
2541 unsigned ScalarOpc;
2542 if (ID == Intrinsic::vector_reduce_fadd) {
2543 Opc = TargetOpcode::G_VECREDUCE_FADD;
2544 ScalarOpc = TargetOpcode::G_FADD;
2545 } else {
2546 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2547 ScalarOpc = TargetOpcode::G_FMUL;
2548 }
2549 LLT DstTy = MRI->getType(Reg: Dst);
2550 auto Rdx = MIRBuilder.buildInstr(
2551 Opc, DstOps: {DstTy}, SrcOps: {VecSrc}, Flags: MachineInstr::copyFlagsFromInstruction(I: CI));
2552 MIRBuilder.buildInstr(Opc: ScalarOpc, DstOps: {Dst}, SrcOps: {ScalarSrc, Rdx},
2553 Flags: MachineInstr::copyFlagsFromInstruction(I: CI));
2554
2555 return true;
2556 }
2557 case Intrinsic::trap:
2558 return translateTrap(CI, MIRBuilder, Opcode: TargetOpcode::G_TRAP);
2559 case Intrinsic::debugtrap:
2560 return translateTrap(CI, MIRBuilder, Opcode: TargetOpcode::G_DEBUGTRAP);
2561 case Intrinsic::ubsantrap:
2562 return translateTrap(CI, MIRBuilder, Opcode: TargetOpcode::G_UBSANTRAP);
2563 case Intrinsic::allow_runtime_check:
2564 case Intrinsic::allow_ubsan_check:
2565 MIRBuilder.buildCopy(Res: getOrCreateVReg(Val: CI),
2566 Op: getOrCreateVReg(Val: *ConstantInt::getTrue(Ty: CI.getType())));
2567 return true;
2568 case Intrinsic::amdgcn_cs_chain:
2569 case Intrinsic::amdgcn_call_whole_wave:
2570 return translateCallBase(CB: CI, MIRBuilder);
2571 case Intrinsic::fptrunc_round: {
2572 uint32_t Flags = MachineInstr::copyFlagsFromInstruction(I: CI);
2573
2574 // Convert the rounding-mode metadata argument to a RoundingMode value.
2575 Metadata *MD = cast<MetadataAsValue>(Val: CI.getArgOperand(i: 1))->getMetadata();
2576 std::optional<RoundingMode> RoundMode =
2577 convertStrToRoundingMode(cast<MDString>(Val: MD)->getString());
2578
2579 // Add the rounding mode as an integer immediate.
2580 MIRBuilder
2581 .buildInstr(Opc: TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2582 DstOps: {getOrCreateVReg(Val: CI)},
2583 SrcOps: {getOrCreateVReg(Val: *CI.getArgOperand(i: 0))}, Flags)
2584 .addImm(Val: (int)*RoundMode);
2585
2586 return true;
2587 }
2588 case Intrinsic::is_fpclass: {
2589 Value *FpValue = CI.getOperand(i_nocapture: 0);
2590 ConstantInt *TestMaskValue = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 1));
2591
2592 MIRBuilder
2593 .buildInstr(Opc: TargetOpcode::G_IS_FPCLASS, DstOps: {getOrCreateVReg(Val: CI)},
2594 SrcOps: {getOrCreateVReg(Val: *FpValue)})
2595 .addImm(Val: TestMaskValue->getZExtValue());
2596
2597 return true;
2598 }
2599 case Intrinsic::set_fpenv: {
2600 Value *FPEnv = CI.getOperand(i_nocapture: 0);
2601 MIRBuilder.buildSetFPEnv(Src: getOrCreateVReg(Val: *FPEnv));
2602 return true;
2603 }
2604 case Intrinsic::reset_fpenv:
2605 MIRBuilder.buildResetFPEnv();
2606 return true;
2607 case Intrinsic::set_fpmode: {
2608 Value *FPState = CI.getOperand(i_nocapture: 0);
2609 MIRBuilder.buildSetFPMode(Src: getOrCreateVReg(Val: *FPState));
2610 return true;
2611 }
2612 case Intrinsic::reset_fpmode:
2613 MIRBuilder.buildResetFPMode();
2614 return true;
2615 case Intrinsic::get_rounding:
2616 MIRBuilder.buildGetRounding(Dst: getOrCreateVReg(Val: CI));
2617 return true;
2618 case Intrinsic::set_rounding:
2619 MIRBuilder.buildSetRounding(Src: getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0)));
2620 return true;
2621 case Intrinsic::vscale: {
2622 MIRBuilder.buildVScale(Res: getOrCreateVReg(Val: CI), MinElts: 1);
2623 return true;
2624 }
2625 case Intrinsic::scmp:
2626 MIRBuilder.buildSCmp(Res: getOrCreateVReg(Val: CI),
2627 Op0: getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0)),
2628 Op1: getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 1)));
2629 return true;
2630 case Intrinsic::ucmp:
2631 MIRBuilder.buildUCmp(Res: getOrCreateVReg(Val: CI),
2632 Op0: getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 0)),
2633 Op1: getOrCreateVReg(Val: *CI.getOperand(i_nocapture: 1)));
2634 return true;
2635 case Intrinsic::vector_extract:
2636 return translateExtractVector(U: CI, MIRBuilder);
2637 case Intrinsic::vector_insert:
2638 return translateInsertVector(U: CI, MIRBuilder);
2639 case Intrinsic::stepvector: {
2640 MIRBuilder.buildStepVector(Res: getOrCreateVReg(Val: CI), Step: 1);
2641 return true;
2642 }
2643 case Intrinsic::prefetch: {
2644 Value *Addr = CI.getOperand(i_nocapture: 0);
2645 unsigned RW = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 1))->getZExtValue();
2646 unsigned Locality = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 2))->getZExtValue();
2647 unsigned CacheType = cast<ConstantInt>(Val: CI.getOperand(i_nocapture: 3))->getZExtValue();
2648
2649 auto Flags = RW ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
2650 auto &MMO = *MF->getMachineMemOperand(PtrInfo: MachinePointerInfo(Addr), f: Flags,
2651 MemTy: LLT(), base_alignment: Align());
2652
2653 MIRBuilder.buildPrefetch(Addr: getOrCreateVReg(Val: *Addr), RW, Locality, CacheType,
2654 MMO);
2655
2656 return true;
2657 }
2658
2659 case Intrinsic::vector_interleave2:
2660 case Intrinsic::vector_deinterleave2: {
2661 // Both intrinsics have at least one operand.
2662 Value *Op0 = CI.getOperand(i_nocapture: 0);
2663 LLT ResTy = getLLTForType(Ty&: *Op0->getType(), DL: MIRBuilder.getDataLayout());
2664 if (!ResTy.isFixedVector())
2665 return false;
2666
2667 if (CI.getIntrinsicID() == Intrinsic::vector_interleave2)
2668 return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
2669
2670 return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
2671 }
2672
2673#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2674 case Intrinsic::INTRINSIC:
2675#include "llvm/IR/ConstrainedOps.def"
2676 return translateConstrainedFPIntrinsic(FPI: cast<ConstrainedFPIntrinsic>(Val: CI),
2677 MIRBuilder);
2678 case Intrinsic::experimental_convergence_anchor:
2679 case Intrinsic::experimental_convergence_entry:
2680 case Intrinsic::experimental_convergence_loop:
2681 return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
2682 case Intrinsic::reloc_none: {
2683 Metadata *MD = cast<MetadataAsValue>(Val: CI.getArgOperand(i: 0))->getMetadata();
2684 StringRef SymbolName = cast<MDString>(Val: MD)->getString();
2685 MIRBuilder.buildInstr(Opcode: TargetOpcode::RELOC_NONE)
2686 .addExternalSymbol(FnName: SymbolName.data());
2687 return true;
2688 }
2689 }
2690 return false;
2691}
2692
2693bool IRTranslator::translateInlineAsm(const CallBase &CB,
2694 MachineIRBuilder &MIRBuilder) {
2695 if (!mayTranslateUserTypes(U: CB))
2696 return false;
2697
2698 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2699
2700 if (!ALI) {
2701 LLVM_DEBUG(
2702 dbgs() << "Inline asm lowering is not supported for this target yet\n");
2703 return false;
2704 }
2705
2706 return ALI->lowerInlineAsm(
2707 MIRBuilder, CB, GetOrCreateVRegs: [&](const Value &Val) { return getOrCreateVRegs(Val); });
2708}
2709
2710bool IRTranslator::translateCallBase(const CallBase &CB,
2711 MachineIRBuilder &MIRBuilder) {
2712 ArrayRef<Register> Res = getOrCreateVRegs(Val: CB);
2713
2714 SmallVector<ArrayRef<Register>, 8> Args;
2715 Register SwiftInVReg = 0;
2716 Register SwiftErrorVReg = 0;
2717 for (const auto &Arg : CB.args()) {
2718 if (CLI->supportSwiftError() && isSwiftError(V: Arg)) {
2719 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2720 LLT Ty = getLLTForType(Ty&: *Arg->getType(), DL: *DL);
2721 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2722 MIRBuilder.buildCopy(Res: SwiftInVReg, Op: SwiftError.getOrCreateVRegUseAt(
2723 &CB, &MIRBuilder.getMBB(), Arg));
2724 Args.emplace_back(Args: ArrayRef(SwiftInVReg));
2725 SwiftErrorVReg =
2726 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2727 continue;
2728 }
2729 Args.push_back(Elt: getOrCreateVRegs(Val: *Arg));
2730 }
2731
2732 if (auto *CI = dyn_cast<CallInst>(Val: &CB)) {
2733 if (ORE->enabled()) {
2734 if (MemoryOpRemark::canHandle(I: CI, TLI: *LibInfo)) {
2735 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2736 R.visit(I: CI);
2737 }
2738 }
2739 }
2740
2741 std::optional<CallLowering::PtrAuthInfo> PAI;
2742 if (auto Bundle = CB.getOperandBundle(ID: LLVMContext::OB_ptrauth)) {
2743 // Functions should never be ptrauth-called directly.
2744 assert(!CB.getCalledFunction() && "invalid direct ptrauth call");
2745
2746 const Value *Key = Bundle->Inputs[0];
2747 const Value *Discriminator = Bundle->Inputs[1];
2748
2749 // Look through ptrauth constants to try to eliminate the matching bundle
2750 // and turn this into a direct call with no ptrauth.
2751 // CallLowering will use the raw pointer if it doesn't find the PAI.
2752 const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(Val: CB.getCalledOperand());
2753 if (!CalleeCPA || !isa<Function>(Val: CalleeCPA->getPointer()) ||
2754 !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, DL: *DL)) {
2755 // If we can't make it direct, package the bundle into PAI.
2756 Register DiscReg = getOrCreateVReg(Val: *Discriminator);
2757 PAI = CallLowering::PtrAuthInfo{.Key: cast<ConstantInt>(Val: Key)->getZExtValue(),
2758 .Discriminator: DiscReg};
2759 }
2760 }
2761
2762 Register ConvergenceCtrlToken = 0;
2763 if (auto Bundle = CB.getOperandBundle(ID: LLVMContext::OB_convergencectrl)) {
2764 const auto &Token = *Bundle->Inputs[0].get();
2765 ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
2766 }
2767
2768 // We don't set HasCalls on MFI here yet because call lowering may decide to
2769 // optimize into tail calls. Instead, we defer that to selection where a final
2770 // scan is done to check if any instructions are calls.
2771 bool Success = CLI->lowerCall(
2772 MIRBuilder, Call: CB, ResRegs: Res, ArgRegs: Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
2773 GetCalleeReg: [&]() { return getOrCreateVReg(Val: *CB.getCalledOperand()); });
2774
2775 // Check if we just inserted a tail call.
2776 if (Success) {
2777 assert(!HasTailCall && "Can't tail call return twice from block?");
2778 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2779 HasTailCall = TII->isTailCall(Inst: *std::prev(x: MIRBuilder.getInsertPt()));
2780 }
2781
2782 return Success;
2783}
2784
2785bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2786 if (!mayTranslateUserTypes(U))
2787 return false;
2788
2789 const CallInst &CI = cast<CallInst>(Val: U);
2790 const Function *F = CI.getCalledFunction();
2791
2792 // FIXME: support Windows dllimport function calls and calls through
2793 // weak symbols.
2794 if (F && (F->hasDLLImportStorageClass() ||
2795 (MF->getTarget().getTargetTriple().isOSWindows() &&
2796 F->hasExternalWeakLinkage())))
2797 return false;
2798
2799 // FIXME: support control flow guard targets.
2800 if (CI.countOperandBundlesOfType(ID: LLVMContext::OB_cfguardtarget))
2801 return false;
2802
2803 // FIXME: support statepoints and related.
2804 if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(Val: U))
2805 return false;
2806
2807 if (CI.isInlineAsm())
2808 return translateInlineAsm(CB: CI, MIRBuilder);
2809
2810 Intrinsic::ID ID = F ? F->getIntrinsicID() : Intrinsic::not_intrinsic;
2811 if (!F || ID == Intrinsic::not_intrinsic) {
2812 if (translateCallBase(CB: CI, MIRBuilder)) {
2813 diagnoseDontCall(CI);
2814 return true;
2815 }
2816 return false;
2817 }
2818
2819 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2820
2821 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2822 return true;
2823
2824 SmallVector<TargetLowering::IntrinsicInfo> Infos;
2825 TLI->getTgtMemIntrinsic(Infos, I: CI, MF&: *MF, Intrinsic: ID);
2826
2827 return translateIntrinsic(CB: CI, ID, MIRBuilder, TgtMemIntrinsicInfos: Infos);
2828}
2829
2830/// Translate a call or callbr to an intrinsic.
2831bool IRTranslator::translateIntrinsic(
2832 const CallBase &CB, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder,
2833 ArrayRef<TargetLowering::IntrinsicInfo> TgtMemIntrinsicInfos) {
2834 ArrayRef<Register> ResultRegs;
2835 if (!CB.getType()->isVoidTy())
2836 ResultRegs = getOrCreateVRegs(Val: CB);
2837
2838 // Ignore the callsite attributes. Backend code is most likely not expecting
2839 // an intrinsic to sometimes have side effects and sometimes not.
2840 MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, Res: ResultRegs);
2841 if (isa<FPMathOperator>(Val: CB))
2842 MIB->copyIRFlags(I: CB);
2843
2844 for (const auto &Arg : enumerate(First: CB.args())) {
2845 // If this is required to be an immediate, don't materialize it in a
2846 // register.
2847 if (CB.paramHasAttr(ArgNo: Arg.index(), Kind: Attribute::ImmArg)) {
2848 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val: Arg.value())) {
2849 // imm arguments are more convenient than cimm (and realistically
2850 // probably sufficient), so use them.
2851 assert(CI->getBitWidth() <= 64 &&
2852 "large intrinsic immediates not handled");
2853 MIB.addImm(Val: CI->getSExtValue());
2854 } else {
2855 MIB.addFPImm(Val: cast<ConstantFP>(Val: Arg.value()));
2856 }
2857 } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Val: Arg.value())) {
2858 auto *MD = MDVal->getMetadata();
2859 auto *MDN = dyn_cast<MDNode>(Val: MD);
2860 if (!MDN) {
2861 if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(Val: MD))
2862 MDN = MDNode::get(Context&: MF->getFunction().getContext(), MDs: ConstMD);
2863 else // This was probably an MDString.
2864 return false;
2865 }
2866 MIB.addMetadata(MD: MDN);
2867 } else {
2868 ArrayRef<Register> VRegs = getOrCreateVRegs(Val: *Arg.value());
2869 if (VRegs.size() > 1)
2870 return false;
2871 MIB.addUse(RegNo: VRegs[0]);
2872 }
2873 }
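// A rough sketch of the resulting MIR for a non-void intrinsic with one
// ordinary operand and one immarg operand (the intrinsic name and vreg
// numbers below are illustrative only):
//   %res:_(s32) = G_INTRINSIC intrinsic(@llvm.example), %x:_(s32), 7
// Depending on the intrinsic's declared attributes, buildIntrinsic may have
// picked G_INTRINSIC_W_SIDE_EFFECTS (or a convergent variant) instead.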
2874
2875 // Add MachineMemOperands for each memory access described by the target.
2876 for (const auto &Info : TgtMemIntrinsicInfos) {
2877 Align Alignment = Info.align.value_or(
2878 u: DL->getABITypeAlign(Ty: Info.memVT.getTypeForEVT(Context&: CB.getContext())));
2879 LLT MemTy = Info.memVT.isSimple()
2880 ? getLLTForMVT(Ty: Info.memVT.getSimpleVT())
2881 : LLT::scalar(SizeInBits: Info.memVT.getStoreSizeInBits());
2882
2883 // TODO: We currently just fall back to address space 0 if
2884 // getTgtMemIntrinsic didn't yield anything useful.
2885 MachinePointerInfo MPI;
2886 if (Info.ptrVal) {
2887 MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
2888 } else if (Info.fallbackAddressSpace) {
2889 MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
2890 }
2891 MIB.addMemOperand(MMO: MF->getMachineMemOperand(
2892 PtrInfo: MPI, f: Info.flags, MemTy, base_alignment: Alignment, AAInfo: CB.getAAMetadata(),
2893 /*Ranges=*/nullptr, SSID: Info.ssid, Ordering: Info.order, FailureOrdering: Info.failureOrder));
2894 }
2895
2896 if (CB.isConvergent()) {
2897 if (auto Bundle = CB.getOperandBundle(ID: LLVMContext::OB_convergencectrl)) {
2898 auto *Token = Bundle->Inputs[0].get();
2899 Register TokenReg = getOrCreateVReg(Val: *Token);
2900 MIB.addUse(RegNo: TokenReg, Flags: RegState::Implicit);
2901 }
2902 }
2903
2904 if (auto Bundle = CB.getOperandBundle(ID: LLVMContext::OB_deactivation_symbol))
2905 MIB->setDeactivationSymbol(MF&: *MF, DS: Bundle->Inputs[0].get());
2906
2907 return true;
2908}
2909
2910bool IRTranslator::findUnwindDestinations(
2911 const BasicBlock *EHPadBB,
2912 BranchProbability Prob,
2913 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2914 &UnwindDests) {
2915 EHPersonality Personality = classifyEHPersonality(
2916 Pers: EHPadBB->getParent()->getFunction().getPersonalityFn());
2917 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2918 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2919 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2920 bool IsSEH = isAsynchronousEHPersonality(Pers: Personality);
2921
2922 if (IsWasmCXX) {
2923 // Ignore this for now.
2924 return false;
2925 }
2926
2927 while (EHPadBB) {
2928 BasicBlock::const_iterator Pad = EHPadBB->getFirstNonPHIIt();
2929 BasicBlock *NewEHPadBB = nullptr;
2930 if (isa<LandingPadInst>(Val: Pad)) {
2931 // Stop on landingpads. They are not funclets.
2932 UnwindDests.emplace_back(Args: &getMBB(BB: *EHPadBB), Args&: Prob);
2933 break;
2934 }
2935 if (isa<CleanupPadInst>(Val: Pad)) {
2936 // Stop on cleanup pads. Cleanups are always funclet entries for all known
2937 // personalities.
2938 UnwindDests.emplace_back(Args: &getMBB(BB: *EHPadBB), Args&: Prob);
2939 UnwindDests.back().first->setIsEHScopeEntry();
2940 UnwindDests.back().first->setIsEHFuncletEntry();
2941 break;
2942 }
2943 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Val&: Pad)) {
2944 // Add the catchpad handlers to the possible destinations.
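// For example (a rough sketch), given:
//   %cs = catchswitch within none [label %catch.a, label %catch.b]
//             unwind label %ehcleanup
// both %catch.a and %catch.b are added as unwind destinations, and the walk
// then continues at %ehcleanup via getUnwindDest() below.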
2945 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2946 UnwindDests.emplace_back(Args: &getMBB(BB: *CatchPadBB), Args&: Prob);
2947 // For MSVC++ and the CLR, catch blocks are funclets and need prologues.
2948 if (IsMSVCCXX || IsCoreCLR)
2949 UnwindDests.back().first->setIsEHFuncletEntry();
2950 if (!IsSEH)
2951 UnwindDests.back().first->setIsEHScopeEntry();
2952 }
2953 NewEHPadBB = CatchSwitch->getUnwindDest();
2954 } else {
2955 continue;
2956 }
2957
2958 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2959 if (BPI && NewEHPadBB)
2960 Prob *= BPI->getEdgeProbability(Src: EHPadBB, Dst: NewEHPadBB);
2961 EHPadBB = NewEHPadBB;
2962 }
2963 return true;
2964}
2965
2966bool IRTranslator::translateInvoke(const User &U,
2967 MachineIRBuilder &MIRBuilder) {
2968 const InvokeInst &I = cast<InvokeInst>(Val: U);
2969 MCContext &Context = MF->getContext();
2970
2971 const BasicBlock *ReturnBB = I.getSuccessor(i: 0);
2972 const BasicBlock *EHPadBB = I.getSuccessor(i: 1);
2973
2974 const Function *Fn = I.getCalledFunction();
2975
2976 // FIXME: support invoking patchpoint and statepoint intrinsics.
2977 if (Fn && Fn->isIntrinsic())
2978 return false;
2979
2980 // FIXME: support deoptimization operand bundles ("deopt" state).
2981 if (I.hasDeoptState())
2982 return false;
2983
2984 // FIXME: support control flow guard targets.
2985 if (I.countOperandBundlesOfType(ID: LLVMContext::OB_cfguardtarget))
2986 return false;
2987
2988 // FIXME: support Windows exception handling.
2989 if (!isa<LandingPadInst>(Val: EHPadBB->getFirstNonPHIIt()))
2990 return false;
2991
2992 // FIXME: support Windows dllimport function calls and calls through
2993 // weak symbols.
2994 if (Fn && (Fn->hasDLLImportStorageClass() ||
2995 (MF->getTarget().getTargetTriple().isOSWindows() &&
2996 Fn->hasExternalWeakLinkage())))
2997 return false;
2998
2999 bool LowerInlineAsm = I.isInlineAsm();
3000 bool NeedEHLabel = true;
3001
3002 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
3003 // the region covered by the try.
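// A rough sketch of the emitted sequence (the labels are temporary symbols):
//   G_INVOKE_REGION_START
//   EH_LABEL <begin>
//   ... lowered call ...
//   EH_LABEL <end>
//   G_BR %return.bb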
3004 MCSymbol *BeginSymbol = nullptr;
3005 if (NeedEHLabel) {
3006 MIRBuilder.buildInstr(Opcode: TargetOpcode::G_INVOKE_REGION_START);
3007 BeginSymbol = Context.createTempSymbol();
3008 MIRBuilder.buildInstr(Opcode: TargetOpcode::EH_LABEL).addSym(Sym: BeginSymbol);
3009 }
3010
3011 if (LowerInlineAsm) {
3012 if (!translateInlineAsm(CB: I, MIRBuilder))
3013 return false;
3014 } else if (!translateCallBase(CB: I, MIRBuilder))
3015 return false;
3016
3017 MCSymbol *EndSymbol = nullptr;
3018 if (NeedEHLabel) {
3019 EndSymbol = Context.createTempSymbol();
3020 MIRBuilder.buildInstr(Opcode: TargetOpcode::EH_LABEL).addSym(Sym: EndSymbol);
3021 }
3022
3023 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3024 BranchProbabilityInfo *BPI = FuncInfo.BPI;
3025 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
3026 BranchProbability EHPadBBProb =
3027 BPI ? BPI->getEdgeProbability(Src: InvokeMBB->getBasicBlock(), Dst: EHPadBB)
3028 : BranchProbability::getZero();
3029
3030 if (!findUnwindDestinations(EHPadBB, Prob: EHPadBBProb, UnwindDests))
3031 return false;
3032
3033 MachineBasicBlock &EHPadMBB = getMBB(BB: *EHPadBB),
3034 &ReturnMBB = getMBB(BB: *ReturnBB);
3035 // Update successor info.
3036 addSuccessorWithProb(Src: InvokeMBB, Dst: &ReturnMBB);
3037 for (auto &UnwindDest : UnwindDests) {
3038 UnwindDest.first->setIsEHPad();
3039 addSuccessorWithProb(Src: InvokeMBB, Dst: UnwindDest.first, Prob: UnwindDest.second);
3040 }
3041 InvokeMBB->normalizeSuccProbs();
3042
3043 if (NeedEHLabel) {
3044 assert(BeginSymbol && "Expected a begin symbol!");
3045 assert(EndSymbol && "Expected an end symbol!");
3046 MF->addInvoke(LandingPad: &EHPadMBB, BeginLabel: BeginSymbol, EndLabel: EndSymbol);
3047 }
3048
3049 MIRBuilder.buildBr(Dest&: ReturnMBB);
3050 return true;
3051}
3052
3053/// The intrinsics currently supported by callbr are implicit control flow
3054/// intrinsics such as amdgcn.kill.
3055bool IRTranslator::translateCallBr(const User &U,
3056 MachineIRBuilder &MIRBuilder) {
3057 if (!mayTranslateUserTypes(U))
3058 return false; // see translateCall
3059
3060 const CallBrInst &I = cast<CallBrInst>(Val: U);
3061 MachineBasicBlock *CallBrMBB = &MIRBuilder.getMBB();
3062
3063 Intrinsic::ID IID = I.getIntrinsicID();
3064 if (I.isInlineAsm()) {
3065 // FIXME: inline asm is not yet supported for callbr in GlobalISel. As soon
3066 // as we add support, we need to handle the indirect asm targets, see
3067 // SelectionDAGBuilder::visitCallBr().
3068 return false;
3069 }
3070 if (!translateIntrinsic(CB: I, ID: IID, MIRBuilder))
3071 return false;
3072
3073 // Retrieve successors.
3074 SmallPtrSet<BasicBlock *, 8> Dests = {I.getDefaultDest()};
3075 MachineBasicBlock *Return = &getMBB(BB: *I.getDefaultDest());
3076
3077 // Update successor info.
3078 addSuccessorWithProb(Src: CallBrMBB, Dst: Return, Prob: BranchProbability::getOne());
3079
3080 // Add indirect targets as successors. For intrinsic callbr, these represent
3081 // implicit control flow (e.g., the "kill" path for amdgcn.kill). We mark them
3082 // with setIsInlineAsmBrIndirectTarget so the machine verifier accepts them as
3083 // valid successors, even though they're not from inline asm.
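// For example (a sketch):
//   callbr void @llvm.amdgcn.kill(i1 %c) to label %cont [label %kill]
// adds %cont as a successor with probability one and %kill with probability
// zero, mirroring the handling below.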
3084 for (BasicBlock *Dest : I.getIndirectDests()) {
3085 MachineBasicBlock &Target = getMBB(BB: *Dest);
3086 Target.setIsInlineAsmBrIndirectTarget();
3087 Target.setLabelMustBeEmitted();
3088 // Don't add duplicate machine successors.
3089 if (Dests.insert(Ptr: Dest).second)
3090 addSuccessorWithProb(Src: CallBrMBB, Dst: &Target, Prob: BranchProbability::getZero());
3091 }
3092
3093 CallBrMBB->normalizeSuccProbs();
3094
3095 // Drop into default successor.
3096 MIRBuilder.buildBr(Dest&: *Return);
3097
3098 return true;
3099}
3100
3101bool IRTranslator::translateLandingPad(const User &U,
3102 MachineIRBuilder &MIRBuilder) {
3103 const LandingPadInst &LP = cast<LandingPadInst>(Val: U);
3104
3105 MachineBasicBlock &MBB = MIRBuilder.getMBB();
3106
3107 MBB.setIsEHPad();
3108
3109 // If there aren't registers to copy the values into (e.g., during SjLj
3110 // exceptions), then don't bother.
3111 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
3112 if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
3113 TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
3114 return true;
3115
3116 // If the landingpad's return type is token type, we don't create virtual
3117 // registers for its exception pointer and selector value. Extracting the
3118 // exception pointer or selector value from a token-typed landingpad is not
3119 // currently supported.
3120 if (LP.getType()->isTokenTy())
3121 return true;
3122
3123 // Add a label to mark the beginning of the landing pad. Deletion of the
3124 // landing pad can thus be detected via the MachineModuleInfo.
3125 MIRBuilder.buildInstr(Opcode: TargetOpcode::EH_LABEL)
3126 .addSym(Sym: MF->addLandingPad(LandingPad: &MBB));
3127
3128 // If the unwinder does not preserve all registers, ensure that the
3129 // function marks the clobbered registers as used.
3130 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
3131 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(MF: *MF))
3132 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
3133
3134 LLT Ty = getLLTForType(Ty&: *LP.getType(), DL: *DL);
3135 Register Undef = MRI->createGenericVirtualRegister(Ty);
3136 MIRBuilder.buildUndef(Res: Undef);
3137
3138 SmallVector<LLT, 2> Tys;
3139 for (Type *Ty : cast<StructType>(Val: LP.getType())->elements())
3140 Tys.push_back(Elt: getLLTForType(Ty&: *Ty, DL: *DL));
3141 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
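// Roughly, for a { ptr, i32 } landingpad this emits (the physical register
// names are target-specific placeholders):
//   EH_LABEL <landing-pad label>
//   %res0:_(p0)  = COPY $exception_reg
//   %tmp:_(p0)   = COPY $selector_reg
//   %res1:_(s32) = G_PTRTOINT %tmp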
3142
3143 // Mark exception register as live in.
3144 Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
3145 if (!ExceptionReg)
3146 return false;
3147
3148 MBB.addLiveIn(PhysReg: ExceptionReg);
3149 ArrayRef<Register> ResRegs = getOrCreateVRegs(Val: LP);
3150 MIRBuilder.buildCopy(Res: ResRegs[0], Op: ExceptionReg);
3151
3152 Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
3153 if (!SelectorReg)
3154 return false;
3155
3156 MBB.addLiveIn(PhysReg: SelectorReg);
3157 Register PtrVReg = MRI->createGenericVirtualRegister(Ty: Tys[0]);
3158 MIRBuilder.buildCopy(Res: PtrVReg, Op: SelectorReg);
3159 MIRBuilder.buildCast(Dst: ResRegs[1], Src: PtrVReg);
3160
3161 return true;
3162}
3163
3164bool IRTranslator::translateAlloca(const User &U,
3165 MachineIRBuilder &MIRBuilder) {
3166 auto &AI = cast<AllocaInst>(Val: U);
3167
3168 if (AI.isSwiftError())
3169 return true;
3170
3171 if (AI.isStaticAlloca()) {
3172 Register Res = getOrCreateVReg(Val: AI);
3173 int FI = getOrCreateFrameIndex(AI);
3174 MIRBuilder.buildFrameIndex(Res, Idx: FI);
3175 return true;
3176 }
3177
3178 // FIXME: support stack probing for Windows.
3179 if (MF->getTarget().getTargetTriple().isOSWindows())
3180 return false;
3181
3182 // Now we're in the harder dynamic case.
3183 Register NumElts = getOrCreateVReg(Val: *AI.getArraySize());
3184 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
3185 LLT IntPtrTy = getLLTForType(Ty&: *IntPtrIRTy, DL: *DL);
3186 if (MRI->getType(Reg: NumElts) != IntPtrTy) {
3187 Register ExtElts = MRI->createGenericVirtualRegister(Ty: IntPtrTy);
3188 MIRBuilder.buildZExtOrTrunc(Res: ExtElts, Op: NumElts);
3189 NumElts = ExtElts;
3190 }
3191
3192 Type *Ty = AI.getAllocatedType();
3193 TypeSize TySize = DL->getTypeAllocSize(Ty);
3194
3195 Register AllocSize = MRI->createGenericVirtualRegister(Ty: IntPtrTy);
3196 Register TySizeReg;
3197 if (TySize.isScalable()) {
3198 // For scalable types, use vscale * min_value
3199 TySizeReg = MRI->createGenericVirtualRegister(Ty: IntPtrTy);
3200 MIRBuilder.buildVScale(Res: TySizeReg, MinElts: TySize.getKnownMinValue());
3201 } else {
3202 // For fixed types, use a constant
3203 TySizeReg =
3204 getOrCreateVReg(Val: *ConstantInt::get(Ty: IntPtrIRTy, V: TySize.getFixedValue()));
3205 }
3206 MIRBuilder.buildMul(Dst: AllocSize, Src0: NumElts, Src1: TySizeReg);
3207
3208 // Round the size of the allocation up to the stack alignment size
3209 // by adding SA-1 to the size. This doesn't overflow because we're computing
3210 // an address inside an alloca.
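// For example, with a 16-byte stack alignment and AllocSize = 40:
//   (40 + 15) & ~15 = 48
// i.e. the allocation size is rounded up to the next multiple of the stack
// alignment.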
3211 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
3212 auto SAMinusOne = MIRBuilder.buildConstant(Res: IntPtrTy, Val: StackAlign.value() - 1);
3213 auto AllocAdd = MIRBuilder.buildAdd(Dst: IntPtrTy, Src0: AllocSize, Src1: SAMinusOne,
3214 Flags: MachineInstr::NoUWrap);
3215 auto AlignCst =
3216 MIRBuilder.buildConstant(Res: IntPtrTy, Val: ~(uint64_t)(StackAlign.value() - 1));
3217 auto AlignedAlloc = MIRBuilder.buildAnd(Dst: IntPtrTy, Src0: AllocAdd, Src1: AlignCst);
3218
3219 Align Alignment = AI.getAlign();
3220 if (Alignment <= StackAlign)
3221 Alignment = Align(1);
3222 MIRBuilder.buildDynStackAlloc(Res: getOrCreateVReg(Val: AI), Size: AlignedAlloc, Alignment);
3223
3224 MF->getFrameInfo().CreateVariableSizedObject(Alignment, Alloca: &AI);
3225 assert(MF->getFrameInfo().hasVarSizedObjects());
3226 return true;
3227}
3228
3229bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
3230 // FIXME: We may need more info about the type. Because of how LLT works,
3231 // we're completely discarding the i64/double distinction here (amongst
3232 // others). Fortunately the ABIs I know of where that matters don't use va_arg
3233 // anyway, but that's not guaranteed.
3234 MIRBuilder.buildInstr(Opc: TargetOpcode::G_VAARG, DstOps: {getOrCreateVReg(Val: U)},
3235 SrcOps: {getOrCreateVReg(Val: *U.getOperand(i: 0)),
3236 DL->getABITypeAlign(Ty: U.getType()).value()});
3237 return true;
3238}
3239
3240bool IRTranslator::translateUnreachable(const User &U,
3241 MachineIRBuilder &MIRBuilder) {
3242 auto &UI = cast<UnreachableInst>(Val: U);
3243 if (!UI.shouldLowerToTrap(TrapUnreachable: MF->getTarget().Options.TrapUnreachable,
3244 NoTrapAfterNoreturn: MF->getTarget().Options.NoTrapAfterNoreturn))
3245 return true;
3246
3247 MIRBuilder.buildTrap();
3248 return true;
3249}
3250
3251bool IRTranslator::translateInsertElement(const User &U,
3252 MachineIRBuilder &MIRBuilder) {
3253 // If it is a <1 x Ty> vector, use the scalar as it is
3254 // not a legal vector type in LLT.
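// E.g. (sketch): %r = insertelement <1 x i32> %v, i32 %elt, i64 0
// is translated as a plain copy of %elt, since <1 x i32> is represented as a
// bare s32 in LLT.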
3255 if (auto *FVT = dyn_cast<FixedVectorType>(Val: U.getType());
3256 FVT && FVT->getNumElements() == 1)
3257 return translateCopy(U, V: *U.getOperand(i: 1), MIRBuilder);
3258
3259 Register Res = getOrCreateVReg(Val: U);
3260 Register Val = getOrCreateVReg(Val: *U.getOperand(i: 0));
3261 Register Elt = getOrCreateVReg(Val: *U.getOperand(i: 1));
3262 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(DL: *DL);
3263 Register Idx;
3264 if (auto *CI = dyn_cast<ConstantInt>(Val: U.getOperand(i: 2))) {
3265 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3266 APInt NewIdx = CI->getValue().zextOrTrunc(width: PreferredVecIdxWidth);
3267 auto *NewIdxCI = ConstantInt::get(Context&: CI->getContext(), V: NewIdx);
3268 Idx = getOrCreateVReg(Val: *NewIdxCI);
3269 }
3270 }
3271 if (!Idx)
3272 Idx = getOrCreateVReg(Val: *U.getOperand(i: 2));
3273 if (MRI->getType(Reg: Idx).getSizeInBits() != PreferredVecIdxWidth) {
3274 const LLT VecIdxTy =
3275 MRI->getType(Reg: Idx).changeElementSize(NewEltSize: PreferredVecIdxWidth);
3276 Idx = MIRBuilder.buildZExtOrTrunc(Res: VecIdxTy, Op: Idx).getReg(Idx: 0);
3277 }
3278 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
3279 return true;
3280}
3281
3282bool IRTranslator::translateInsertVector(const User &U,
3283 MachineIRBuilder &MIRBuilder) {
3284 Register Dst = getOrCreateVReg(Val: U);
3285 Register Vec = getOrCreateVReg(Val: *U.getOperand(i: 0));
3286 Register Elt = getOrCreateVReg(Val: *U.getOperand(i: 1));
3287
3288 ConstantInt *CI = cast<ConstantInt>(Val: U.getOperand(i: 2));
3289 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(DL: *DL);
3290
3291 // Resize Index to preferred index width.
3292 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3293 APInt NewIdx = CI->getValue().zextOrTrunc(width: PreferredVecIdxWidth);
3294 CI = ConstantInt::get(Context&: CI->getContext(), V: NewIdx);
3295 }
3296
3297 // If it is a <1 x Ty> vector, we have to use other means.
3298 if (auto *ResultType = dyn_cast<FixedVectorType>(Val: U.getOperand(i: 1)->getType());
3299 ResultType && ResultType->getNumElements() == 1) {
3300 if (auto *InputType = dyn_cast<FixedVectorType>(Val: U.getOperand(i: 0)->getType());
3301 InputType && InputType->getNumElements() == 1) {
3302 // We are inserting an illegal fixed vector into an illegal
3303 // fixed vector, use the scalar as it is not a legal vector type
3304 // in LLT.
3305 return translateCopy(U, V: *U.getOperand(i: 0), MIRBuilder);
3306 }
3307 if (isa<FixedVectorType>(Val: U.getOperand(i: 0)->getType())) {
3308 // We are inserting an illegal fixed vector into a legal fixed
3309 // vector, use the scalar as it is not a legal vector type in
3310 // LLT.
3311 Register Idx = getOrCreateVReg(Val: *CI);
3312 MIRBuilder.buildInsertVectorElement(Res: Dst, Val: Vec, Elt, Idx);
3313 return true;
3314 }
3315 if (isa<ScalableVectorType>(Val: U.getOperand(i: 0)->getType())) {
3316 // We are inserting an illegal fixed vector into a scalable
3317 // vector, use a scalar element insert.
3318 LLT VecIdxTy = LLT::scalar(SizeInBits: PreferredVecIdxWidth);
3319 Register Idx = getOrCreateVReg(Val: *CI);
3320 auto ScaledIndex = MIRBuilder.buildMul(
3321 Dst: VecIdxTy, Src0: MIRBuilder.buildVScale(Res: VecIdxTy, MinElts: 1), Src1: Idx);
3322 MIRBuilder.buildInsertVectorElement(Res: Dst, Val: Vec, Elt, Idx: ScaledIndex);
3323 return true;
3324 }
3325 }
3326
3327 MIRBuilder.buildInsertSubvector(
3328 Res: getOrCreateVReg(Val: U), Src0: getOrCreateVReg(Val: *U.getOperand(i: 0)),
3329 Src1: getOrCreateVReg(Val: *U.getOperand(i: 1)), Index: CI->getZExtValue());
3330 return true;
3331}
3332
3333bool IRTranslator::translateExtractElement(const User &U,
3334 MachineIRBuilder &MIRBuilder) {
3335 // If it is a <1 x Ty> vector, use the scalar as it is
3336 // not a legal vector type in LLT.
3337 if (const FixedVectorType *FVT =
3338 dyn_cast<FixedVectorType>(Val: U.getOperand(i: 0)->getType()))
3339 if (FVT->getNumElements() == 1)
3340 return translateCopy(U, V: *U.getOperand(i: 0), MIRBuilder);
3341
3342 Register Res = getOrCreateVReg(Val: U);
3343 Register Val = getOrCreateVReg(Val: *U.getOperand(i: 0));
3344 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(DL: *DL);
3345 Register Idx;
3346 if (auto *CI = dyn_cast<ConstantInt>(Val: U.getOperand(i: 1))) {
3347 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3348 APInt NewIdx = CI->getValue().zextOrTrunc(width: PreferredVecIdxWidth);
3349 auto *NewIdxCI = ConstantInt::get(Context&: CI->getContext(), V: NewIdx);
3350 Idx = getOrCreateVReg(Val: *NewIdxCI);
3351 }
3352 }
3353 if (!Idx)
3354 Idx = getOrCreateVReg(Val: *U.getOperand(i: 1));
3355 if (MRI->getType(Reg: Idx).getSizeInBits() != PreferredVecIdxWidth) {
3356 const LLT VecIdxTy =
3357 MRI->getType(Reg: Idx).changeElementSize(NewEltSize: PreferredVecIdxWidth);
3358 Idx = MIRBuilder.buildZExtOrTrunc(Res: VecIdxTy, Op: Idx).getReg(Idx: 0);
3359 }
3360 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
3361 return true;
3362}
3363
3364bool IRTranslator::translateExtractVector(const User &U,
3365 MachineIRBuilder &MIRBuilder) {
3366 Register Res = getOrCreateVReg(Val: U);
3367 Register Vec = getOrCreateVReg(Val: *U.getOperand(i: 0));
3368 ConstantInt *CI = cast<ConstantInt>(Val: U.getOperand(i: 1));
3369 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(DL: *DL);
3370
3371 // Resize Index to preferred index width.
3372 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3373 APInt NewIdx = CI->getValue().zextOrTrunc(width: PreferredVecIdxWidth);
3374 CI = ConstantInt::get(Context&: CI->getContext(), V: NewIdx);
3375 }
3376
3377 // If it is a <1 x Ty> vector, we have to use other means.
3378 if (auto *ResultType = dyn_cast<FixedVectorType>(Val: U.getType());
3379 ResultType && ResultType->getNumElements() == 1) {
3380 if (auto *InputType = dyn_cast<FixedVectorType>(Val: U.getOperand(i: 0)->getType());
3381 InputType && InputType->getNumElements() == 1) {
3382 // We are extracting an illegal fixed vector from an illegal fixed vector,
3383 // use the scalar as it is not a legal vector type in LLT.
3384 return translateCopy(U, V: *U.getOperand(i: 0), MIRBuilder);
3385 }
3386 if (isa<FixedVectorType>(Val: U.getOperand(i: 0)->getType())) {
3387 // We are extracting an illegal fixed vector from a legal fixed
3388 // vector, use the scalar as it is not a legal vector type in
3389 // LLT.
3390 Register Idx = getOrCreateVReg(Val: *CI);
3391 MIRBuilder.buildExtractVectorElement(Res, Val: Vec, Idx);
3392 return true;
3393 }
3394 if (isa<ScalableVectorType>(Val: U.getOperand(i: 0)->getType())) {
3395 // We are extracting an illegal fixed vector from a scalable
3396 // vector, use a scalar element extract.
3397 LLT VecIdxTy = LLT::scalar(SizeInBits: PreferredVecIdxWidth);
3398 Register Idx = getOrCreateVReg(Val: *CI);
3399 auto ScaledIndex = MIRBuilder.buildMul(
3400 Dst: VecIdxTy, Src0: MIRBuilder.buildVScale(Res: VecIdxTy, MinElts: 1), Src1: Idx);
3401 MIRBuilder.buildExtractVectorElement(Res, Val: Vec, Idx: ScaledIndex);
3402 return true;
3403 }
3404 }
3405
3406 MIRBuilder.buildExtractSubvector(Res: getOrCreateVReg(Val: U),
3407 Src: getOrCreateVReg(Val: *U.getOperand(i: 0)),
3408 Index: CI->getZExtValue());
3409 return true;
3410}
3411
3412bool IRTranslator::translateShuffleVector(const User &U,
3413 MachineIRBuilder &MIRBuilder) {
3414 // A ShuffleVector that operates on scalable vectors is a splat vector where
3415 // the value of the splat vector is the 0th element of the first operand,
3416 // since the index mask operand is the zeroinitializer (undef and
3417 // poison are treated as zeroinitializer here).
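// For example (a sketch):
//   shufflevector <vscale x 4 x i32> %v, <vscale x 4 x i32> poison,
//                 <vscale x 4 x i32> zeroinitializer
// becomes an extract of element 0 of %v followed by a G_SPLAT_VECTOR.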
3418 if (U.getOperand(i: 0)->getType()->isScalableTy()) {
3419 Register Val = getOrCreateVReg(Val: *U.getOperand(i: 0));
3420 auto SplatVal = MIRBuilder.buildExtractVectorElementConstant(
3421 Res: MRI->getType(Reg: Val).getElementType(), Val, Idx: 0);
3422 MIRBuilder.buildSplatVector(Res: getOrCreateVReg(Val: U), Val: SplatVal);
3423 return true;
3424 }
3425
3426 ArrayRef<int> Mask;
3427 if (auto *SVI = dyn_cast<ShuffleVectorInst>(Val: &U))
3428 Mask = SVI->getShuffleMask();
3429 else
3430 Mask = cast<ConstantExpr>(Val: U).getShuffleMask();
3431
3432 // As GISel does not represent <1 x > vectors as a separate type from scalars,
3433 // we transform shuffle_vector with a scalar output to an
3434 // ExtractVectorElement. If the input type is also scalar it becomes a Copy.
3435 unsigned DstElts = cast<FixedVectorType>(Val: U.getType())->getNumElements();
3436 unsigned SrcElts =
3437 cast<FixedVectorType>(Val: U.getOperand(i: 0)->getType())->getNumElements();
3438 if (DstElts == 1) {
3439 unsigned M = Mask[0];
3440 if (SrcElts == 1) {
3441 if (M == 0 || M == 1)
3442 return translateCopy(U, V: *U.getOperand(i: M), MIRBuilder);
3443 MIRBuilder.buildUndef(Res: getOrCreateVReg(Val: U));
3444 } else {
3445 Register Dst = getOrCreateVReg(Val: U);
3446 if (M < SrcElts) {
3447 MIRBuilder.buildExtractVectorElementConstant(
3448 Res: Dst, Val: getOrCreateVReg(Val: *U.getOperand(i: 0)), Idx: M);
3449 } else if (M < SrcElts * 2) {
3450 MIRBuilder.buildExtractVectorElementConstant(
3451 Res: Dst, Val: getOrCreateVReg(Val: *U.getOperand(i: 1)), Idx: M - SrcElts);
3452 } else {
3453 MIRBuilder.buildUndef(Res: Dst);
3454 }
3455 }
3456 return true;
3457 }
3458
3459 // A single-element source is transformed into a build_vector.
3460 if (SrcElts == 1) {
3461 SmallVector<Register> Ops;
3462 Register Undef;
3463 for (int M : Mask) {
3464 LLT SrcTy = getLLTForType(Ty&: *U.getOperand(i: 0)->getType(), DL: *DL);
3465 if (M == 0 || M == 1) {
3466 Ops.push_back(Elt: getOrCreateVReg(Val: *U.getOperand(i: M)));
3467 } else {
3468 if (!Undef.isValid()) {
3469 Undef = MRI->createGenericVirtualRegister(Ty: SrcTy);
3470 MIRBuilder.buildUndef(Res: Undef);
3471 }
3472 Ops.push_back(Elt: Undef);
3473 }
3474 }
3475 MIRBuilder.buildBuildVector(Res: getOrCreateVReg(Val: U), Ops);
3476 return true;
3477 }
3478
3479 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
3480 MIRBuilder
3481 .buildInstr(Opc: TargetOpcode::G_SHUFFLE_VECTOR, DstOps: {getOrCreateVReg(Val: U)},
3482 SrcOps: {getOrCreateVReg(Val: *U.getOperand(i: 0)),
3483 getOrCreateVReg(Val: *U.getOperand(i: 1))})
3484 .addShuffleMask(Val: MaskAlloc);
3485 return true;
3486}
3487
3488bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
3489 const PHINode &PI = cast<PHINode>(Val: U);
3490
3491 SmallVector<MachineInstr *, 4> Insts;
3492 for (auto Reg : getOrCreateVRegs(Val: PI)) {
3493 auto MIB = MIRBuilder.buildInstr(Opc: TargetOpcode::G_PHI, DstOps: {Reg}, SrcOps: {});
3494 Insts.push_back(Elt: MIB.getInstr());
3495 }
3496
3497 PendingPHIs.emplace_back(Args: &PI, Args: std::move(Insts));
3498 return true;
3499}
3500
3501bool IRTranslator::translateAtomicCmpXchg(const User &U,
3502 MachineIRBuilder &MIRBuilder) {
3503 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(Val: U);
3504
3505 auto Flags = TLI->getAtomicMemOperandFlags(AI: I, DL: *DL);
3506
3507 auto Res = getOrCreateVRegs(Val: I);
3508 Register OldValRes = Res[0];
3509 Register SuccessRes = Res[1];
3510 Register Addr = getOrCreateVReg(Val: *I.getPointerOperand());
3511 Register Cmp = getOrCreateVReg(Val: *I.getCompareOperand());
3512 Register NewVal = getOrCreateVReg(Val: *I.getNewValOperand());
3513
3514 MIRBuilder.buildAtomicCmpXchgWithSuccess(
3515 OldValRes, SuccessRes, Addr, CmpVal: Cmp, NewVal,
3516 MMO&: *MF->getMachineMemOperand(
3517 PtrInfo: MachinePointerInfo(I.getPointerOperand()), f: Flags, MemTy: MRI->getType(Reg: Cmp),
3518 base_alignment: getMemOpAlign(I), AAInfo: I.getAAMetadata(), Ranges: nullptr, SSID: I.getSyncScopeID(),
3519 Ordering: I.getSuccessOrdering(), FailureOrdering: I.getFailureOrdering()));
3520 return true;
3521}
3522
3523bool IRTranslator::translateAtomicRMW(const User &U,
3524 MachineIRBuilder &MIRBuilder) {
3525 if (!mayTranslateUserTypes(U))
3526 return false;
3527
3528 const AtomicRMWInst &I = cast<AtomicRMWInst>(Val: U);
3529 auto Flags = TLI->getAtomicMemOperandFlags(AI: I, DL: *DL);
3530
3531 Register Res = getOrCreateVReg(Val: I);
3532 Register Addr = getOrCreateVReg(Val: *I.getPointerOperand());
3533 Register Val = getOrCreateVReg(Val: *I.getValOperand());
3534
3535 unsigned Opcode = 0;
3536 switch (I.getOperation()) {
3537 default:
3538 return false;
3539 case AtomicRMWInst::Xchg:
3540 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3541 break;
3542 case AtomicRMWInst::Add:
3543 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3544 break;
3545 case AtomicRMWInst::Sub:
3546 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3547 break;
3548 case AtomicRMWInst::And:
3549 Opcode = TargetOpcode::G_ATOMICRMW_AND;
3550 break;
3551 case AtomicRMWInst::Nand:
3552 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3553 break;
3554 case AtomicRMWInst::Or:
3555 Opcode = TargetOpcode::G_ATOMICRMW_OR;
3556 break;
3557 case AtomicRMWInst::Xor:
3558 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3559 break;
3560 case AtomicRMWInst::Max:
3561 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3562 break;
3563 case AtomicRMWInst::Min:
3564 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3565 break;
3566 case AtomicRMWInst::UMax:
3567 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3568 break;
3569 case AtomicRMWInst::UMin:
3570 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3571 break;
3572 case AtomicRMWInst::FAdd:
3573 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3574 break;
3575 case AtomicRMWInst::FSub:
3576 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3577 break;
3578 case AtomicRMWInst::FMax:
3579 Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3580 break;
3581 case AtomicRMWInst::FMin:
3582 Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3583 break;
3584 case AtomicRMWInst::FMaximum:
3585 Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM;
3586 break;
3587 case AtomicRMWInst::FMinimum:
3588 Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM;
3589 break;
3590 case AtomicRMWInst::FMaximumNum:
3591 Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUMNUM;
3592 break;
3593 case AtomicRMWInst::FMinimumNum:
3594 Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUMNUM;
3595 break;
3596 case AtomicRMWInst::UIncWrap:
3597 Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3598 break;
3599 case AtomicRMWInst::UDecWrap:
3600 Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3601 break;
3602 case AtomicRMWInst::USubCond:
3603 Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
3604 break;
3605 case AtomicRMWInst::USubSat:
3606 Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;
3607 break;
3608 }
3609
3610 MIRBuilder.buildAtomicRMW(
3611 Opcode, OldValRes: Res, Addr, Val,
3612 MMO&: *MF->getMachineMemOperand(PtrInfo: MachinePointerInfo(I.getPointerOperand()),
3613 f: Flags, MemTy: MRI->getType(Reg: Val), base_alignment: getMemOpAlign(I),
3614 AAInfo: I.getAAMetadata(), Ranges: nullptr, SSID: I.getSyncScopeID(),
3615 Ordering: I.getOrdering()));
3616 return true;
3617}
3618
3619bool IRTranslator::translateFence(const User &U,
3620 MachineIRBuilder &MIRBuilder) {
3621 const FenceInst &Fence = cast<FenceInst>(Val: U);
3622 MIRBuilder.buildFence(Ordering: static_cast<unsigned>(Fence.getOrdering()),
3623 Scope: Fence.getSyncScopeID());
3624 return true;
3625}
3626
3627bool IRTranslator::translateFreeze(const User &U,
3628 MachineIRBuilder &MIRBuilder) {
3629 const ArrayRef<Register> DstRegs = getOrCreateVRegs(Val: U);
3630 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(Val: *U.getOperand(i: 0));
3631
3632 assert(DstRegs.size() == SrcRegs.size() &&
3633 "Freeze with different source and destination type?");
3634
3635 for (unsigned I = 0; I < DstRegs.size(); ++I) {
3636 MIRBuilder.buildFreeze(Dst: DstRegs[I], Src: SrcRegs[I]);
3637 }
3638
3639 return true;
3640}
3641
3642void IRTranslator::finishPendingPhis() {
3643#ifndef NDEBUG
3644 DILocationVerifier Verifier;
3645 GISelObserverWrapper WrapperObserver(&Verifier);
3646 RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
3647#endif // ifndef NDEBUG
3648 for (auto &Phi : PendingPHIs) {
3649 const PHINode *PI = Phi.first;
3650 if (PI->getType()->isEmptyTy())
3651 continue;
3652 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
3653 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
3654 EntryBuilder->setDebugLoc(PI->getDebugLoc());
3655#ifndef NDEBUG
3656 Verifier.setCurrentInst(PI);
3657#endif // ifndef NDEBUG
3658
3659 SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;
3660 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
3661 auto IRPred = PI->getIncomingBlock(i);
3662 ArrayRef<Register> ValRegs = getOrCreateVRegs(Val: *PI->getIncomingValue(i));
3663 for (auto *Pred : getMachinePredBBs(Edge: {IRPred, PI->getParent()})) {
3664 if (SeenPreds.count(Ptr: Pred) || !PhiMBB->isPredecessor(MBB: Pred))
3665 continue;
3666 SeenPreds.insert(Ptr: Pred);
3667 for (unsigned j = 0; j < ValRegs.size(); ++j) {
3668 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
3669 MIB.addUse(RegNo: ValRegs[j]);
3670 MIB.addMBB(MBB: Pred);
3671 }
3672 }
3673 }
3674 }
3675}
3676
3677void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
3678 const DILocalVariable *Variable,
3679 const DIExpression *Expression,
3680 const DebugLoc &DL,
3681 MachineIRBuilder &MIRBuilder) {
3682 assert(Variable->isValidLocationForIntrinsic(DL) &&
3683 "Expected inlined-at fields to agree");
3684 // Act as if we're handling a debug intrinsic.
3685 MIRBuilder.setDebugLoc(DL);
3686
3687 if (!V || HasArgList) {
3688 // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
3689 // terminate any prior location.
3690 MIRBuilder.buildIndirectDbgValue(Reg: 0, Variable, Expr: Expression);
3691 return;
3692 }
3693
3694 if (const auto *CI = dyn_cast<Constant>(Val: V)) {
3695 MIRBuilder.buildConstDbgValue(C: *CI, Variable, Expr: Expression);
3696 return;
3697 }
3698
3699 if (auto *AI = dyn_cast<AllocaInst>(Val: V);
3700 AI && AI->isStaticAlloca() && Expression->startsWithDeref()) {
3701 // If the value is an alloca and the expression starts with a
3702 // dereference, track a stack slot instead of a register, as registers
3703 // may be clobbered.
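// E.g. (a sketch; !var stands for the variable metadata):
//   #dbg_value(ptr %slot, !var, !DIExpression(DW_OP_deref), ...)
// where %slot is a static alloca becomes a frame-index DBG_VALUE with the
// leading DW_OP_deref dropped.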
3704 auto ExprOperands = Expression->getElements();
3705 auto *ExprDerefRemoved =
3706 DIExpression::get(Context&: AI->getContext(), Elements: ExprOperands.drop_front());
3707 MIRBuilder.buildFIDbgValue(FI: getOrCreateFrameIndex(AI: *AI), Variable,
3708 Expr: ExprDerefRemoved);
3709 return;
3710 }
3711 if (translateIfEntryValueArgument(isDeclare: false, Val: V, Var: Variable, Expr: Expression, DL,
3712 MIRBuilder))
3713 return;
3714 for (Register Reg : getOrCreateVRegs(Val: *V)) {
3715 // FIXME: This does not handle register-indirect values at offset 0. The
3716 // direct/indirect thing shouldn't really be handled by something as
3717 // implicit as reg+noreg vs reg+imm in the first place, but it seems
3718 // pretty baked in right now.
3719 MIRBuilder.buildDirectDbgValue(Reg, Variable, Expr: Expression);
3720 }
3721}
3722
3723void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
3724 const DILocalVariable *Variable,
3725 const DIExpression *Expression,
3726 const DebugLoc &DL,
3727 MachineIRBuilder &MIRBuilder) {
3728 if (!Address || isa<UndefValue>(Val: Address)) {
3729 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
3730 return;
3731 }
3732
3733 assert(Variable->isValidLocationForIntrinsic(DL) &&
3734 "Expected inlined-at fields to agree");
3735 auto AI = dyn_cast<AllocaInst>(Val: Address);
3736 if (AI && AI->isStaticAlloca()) {
3737 // Static allocas are tracked at the MF level, no need for DBG_VALUE
3738 // instructions (in fact, they get ignored if they *do* exist).
3739 MF->setVariableDbgInfo(Var: Variable, Expr: Expression,
3740 Slot: getOrCreateFrameIndex(AI: *AI), Loc: DL);
3741 return;
3742 }
3743
3744 if (translateIfEntryValueArgument(isDeclare: true, Val: Address, Var: Variable,
3745 Expr: Expression, DL,
3746 MIRBuilder))
3747 return;
3748
3749 // A dbg.declare describes the address of a source variable, so lower it
3750 // into an indirect DBG_VALUE.
3751 MIRBuilder.setDebugLoc(DL);
3752 MIRBuilder.buildIndirectDbgValue(Reg: getOrCreateVReg(Val: *Address), Variable,
3753 Expr: Expression);
3754}
3755
3756void IRTranslator::translateDbgInfo(const Instruction &Inst,
3757 MachineIRBuilder &MIRBuilder) {
3758 for (DbgRecord &DR : Inst.getDbgRecordRange()) {
3759 if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(Val: &DR)) {
3760 MIRBuilder.setDebugLoc(DLR->getDebugLoc());
3761 assert(DLR->getLabel() && "Missing label");
3762 assert(DLR->getLabel()->isValidLocationForIntrinsic(
3763 MIRBuilder.getDebugLoc()) &&
3764 "Expected inlined-at fields to agree");
3765 MIRBuilder.buildDbgLabel(Label: DLR->getLabel());
3766 continue;
3767 }
3768 DbgVariableRecord &DVR = cast<DbgVariableRecord>(Val&: DR);
3769 const DILocalVariable *Variable = DVR.getVariable();
3770 const DIExpression *Expression = DVR.getExpression();
3771 Value *V = DVR.getVariableLocationOp(OpIdx: 0);
3772 if (DVR.isDbgDeclare())
3773 translateDbgDeclareRecord(Address: V, HasArgList: DVR.hasArgList(), Variable, Expression,
3774 DL: DVR.getDebugLoc(), MIRBuilder);
3775 else
3776 translateDbgValueRecord(V, HasArgList: DVR.hasArgList(), Variable, Expression,
3777 DL: DVR.getDebugLoc(), MIRBuilder);
3778 }
3779}
3780
3781bool IRTranslator::translate(const Instruction &Inst) {
3782 CurBuilder->setDebugLoc(Inst.getDebugLoc());
3783 CurBuilder->setPCSections(Inst.getMetadata(KindID: LLVMContext::MD_pcsections));
3784 CurBuilder->setMMRAMetadata(Inst.getMetadata(KindID: LLVMContext::MD_mmra));
3785
3786 if (TLI->fallBackToDAGISel(Inst))
3787 return false;
3788
3789 switch (Inst.getOpcode()) {
3790#define HANDLE_INST(NUM, OPCODE, CLASS) \
3791 case Instruction::OPCODE: \
3792 return translate##OPCODE(Inst, *CurBuilder.get());
3793#include "llvm/IR/Instruction.def"
3794 default:
3795 return false;
3796 }
3797}
3798
3799bool IRTranslator::translate(const Constant &C, Register Reg) {
3800 // We only emit constants into the entry block from here. To prevent jumpy
3801 // debug behaviour, remove the debug location.
3802 if (auto CurrInstDL = CurBuilder->getDL())
3803 EntryBuilder->setDebugLoc(DebugLoc());
3804
3805 if (auto CI = dyn_cast<ConstantInt>(Val: &C)) {
3806 // buildConstant expects a to-be-splatted scalar ConstantInt.
3807 if (isa<VectorType>(Val: CI->getType()))
3808 CI = ConstantInt::get(Context&: CI->getContext(), V: CI->getValue());
3809 EntryBuilder->buildConstant(Res: Reg, Val: *CI);
3810 } else if (auto CF = dyn_cast<ConstantFP>(Val: &C)) {
3811 // buildFConstant expects a to-be-splatted scalar ConstantFP.
3812 if (isa<VectorType>(Val: CF->getType()))
3813 CF = ConstantFP::get(Context&: CF->getContext(), V: CF->getValue());
3814 EntryBuilder->buildFConstant(Res: Reg, Val: *CF);
3815 } else if (isa<UndefValue>(Val: C))
3816 EntryBuilder->buildUndef(Res: Reg);
3817 else if (isa<ConstantPointerNull>(Val: C))
3818 EntryBuilder->buildConstant(Res: Reg, Val: 0);
3819 else if (auto GV = dyn_cast<GlobalValue>(Val: &C))
3820 EntryBuilder->buildGlobalValue(Res: Reg, GV);
3821 else if (auto CPA = dyn_cast<ConstantPtrAuth>(Val: &C)) {
3822 Register Addr = getOrCreateVReg(Val: *CPA->getPointer());
3823 Register AddrDisc = getOrCreateVReg(Val: *CPA->getAddrDiscriminator());
3824 EntryBuilder->buildConstantPtrAuth(Res: Reg, CPA, Addr, AddrDisc);
3825 } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(Val: &C)) {
3826 Constant &Elt = *CAZ->getElementValue(Idx: 0u);
3827 if (isa<ScalableVectorType>(Val: CAZ->getType())) {
3828 EntryBuilder->buildSplatVector(Res: Reg, Val: getOrCreateVReg(Val: Elt));
3829 return true;
3830 }
3831 // Return the scalar if it is a <1 x Ty> vector.
3832 unsigned NumElts = CAZ->getElementCount().getFixedValue();
3833 if (NumElts == 1)
3834 return translateCopy(U: C, V: Elt, MIRBuilder&: *EntryBuilder);
3835 // All elements are zero so we can just use the first one.
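// E.g. (sketch): <4 x i32> zeroinitializer becomes
//   %z:_(s32) = G_CONSTANT i32 0
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %z, %z, %z, %z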
3836 EntryBuilder->buildSplatBuildVector(Res: Reg, Src: getOrCreateVReg(Val: Elt));
3837 } else if (auto CV = dyn_cast<ConstantDataVector>(Val: &C)) {
3838 // Return the scalar if it is a <1 x Ty> vector.
3839 if (CV->getNumElements() == 1)
3840 return translateCopy(U: C, V: *CV->getElementAsConstant(i: 0), MIRBuilder&: *EntryBuilder);
3841 SmallVector<Register, 4> Ops;
3842 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3843 Constant &Elt = *CV->getElementAsConstant(i);
3844 Ops.push_back(Elt: getOrCreateVReg(Val: Elt));
3845 }
3846 EntryBuilder->buildBuildVector(Res: Reg, Ops);
3847 } else if (auto CE = dyn_cast<ConstantExpr>(Val: &C)) {
3848 switch(CE->getOpcode()) {
3849#define HANDLE_INST(NUM, OPCODE, CLASS) \
3850 case Instruction::OPCODE: \
3851 return translate##OPCODE(*CE, *EntryBuilder.get());
3852#include "llvm/IR/Instruction.def"
3853 default:
3854 return false;
3855 }
3856 } else if (auto CV = dyn_cast<ConstantVector>(Val: &C)) {
3857 if (CV->getNumOperands() == 1)
3858 return translateCopy(U: C, V: *CV->getOperand(i_nocapture: 0), MIRBuilder&: *EntryBuilder);
3859 SmallVector<Register, 4> Ops;
3860 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3861 Ops.push_back(Elt: getOrCreateVReg(Val: *CV->getOperand(i_nocapture: i)));
3862 }
3863 EntryBuilder->buildBuildVector(Res: Reg, Ops);
3864 } else if (auto *BA = dyn_cast<BlockAddress>(Val: &C)) {
3865 EntryBuilder->buildBlockAddress(Res: Reg, BA);
3866 } else
3867 return false;
3868
3869 return true;
3870}
3871
3872bool IRTranslator::mayTranslateUserTypes(const User &U) const {
3873 const TargetMachine &TM = TLI->getTargetMachine();
3874 if (LLT::getUseExtended())
3875 return true;
3876
3877 // BF16 cannot currently be represented by the default LLT. To avoid
3878 // miscompiles, we reject any instruction that uses it on all targets that do
3879 // not explicitly enable it via LLT::setUseExtended(true).
3880 // The SPIRV target is an exception.
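// E.g. (sketch): a function containing `fadd bfloat %a, %b` is rejected here,
// and the whole function then falls back (typically to SelectionDAG).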
3881 return TM.getTargetTriple().isSPIRV() ||
3882 (!U.getType()->getScalarType()->isBFloatTy() &&
3883 !any_of(Range: U.operands(), P: [](Value *V) {
3884 return V->getType()->getScalarType()->isBFloatTy();
3885 }));
3886}
3887
3888bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3889 MachineBasicBlock &MBB) {
3890 for (auto &BTB : SL->BitTestCases) {
3891 // Emit header first, if it wasn't already emitted.
3892 if (!BTB.Emitted)
3893 emitBitTestHeader(B&: BTB, SwitchBB: BTB.Parent);
3894
3895 BranchProbability UnhandledProb = BTB.Prob;
3896 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3897 UnhandledProb -= BTB.Cases[j].ExtraProb;
3898 // Set the current basic block to the mbb we wish to insert the code into
3899 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3900 // If all cases cover a contiguous range, it is not necessary to jump to
3901 // the default block after the last bit test fails. This is because the
3902 // range check during bit test header creation has guaranteed that every
3903 // case here doesn't go outside the range. In this case, there is no need
3904 // to perform the last bit test, as it will always be true. Instead, make
3905 // the second-to-last bit-test fall through to the target of the last bit
3906 // test, and delete the last bit test.
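// For example (a sketch): if the cases cover the contiguous range [0, 3] and
// the header's range check already proved the value lies in that range, then
// once the earlier bit tests have failed the final case must match, so its
// test is redundant and can be dropped.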
3907
3908 MachineBasicBlock *NextMBB;
3909 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3910 // Second-to-last bit-test with contiguous range: fall through to the
3911 // target of the final bit test.
3912 NextMBB = BTB.Cases[j + 1].TargetBB;
3913 } else if (j + 1 == ej) {
3914 // For the last bit test, fall through to Default.
3915 NextMBB = BTB.Default;
3916 } else {
3917 // Otherwise, fall through to the next bit test.
3918 NextMBB = BTB.Cases[j + 1].ThisBB;
3919 }
3920
3921 emitBitTestCase(BB&: BTB, NextMBB, BranchProbToNext: UnhandledProb, Reg: BTB.Reg, B&: BTB.Cases[j], SwitchBB: MBB);
3922
3923 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3924 // We need to record here the replacement phi edge that normally happens in
3925 // emitBitTestCase, before we delete the case; otherwise the phi edge will be
3926 // lost.
3927 addMachineCFGPred(Edge: {BTB.Parent->getBasicBlock(),
3928 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3929 NewPred: MBB);
3930 // Since we're not going to use the final bit test, remove it.
3931 BTB.Cases.pop_back();
3932 break;
3933 }
3934 }
3935 // This is the "default" BB. We have two jumps to it: from the "header" BB
3936 // and from the last "case" BB, unless the latter was skipped.
3937 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3938 BTB.Default->getBasicBlock()};
3939 addMachineCFGPred(Edge: HeaderToDefaultEdge, NewPred: BTB.Parent);
3940 if (!BTB.ContiguousRange) {
3941 addMachineCFGPred(Edge: HeaderToDefaultEdge, NewPred: BTB.Cases.back().ThisBB);
3942 }
3943 }
3944 SL->BitTestCases.clear();
3945
3946 for (auto &JTCase : SL->JTCases) {
3947 // Emit header first, if it wasn't already emitted.
3948 if (!JTCase.first.Emitted)
3949 emitJumpTableHeader(JT&: JTCase.second, JTH&: JTCase.first, HeaderBB: JTCase.first.HeaderBB);
3950
3951 emitJumpTable(JT&: JTCase.second, MBB: JTCase.second.MBB);
3952 }
3953 SL->JTCases.clear();
3954
3955 for (auto &SwCase : SL->SwitchCases)
3956 emitSwitchCase(CB&: SwCase, SwitchBB: &CurBuilder->getMBB(), MIB&: *CurBuilder);
3957 SL->SwitchCases.clear();
3958
3959 // Check if we need to generate stack-protector guard checks.
3960 StackProtector &SP = getAnalysis<StackProtector>();
3961 if (SP.shouldEmitSDCheck(BB)) {
3962 bool FunctionBasedInstrumentation =
3963 TLI->getSSPStackGuardCheck(M: *MF->getFunction().getParent(), Libcalls: *Libcalls);
3964 SPDescriptor.initialize(BB: &BB, MBB: &MBB, FunctionBasedInstrumentation);
3965 }
3966 // Handle stack protector.
3967 if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3968 LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
3969 return false;
3970 } else if (SPDescriptor.shouldEmitStackProtector()) {
3971 MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3972 MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3973
3974 // Find the split point to split the parent mbb. At the same time copy all
3975 // physical registers used in the tail of parent mbb into virtual registers
3976 // before the split point and back into physical registers after the split
3977 // point. This saves us from having to deal with live-ins and many other
3978 // register allocation issues caused by us splitting the parent mbb. The
3979 // register allocator will clean up said virtual copies later on.
3980 MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
3981 BB: ParentMBB, TII: *MF->getSubtarget().getInstrInfo());
3982
3983 // Splice the terminator of ParentMBB into SuccessMBB.
3984 SuccessMBB->splice(Where: SuccessMBB->end(), Other: ParentMBB, From: SplitPoint,
3985 To: ParentMBB->end());
3986
3987 // Emit the guard compare and conditional branch (on not-equal) into the parent BB.
3988 if (!emitSPDescriptorParent(SPD&: SPDescriptor, ParentBB: ParentMBB))
3989 return false;
3990
3991 // Codegen the failure MBB if we have not codegened it yet.
3992 MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3993 if (FailureMBB->empty()) {
3994 if (!emitSPDescriptorFailure(SPD&: SPDescriptor, FailureBB: FailureMBB))
3995 return false;
3996 }
3997
3998 // Clear the Per-BB State.
3999 SPDescriptor.resetPerBBState();
4000 }
4001 return true;
4002}
4003
4004bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
4005 MachineBasicBlock *ParentBB) {
4006 CurBuilder->setInsertPt(MBB&: *ParentBB, II: ParentBB->end());
4007 // First create the loads to the guard/stack slot for the comparison.
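// A rough sketch of what gets emitted into ParentBB when the check is inlined
// (types shown for a 64-bit target; vreg names are illustrative):
//   %slot:_(p0)   = G_FRAME_INDEX %stack.protector
//   %sval:_(s64)  = G_LOAD %slot                ; volatile load of the slot
//   %guard:_(s64) = <stack guard load or LOAD_STACK_GUARD>
//   %ne:_(s1)     = G_ICMP intpred(ne), %guard, %sval
//   G_BRCOND %ne, %bb.failure
//   G_BR %bb.success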
4008 Type *PtrIRTy = PointerType::getUnqual(C&: MF->getFunction().getContext());
4009 const LLT PtrTy = getLLTForType(Ty&: *PtrIRTy, DL: *DL);
4010 LLT PtrMemTy = getLLTForMVT(Ty: TLI->getPointerMemTy(DL: *DL));
4011
4012 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
4013 int FI = MFI.getStackProtectorIndex();
4014
4015 Register Guard;
4016 Register StackSlotPtr = CurBuilder->buildFrameIndex(Res: PtrTy, Idx: FI).getReg(Idx: 0);
4017 const Module &M = *ParentBB->getParent()->getFunction().getParent();
4018 Align Align = DL->getPrefTypeAlign(Ty: PointerType::getUnqual(C&: M.getContext()));
4019
4020 // Generate code to load the content of the guard slot.
4021 Register GuardVal =
4022 CurBuilder
4023 ->buildLoad(Res: PtrMemTy, Addr: StackSlotPtr,
4024 PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), Alignment: Align,
4025 MMOFlags: MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
4026 .getReg(Idx: 0);
4027
4028 if (TLI->useStackGuardXorFP()) {
4029 LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
4030 return false;
4031 }
4032
4033 // Retrieve the guard check function; it is nullptr if the instrumentation is inlined.
4034 if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M, Libcalls: *Libcalls)) {
4035 // This path is currently untestable on GlobalISel, since the only platform
4036 // that needs this seems to be Windows, and we fall back on that currently.
4037 // The code still lives here in case that changes.
4038 // Silence warning about unused variable until the code below that uses
4039 // 'GuardCheckFn' is enabled.
4040 (void)GuardCheckFn;
4041 return false;
4042#if 0
4043 // The target provides a guard check function to validate the guard value.
4044 // Generate a call to that function with the content of the guard slot as
4045 // argument.
4046 FunctionType *FnTy = GuardCheckFn->getFunctionType();
4047 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
4048 ISD::ArgFlagsTy Flags;
4049 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
4050 Flags.setInReg();
4051 CallLowering::ArgInfo GuardArgInfo(
4052 {GuardVal, FnTy->getParamType(0), {Flags}});
4053
4054 CallLowering::CallLoweringInfo Info;
4055 Info.OrigArgs.push_back(GuardArgInfo);
4056 Info.CallConv = GuardCheckFn->getCallingConv();
4057 Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
4058 Info.OrigRet = {Register(), FnTy->getReturnType()};
4059 if (!CLI->lowerCall(MIRBuilder, Info)) {
4060 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
4061 return false;
4062 }
4063 return true;
4064#endif
4065 }
4066
4067 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
4068 // Otherwise, emit a volatile load to retrieve the stack guard value.
4069 if (TLI->useLoadStackGuardNode(M: *ParentBB->getBasicBlock()->getModule())) {
4070 Guard =
4071 MRI->createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: PtrTy.getSizeInBits()));
4072 getStackGuard(DstReg: Guard, MIRBuilder&: *CurBuilder);
4073 } else {
4074 // TODO: test using android subtarget when we support @llvm.thread.pointer.
4075 const Value *IRGuard = TLI->getSDagStackGuard(M, Libcalls: *Libcalls);
4076 Register GuardPtr = getOrCreateVReg(Val: *IRGuard);
4077
4078 Guard = CurBuilder
4079 ->buildLoad(Res: PtrMemTy, Addr: GuardPtr,
4080 PtrInfo: MachinePointerInfo::getFixedStack(MF&: *MF, FI), Alignment: Align,
4081 MMOFlags: MachineMemOperand::MOLoad |
4082 MachineMemOperand::MOVolatile)
4083 .getReg(Idx: 0);
4084 }
4085
4086 // Perform the comparison.
4087 auto Cmp =
4088 CurBuilder->buildICmp(Pred: CmpInst::ICMP_NE, Res: LLT::integer(SizeInBits: 1), Op0: Guard, Op1: GuardVal);
4089 // If the guard and the stack slot value are not equal, branch to the failure MBB.
4090 CurBuilder->buildBrCond(Tst: Cmp, Dest&: *SPD.getFailureMBB());
4091 // Otherwise branch to success MBB.
4092 CurBuilder->buildBr(Dest&: *SPD.getSuccessMBB());
4093 return true;
4094}
4095
4096bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
4097 MachineBasicBlock *FailureBB) {
4098 const RTLIB::LibcallImpl LibcallImpl =
4099 Libcalls->getLibcallImpl(Call: RTLIB::STACKPROTECTOR_CHECK_FAIL);
4100 if (LibcallImpl == RTLIB::Unsupported)
4101 return false;
4102
4103 CurBuilder->setInsertPt(MBB&: *FailureBB, II: FailureBB->end());
4104
4105 CallLowering::CallLoweringInfo Info;
4106 Info.CallConv = Libcalls->getLibcallImplCallingConv(Call: LibcallImpl);
4107
4108 StringRef LibcallName =
4109 RTLIB::RuntimeLibcallsInfo::getLibcallImplName(CallImpl: LibcallImpl);
4110 Info.Callee = MachineOperand::CreateES(SymName: LibcallName.data());
4111 Info.OrigRet = {Register(), Type::getVoidTy(C&: MF->getFunction().getContext()),
4112 0};
4113 if (!CLI->lowerCall(MIRBuilder&: *CurBuilder, Info)) {
4114 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
4115 return false;
4116 }
4117
4118 // Emit a trap instruction if we are required to do so.
4119 const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
4120 if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
4121 CurBuilder->buildInstr(Opcode: TargetOpcode::G_TRAP);
4122
4123 return true;
4124}
4125
4126void IRTranslator::finalizeFunction() {
4127 // Release the memory used by the different maps we
4128 // needed during the translation.
4129 PendingPHIs.clear();
4130 VMap.reset();
4131 FrameIndices.clear();
4132 MachinePreds.clear();
4133 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
4134 // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
4135 // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
4136 EntryBuilder.reset();
4137 CurBuilder.reset();
4138 FuncInfo.clear();
4139 SPDescriptor.resetPerFunctionState();
4140}
4141
4142/// Returns true if a BasicBlock \p BB within a variadic function contains a
4143/// variadic musttail call.
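/// For example (a sketch; @g stands for any callee with a matching prototype):
///   define void @f(i32 %x, ...) {
///     musttail call void (i32, ...) @g(i32 %x, ...)
///     ret void
///   }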
4144static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
4145 if (!IsVarArg)
4146 return false;
4147
4148 // Walk the block backwards, because tail calls usually only appear at the end
4149 // of a block.
4150 return llvm::any_of(Range: llvm::reverse(C: BB), P: [](const Instruction &I) {
4151 const auto *CI = dyn_cast<CallInst>(Val: &I);
4152 return CI && CI->isMustTailCall();
4153 });
4154}
4155
4156bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
4157 MF = &CurMF;
4158 const Function &F = MF->getFunction();
4159 ORE = std::make_unique<OptimizationRemarkEmitter>(args: &F);
4160 CLI = MF->getSubtarget().getCallLowering();
4161
4162 if (CLI->fallBackToDAGISel(MF: *MF)) {
4163 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4164 F.getSubprogram(), &F.getEntryBlock());
4165 R << "unable to lower function: "
4166 << ore::NV("Prototype", F.getFunctionType());
4167
4168 reportTranslationError(MF&: *MF, ORE&: *ORE, R);
4169 return false;
4170 }
4171
4172 GISelCSEAnalysisWrapper &Wrapper =
4173 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
4174 // Set the CSEConfig and run the analysis.
4175 GISelCSEInfo *CSEInfo = nullptr;
4176 TPC = &getAnalysis<TargetPassConfig>();
4177
4178 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
4179 ? EnableCSEInIRTranslator
4180 : TPC->isGISelCSEEnabled();
4181
4182 const TargetSubtargetInfo &Subtarget = MF->getSubtarget();
4183 TLI = Subtarget.getTargetLowering();
4184
4185 if (EnableCSE) {
4186 EntryBuilder = std::make_unique<CSEMIRBuilder>(args&: CurMF);
4187 CSEInfo = &Wrapper.get(CSEOpt: TPC->getCSEConfig());
4188 EntryBuilder->setCSEInfo(CSEInfo);
4189 CurBuilder = std::make_unique<CSEMIRBuilder>(args&: CurMF);
4190 CurBuilder->setCSEInfo(CSEInfo);
4191 } else {
4192 EntryBuilder = std::make_unique<MachineIRBuilder>();
4193 CurBuilder = std::make_unique<MachineIRBuilder>();
4194 }
4195 CLI = Subtarget.getCallLowering();
4196 CurBuilder->setMF(*MF);
4197 EntryBuilder->setMF(*MF);
4198 MRI = &MF->getRegInfo();
4199 DL = &F.getDataLayout();
4200 const TargetMachine &TM = MF->getTarget();
4201 TM.resetTargetOptions(F);
4202 EnableOpts = OptLevel != CodeGenOptLevel::None && !skipFunction(F);
4203 FuncInfo.MF = MF;
4204 if (EnableOpts) {
4205 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4206 FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
4207 } else {
4208 AA = nullptr;
4209 FuncInfo.BPI = nullptr;
4210 }
4211
4212 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
4213 F&: MF->getFunction());
4214 LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
4215 Libcalls = &getAnalysis<LibcallLoweringInfoWrapper>().getLibcallLowering(
4216 M: *F.getParent(), Subtarget);
4217
4218 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(MF&: *MF);
4219
  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(*TLI, TM, *DL);

  assert(PendingPHIs.empty() && "stale PHIs");

  // Targets which want to use big endian can enable it using
  // enableBigEndian()
  if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *ORE, R);
    return false;
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
  llvm::scope_exit FinalizeOnReturn([this]() { finalizeFunction(); });

  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

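  // Seed the swifterror bookkeeping for this function; the entries created up
  // front use the entry block's first debug location.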
  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
  for (const BasicBlock &BB : F) {
    auto *&MBB = FuncInfo.MBBMap[BB.getNumber()];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    // Only mark the block if the BlockAddress actually has users. The
    // hasAddressTaken flag may be stale if the BlockAddress was optimized away
    // but the constant still exists in the uniquing table.
    if (BB.hasAddressTaken()) {
      if (BlockAddress *BA = BlockAddress::lookup(&BB))
        if (!BA->hasZeroLiveUses())
          MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));
    }

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }

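  // Let the frame info know whether this variadic function contains a
  // variadic musttail call; targets use this to forward the variadic register
  // state to the callee.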
  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  // Make our arguments/constants entry block fallthrough to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (CLI->supportSwiftError() && Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: "
      << ore::NV("Prototype", F.getFunctionType());
    reportTranslationError(*MF, *ORE, R);
    return false;
  }

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
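    // Install the observer wrapper on the MachineFunction for the duration of
    // this scope so that every instruction created or erased during
    // translation is reported to the registered observers.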
    RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // If we translated a tail call in the last step, then we know
        // everything after the call is either a return, or something that is
        // handled by the call itself. (E.g. a lifetime marker or assume
        // intrinsic.) In this case, we should stop translating the block and
        // move on.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG

        // Translate any debug-info attached to the instruction.
        translateDbgInfo(Inst, *CurBuilder);

        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStrStorage << "'";
        }

        reportTranslationError(*MF, *ORE, R);
        return false;
      }

      if (!finalizeBasicBlock(*BB, MBB)) {
        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   BB->getTerminator()->getDebugLoc(), BB);
        R << "unable to translate basic block";
        reportTranslationError(*MF, *ORE, R);
        return false;
      }
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

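  // All blocks have been translated now, so the PHIs that were deferred
  // during the walk can have their incoming values and predecessor machine
  // blocks filled in.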
  finishPendingPhis();

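  // Similarly, connect the swifterror vregs across basic-block boundaries now
  // that the whole function has been translated.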
  SwiftError.propagateVRegs();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->deleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}
