//===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "function-lowering-info"

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
  if (I->use_empty()) return false;
  if (isa<PHINode>(I)) return true;
  const BasicBlock *BB = I->getParent();
  for (const User *U : I->users())
    if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      return true;

  return false;
}

static ISD::NodeType getPreferredExtendForValue(const Instruction *I) {
  // Look at how the value is used. If it feeds compare instructions (or call
  // arguments carrying sign/zero-extension attributes) and the signed uses
  // outnumber the unsigned ones, prefer SIGN_EXTEND.
  //
  // This removes some redundant sign- and zero-extension instructions and
  // exposes more machine CSE opportunities.
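  //
  // For example, a value feeding both (icmp slt %x, %a) and (icmp sgt %x, %b)
  // gives NumOfSigned == 2 and NumOfUnsigned == 0, so SIGN_EXTEND is chosen.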
  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
  unsigned NumOfSigned = 0, NumOfUnsigned = 0;
  for (const Use &U : I->uses()) {
    if (const auto *CI = dyn_cast<CmpInst>(U.getUser())) {
      NumOfSigned += CI->isSigned();
      NumOfUnsigned += CI->isUnsigned();
    }
    if (const auto *CallI = dyn_cast<CallBase>(U.getUser())) {
      if (!CallI->isArgOperand(&U))
        continue;
      unsigned ArgNo = CallI->getArgOperandNo(&U);
      NumOfUnsigned += CallI->paramHasAttr(ArgNo, Attribute::ZExt);
      NumOfSigned += CallI->paramHasAttr(ArgNo, Attribute::SExt);
    }
  }
  if (NumOfSigned > NumOfUnsigned)
    ExtendKind = ISD::SIGN_EXTEND;

  return ExtendKind;
}

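/// set - Record the per-function state needed while lowering: frame indices
/// for static allocas, virtual registers for values used outside their
/// defining block, WinEH/Wasm EH metadata, and an initial MachineBasicBlock
/// (with placeholder PHI instructions) for each LLVM basic block other than
/// imaginary catchswitch pads.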
void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
                               SelectionDAG *DAG) {
  Fn = &fn;
  MF = &mf;
  TLI = MF->getSubtarget().getTargetLowering();
  RegInfo = &MF->getRegInfo();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  UA = DAG->getUniformityInfo();

  // Check whether the function can return without sret-demotion.
  SmallVector<ISD::OutputArg, 4> Outs;
  CallingConv::ID CC = Fn->getCallingConv();

  GetReturnInfo(CC, Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
                mf.getDataLayout());
  CanLowerReturn =
      TLI->CanLowerReturn(CC, *MF, Fn->isVarArg(), Outs, Fn->getContext());

  // If this personality uses funclets, we need to do a bit more work.
  DenseMap<const AllocaInst *, TinyPtrVector<int *>> CatchObjects;
  EHPersonality Personality = classifyEHPersonality(
      Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr);
  if (isFuncletEHPersonality(Personality)) {
    // Calculate state numbers if we haven't already.
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
    if (Personality == EHPersonality::MSVC_CXX)
      calculateWinCXXEHStateNumbers(&fn, EHInfo);
    else if (isAsynchronousEHPersonality(Personality))
      calculateSEHStateNumbers(&fn, EHInfo);
    else if (Personality == EHPersonality::CoreCLR)
      calculateClrEHStateNumbers(&fn, EHInfo);

    // Collect the catch object allocas so their frame indices can be filled
    // in once the allocas are laid out below.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (const AllocaInst *AI = H.CatchObj.Alloca)
          CatchObjects.insert({AI, {}}).first->second.push_back(
              &H.CatchObj.FrameIndex);
        else
          H.CatchObj.FrameIndex = INT_MAX;
      }
    }
  }

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  const Align StackAlign = TFI->getStackAlign();
  for (const BasicBlock &BB : *Fn) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        Type *Ty = AI->getAllocatedType();
        Align Alignment = AI->getAlign();

        // Static allocas can be folded into the initial stack frame
        // adjustment. For targets that don't realign the stack, don't
        // do this if there is an extra alignment requirement.
        if (AI->isStaticAlloca() &&
            (TFI->isStackRealignable() || (Alignment <= StackAlign))) {
          const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
          uint64_t TySize =
              MF->getDataLayout().getTypeAllocSize(Ty).getKnownMinValue();

          TySize *= CUI->getZExtValue(); // Get total allocated size.
          if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
          int FrameIndex = INT_MAX;
          auto Iter = CatchObjects.find(AI);
          if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
            FrameIndex = MF->getFrameInfo().CreateFixedObject(
                TySize, 0, /*IsImmutable=*/false, /*isAliased=*/true);
            MF->getFrameInfo().setObjectAlignment(FrameIndex, Alignment);
          } else {
            FrameIndex = MF->getFrameInfo().CreateStackObject(TySize, Alignment,
                                                              false, AI);
          }

          // Scalable vectors and structures that contain scalable vectors may
          // need a special StackID to distinguish them from other (fixed size)
          // stack objects.
          if (Ty->isScalableTy())
            MF->getFrameInfo().setStackID(FrameIndex,
                                          TFI->getStackIDForScalableVectors());

          StaticAllocaMap[AI] = FrameIndex;
          // Update the catch handler information.
          if (Iter != CatchObjects.end()) {
            for (int *CatchObjPtr : Iter->second)
              *CatchObjPtr = FrameIndex;
          }
        } else {
          // FIXME: Overaligned static allocas should be grouped into
          // a single dynamic allocation instead of using a separate
          // stack allocation for each one.
          // Inform the Frame Information that we have variable-sized objects.
          MF->getFrameInfo().CreateVariableSizedObject(
              Alignment <= StackAlign ? Align(1) : Alignment, AI);
        }
      } else if (auto *Call = dyn_cast<CallBase>(&I)) {
        // Look for inline asm that clobbers the SP register.
        if (Call->isInlineAsm()) {
          Register SP = TLI->getStackPointerRegisterToSaveRestore();
          const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
          std::vector<TargetLowering::AsmOperandInfo> Ops =
              TLI->ParseConstraints(Fn->getDataLayout(), TRI, *Call);
          for (TargetLowering::AsmOperandInfo &Op : Ops) {
            if (Op.Type == InlineAsm::isClobber) {
              // Clobbers don't have SDValue operands, hence SDValue().
              TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
              std::pair<unsigned, const TargetRegisterClass *> PhysReg =
                  TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
                                                    Op.ConstraintVT);
              if (PhysReg.first == SP)
                MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
            }
          }
        }
        // Look for calls to the @llvm.va_start intrinsic. We can omit some
        // prologue boilerplate for variadic functions that don't examine their
        // arguments.
        if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
          if (II->getIntrinsicID() == Intrinsic::vastart)
            MF->getFrameInfo().setHasVAStart(true);
        }

        // If we have a musttail call in a variadic function, we need to ensure
        // we forward implicit register parameters.
        if (const auto *CI = dyn_cast<CallInst>(&I)) {
          if (CI->isMustTailCall() && Fn->isVarArg())
            MF->getFrameInfo().setHasMustTailInVarArgFunc(true);
        }

        // Determine if there is a call to setjmp in the machine function.
        if (Call->hasFnAttr(Attribute::ReturnsTwice))
          MF->setExposesReturnsTwice(true);
      }

      // Mark values used outside their block as exported, by allocating
      // a virtual register for them.
      if (isUsedOutsideOfDefiningBlock(&I))
        if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(&I)))
          InitializeRegForValue(&I);

      // Decide the preferred extend type for a value. This iterates over all
      // users and therefore isn't cheap, so don't do this at O0.
      if (DAG->getOptLevel() != CodeGenOptLevel::None)
        PreferredExtendType[&I] = getPreferredExtendForValue(&I);
    }
  }

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (const BasicBlock &BB : *Fn) {
    // Don't create MachineBasicBlocks for imaginary EH pad blocks. These
    // blocks are really data, and no instructions can live here.
    if (BB.isEHPad()) {
      const Instruction *PadInst = BB.getFirstNonPHI();
      // If this is a non-landingpad EH pad, mark this function as using
      // funclets.
      // FIXME: SEH catchpads do not create EH scope/funclets, so we could avoid
      // setting this in such cases in order to improve frame layout.
      if (!isa<LandingPadInst>(PadInst)) {
        MF->setHasEHScopes(true);
        MF->setHasEHFunclets(true);
        MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
      }
      if (isa<CatchSwitchInst>(PadInst)) {
        assert(&*BB.begin() == PadInst &&
               "WinEHPrepare failed to remove PHIs from imaginary BBs");
        continue;
      }
      if (isa<FuncletPadInst>(PadInst) &&
          Personality != EHPersonality::Wasm_CXX)
        assert(&*BB.begin() == PadInst && "WinEHPrepare failed to demote PHIs");
    }

    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&BB);
    MBBMap[&BB] = MBB;
    MF->push_back(MBB);

    // Transfer the address-taken flag. This is necessary because there could
    // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
    // the first one should be marked.
    if (BB.hasAddressTaken())
      MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));

    // Mark landing pad blocks.
    if (BB.isEHPad())
      MBB->setIsEHPad();

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (const PHINode &PN : BB.phis()) {
      if (PN.use_empty())
        continue;

      // Skip empty types
      if (PN.getType()->isEmptyTy())
        continue;

      DebugLoc DL = PN.getDebugLoc();
      unsigned PHIReg = ValueMap[&PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(*TLI, MF->getDataLayout(), PN.getType(), ValueVTs);
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }

  if (isFuncletEHPersonality(Personality)) {
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();

    // Map all BB references in the WinEH data to MBBs.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (H.Handler)
          H.Handler = MBBMap[cast<const BasicBlock *>(H.Handler)];
      }
    }
    for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
      if (UME.Cleanup)
        UME.Cleanup = MBBMap[cast<const BasicBlock *>(UME.Cleanup)];
    for (SEHUnwindMapEntry &UME : EHInfo.SEHUnwindMap) {
      const auto *BB = cast<const BasicBlock *>(UME.Handler);
      UME.Handler = MBBMap[BB];
    }
    for (ClrEHUnwindMapEntry &CME : EHInfo.ClrEHUnwindMap) {
      const auto *BB = cast<const BasicBlock *>(CME.Handler);
      CME.Handler = MBBMap[BB];
    }
  } else if (Personality == EHPersonality::Wasm_CXX) {
    WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
    calculateWasmEHInfo(&fn, EHInfo);

    // Map all BB references in the Wasm EH data to MBBs.
    DenseMap<BBOrMBB, BBOrMBB> SrcToUnwindDest;
    for (auto &KV : EHInfo.SrcToUnwindDest) {
      const auto *Src = cast<const BasicBlock *>(KV.first);
      const auto *Dest = cast<const BasicBlock *>(KV.second);
      SrcToUnwindDest[MBBMap[Src]] = MBBMap[Dest];
    }
    EHInfo.SrcToUnwindDest = std::move(SrcToUnwindDest);
    DenseMap<BBOrMBB, SmallPtrSet<BBOrMBB, 4>> UnwindDestToSrcs;
    for (auto &KV : EHInfo.UnwindDestToSrcs) {
      const auto *Dest = cast<const BasicBlock *>(KV.first);
      UnwindDestToSrcs[MBBMap[Dest]] = SmallPtrSet<BBOrMBB, 4>();
      for (const auto P : KV.second)
        UnwindDestToSrcs[MBBMap[Dest]].insert(
            MBBMap[cast<const BasicBlock *>(P)]);
    }
    EHInfo.UnwindDestToSrcs = std::move(UnwindDestToSrcs);
  }
}

/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
  MBBMap.clear();
  ValueMap.clear();
  VirtReg2Value.clear();
  StaticAllocaMap.clear();
  LiveOutRegInfo.clear();
  VisitedBBs.clear();
  ArgDbgValues.clear();
  DescribedArgs.clear();
  ByValArgFrameIndexMap.clear();
  RegFixups.clear();
  RegsWithFixups.clear();
  StatepointStackSlots.clear();
  StatepointRelocationMaps.clear();
  PreferredExtendType.clear();
  PreprocessedDbgDeclares.clear();
  PreprocessedDVRDeclares.clear();
}

/// CreateReg - Allocate a single virtual register for the given type.
Register FunctionLoweringInfo::CreateReg(MVT VT, bool isDivergent) {
  return RegInfo->createVirtualRegister(TLI->getRegClassFor(VT, isDivergent));
}

/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
Register FunctionLoweringInfo::CreateRegs(Type *Ty, bool isDivergent) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);

  Register FirstReg;
  for (EVT ValueVT : ValueVTs) {
    MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);

    unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      Register R = CreateReg(RegisterVT, isDivergent);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

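/// CreateRegs - Allocate virtual registers for V. The registers are marked
/// divergent only if uniformity analysis reports V as divergent and the
/// target does not require a uniform register for it.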
Register FunctionLoweringInfo::CreateRegs(const Value *V) {
  return CreateRegs(V->getType(), UA && UA->isDivergent(V) &&
                                      !TLI->requiresUniformRegister(*MF, V));
}

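/// InitializeRegForValue - Create the virtual registers for V and record them
/// in ValueMap. Token values get no register unless they are convergence
/// control tokens.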
Register FunctionLoweringInfo::InitializeRegForValue(const Value *V) {
  // Tokens live in vregs only when used for convergence control.
  if (V->getType()->isTokenTy() && !isa<ConvergenceControlInst>(V))
    return 0;
  Register &R = ValueMap[V];
  assert(R == Register() && "Already initialized this value register!");
  assert(VirtReg2Value.empty());
  return R = CreateRegs(V);
}

/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
/// the register's LiveOutInfo is for a smaller bit width, it is any-extended
/// to the larger bit width (the added high bits become unknown). The bit width
/// must be no smaller than the LiveOutInfo's existing bit width.
const FunctionLoweringInfo::LiveOutInfo *
FunctionLoweringInfo::GetLiveOutRegInfo(Register Reg, unsigned BitWidth) {
  if (!LiveOutRegInfo.inBounds(Reg))
    return nullptr;

  LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
  if (!LOI->IsValid)
    return nullptr;

  if (BitWidth > LOI->Known.getBitWidth()) {
    LOI->NumSignBits = 1;
    LOI->Known = LOI->Known.anyext(BitWidth);
  }

  return LOI;
}

/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
  Type *Ty = PN->getType();
  if (!Ty->isIntegerTy() || Ty->isVectorTy())
    return;

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
  assert(ValueVTs.size() == 1 &&
         "PHIs with non-vector integer types should have a single VT.");
  EVT IntVT = ValueVTs[0];

  if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
    return;
  IntVT = TLI->getRegisterType(PN->getContext(), IntVT);
  unsigned BitWidth = IntVT.getSizeInBits();

  auto It = ValueMap.find(PN);
  if (It == ValueMap.end())
    return;

  Register DestReg = It->second;
  if (DestReg == 0)
    return;
  assert(DestReg.isVirtual() && "Expected a virtual reg");
  LiveOutRegInfo.grow(DestReg);
  LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];

  Value *V = PN->getIncomingValue(0);
  if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
    DestLOI.NumSignBits = 1;
    DestLOI.Known = KnownBits(BitWidth);
    return;
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    APInt Val;
    if (TLI->signExtendConstant(CI))
      Val = CI->getValue().sext(BitWidth);
    else
      Val = CI->getValue().zext(BitWidth);
    DestLOI.NumSignBits = Val.getNumSignBits();
    DestLOI.Known = KnownBits::makeConstant(Val);
  } else {
    assert(ValueMap.count(V) && "V should have been placed in ValueMap when its "
                                "CopyToReg node was created.");
    Register SrcReg = ValueMap[V];
    if (!SrcReg.isVirtual()) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI = *SrcLOI;
  }

  assert(DestLOI.Known.Zero.getBitWidth() == BitWidth &&
         DestLOI.Known.One.getBitWidth() == BitWidth &&
         "Masks should have the same bit width as the type.");

  for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
      DestLOI.NumSignBits = 1;
      DestLOI.Known = KnownBits(BitWidth);
      return;
    }

    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      APInt Val;
      if (TLI->signExtendConstant(CI))
        Val = CI->getValue().sext(BitWidth);
      else
        Val = CI->getValue().zext(BitWidth);
      DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
      DestLOI.Known.Zero &= ~Val;
      DestLOI.Known.One &= Val;
      continue;
    }

    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    Register SrcReg = ValueMap[V];
    if (!SrcReg.isVirtual()) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
    DestLOI.Known = DestLOI.Known.intersectWith(SrcLOI->Known);
  }
}

/// setArgumentFrameIndex - Record the frame index for the byval argument.
/// This overrides any previous frame index entry for this argument.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
                                                 int FI) {
  ByValArgFrameIndexMap[A] = FI;
}

/// getArgumentFrameIndex - Get the frame index for the byval argument.
/// If the argument does not have an assigned frame index, INT_MAX is
/// returned.
int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
  auto I = ByValArgFrameIndexMap.find(A);
  if (I != ByValArgFrameIndexMap.end())
    return I->second;
  LLVM_DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
  return INT_MAX;
}

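/// getCatchPadExceptionPointerVReg - Get (creating it on first use) the
/// virtual register that holds the exception pointer for the given catchpad.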
Register FunctionLoweringInfo::getCatchPadExceptionPointerVReg(
    const Value *CPI, const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = MF->getRegInfo();
  auto I = CatchPadExceptionPointers.insert({CPI, 0});
  Register &VReg = I.first->second;
  if (I.second)
    VReg = MRI.createVirtualRegister(RC);
  assert(VReg && "null vreg in exception pointer table!");
  return VReg;
}

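/// getValueFromVirtualReg - Return the IR value assigned to Vreg, or null if
/// there is none. The reverse map from vreg to value is built lazily from
/// ValueMap on the first query.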
const Value *
FunctionLoweringInfo::getValueFromVirtualReg(Register Vreg) {
  if (VirtReg2Value.empty()) {
    SmallVector<EVT, 4> ValueVTs;
    for (auto &P : ValueMap) {
      ValueVTs.clear();
      ComputeValueVTs(*TLI, Fn->getDataLayout(),
                      P.first->getType(), ValueVTs);
      unsigned Reg = P.second;
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
          VirtReg2Value[Reg++] = P.first;
      }
    }
  }
  return VirtReg2Value.lookup(Vreg);
}