//===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "function-lowering-info"

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it.
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
  if (I->use_empty()) return false;
  if (isa<PHINode>(I)) return true;
  const BasicBlock *BB = I->getParent();
  for (const User *U : I->users())
    if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      return true;

  return false;
}

static ISD::NodeType getPreferredExtendForValue(const Instruction *I) {
  // Look at how the value is used. If more of the compare instructions that
  // consume it use signed predicates than unsigned ones, prefer SIGN_EXTEND.
  //
  // This optimization removes some redundant sign and zero extension
  // instructions and exposes more machine CSE opportunities.
  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
  unsigned NumOfSigned = 0, NumOfUnsigned = 0;
  for (const Use &U : I->uses()) {
    if (const auto *CI = dyn_cast<CmpInst>(U.getUser())) {
      NumOfSigned += CI->isSigned();
      NumOfUnsigned += CI->isUnsigned();
    }
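    // Calls vote as well: a zext or sext attribute on the argument slot that
    // uses this value counts toward the corresponding extension kind.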
    if (const auto *CallI = dyn_cast<CallBase>(U.getUser())) {
      if (!CallI->isArgOperand(&U))
        continue;
      unsigned ArgNo = CallI->getArgOperandNo(&U);
      NumOfUnsigned += CallI->paramHasAttr(ArgNo, Attribute::ZExt);
      NumOfSigned += CallI->paramHasAttr(ArgNo, Attribute::SExt);
    }
  }
  if (NumOfSigned > NumOfUnsigned)
    ExtendKind = ISD::SIGN_EXTEND;

  return ExtendKind;
}

void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
                               SelectionDAG *DAG) {
  Fn = &fn;
  MF = &mf;
  TLI = MF->getSubtarget().getTargetLowering();
  RegInfo = &MF->getRegInfo();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  UA = DAG->getUniformityInfo();

  // Check whether the function can return without sret-demotion.
  SmallVector<ISD::OutputArg, 4> Outs;
  CallingConv::ID CC = Fn->getCallingConv();

  GetReturnInfo(CC, Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
                mf.getDataLayout());
  CanLowerReturn =
      TLI->CanLowerReturn(CC, *MF, Fn->isVarArg(), Outs, Fn->getContext(),
                          Fn->getReturnType());
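  // If it cannot, the return value is later demoted to an sret-style out
  // parameter during SelectionDAG lowering.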

  // If this personality uses funclets, we need to do a bit more work.
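  // CatchObjects maps each catch-object alloca to the FrameIndex fields in the
  // WinEH data that must be patched once the alloca is assigned a stack slot.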
  DenseMap<const AllocaInst *, TinyPtrVector<int *>> CatchObjects;
  EHPersonality Personality = classifyEHPersonality(
      Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr);
  if (isFuncletEHPersonality(Personality)) {
    // Calculate state numbers if we haven't already.
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
    if (Personality == EHPersonality::MSVC_CXX)
      calculateWinCXXEHStateNumbers(&fn, EHInfo);
    else if (isAsynchronousEHPersonality(Personality))
      calculateSEHStateNumbers(&fn, EHInfo);
    else if (Personality == EHPersonality::CoreCLR)
      calculateClrEHStateNumbers(&fn, EHInfo);

    // Record which allocas are used as catch objects so their frame indices
    // can be filled in below once stack slots have been assigned.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (const AllocaInst *AI = H.CatchObj.Alloca)
          CatchObjects[AI].push_back(&H.CatchObj.FrameIndex);
        else
          H.CatchObj.FrameIndex = INT_MAX;
      }
    }
  }

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  const Align StackAlign = TFI->getStackAlign();
  for (const BasicBlock &BB : *Fn) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        Align Alignment = AI->getAlign();

        // Static allocas can be folded into the initial stack frame
        // adjustment. For targets that don't realign the stack, don't
        // do this if there is an extra alignment requirement.
        if (AI->isStaticAlloca() &&
            (TFI->isStackRealignable() || (Alignment <= StackAlign))) {
          TypeSize AllocaSize = AI->getAllocationSize(MF->getDataLayout())
                                    .value_or(TypeSize::getZero());
          uint64_t TySize = AllocaSize.getKnownMinValue();
          if (TySize == 0)
            TySize = 1; // Don't create zero-sized stack objects.
          int FrameIndex = INT_MAX;
          auto Iter = CatchObjects.find(AI);
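          // Catch objects on targets that require them at a fixed location get
          // a fixed stack object, so the EH runtime can find them at a known
          // frame offset; everything else becomes an ordinary stack object.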
          if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
            FrameIndex = MF->getFrameInfo().CreateFixedObject(
                TySize, 0, /*IsImmutable=*/false, /*isAliased=*/true);
            MF->getFrameInfo().setObjectAlignment(FrameIndex, Alignment);
          } else {
            FrameIndex = MF->getFrameInfo().CreateStackObject(TySize, Alignment,
                                                              false, AI);
          }

          // Scalable vectors and structures that contain scalable vectors may
          // need a special StackID to distinguish them from other (fixed size)
          // stack objects.
          if (AllocaSize.isScalable())
            MF->getFrameInfo().setStackID(FrameIndex,
                                          TFI->getStackIDForScalableVectors());

          StaticAllocaMap[AI] = FrameIndex;
          // Update the catch handler information.
          if (Iter != CatchObjects.end()) {
            for (int *CatchObjPtr : Iter->second)
              *CatchObjPtr = FrameIndex;
          }
        } else {
          // FIXME: Overaligned static allocas should be grouped into
          // a single dynamic allocation instead of using a separate
          // stack allocation for each one.
          // Inform the Frame Information that we have variable-sized objects.
          MF->getFrameInfo().CreateVariableSizedObject(
              Alignment <= StackAlign ? Align(1) : Alignment, AI);
        }
      } else if (auto *Call = dyn_cast<CallBase>(&I)) {
        // Look for inline asm that clobbers the SP register.
        if (Call->isInlineAsm()) {
          Register SP = TLI->getStackPointerRegisterToSaveRestore();
          const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
          std::vector<TargetLowering::AsmOperandInfo> Ops =
              TLI->ParseConstraints(Fn->getDataLayout(), TRI, *Call);
          for (TargetLowering::AsmOperandInfo &Op : Ops) {
            if (Op.Type == InlineAsm::isClobber) {
              // Clobbers don't have SDValue operands, hence SDValue().
              TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
              std::pair<unsigned, const TargetRegisterClass *> PhysReg =
                  TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
                                                    Op.ConstraintVT);
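              // If the asm clobbers SP, frame lowering can no longer assume
              // SP-relative offsets stay stable across the asm, so record an
              // opaque SP adjustment.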
              if (PhysReg.first == SP)
                MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
            }
          }
        }
        if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
          switch (II->getIntrinsicID()) {
          case Intrinsic::vastart:
            // Look for calls to the @llvm.va_start intrinsic. We can omit
            // some prologue boilerplate for variadic functions that don't
            // examine their arguments.
            MF->getFrameInfo().setHasVAStart(true);
            break;
          case Intrinsic::fake_use:
            // Look for llvm.fake.uses, so that we can remove loads into fake
            // uses later if necessary.
            MF->setHasFakeUses(true);
            break;
          default:
            break;
          }
        }

        // If we have a musttail call in a variadic function, we need to ensure
        // we forward implicit register parameters.
        if (const auto *CI = dyn_cast<CallInst>(&I)) {
          if (CI->isMustTailCall() && Fn->isVarArg())
            MF->getFrameInfo().setHasMustTailInVarArgFunc(true);
        }

        // Determine if there is a call to a returns-twice function (such as
        // setjmp) in this function.
        if (Call->hasFnAttr(Attribute::ReturnsTwice))
          MF->setExposesReturnsTwice(true);
      }

      // Mark values used outside their block as exported, by allocating
      // a virtual register for them.
      if (isUsedOutsideOfDefiningBlock(&I))
        if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(&I)))
          InitializeRegForValue(&I);

      // Decide the preferred extend type for a value. This iterates over all
      // users and therefore isn't cheap, so don't do this at O0.
      if (DAG->getOptLevel() != CodeGenOptLevel::None)
        PreferredExtendType[&I] = getPreferredExtendForValue(&I);
    }
  }

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  MBBMap.resize(Fn->getMaxBlockNumber());
  for (const BasicBlock &BB : *Fn) {
    // Don't create MachineBasicBlocks for imaginary EH pad blocks. These
    // blocks are really data, and no instructions can live here.
    if (BB.isEHPad()) {
      BasicBlock::const_iterator PadInst = BB.getFirstNonPHIIt();
      // If this is a non-landingpad EH pad, mark this function as using
      // funclets.
      // FIXME: SEH catchpads do not create EH scope/funclets, so we could avoid
      // setting this in such cases in order to improve frame layout.
      if (!isa<LandingPadInst>(PadInst)) {
        MF->setHasEHScopes(true);
        MF->setHasEHFunclets(true);
        MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
      }
      if (isa<CatchSwitchInst>(PadInst)) {
        assert(BB.begin() == PadInst &&
               "WinEHPrepare failed to remove PHIs from imaginary BBs");
        continue;
      }
      if (isa<FuncletPadInst>(PadInst) &&
          Personality != EHPersonality::Wasm_CXX)
        assert(BB.begin() == PadInst && "WinEHPrepare failed to demote PHIs");
    }

    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&BB);
    MBBMap[BB.getNumber()] = MBB;
    MF->push_back(MBB);

    // Transfer the address-taken flag. This is necessary because there could
    // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
    // the first one should be marked.
    // Only mark the block if the BlockAddress actually has users. The
    // hasAddressTaken flag may be stale if the BlockAddress was optimized away
    // but the constant still exists in the uniquing table.
    if (BB.hasAddressTaken()) {
      if (BlockAddress *BA = BlockAddress::lookup(&BB))
        if (!BA->hasZeroLiveUses())
          MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));
    }

    // Mark landing pad blocks.
    if (BB.isEHPad())
      MBB->setIsEHPad();

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (const PHINode &PN : BB.phis()) {
      if (PN.use_empty())
        continue;

      // Skip empty types
      if (PN.getType()->isEmptyTy())
        continue;

      DebugLoc DL = PN.getDebugLoc();
      Register PHIReg = ValueMap[&PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(*TLI, MF->getDataLayout(), PN.getType(), ValueVTs);
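      // A single IR PHI may need several virtual registers after type
      // legalization (for example, an i128 split into two i64 parts on a
      // 64-bit target); emit one machine PHI instruction per register.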
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }

  if (isFuncletEHPersonality(Personality)) {
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();

    // Map all BB references in the WinEH data to MBBs.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (H.Handler)
          H.Handler = getMBB(cast<const BasicBlock *>(H.Handler));
      }
    }
    for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
      if (UME.Cleanup)
        UME.Cleanup = getMBB(cast<const BasicBlock *>(UME.Cleanup));
    for (SEHUnwindMapEntry &UME : EHInfo.SEHUnwindMap)
      UME.Handler = getMBB(cast<const BasicBlock *>(UME.Handler));
    for (ClrEHUnwindMapEntry &CME : EHInfo.ClrEHUnwindMap)
      CME.Handler = getMBB(cast<const BasicBlock *>(CME.Handler));
  } else if (Personality == EHPersonality::Wasm_CXX) {
    WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
    calculateWasmEHInfo(&fn, EHInfo);

    // Map all BB references in the Wasm EH data to MBBs.
    DenseMap<BBOrMBB, BBOrMBB> SrcToUnwindDest;
    for (auto &KV : EHInfo.SrcToUnwindDest) {
      const auto *Src = cast<const BasicBlock *>(KV.first);
      const auto *Dest = cast<const BasicBlock *>(KV.second);
      SrcToUnwindDest[getMBB(Src)] = getMBB(Dest);
    }
    EHInfo.SrcToUnwindDest = std::move(SrcToUnwindDest);
    DenseMap<BBOrMBB, SmallPtrSet<BBOrMBB, 4>> UnwindDestToSrcs;
    for (auto &KV : EHInfo.UnwindDestToSrcs) {
      const auto *Dest = cast<const BasicBlock *>(KV.first);
      MachineBasicBlock *DestMBB = getMBB(Dest);
      auto &Srcs = UnwindDestToSrcs[DestMBB];
      for (const auto P : KV.second)
        Srcs.insert(getMBB(cast<const BasicBlock *>(P)));
    }
    EHInfo.UnwindDestToSrcs = std::move(UnwindDestToSrcs);
  }
}

/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
  MBBMap.clear();
  ValueMap.clear();
  VirtReg2Value.clear();
  StaticAllocaMap.clear();
  LiveOutRegInfo.clear();
  VisitedBBs.clear();
  ArgDbgValues.clear();
  DescribedArgs.clear();
  ByValArgFrameIndexMap.clear();
  RegFixups.clear();
  RegsWithFixups.clear();
  StatepointStackSlots.clear();
  StatepointRelocationMaps.clear();
  PreferredExtendType.clear();
  PreprocessedDVRDeclares.clear();
}

/// CreateReg - Allocate a single virtual register for the given type.
Register FunctionLoweringInfo::CreateReg(MVT VT, bool isDivergent) {
  return RegInfo->createVirtualRegister(TLI->getRegClassFor(VT, isDivergent));
}

/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
Register FunctionLoweringInfo::CreateRegs(Type *Ty, bool isDivergent) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);

  Register FirstReg;
  for (EVT ValueVT : ValueVTs) {
    MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);

    unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      Register R = CreateReg(RegisterVT, isDivergent);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

Register FunctionLoweringInfo::CreateRegs(const Value *V) {
  return CreateRegs(V->getType(), UA && UA->isDivergent(V) &&
                                      !TLI->requiresUniformRegister(*MF, V));
}

Register FunctionLoweringInfo::InitializeRegForValue(const Value *V) {
  // Tokens live in vregs only when used for convergence control.
  if (V->getType()->isTokenTy() && !isa<ConvergenceControlInst>(V))
    return 0;
  Register &R = ValueMap[V];
  assert(R == Register() && "Already initialized this value register!");
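  // VirtReg2Value is built lazily from ValueMap; creating new registers after
  // it has been populated would leave it stale, hence the assert below.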
  assert(VirtReg2Value.empty());
  return R = CreateRegs(V);
}

/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
/// the register's LiveOutInfo is for a smaller bit width, it is any-extended
/// to the larger bit width (the added high bits are unknown). The bit width
/// must be no smaller than the LiveOutInfo's existing bit width.
const FunctionLoweringInfo::LiveOutInfo *
FunctionLoweringInfo::GetLiveOutRegInfo(Register Reg, unsigned BitWidth) {
  if (!LiveOutRegInfo.inBounds(Reg))
    return nullptr;

  LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
  if (!LOI->IsValid)
    return nullptr;

  if (BitWidth > LOI->Known.getBitWidth()) {
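    // Widening discards high-bit knowledge: the extended bits are unknown, so
    // the sign-bit count collapses to 1 and only the low bits of Known remain.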
    LOI->NumSignBits = 1;
    LOI->Known = LOI->Known.anyext(BitWidth);
  }

  return LOI;
}

/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
  Type *Ty = PN->getType();
  if (!Ty->isIntegerTy())
    return;

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
  assert(ValueVTs.size() == 1 &&
         "PHIs with non-vector integer types should have a single VT.");
  EVT IntVT = ValueVTs[0];

  unsigned NumRegisters = TLI->getNumRegisters(PN->getContext(), IntVT);
  // FIXME: Support multiple registers for big endian targets.
  if (NumRegisters != 1 && MF->getDataLayout().isBigEndian())
    return;
  IntVT = TLI->getRegisterType(PN->getContext(), IntVT);
  unsigned BitWidth = IntVT.getSizeInBits();

  auto It = ValueMap.find(PN);
  if (It == ValueMap.end())
    return;

  Register BaseReg = It->second;
  if (!BaseReg)
    return;
  assert(BaseReg.isVirtual() && "Expected a virtual reg");

  for (unsigned RegIdx = 0; RegIdx < NumRegisters; ++RegIdx) {
    // Split registers are assigned sequentially.
    Register DestReg = BaseReg.id() + RegIdx;
    LiveOutRegInfo.grow(DestReg);
    LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];

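    // Seed the live-out info from the first incoming value, then intersect it
    // with the information derived from each remaining incoming value below.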
    Value *V = PN->getIncomingValue(0);
    if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
      DestLOI.NumSignBits = 1;
      DestLOI.Known = KnownBits(BitWidth);
      continue;
    }

    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      APInt Val;
      if (TLI->signExtendConstant(CI))
        Val = CI->getValue().sext(BitWidth * NumRegisters);
      else
        Val = CI->getValue().zext(BitWidth * NumRegisters);
      APInt Extracted = Val.extractBits(BitWidth, BitWidth * RegIdx);
      DestLOI.NumSignBits = Extracted.getNumSignBits();
      DestLOI.Known = KnownBits::makeConstant(Extracted);
    } else {
      assert(ValueMap.count(V) &&
             "V should have been placed in ValueMap when its "
             "CopyToReg node was created.");
      Register SrcReg = ValueMap[V];
      if (!SrcReg.isVirtual()) {
        DestLOI.IsValid = false;
        continue;
      }
      // Split registers are assigned sequentially.
      SrcReg = SrcReg.id() + RegIdx;
      const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
      if (!SrcLOI) {
        DestLOI.IsValid = false;
        continue;
      }
      DestLOI = *SrcLOI;
    }

    assert(DestLOI.Known.Zero.getBitWidth() == BitWidth &&
           DestLOI.Known.One.getBitWidth() == BitWidth &&
           "Masks should have the same bit width as the type.");

    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *V = PN->getIncomingValue(i);
      if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
        DestLOI.NumSignBits = 1;
        DestLOI.Known = KnownBits(BitWidth);
        break;
      }

      if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
        APInt Val;
        if (TLI->signExtendConstant(CI))
          Val = CI->getValue().sext(BitWidth * NumRegisters);
        else
          Val = CI->getValue().zext(BitWidth * NumRegisters);
        APInt Extracted = Val.extractBits(BitWidth, BitWidth * RegIdx);
        DestLOI.NumSignBits =
            std::min(DestLOI.NumSignBits, Extracted.getNumSignBits());
        DestLOI.Known =
            DestLOI.Known.intersectWith(KnownBits::makeConstant(Extracted));
        continue;
      }

      assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                  "its CopyToReg node was created.");
      Register SrcReg = ValueMap[V];
      if (!SrcReg.isVirtual()) {
        DestLOI.IsValid = false;
        break;
      }
      // Split registers are assigned sequentially.
      SrcReg = SrcReg.id() + RegIdx;
      const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
      if (!SrcLOI) {
        DestLOI.IsValid = false;
        break;
      }
      DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
      DestLOI.Known = DestLOI.Known.intersectWith(SrcLOI->Known);
    }
  }
}

/// setArgumentFrameIndex - Record frame index for the byval
/// argument. This overrides any previous frame index entry for this
/// argument.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
                                                 int FI) {
  ByValArgFrameIndexMap[A] = FI;
}

/// getArgumentFrameIndex - Get frame index for the byval argument.
/// If the argument does not have an assigned frame index, INT_MAX is
/// returned.
int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
  auto I = ByValArgFrameIndexMap.find(A);
  if (I != ByValArgFrameIndexMap.end())
    return I->second;
  LLVM_DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
  return INT_MAX;
}


Register FunctionLoweringInfo::getCatchPadExceptionPointerVReg(
    const Value *CPI, const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = MF->getRegInfo();
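  // Allocate a vreg for this catchpad's exception pointer on first request;
  // later queries for the same catchpad return the cached register.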
  auto I = CatchPadExceptionPointers.insert({CPI, 0});
  Register &VReg = I.first->second;
  if (I.second)
    VReg = MRI.createVirtualRegister(RC);
  assert(VReg && "null vreg in exception pointer table!");
  return VReg;
}

const Value *
FunctionLoweringInfo::getValueFromVirtualReg(Register Vreg) {
  if (VirtReg2Value.empty()) {
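    // Build the reverse map lazily on first query: each IR value maps to a run
    // of consecutive vregs, one per legal register of its type.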
    SmallVector<EVT, 4> ValueVTs;
    for (auto &P : ValueMap) {
      ValueVTs.clear();
      ComputeValueVTs(*TLI, Fn->getDataLayout(),
                      P.first->getType(), ValueVTs);
      Register Reg = P.second;
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
          VirtReg2Value[Reg++] = P.first;
      }
    }
  }
  return VirtReg2Value.lookup(Vreg);
}