1//===- SelectionDAGBuilder.h - Selection-DAG building -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements routines for translating from LLVM IR into SelectionDAG IR.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
14#define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
15
#include "StatepointLowering.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <numeric>
#include <optional>
#include <utility>
#include <vector>
40
41namespace llvm {
42
// Forward declarations (alphabetical). UncondBrInst is declared alongside
// CondBrInst since both appear in visitor signatures below.
class AAResults;
class AllocaInst;
class AssumptionCache;
class AtomicCmpXchgInst;
class AtomicRMWInst;
class BasicBlock;
class CallBrInst;
class CallInst;
class CatchPadInst;
class CatchReturnInst;
class CatchSwitchInst;
class CleanupPadInst;
class CleanupReturnInst;
class CondBrInst;
class Constant;
class ConstrainedFPIntrinsic;
class DataLayout;
class DIExpression;
class DILocalVariable;
class DILocation;
class FenceInst;
class FunctionLoweringInfo;
class GCFunctionInfo;
class GCRelocateInst;
class GCResultInst;
class GCStatepointInst;
class IndirectBrInst;
class InvokeInst;
class LandingPadInst;
class LLVMContext;
class LoadInst;
class MachineBasicBlock;
class PHINode;
class ResumeInst;
class ReturnInst;
class SDDbgValue;
class SelectionDAG;
class StoreInst;
class SwiftErrorValueTracking;
class SwitchInst;
class TargetLibraryInfo;
class TargetMachine;
class Type;
class UncondBrInst;
class UnreachableInst;
class Use;
class User;
class VAArgInst;
class Value;
91
92//===----------------------------------------------------------------------===//
93/// SelectionDAGBuilder - This is the common target-independent lowering
94/// implementation that is parameterized by a TargetLowering object.
95///
96class SelectionDAGBuilder {
97 /// The current instruction being visited.
98 const Instruction *CurInst = nullptr;
99
100 DenseMap<const Value*, SDValue> NodeMap;
101
102 /// Maps argument value for unused arguments. This is used
103 /// to preserve debug information for incoming arguments.
104 DenseMap<const Value*, SDValue> UnusedArgNodeMap;
105
106 /// Helper type for DanglingDebugInfoMap.
107 class DanglingDebugInfo {
108 unsigned SDNodeOrder = 0;
109
110 public:
111 DILocalVariable *Variable;
112 DIExpression *Expression;
113 DebugLoc dl;
114 DanglingDebugInfo() = default;
115 DanglingDebugInfo(DILocalVariable *Var, DIExpression *Expr, DebugLoc DL,
116 unsigned SDNO)
117 : SDNodeOrder(SDNO), Variable(Var), Expression(Expr),
118 dl(std::move(DL)) {}
119
120 DILocalVariable *getVariable() const { return Variable; }
121 DIExpression *getExpression() const { return Expression; }
122 DebugLoc getDebugLoc() const { return dl; }
123 unsigned getSDNodeOrder() const { return SDNodeOrder; }
124
125 /// Helper for printing DanglingDebugInfo. This hoop-jumping is to
126 /// store a Value pointer, so that we can print a whole DDI as one object.
127 /// Call SelectionDAGBuilder::printDDI instead of using directly.
128 struct Print {
129 Print(const Value *V, const DanglingDebugInfo &DDI) : V(V), DDI(DDI) {}
130 const Value *V;
131 const DanglingDebugInfo &DDI;
132 friend raw_ostream &operator<<(raw_ostream &OS,
133 const DanglingDebugInfo::Print &P) {
134 OS << "DDI(var=" << *P.DDI.getVariable();
135 if (P.V)
136 OS << ", val=" << *P.V;
137 else
138 OS << ", val=nullptr";
139
140 OS << ", expr=" << *P.DDI.getExpression()
141 << ", order=" << P.DDI.getSDNodeOrder()
142 << ", loc=" << P.DDI.getDebugLoc() << ")";
143 return OS;
144 }
145 };
146 };
147
148 /// Returns an object that defines `raw_ostream &operator<<` for printing.
149 /// Usage example:
150 //// errs() << printDDI(MyDanglingInfo) << " is dangling\n";
151 DanglingDebugInfo::Print printDDI(const Value *V,
152 const DanglingDebugInfo &DDI) {
153 return DanglingDebugInfo::Print(V, DDI);
154 }
155
156 /// Helper type for DanglingDebugInfoMap.
157 typedef std::vector<DanglingDebugInfo> DanglingDebugInfoVector;
158
159 /// Keeps track of dbg_values for which we have not yet seen the referent.
160 /// We defer handling these until we do see it.
161 MapVector<const Value*, DanglingDebugInfoVector> DanglingDebugInfoMap;
162
163 /// Cache the module flag for whether we should use debug-info assignment
164 /// tracking.
165 bool AssignmentTrackingEnabled = false;
166
167public:
168 /// Loads are not emitted to the program immediately. We bunch them up and
169 /// then emit token factor nodes when possible. This allows us to get simple
170 /// disambiguation between loads without worrying about alias analysis.
171 SmallVector<SDValue, 8> PendingLoads;
172
173 /// State used while lowering a statepoint sequence (gc_statepoint,
174 /// gc_relocate, and gc_result). See StatepointLowering.hpp/cpp for details.
175 StatepointLoweringState StatepointLowering;
176
177private:
178 /// CopyToReg nodes that copy values to virtual registers for export to other
179 /// blocks need to be emitted before any terminator instruction, but they have
180 /// no other ordering requirements. We bunch them up and the emit a single
181 /// tokenfactor for them just before terminator instructions.
182 SmallVector<SDValue, 8> PendingExports;
183
184 /// Similar to loads, nodes corresponding to constrained FP intrinsics are
185 /// bunched up and emitted when necessary. These can be moved across each
186 /// other and any (normal) memory operation (load or store), but not across
187 /// calls or instructions having unspecified side effects. As a special
188 /// case, constrained FP intrinsics using fpexcept.strict may not be deleted
189 /// even if otherwise unused, so they need to be chained before any
190 /// terminator instruction (like PendingExports). We track the latter
191 /// set of nodes in a separate list.
192 SmallVector<SDValue, 8> PendingConstrainedFP;
193 SmallVector<SDValue, 8> PendingConstrainedFPStrict;
194
195 /// Update root to include all chains from the Pending list.
196 SDValue updateRoot(SmallVectorImpl<SDValue> &Pending);
197
198 /// Given a node representing a floating-point operation and its specified
199 /// exception behavior, this either updates the root or stores the node in
200 /// a list to be added to chains latter.
201 void pushFPOpOutChain(SDValue Result, fp::ExceptionBehavior EB);
202
203 /// A unique monotonically increasing number used to order the SDNodes we
204 /// create.
205 unsigned SDNodeOrder;
206
207 /// Emit comparison and split W into two subtrees.
208 void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
209 const SwitchCG::SwitchWorkListItem &W, Value *Cond,
210 MachineBasicBlock *SwitchMBB);
211
212 /// Lower W.
213 void lowerWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
214 MachineBasicBlock *SwitchMBB,
215 MachineBasicBlock *DefaultMBB);
216
217 /// Peel the top probability case if it exceeds the threshold
218 MachineBasicBlock *
219 peelDominantCaseCluster(const SwitchInst &SI,
220 SwitchCG::CaseClusterVector &Clusters,
221 BranchProbability &PeeledCaseProb);
222
223private:
224 const TargetMachine &TM;
225
226public:
227 /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
228 /// nodes without a corresponding SDNode.
229 static const unsigned LowestSDNodeOrder = 1;
230
231 SelectionDAG &DAG;
232 BatchAAResults *BatchAA = nullptr;
233 AssumptionCache *AC = nullptr;
234 const TargetLibraryInfo *LibInfo = nullptr;
235 const TargetTransformInfo *TTI = nullptr;
236
237 class SDAGSwitchLowering : public SwitchCG::SwitchLowering {
238 public:
239 SDAGSwitchLowering(SelectionDAGBuilder *sdb, FunctionLoweringInfo &funcinfo)
240 : SwitchCG::SwitchLowering(funcinfo), SDB(sdb) {}
241
242 void addSuccessorWithProb(
243 MachineBasicBlock *Src, MachineBasicBlock *Dst,
244 BranchProbability Prob = BranchProbability::getUnknown()) override {
245 SDB->addSuccessorWithProb(Src, Dst, Prob);
246 }
247
248 private:
249 SelectionDAGBuilder *SDB = nullptr;
250 };
251
252 // Data related to deferred switch lowerings. Used to construct additional
253 // Basic Blocks in SelectionDAGISel::FinishBasicBlock.
254 std::unique_ptr<SDAGSwitchLowering> SL;
255
256 /// A StackProtectorDescriptor structure used to communicate stack protector
257 /// information in between SelectBasicBlock and FinishBasicBlock.
258 StackProtectorDescriptor SPDescriptor;
259
260 // Emit PHI-node-operand constants only once even if used by multiple
261 // PHI nodes.
262 DenseMap<const Constant *, Register> ConstantsOut;
263
264 /// Information about the function as a whole.
265 FunctionLoweringInfo &FuncInfo;
266
267 /// Information about the swifterror values used throughout the function.
268 SwiftErrorValueTracking &SwiftError;
269
270 /// Garbage collection metadata for the function.
271 GCFunctionInfo *GFI = nullptr;
272
273 /// Map a landing pad to the call site indexes.
274 DenseMap<MachineBasicBlock *, SmallVector<unsigned, 4>> LPadToCallSiteMap;
275
276 /// This is set to true if a call in the current block has been translated as
277 /// a tail call. In this case, no subsequent DAG nodes should be created.
278 bool HasTailCall = false;
279
280 LLVMContext *Context = nullptr;
281
282 SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
283 SwiftErrorValueTracking &swifterror, CodeGenOptLevel ol)
284 : SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()), DAG(dag),
285 SL(std::make_unique<SDAGSwitchLowering>(args: this, args&: funcinfo)),
286 FuncInfo(funcinfo), SwiftError(swifterror) {}
287
288 void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC,
289 const TargetLibraryInfo *li, const TargetTransformInfo &TTI);
290
291 /// Clear out the current SelectionDAG and the associated state and prepare
292 /// this SelectionDAGBuilder object to be used for a new block. This doesn't
293 /// clear out information about additional blocks that are needed to complete
294 /// switch lowering or PHI node updating; that information is cleared out as
295 /// it is consumed.
296 void clear();
297
298 /// Clear the dangling debug information map. This function is separated from
299 /// the clear so that debug information that is dangling in a basic block can
300 /// be properly resolved in a different basic block. This allows the
301 /// SelectionDAG to resolve dangling debug information attached to PHI nodes.
302 void clearDanglingDebugInfo();
303
304 /// Return the current virtual root of the Selection DAG, flushing any
305 /// PendingLoad items. This must be done before emitting a store or any other
306 /// memory node that may need to be ordered after any prior load instructions.
307 SDValue getMemoryRoot();
308
309 /// Return the current virtual root of the Selection DAG, flushing
310 /// PendingConstrainedFP or PendingConstrainedFPStrict items if the new
311 /// exception behavior (specified by \p EB) differs from that of the pending
312 /// instructions. This must be done before emitting constrained FP operation
313 /// call.
314 SDValue getFPOperationRoot(fp::ExceptionBehavior EB);
315
316 /// Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict)
317 /// items. This must be done before emitting any call other any other node
318 /// that may need to be ordered after FP instructions due to other side
319 /// effects.
320 SDValue getRoot();
321
322 /// Similar to getRoot, but instead of flushing all the PendingLoad items,
323 /// flush all the PendingExports (and PendingConstrainedFPStrict) items.
324 /// It is necessary to do this before emitting a terminator instruction.
325 SDValue getControlRoot();
326
327 SDLoc getCurSDLoc() const {
328 return SDLoc(CurInst, SDNodeOrder);
329 }
330
331 DebugLoc getCurDebugLoc() const {
332 return CurInst ? CurInst->getDebugLoc() : DebugLoc();
333 }
334
335 void CopyValueToVirtualRegister(const Value *V, Register Reg,
336 ISD::NodeType ExtendType = ISD::ANY_EXTEND);
337
338 void visit(const Instruction &I);
339 void visitDbgInfo(const Instruction &I);
340
341 void visit(unsigned Opcode, const User &I);
342
343 /// If there was virtual register allocated for the value V emit CopyFromReg
344 /// of the specified type Ty. Return empty SDValue() otherwise.
345 SDValue getCopyFromRegs(const Value *V, Type *Ty);
346
347 /// Register a dbg_value which relies on a Value which we have not yet seen.
348 void addDanglingDebugInfo(SmallVectorImpl<Value *> &Values,
349 DILocalVariable *Var, DIExpression *Expr,
350 bool IsVariadic, DebugLoc DL, unsigned Order);
351
352 /// If we have dangling debug info that describes \p Variable, or an
353 /// overlapping part of variable considering the \p Expr, then this method
354 /// will drop that debug info as it isn't valid any longer.
355 void dropDanglingDebugInfo(const DILocalVariable *Variable,
356 const DIExpression *Expr);
357
358 /// If we saw an earlier dbg_value referring to V, generate the debug data
359 /// structures now that we've seen its definition.
360 void resolveDanglingDebugInfo(const Value *V, SDValue Val);
361
362 /// For the given dangling debuginfo record, perform last-ditch efforts to
363 /// resolve the debuginfo to something that is represented in this DAG. If
364 /// this cannot be done, produce an Undef debug value record.
365 void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI);
366
367 /// For a given list of Values, attempt to create and record a SDDbgValue in
368 /// the SelectionDAG.
369 bool handleDebugValue(ArrayRef<const Value *> Values, DILocalVariable *Var,
370 DIExpression *Expr, DebugLoc DbgLoc, unsigned Order,
371 bool IsVariadic);
372
373 /// Create a record for a kill location debug intrinsic.
374 void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr,
375 DebugLoc DbgLoc, unsigned Order);
376
377 void handleDebugDeclare(Value *Address, DILocalVariable *Variable,
378 DIExpression *Expression, DebugLoc DL);
379
380 /// Evict any dangling debug information, attempting to salvage it first.
381 void resolveOrClearDbgInfo();
382
383 SDValue getValue(const Value *V);
384
385 SDValue getNonRegisterValue(const Value *V);
386 SDValue getValueImpl(const Value *V);
387
388 void setValue(const Value *V, SDValue NewN) {
389 SDValue &N = NodeMap[V];
390 assert(!N.getNode() && "Already set a value for this node!");
391 N = NewN;
392 }
393
394 void setUnusedArgValue(const Value *V, SDValue NewN) {
395 SDValue &N = UnusedArgNodeMap[V];
396 assert(!N.getNode() && "Already set a value for this node!");
397 N = NewN;
398 }
399
400 bool shouldKeepJumpConditionsTogether(
401 const FunctionLoweringInfo &FuncInfo, const CondBrInst &I,
402 Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs,
403 TargetLoweringBase::CondMergingParams Params) const;
404
405 void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
406 MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
407 MachineBasicBlock *SwitchBB,
408 Instruction::BinaryOps Opc, BranchProbability TProb,
409 BranchProbability FProb, bool InvertCond);
410 void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
411 MachineBasicBlock *FBB,
412 MachineBasicBlock *CurBB,
413 MachineBasicBlock *SwitchBB,
414 BranchProbability TProb, BranchProbability FProb,
415 bool InvertCond);
416 bool ShouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
417 bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
418 void CopyToExportRegsIfNeeded(const Value *V);
419 void ExportFromCurrentBlock(const Value *V);
420 void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall,
421 bool IsMustTailCall, const BasicBlock *EHPadBB = nullptr,
422 const TargetLowering::PtrAuthInfo *PAI = nullptr);
423
424 // Check some of the target-independent constraints for tail calls. This does
425 // not iterate over the call arguments.
426 bool canTailCall(const CallBase &CB) const;
427
428 // Lower range metadata from 0 to N to assert zext to an integer of nearest
429 // floor power of two.
430 SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I,
431 SDValue Op);
432
433 // Lower nofpclass attributes to AssertNoFPClass
434 SDValue lowerNoFPClassToAssertNoFPClass(SelectionDAG &DAG,
435 const Instruction &I, SDValue Op);
436
437 void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI,
438 const CallBase *Call, unsigned ArgIdx,
439 unsigned NumArgs, SDValue Callee,
440 Type *ReturnTy, AttributeSet RetAttrs,
441 bool IsPatchPoint);
442
443 std::pair<SDValue, SDValue>
444 lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
445 const BasicBlock *EHPadBB = nullptr);
446
447 /// When an MBB was split during scheduling, update the
448 /// references that need to refer to the last resulting block.
449 void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);
450
451 /// Describes a gc.statepoint or a gc.statepoint like thing for the purposes
452 /// of lowering into a STATEPOINT node.
453 struct StatepointLoweringInfo {
454 /// Bases[i] is the base pointer for Ptrs[i]. Together they denote the set
455 /// of gc pointers this STATEPOINT has to relocate.
456 SmallVector<const Value *, 16> Bases;
457 SmallVector<const Value *, 16> Ptrs;
458
459 /// The set of gc.relocate calls associated with this gc.statepoint.
460 SmallVector<const GCRelocateInst *, 16> GCRelocates;
461
462 /// The full list of gc-live arguments to the gc.statepoint being lowered.
463 ArrayRef<const Use> GCLives;
464
465 /// The gc.statepoint instruction.
466 const Instruction *StatepointInstr = nullptr;
467
468 /// The list of gc transition arguments present in the gc.statepoint being
469 /// lowered.
470 ArrayRef<const Use> GCTransitionArgs;
471
472 /// The ID that the resulting STATEPOINT instruction has to report.
473 uint64_t ID = -1;
474
475 /// Information regarding the underlying call instruction.
476 TargetLowering::CallLoweringInfo CLI;
477
478 /// The deoptimization state associated with this gc.statepoint call, if
479 /// any.
480 ArrayRef<const Use> DeoptState;
481
482 /// Flags associated with the meta arguments being lowered.
483 uint64_t StatepointFlags = -1;
484
485 /// The number of patchable bytes the call needs to get lowered into.
486 unsigned NumPatchBytes = -1;
487
488 /// The exception handling unwind destination, in case this represents an
489 /// invoke of gc.statepoint.
490 const BasicBlock *EHPadBB = nullptr;
491
492 explicit StatepointLoweringInfo(SelectionDAG &DAG) : CLI(DAG) {}
493 };
494
495 /// Lower \p SLI into a STATEPOINT instruction.
496 SDValue LowerAsSTATEPOINT(StatepointLoweringInfo &SI);
497
498 // This function is responsible for the whole statepoint lowering process.
499 // It uniformly handles invoke and call statepoints.
500 void LowerStatepoint(const GCStatepointInst &I,
501 const BasicBlock *EHPadBB = nullptr);
502
503 void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee,
504 const BasicBlock *EHPadBB);
505
506 void LowerDeoptimizeCall(const CallInst *CI);
507 void LowerDeoptimizingReturn();
508
509 void LowerCallSiteWithDeoptBundleImpl(const CallBase *Call, SDValue Callee,
510 const BasicBlock *EHPadBB,
511 bool VarArgDisallowed,
512 bool ForceVoidReturnTy);
513
514 void LowerCallSiteWithPtrAuthBundle(const CallBase &CB,
515 const BasicBlock *EHPadBB);
516
517 /// Returns the type of FrameIndex and TargetFrameIndex nodes.
518 MVT getFrameIndexTy() {
519 return DAG.getTargetLoweringInfo().getFrameIndexTy(DL: DAG.getDataLayout());
520 }
521
522private:
523 // Terminator instructions.
524 void visitRet(const ReturnInst &I);
525 void visitUncondBr(const UncondBrInst &I);
526 void visitCondBr(const CondBrInst &I);
527 void visitSwitch(const SwitchInst &I);
528 void visitIndirectBr(const IndirectBrInst &I);
529 void visitUnreachable(const UnreachableInst &I);
530 void visitCleanupRet(const CleanupReturnInst &I);
531 void visitCatchSwitch(const CatchSwitchInst &I);
532 void visitCatchRet(const CatchReturnInst &I);
533 void visitCatchPad(const CatchPadInst &I);
534 void visitCleanupPad(const CleanupPadInst &CPI);
535
536 BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
537 const MachineBasicBlock *Dst) const;
538 void addSuccessorWithProb(
539 MachineBasicBlock *Src, MachineBasicBlock *Dst,
540 BranchProbability Prob = BranchProbability::getUnknown());
541
542public:
543 void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB);
544 void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
545 MachineBasicBlock *ParentBB);
546 void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
547 void visitBitTestHeader(SwitchCG::BitTestBlock &B,
548 MachineBasicBlock *SwitchBB);
549 void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
550 BranchProbability BranchProbToNext, Register Reg,
551 SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
552 void visitJumpTable(SwitchCG::JumpTable &JT);
553 void visitJumpTableHeader(SwitchCG::JumpTable &JT,
554 SwitchCG::JumpTableHeader &JTH,
555 MachineBasicBlock *SwitchBB);
556
557private:
558 // These all get lowered before this pass.
559 void visitInvoke(const InvokeInst &I);
560 void visitCallBrLandingPad(const CallInst &I);
561 void visitResume(const ResumeInst &I);
562
563 void visitCallBr(const CallBrInst &I);
564 void visitCallBrIntrinsic(const CallBrInst &I);
565
566 void visitUnary(const User &I, unsigned Opcode);
567 void visitFNeg(const User &I) { visitUnary(I, Opcode: ISD::FNEG); }
568
569 void visitBinary(const User &I, unsigned Opcode);
570 void visitShift(const User &I, unsigned Opcode);
571 void visitAdd(const User &I) { visitBinary(I, Opcode: ISD::ADD); }
572 void visitFAdd(const User &I) { visitBinary(I, Opcode: ISD::FADD); }
573 void visitSub(const User &I) { visitBinary(I, Opcode: ISD::SUB); }
574 void visitFSub(const User &I) { visitBinary(I, Opcode: ISD::FSUB); }
575 void visitMul(const User &I) { visitBinary(I, Opcode: ISD::MUL); }
576 void visitFMul(const User &I) { visitBinary(I, Opcode: ISD::FMUL); }
577 void visitURem(const User &I) { visitBinary(I, Opcode: ISD::UREM); }
578 void visitSRem(const User &I) { visitBinary(I, Opcode: ISD::SREM); }
579 void visitFRem(const User &I) { visitBinary(I, Opcode: ISD::FREM); }
580 void visitUDiv(const User &I) { visitBinary(I, Opcode: ISD::UDIV); }
581 void visitSDiv(const User &I);
582 void visitFDiv(const User &I) { visitBinary(I, Opcode: ISD::FDIV); }
583 void visitAnd (const User &I) { visitBinary(I, Opcode: ISD::AND); }
584 void visitOr (const User &I) { visitBinary(I, Opcode: ISD::OR); }
585 void visitXor (const User &I) { visitBinary(I, Opcode: ISD::XOR); }
586 void visitShl (const User &I) { visitShift(I, Opcode: ISD::SHL); }
587 void visitLShr(const User &I) { visitShift(I, Opcode: ISD::SRL); }
588 void visitAShr(const User &I) { visitShift(I, Opcode: ISD::SRA); }
589 void visitICmp(const ICmpInst &I);
590 void visitFCmp(const FCmpInst &I);
591 // Visit the conversion instructions
592 void visitTrunc(const User &I);
593 void visitZExt(const User &I);
594 void visitSExt(const User &I);
595 void visitFPTrunc(const User &I);
596 void visitFPExt(const User &I);
597 void visitFPToUI(const User &I);
598 void visitFPToSI(const User &I);
599 void visitUIToFP(const User &I);
600 void visitSIToFP(const User &I);
601 void visitPtrToAddr(const User &I);
602 void visitPtrToInt(const User &I);
603 void visitIntToPtr(const User &I);
604 void visitBitCast(const User &I);
605 void visitAddrSpaceCast(const User &I);
606
607 void visitExtractElement(const User &I);
608 void visitInsertElement(const User &I);
609 void visitShuffleVector(const User &I);
610
611 void visitExtractValue(const ExtractValueInst &I);
612 void visitInsertValue(const InsertValueInst &I);
613 void visitLandingPad(const LandingPadInst &LP);
614
615 void visitGetElementPtr(const User &I);
616 void visitSelect(const User &I);
617
618 void visitAlloca(const AllocaInst &I);
619 void visitLoad(const LoadInst &I);
620 void visitStore(const StoreInst &I);
621 void visitMaskedLoad(const CallInst &I, bool IsExpanding = false);
622 void visitMaskedStore(const CallInst &I, bool IsCompressing = false);
623 void visitMaskedGather(const CallInst &I);
624 void visitMaskedScatter(const CallInst &I);
625 void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
626 void visitAtomicRMW(const AtomicRMWInst &I);
627 void visitFence(const FenceInst &I);
628 void visitPHI(const PHINode &I);
629 void visitCall(const CallInst &I);
630 bool visitMemCmpBCmpCall(const CallInst &I);
631 bool visitMemCCpyCall(const CallInst &I);
632 bool visitMemPCpyCall(const CallInst &I);
633 bool visitMemChrCall(const CallInst &I);
634 bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
635 bool visitStrCmpCall(const CallInst &I);
636 bool visitStrLenCall(const CallInst &I);
637 bool visitStrNLenCall(const CallInst &I);
638 bool visitStrstrCall(const CallInst &I);
639 bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
640 bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode);
641 void visitAtomicLoad(const LoadInst &I);
642 void visitAtomicStore(const StoreInst &I);
643 void visitLoadFromSwiftError(const LoadInst &I);
644 void visitStoreToSwiftError(const StoreInst &I);
645 void visitFreeze(const FreezeInst &I);
646
647 void visitInlineAsm(const CallBase &Call,
648 const BasicBlock *EHPadBB = nullptr);
649
650 bool visitEntryValueDbgValue(ArrayRef<const Value *> Values,
651 DILocalVariable *Variable, DIExpression *Expr,
652 DebugLoc DbgLoc);
653 void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
654 void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
655 void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);
656 void visitConvergenceControl(const CallInst &I, unsigned Intrinsic);
657 void visitVectorHistogram(const CallInst &I, unsigned IntrinsicID);
658 void visitVectorExtractLastActive(const CallInst &I, unsigned Intrinsic);
659 void visitVPLoad(const VPIntrinsic &VPIntrin, EVT VT,
660 const SmallVectorImpl<SDValue> &OpValues);
661 void visitVPLoadFF(const VPIntrinsic &VPIntrin, EVT VT, EVT EVLVT,
662 const SmallVectorImpl<SDValue> &OpValues);
663 void visitVPStore(const VPIntrinsic &VPIntrin,
664 const SmallVectorImpl<SDValue> &OpValues);
665 void visitVPGather(const VPIntrinsic &VPIntrin, EVT VT,
666 const SmallVectorImpl<SDValue> &OpValues);
667 void visitVPScatter(const VPIntrinsic &VPIntrin,
668 const SmallVectorImpl<SDValue> &OpValues);
669 void visitVPStridedLoad(const VPIntrinsic &VPIntrin, EVT VT,
670 const SmallVectorImpl<SDValue> &OpValues);
671 void visitVPStridedStore(const VPIntrinsic &VPIntrin,
672 const SmallVectorImpl<SDValue> &OpValues);
673 void visitVPCmp(const VPCmpIntrinsic &VPIntrin);
674 void visitVectorPredicationIntrinsic(const VPIntrinsic &VPIntrin);
675
676 void visitVAStart(const CallInst &I);
677 void visitVAArg(const VAArgInst &I);
678 void visitVAEnd(const CallInst &I);
679 void visitVACopy(const CallInst &I);
680 void visitStackmap(const CallInst &I);
681 void visitPatchpoint(const CallBase &CB, const BasicBlock *EHPadBB = nullptr);
682
683 // These two are implemented in StatepointLowering.cpp
684 void visitGCRelocate(const GCRelocateInst &Relocate);
685 void visitGCResult(const GCResultInst &I);
686
687 void visitVectorReduce(const CallInst &I, unsigned Intrinsic);
688 void visitVectorReverse(const CallInst &I);
689 void visitVectorSplice(const CallInst &I);
690 void visitVectorInterleave(const CallInst &I, unsigned Factor);
691 void visitVectorDeinterleave(const CallInst &I, unsigned Factor);
692 void visitStepVector(const CallInst &I);
693
694 void visitUserOp1(const Instruction &I) {
695 llvm_unreachable("UserOp1 should not exist at instruction selection time!");
696 }
697 void visitUserOp2(const Instruction &I) {
698 llvm_unreachable("UserOp2 should not exist at instruction selection time!");
699 }
700
701 void processIntegerCallValue(const Instruction &I,
702 SDValue Value, bool IsSigned);
703
704 void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
705
706 void emitInlineAsmError(const CallBase &Call, const Twine &Message);
707
708 /// An enum that states to emit func argument dbg value the kind of intrinsic
709 /// it originally had. This controls the internal behavior of
710 /// EmitFuncArgumentDbgValue.
711 enum class FuncArgumentDbgValueKind {
712 Value, // This was originally a llvm.dbg.value.
713 Declare, // This was originally a llvm.dbg.declare.
714 };
715
716 /// If V is an function argument then create corresponding DBG_VALUE machine
717 /// instruction for it now. At the end of instruction selection, they will be
718 /// inserted to the entry BB.
719 bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
720 DIExpression *Expr, DILocation *DL,
721 FuncArgumentDbgValueKind Kind,
722 const SDValue &N);
723
724 /// Return the next block after MBB, or nullptr if there is none.
725 MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);
726
727 /// Update the DAG and DAG builder with the relevant information after
728 /// a new root node has been created which could be a tail call.
729 void updateDAGForMaybeTailCall(SDValue MaybeTC);
730
731 /// Return the appropriate SDDbgValue based on N.
732 SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable,
733 DIExpression *Expr, const DebugLoc &dl,
734 unsigned DbgSDNodeOrder);
735
736 SDValue lowerStartEH(SDValue Chain, const BasicBlock *EHPadBB,
737 MCSymbol *&BeginLabel);
738 SDValue lowerEndEH(SDValue Chain, const InvokeInst *II,
739 const BasicBlock *EHPadBB, MCSymbol *BeginLabel);
740
741 std::pair<bool, bool> getTargetIntrinsicCallProperties(const CallBase &I);
742 SmallVector<SDValue, 8> getTargetIntrinsicOperands(
743 const CallBase &I, bool HasChain, bool OnlyLoad,
744 TargetLowering::IntrinsicInfo *TgtMemIntrinsicInfo = nullptr);
745 SDVTList getTargetIntrinsicVTList(const CallBase &I, bool HasChain);
746 SDValue getTargetNonMemIntrinsicNode(const Type &IntrinsicVT, bool HasChain,
747 ArrayRef<SDValue> Ops,
748 const SDVTList &VTs);
749 SDValue handleTargetIntrinsicRet(const CallBase &I, bool HasChain,
750 bool OnlyLoad, SDValue Result);
751};
752
753/// This struct represents the registers (physical or virtual)
754/// that a particular set of values is assigned, and the type information about
755/// the value. The most common situation is to represent one value at a time,
756/// but struct or array values are handled element-wise as multiple values. The
757/// splitting of aggregates is performed recursively, so that we never have
758/// aggregate-typed registers. The values at this point do not necessarily have
759/// legal types, so each value may require one or more registers of some legal
760/// type.
761///
762struct RegsForValue {
763 /// The value types of the values, which may not be legal, and
764 /// may need be promoted or synthesized from one or more registers.
765 SmallVector<EVT, 4> ValueVTs;
766
767 /// The value types of the registers. This is the same size as ValueVTs and it
768 /// records, for each value, what the type of the assigned register or
769 /// registers are. (Individual values are never synthesized from more than one
770 /// type of register.)
771 ///
772 /// With virtual registers, the contents of RegVTs is redundant with TLI's
773 /// getRegisterType member function, however when with physical registers
774 /// it is necessary to have a separate record of the types.
775 SmallVector<MVT, 4> RegVTs;
776
777 /// This list holds the registers assigned to the values.
778 /// Each legal or promoted value requires one register, and each
779 /// expanded value requires multiple registers.
780 SmallVector<Register, 4> Regs;
781
782 /// This list holds the number of registers for each value.
783 SmallVector<unsigned, 4> RegCount;
784
785 /// Records if this value needs to be treated in an ABI dependant manner,
786 /// different to normal type legalization.
787 std::optional<CallingConv::ID> CallConv;
788
789 RegsForValue() = default;
790 RegsForValue(const SmallVector<Register, 4> &regs, MVT regvt, EVT valuevt,
791 std::optional<CallingConv::ID> CC = std::nullopt);
792 RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
793 const DataLayout &DL, Register Reg, Type *Ty,
794 std::optional<CallingConv::ID> CC);
795
796 bool isABIMangled() const { return CallConv.has_value(); }
797
798 /// Add the specified values to this one.
799 void append(const RegsForValue &RHS) {
800 ValueVTs.append(in_start: RHS.ValueVTs.begin(), in_end: RHS.ValueVTs.end());
801 RegVTs.append(in_start: RHS.RegVTs.begin(), in_end: RHS.RegVTs.end());
802 Regs.append(in_start: RHS.Regs.begin(), in_end: RHS.Regs.end());
803 RegCount.push_back(Elt: RHS.Regs.size());
804 }
805
806 /// Emit a series of CopyFromReg nodes that copies from this value and returns
807 /// the result as a ValueVTs value. This uses Chain/Flag as the input and
808 /// updates them for the output Chain/Flag. If the Flag pointer is NULL, no
809 /// flag is used.
810 SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
811 const SDLoc &dl, SDValue &Chain, SDValue *Glue,
812 const Value *V = nullptr) const;
813
814 /// Emit a series of CopyToReg nodes that copies the specified value into the
815 /// registers specified by this object. This uses Chain/Flag as the input and
816 /// updates them for the output Chain/Flag. If the Flag pointer is nullptr, no
817 /// flag is used. If V is not nullptr, then it is used in printing better
818 /// diagnostic messages on error.
819 void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl,
820 SDValue &Chain, SDValue *Glue, const Value *V = nullptr,
821 ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;
822
823 /// Add this value to the specified inlineasm node operand list. This adds the
824 /// code marker, matching input operand index (if applicable), and includes
825 /// the number of values added into it.
826 void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
827 unsigned MatchingIdx, const SDLoc &dl,
828 SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
829
830 /// Check if the total RegCount is greater than one.
831 bool occupiesMultipleRegs() const {
832 return std::accumulate(first: RegCount.begin(), last: RegCount.end(), init: 0) > 1;
833 }
834
835 /// Return a list of registers and their sizes.
836 SmallVector<std::pair<Register, TypeSize>, 4> getRegsAndSizes() const;
837};
838
839} // end namespace llvm
840
841#endif // LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
842