//===- SelectionDAGBuilder.h - Selection-DAG building -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H

#include "StatepointLowering.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

namespace llvm {

class AAResults;
class AllocaInst;
class AtomicCmpXchgInst;
class AtomicRMWInst;
class AssumptionCache;
class BasicBlock;
class BranchInst;
class CallInst;
class CallBrInst;
class CatchPadInst;
class CatchReturnInst;
class CatchSwitchInst;
class CleanupPadInst;
class CleanupReturnInst;
class Constant;
class ConstrainedFPIntrinsic;
class DbgValueInst;
class DataLayout;
class DIExpression;
class DILocalVariable;
class DILocation;
class FenceInst;
class FunctionLoweringInfo;
class GCFunctionInfo;
class GCRelocateInst;
class GCResultInst;
class GCStatepointInst;
class IndirectBrInst;
class InvokeInst;
class LandingPadInst;
class LLVMContext;
class LoadInst;
class MachineBasicBlock;
class PHINode;
class ResumeInst;
class ReturnInst;
class SDDbgValue;
class SelectionDAG;
class StoreInst;
class SwiftErrorValueTracking;
class SwitchInst;
class TargetLibraryInfo;
class TargetMachine;
class Type;
class VAArgInst;
class UnreachableInst;
class Use;
class User;
class Value;

//===----------------------------------------------------------------------===//
/// SelectionDAGBuilder - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
///
class SelectionDAGBuilder {
  /// The current instruction being visited.
  const Instruction *CurInst = nullptr;

  DenseMap<const Value*, SDValue> NodeMap;

  /// Maps argument values for unused arguments. This is used
  /// to preserve debug information for incoming arguments.
  DenseMap<const Value*, SDValue> UnusedArgNodeMap;

  /// Helper type for DanglingDebugInfoMap.
  class DanglingDebugInfo {
    unsigned SDNodeOrder = 0;

  public:
    DILocalVariable *Variable;
    DIExpression *Expression;
    DebugLoc dl;
    DanglingDebugInfo() = default;
    DanglingDebugInfo(DILocalVariable *Var, DIExpression *Expr, DebugLoc DL,
                      unsigned SDNO)
        : SDNodeOrder(SDNO), Variable(Var), Expression(Expr),
          dl(std::move(DL)) {}

    DILocalVariable *getVariable() const { return Variable; }
    DIExpression *getExpression() const { return Expression; }
    DebugLoc getDebugLoc() const { return dl; }
    unsigned getSDNodeOrder() const { return SDNodeOrder; }

    /// Helper for printing DanglingDebugInfo. This hoop-jumping is to
    /// store a Value pointer, so that we can print a whole DDI as one object.
    /// Call SelectionDAGBuilder::printDDI instead of using directly.
    struct Print {
      Print(const Value *V, const DanglingDebugInfo &DDI) : V(V), DDI(DDI) {}
      const Value *V;
      const DanglingDebugInfo &DDI;
      friend raw_ostream &operator<<(raw_ostream &OS,
                                     const DanglingDebugInfo::Print &P) {
        OS << "DDI(var=" << *P.DDI.getVariable();
        if (P.V)
          OS << ", val=" << *P.V;
        else
          OS << ", val=nullptr";

        OS << ", expr=" << *P.DDI.getExpression()
           << ", order=" << P.DDI.getSDNodeOrder()
           << ", loc=" << P.DDI.getDebugLoc() << ")";
        return OS;
      }
    };
  };

  /// Returns an object that defines `raw_ostream &operator<<` for printing.
  /// Usage example:
  ///   errs() << printDDI(MyValue, MyDanglingInfo) << " is dangling\n";
  DanglingDebugInfo::Print printDDI(const Value *V,
                                    const DanglingDebugInfo &DDI) {
    return DanglingDebugInfo::Print(V, DDI);
  }

  /// Helper type for DanglingDebugInfoMap.
  typedef std::vector<DanglingDebugInfo> DanglingDebugInfoVector;

  /// Keeps track of dbg_values for which we have not yet seen the referent.
  /// We defer handling these until we do see it.
  MapVector<const Value*, DanglingDebugInfoVector> DanglingDebugInfoMap;

  /// Cache the module flag for whether we should use debug-info assignment
  /// tracking.
  bool AssignmentTrackingEnabled = false;

public:
  /// Loads are not emitted to the program immediately. We bunch them up and
  /// then emit token factor nodes when possible. This allows us to get simple
  /// disambiguation between loads without worrying about alias analysis.
  SmallVector<SDValue, 8> PendingLoads;

  /// State used while lowering a statepoint sequence (gc_statepoint,
  /// gc_relocate, and gc_result). See StatepointLowering.h/cpp for details.
  StatepointLoweringState StatepointLowering;

private:
  /// CopyToReg nodes that copy values to virtual registers for export to other
  /// blocks need to be emitted before any terminator instruction, but they have
  /// no other ordering requirements. We bunch them up and then emit a single
  /// token factor for them just before terminator instructions.
  SmallVector<SDValue, 8> PendingExports;

  /// Similar to loads, nodes corresponding to constrained FP intrinsics are
  /// bunched up and emitted when necessary. These can be moved across each
  /// other and any (normal) memory operation (load or store), but not across
  /// calls or instructions having unspecified side effects. As a special
  /// case, constrained FP intrinsics using fpexcept.strict may not be deleted
  /// even if otherwise unused, so they need to be chained before any
  /// terminator instruction (like PendingExports). We track the latter
  /// set of nodes in a separate list.
  SmallVector<SDValue, 8> PendingConstrainedFP;
  SmallVector<SDValue, 8> PendingConstrainedFPStrict;

  /// Update root to include all chains from the Pending list.
  SDValue updateRoot(SmallVectorImpl<SDValue> &Pending);
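
  // Conceptually, flushing a Pending list folds its chains together with the
  // current DAG root into one TokenFactor and installs that as the new root.
  // Rough, illustrative sketch only (the real implementation also
  // special-cases empty and trivial lists):
  //
  //   Pending.push_back(DAG.getRoot());
  //   SDValue Root = DAG.getTokenFactor(getCurSDLoc(), Pending);
  //   DAG.setRoot(Root);
  //   Pending.clear();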

  /// A unique monotonically increasing number used to order the SDNodes we
  /// create.
  unsigned SDNodeOrder;

  /// Emit comparison and split W into two subtrees.
  void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                     const SwitchCG::SwitchWorkListItem &W, Value *Cond,
                     MachineBasicBlock *SwitchMBB);

  /// Lower W.
  void lowerWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                     MachineBasicBlock *SwitchMBB,
                     MachineBasicBlock *DefaultMBB);

  /// Peel the top probability case if it exceeds the threshold.
  MachineBasicBlock *
  peelDominantCaseCluster(const SwitchInst &SI,
                          SwitchCG::CaseClusterVector &Clusters,
                          BranchProbability &PeeledCaseProb);

private:
  const TargetMachine &TM;

public:
  /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
  /// nodes without a corresponding SDNode.
  static const unsigned LowestSDNodeOrder = 1;

  SelectionDAG &DAG;
  AAResults *AA = nullptr;
  AssumptionCache *AC = nullptr;
  const TargetLibraryInfo *LibInfo = nullptr;

  class SDAGSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    SDAGSwitchLowering(SelectionDAGBuilder *sdb, FunctionLoweringInfo &funcinfo)
        : SwitchCG::SwitchLowering(funcinfo), SDB(sdb) {}

    void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      SDB->addSuccessorWithProb(Src, Dst, Prob);
    }

  private:
    SelectionDAGBuilder *SDB = nullptr;
  };

  // Data related to deferred switch lowerings. Used to construct additional
  // Basic Blocks in SelectionDAGISel::FinishBasicBlock.
  std::unique_ptr<SDAGSwitchLowering> SL;

  /// A StackProtectorDescriptor structure used to communicate stack protector
  /// information in between SelectBasicBlock and FinishBasicBlock.
  StackProtectorDescriptor SPDescriptor;

  // Emit PHI-node-operand constants only once even if used by multiple
  // PHI nodes.
  DenseMap<const Constant *, unsigned> ConstantsOut;

  /// Information about the function as a whole.
  FunctionLoweringInfo &FuncInfo;

  /// Information about the swifterror values used throughout the function.
  SwiftErrorValueTracking &SwiftError;

  /// Garbage collection metadata for the function.
  GCFunctionInfo *GFI = nullptr;

  /// Map a landing pad to the call site indexes.
  DenseMap<MachineBasicBlock *, SmallVector<unsigned, 4>> LPadToCallSiteMap;

  /// This is set to true if a call in the current block has been translated as
  /// a tail call. In this case, no subsequent DAG nodes should be created.
  bool HasTailCall = false;

  LLVMContext *Context = nullptr;

  SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
                      SwiftErrorValueTracking &swifterror, CodeGenOptLevel ol)
      : SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()), DAG(dag),
        SL(std::make_unique<SDAGSwitchLowering>(this, funcinfo)),
        FuncInfo(funcinfo), SwiftError(swifterror) {}

  void init(GCFunctionInfo *gfi, AAResults *AA, AssumptionCache *AC,
            const TargetLibraryInfo *li);

  /// Clear out the current SelectionDAG and the associated state and prepare
  /// this SelectionDAGBuilder object to be used for a new block. This doesn't
  /// clear out information about additional blocks that are needed to complete
  /// switch lowering or PHI node updating; that information is cleared out as
  /// it is consumed.
  void clear();

  /// Clear the dangling debug information map. This function is separated from
  /// the clear so that debug information that is dangling in a basic block can
  /// be properly resolved in a different basic block. This allows the
  /// SelectionDAG to resolve dangling debug information attached to PHI nodes.
  void clearDanglingDebugInfo();

  /// Return the current virtual root of the Selection DAG, flushing any
  /// PendingLoad items. This must be done before emitting a store or any other
  /// memory node that may need to be ordered after any prior load instructions.
  SDValue getMemoryRoot();

  /// Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict)
  /// items. This must be done before emitting a call or any other node that
  /// may need to be ordered after FP instructions due to other side effects.
  SDValue getRoot();

  /// Similar to getRoot, but instead of flushing all the PendingLoad items,
  /// flush all the PendingExports (and PendingConstrainedFPStrict) items.
  /// It is necessary to do this before emitting a terminator instruction.
  SDValue getControlRoot();
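
  // Which root a node is chained off determines its ordering guarantees.
  // Illustrative sketch only (Val, Ptr, PtrInfo, and TargetMBB are
  // placeholders): a store chains off the memory root so it is ordered after
  // pending loads, while a terminator chains off the control root so exports
  // and strict-FP nodes are flushed first.
  //
  //   SDValue St = DAG.getStore(getMemoryRoot(), getCurSDLoc(), Val, Ptr,
  //                             PtrInfo);
  //   DAG.setRoot(St);
  //
  //   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
  //                           getControlRoot(), DAG.getBasicBlock(TargetMBB)));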

  SDLoc getCurSDLoc() const {
    return SDLoc(CurInst, SDNodeOrder);
  }

  DebugLoc getCurDebugLoc() const {
    return CurInst ? CurInst->getDebugLoc() : DebugLoc();
  }

  void CopyValueToVirtualRegister(const Value *V, unsigned Reg,
                                  ISD::NodeType ExtendType = ISD::ANY_EXTEND);

  void visit(const Instruction &I);
  void visitDbgInfo(const Instruction &I);

  void visit(unsigned Opcode, const User &I);

  /// If there was a virtual register allocated for the value V, emit a
  /// CopyFromReg of the specified type Ty. Return an empty SDValue() otherwise.
  SDValue getCopyFromRegs(const Value *V, Type *Ty);

  /// Register a dbg_value which relies on a Value which we have not yet seen.
  void addDanglingDebugInfo(SmallVectorImpl<Value *> &Values,
                            DILocalVariable *Var, DIExpression *Expr,
                            bool IsVariadic, DebugLoc DL, unsigned Order);

  /// If we have dangling debug info that describes \p Variable, or an
  /// overlapping part of the variable considering the \p Expr, then this
  /// method will drop that debug info as it isn't valid any longer.
  void dropDanglingDebugInfo(const DILocalVariable *Variable,
                             const DIExpression *Expr);

  /// If we saw an earlier dbg_value referring to V, generate the debug data
  /// structures now that we've seen its definition.
  void resolveDanglingDebugInfo(const Value *V, SDValue Val);
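
  // The dangling-debug-info flow, in sketch form (illustrative only):
  //
  //   // A dbg_value is seen whose operand V has no SDValue yet:
  //   addDanglingDebugInfo(Values, Var, Expr, IsVariadic, DL, Order);
  //   // Later, once an SDValue for V becomes available:
  //   resolveDanglingDebugInfo(V, Val);   // emit the deferred SDDbgValue
  //   // If V never materializes in this DAG, salvageUnresolvedDbgValue /
  //   // resolveOrClearDbgInfo provide the fallback paths.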

  /// For the given dangling debuginfo record, perform last-ditch efforts to
  /// resolve the debuginfo to something that is represented in this DAG. If
  /// this cannot be done, produce an Undef debug value record.
  void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI);

  /// For a given list of Values, attempt to create and record a SDDbgValue in
  /// the SelectionDAG.
  bool handleDebugValue(ArrayRef<const Value *> Values, DILocalVariable *Var,
                        DIExpression *Expr, DebugLoc DbgLoc, unsigned Order,
                        bool IsVariadic);

  /// Create a record for a kill location debug intrinsic.
  void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr,
                            DebugLoc DbgLoc, unsigned Order);

  void handleDebugDeclare(Value *Address, DILocalVariable *Variable,
                          DIExpression *Expression, DebugLoc DL);

  /// Evict any dangling debug information, attempting to salvage it first.
  void resolveOrClearDbgInfo();

  SDValue getValue(const Value *V);

  SDValue getNonRegisterValue(const Value *V);
  SDValue getValueImpl(const Value *V);

  void setValue(const Value *V, SDValue NewN) {
    SDValue &N = NodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }
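
  // The NodeMap written by setValue (and read back by getValue) is how
  // visitors connect IR operands to DAG operands. A typical two-operand
  // visitor follows this pattern (illustrative sketch; the real visitBinary
  // in SelectionDAGBuilder.cpp also propagates node flags):
  //
  //   SDValue Op1 = getValue(I.getOperand(0));
  //   SDValue Op2 = getValue(I.getOperand(1));
  //   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(),
  //                            Op1.getValueType(), Op1, Op2));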

  void setUnusedArgValue(const Value *V, SDValue NewN) {
    SDValue &N = UnusedArgNodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  bool shouldKeepJumpConditionsTogether(
      const FunctionLoweringInfo &FuncInfo, const BranchInst &I,
      Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs,
      TargetLoweringBase::CondMergingParams Params) const;

  void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);
  void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);
  bool ShouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
  bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
  void CopyToExportRegsIfNeeded(const Value *V);
  void ExportFromCurrentBlock(const Value *V);
  void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall,
                   bool IsMustTailCall, const BasicBlock *EHPadBB = nullptr,
                   const TargetLowering::PtrAuthInfo *PAI = nullptr);

  // Lower range metadata from 0 to N to an AssertZExt to an integer of the
  // nearest floor power of two.
  SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I,
                                 SDValue Op);

  void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI,
                                const CallBase *Call, unsigned ArgIdx,
                                unsigned NumArgs, SDValue Callee,
                                Type *ReturnTy, AttributeSet RetAttrs,
                                bool IsPatchPoint);

  std::pair<SDValue, SDValue>
  lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
                 const BasicBlock *EHPadBB = nullptr);

  /// When an MBB was split during scheduling, update the
  /// references that need to refer to the last resulting block.
  void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);

  /// Describes a gc.statepoint or a gc.statepoint-like construct for the
  /// purposes of lowering into a STATEPOINT node.
  struct StatepointLoweringInfo {
    /// Bases[i] is the base pointer for Ptrs[i]. Together they denote the set
    /// of gc pointers this STATEPOINT has to relocate.
    SmallVector<const Value *, 16> Bases;
    SmallVector<const Value *, 16> Ptrs;

    /// The set of gc.relocate calls associated with this gc.statepoint.
    SmallVector<const GCRelocateInst *, 16> GCRelocates;

    /// The full list of gc arguments to the gc.statepoint being lowered.
    ArrayRef<const Use> GCArgs;

    /// The gc.statepoint instruction.
    const Instruction *StatepointInstr = nullptr;

    /// The list of gc transition arguments present in the gc.statepoint being
    /// lowered.
    ArrayRef<const Use> GCTransitionArgs;

    /// The ID that the resulting STATEPOINT instruction has to report.
    uint64_t ID = -1;

    /// Information regarding the underlying call instruction.
    TargetLowering::CallLoweringInfo CLI;

    /// The deoptimization state associated with this gc.statepoint call, if
    /// any.
    ArrayRef<const Use> DeoptState;

    /// Flags associated with the meta arguments being lowered.
    uint64_t StatepointFlags = -1;

    /// The number of patchable bytes the call needs to get lowered into.
    unsigned NumPatchBytes = -1;

    /// The exception handling unwind destination, in case this represents an
    /// invoke of gc.statepoint.
    const BasicBlock *EHPadBB = nullptr;

    explicit StatepointLoweringInfo(SelectionDAG &DAG) : CLI(DAG) {}
  };

  /// Lower \p SI into a STATEPOINT instruction.
  SDValue LowerAsSTATEPOINT(StatepointLoweringInfo &SI);
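
  // A caller typically fills in a StatepointLoweringInfo from the statepoint
  // call site and then hands it to LowerAsSTATEPOINT. Hedged sketch only;
  // the right-hand sides are placeholders gathered from the call site:
  //
  //   StatepointLoweringInfo SI(DAG);
  //   SI.StatepointInstr = &I;
  //   SI.ID = StatepointID;
  //   SI.NumPatchBytes = NumPatchBytes;
  //   SI.Bases = Bases;            // parallel to SI.Ptrs
  //   SI.Ptrs = Ptrs;
  //   SI.GCRelocates = Relocates;
  //   SI.EHPadBB = EHPadBB;        // only set for invokes
  //   SDValue ReturnValue = LowerAsSTATEPOINT(SI);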

  // This function is responsible for the whole statepoint lowering process.
  // It uniformly handles invoke and call statepoints.
  void LowerStatepoint(const GCStatepointInst &I,
                       const BasicBlock *EHPadBB = nullptr);

  void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee,
                                    const BasicBlock *EHPadBB);

  void LowerDeoptimizeCall(const CallInst *CI);
  void LowerDeoptimizingReturn();

  void LowerCallSiteWithDeoptBundleImpl(const CallBase *Call, SDValue Callee,
                                        const BasicBlock *EHPadBB,
                                        bool VarArgDisallowed,
                                        bool ForceVoidReturnTy);

  void LowerCallSiteWithPtrAuthBundle(const CallBase &CB,
                                      const BasicBlock *EHPadBB);

  /// Returns the type of FrameIndex and TargetFrameIndex nodes.
  MVT getFrameIndexTy() {
    return DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout());
  }

private:
  // Terminator instructions.
  void visitRet(const ReturnInst &I);
  void visitBr(const BranchInst &I);
  void visitSwitch(const SwitchInst &I);
  void visitIndirectBr(const IndirectBrInst &I);
  void visitUnreachable(const UnreachableInst &I);
  void visitCleanupRet(const CleanupReturnInst &I);
  void visitCatchSwitch(const CatchSwitchInst &I);
  void visitCatchRet(const CatchReturnInst &I);
  void visitCatchPad(const CatchPadInst &I);
  void visitCleanupPad(const CleanupPadInst &CPI);

  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;
  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB);
  void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *ParentBB);
  void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
  void visitBitTestHeader(SwitchCG::BitTestBlock &B,
                          MachineBasicBlock *SwitchBB);
  void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                        BranchProbability BranchProbToNext, unsigned Reg,
                        SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
  void visitJumpTable(SwitchCG::JumpTable &JT);
  void visitJumpTableHeader(SwitchCG::JumpTable &JT,
                            SwitchCG::JumpTableHeader &JTH,
                            MachineBasicBlock *SwitchBB);

private:
  // These all get lowered before this pass.
  void visitInvoke(const InvokeInst &I);
  void visitCallBr(const CallBrInst &I);
  void visitCallBrLandingPad(const CallInst &I);
  void visitResume(const ResumeInst &I);

  void visitUnary(const User &I, unsigned Opcode);
  void visitFNeg(const User &I) { visitUnary(I, ISD::FNEG); }

  void visitBinary(const User &I, unsigned Opcode);
  void visitShift(const User &I, unsigned Opcode);
  void visitAdd(const User &I)  { visitBinary(I, ISD::ADD); }
  void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
  void visitSub(const User &I)  { visitBinary(I, ISD::SUB); }
  void visitFSub(const User &I) { visitBinary(I, ISD::FSUB); }
  void visitMul(const User &I)  { visitBinary(I, ISD::MUL); }
  void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
  void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
  void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
  void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
  void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
  void visitSDiv(const User &I);
  void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
  void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
  void visitOr  (const User &I) { visitBinary(I, ISD::OR); }
  void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
  void visitShl (const User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(const ICmpInst &I);
  void visitFCmp(const FCmpInst &I);
  // Visit the conversion instructions.
  void visitTrunc(const User &I);
  void visitZExt(const User &I);
  void visitSExt(const User &I);
  void visitFPTrunc(const User &I);
  void visitFPExt(const User &I);
  void visitFPToUI(const User &I);
  void visitFPToSI(const User &I);
  void visitUIToFP(const User &I);
  void visitSIToFP(const User &I);
  void visitPtrToInt(const User &I);
  void visitIntToPtr(const User &I);
  void visitBitCast(const User &I);
  void visitAddrSpaceCast(const User &I);

  void visitExtractElement(const User &I);
  void visitInsertElement(const User &I);
  void visitShuffleVector(const User &I);

  void visitExtractValue(const ExtractValueInst &I);
  void visitInsertValue(const InsertValueInst &I);
  void visitLandingPad(const LandingPadInst &LP);

  void visitGetElementPtr(const User &I);
  void visitSelect(const User &I);

  void visitAlloca(const AllocaInst &I);
  void visitLoad(const LoadInst &I);
  void visitStore(const StoreInst &I);
  void visitMaskedLoad(const CallInst &I, bool IsExpanding = false);
  void visitMaskedStore(const CallInst &I, bool IsCompressing = false);
  void visitMaskedGather(const CallInst &I);
  void visitMaskedScatter(const CallInst &I);
  void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
  void visitAtomicRMW(const AtomicRMWInst &I);
  void visitFence(const FenceInst &I);
  void visitPHI(const PHINode &I);
  void visitCall(const CallInst &I);
  bool visitMemCmpBCmpCall(const CallInst &I);
  bool visitMemPCpyCall(const CallInst &I);
  bool visitMemChrCall(const CallInst &I);
  bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
  bool visitStrCmpCall(const CallInst &I);
  bool visitStrLenCall(const CallInst &I);
  bool visitStrNLenCall(const CallInst &I);
  bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
  bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode);
  void visitAtomicLoad(const LoadInst &I);
  void visitAtomicStore(const StoreInst &I);
  void visitLoadFromSwiftError(const LoadInst &I);
  void visitStoreToSwiftError(const StoreInst &I);
  void visitFreeze(const FreezeInst &I);

  void visitInlineAsm(const CallBase &Call,
                      const BasicBlock *EHPadBB = nullptr);

  bool visitEntryValueDbgValue(ArrayRef<const Value *> Values,
                               DILocalVariable *Variable, DIExpression *Expr,
                               DebugLoc DbgLoc);
  void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
  void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);
  void visitConvergenceControl(const CallInst &I, unsigned Intrinsic);
  void visitVectorHistogram(const CallInst &I, unsigned IntrinsicID);
  void visitVPLoad(const VPIntrinsic &VPIntrin, EVT VT,
                   const SmallVectorImpl<SDValue> &OpValues);
  void visitVPStore(const VPIntrinsic &VPIntrin,
                    const SmallVectorImpl<SDValue> &OpValues);
  void visitVPGather(const VPIntrinsic &VPIntrin, EVT VT,
                     const SmallVectorImpl<SDValue> &OpValues);
  void visitVPScatter(const VPIntrinsic &VPIntrin,
                      const SmallVectorImpl<SDValue> &OpValues);
  void visitVPStridedLoad(const VPIntrinsic &VPIntrin, EVT VT,
                          const SmallVectorImpl<SDValue> &OpValues);
  void visitVPStridedStore(const VPIntrinsic &VPIntrin,
                           const SmallVectorImpl<SDValue> &OpValues);
  void visitVPCmp(const VPCmpIntrinsic &VPIntrin);
  void visitVectorPredicationIntrinsic(const VPIntrinsic &VPIntrin);

  void visitVAStart(const CallInst &I);
  void visitVAArg(const VAArgInst &I);
  void visitVAEnd(const CallInst &I);
  void visitVACopy(const CallInst &I);
  void visitStackmap(const CallInst &I);
  void visitPatchpoint(const CallBase &CB, const BasicBlock *EHPadBB = nullptr);

  // These two are implemented in StatepointLowering.cpp.
  void visitGCRelocate(const GCRelocateInst &Relocate);
  void visitGCResult(const GCResultInst &I);

  void visitVectorReduce(const CallInst &I, unsigned Intrinsic);
  void visitVectorReverse(const CallInst &I);
  void visitVectorSplice(const CallInst &I);
  void visitVectorInterleave(const CallInst &I);
  void visitVectorDeinterleave(const CallInst &I);
  void visitStepVector(const CallInst &I);

  void visitUserOp1(const Instruction &I) {
    llvm_unreachable("UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(const Instruction &I) {
    llvm_unreachable("UserOp2 should not exist at instruction selection time!");
  }

  void processIntegerCallValue(const Instruction &I,
                               SDValue Value, bool IsSigned);

  void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  void emitInlineAsmError(const CallBase &Call, const Twine &Message);

  /// An enum describing the kind of intrinsic an emitted function-argument
  /// dbg value originally came from. This controls the internal behavior of
  /// EmitFuncArgumentDbgValue.
  enum class FuncArgumentDbgValueKind {
    Value,   // This was originally a llvm.dbg.value.
    Declare, // This was originally a llvm.dbg.declare.
  };

  /// If V is a function argument then create a corresponding DBG_VALUE machine
  /// instruction for it now. At the end of instruction selection, they will be
  /// inserted into the entry BB.
  bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
                                DIExpression *Expr, DILocation *DL,
                                FuncArgumentDbgValueKind Kind,
                                const SDValue &N);

  /// Return the next block after MBB, or nullptr if there is none.
  MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);

  /// Update the DAG and DAG builder with the relevant information after
  /// a new root node has been created which could be a tail call.
  void updateDAGForMaybeTailCall(SDValue MaybeTC);

  /// Return the appropriate SDDbgValue based on N.
  SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable,
                          DIExpression *Expr, const DebugLoc &dl,
                          unsigned DbgSDNodeOrder);

  /// Lowers CallInst to an external symbol.
  void lowerCallToExternalSymbol(const CallInst &I, const char *FunctionName);

  SDValue lowerStartEH(SDValue Chain, const BasicBlock *EHPadBB,
                       MCSymbol *&BeginLabel);
  SDValue lowerEndEH(SDValue Chain, const InvokeInst *II,
                     const BasicBlock *EHPadBB, MCSymbol *BeginLabel);
};

/// This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values. The
/// splitting of aggregates is performed recursively, so that we never have
/// aggregate-typed registers. The values at this point do not necessarily have
/// legal types, so each value may require one or more registers of some legal
/// type.
///
struct RegsForValue {
  /// The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  SmallVector<EVT, 4> ValueVTs;

  /// The value types of the registers. This is the same size as ValueVTs and it
  /// records, for each value, what the type of the assigned register or
  /// registers are. (Individual values are never synthesized from more than one
  /// type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers
  /// it is necessary to have a separate record of the types.
  SmallVector<MVT, 4> RegVTs;

  /// This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  SmallVector<unsigned, 4> Regs;

  /// This list holds the number of registers for each value.
  SmallVector<unsigned, 4> RegCount;

  /// Records if this value needs to be treated in an ABI dependent manner,
  /// different to normal type legalization.
  std::optional<CallingConv::ID> CallConv;

  RegsForValue() = default;
  RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
               std::optional<CallingConv::ID> CC = std::nullopt);
  RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
               const DataLayout &DL, unsigned Reg, Type *Ty,
               std::optional<CallingConv::ID> CC);

  bool isABIMangled() const { return CallConv.has_value(); }

  /// Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
    RegCount.push_back(RHS.Regs.size());
  }

  /// Emit a series of CopyFromReg nodes that copies from this value and
  /// returns the result as a ValueVTs value. This uses Chain/Glue as the input
  /// and updates them for the output Chain/Glue. If the Glue pointer is
  /// nullptr, no glue is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                          const SDLoc &dl, SDValue &Chain, SDValue *Glue,
                          const Value *V = nullptr) const;

  /// Emit a series of CopyToReg nodes that copies the specified value into the
  /// registers specified by this object. This uses Chain/Glue as the input and
  /// updates them for the output Chain/Glue. If the Glue pointer is nullptr, no
  /// glue is used. If V is not nullptr, then it is used in printing better
  /// diagnostic messages on error.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, SDValue *Glue, const Value *V = nullptr,
                     ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;

  /// Add this value to the specified inline asm node operand list. This adds
  /// the code marker, matching input operand index (if applicable), and
  /// includes the number of values added into it.
  void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
                            unsigned MatchingIdx, const SDLoc &dl,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;

  /// Check if the total RegCount is greater than one.
  bool occupiesMultipleRegs() const {
    return std::accumulate(RegCount.begin(), RegCount.end(), 0) > 1;
  }

  /// Return a list of registers and their sizes.
  SmallVector<std::pair<unsigned, TypeSize>, 4> getRegsAndSizes() const;
};
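
// Illustrative round trip through RegsForValue (hedged sketch; Reg, Val, V,
// dl, TLI, and FuncInfo stand in for values the caller already has):
//
//   RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), Reg,
//                    V->getType(), std::nullopt);
//   SDValue Chain = DAG.getEntryNode();
//   SDValue Glue;
//   RFV.getCopyToRegs(Val, DAG, dl, Chain, &Glue, V);  // IR value -> registers
//   SDValue Res = RFV.getCopyFromRegs(DAG, FuncInfo, dl, Chain, &Glue, V);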

} // end namespace llvm

#endif // LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H