//===- SelectionDAGISel.cpp - Implement the SelectionDAGISel class --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAGISel.h"
#include "ScheduleDAGSDNodes.h"
#include "SelectionDAGBuilder.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PrintPasses.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "isel"
#define ISEL_DUMP_DEBUG_TYPE DEBUG_TYPE "-dump"

STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
STATISTIC(NumDAGIselRetries, "Number of times dag isel has to try another path");
STATISTIC(NumEntryBlocks, "Number of entry blocks encountered");
STATISTIC(NumFastIselFailLowerArguments,
          "Number of entry blocks where fast isel failed to lower arguments");

static cl::opt<int> EnableFastISelAbort(
    "fast-isel-abort", cl::Hidden,
    cl::desc("Enable abort calls when \"fast\" instruction selection "
             "fails to lower an instruction: 0 disable the abort, 1 will "
             "abort but for args, calls and terminators, 2 will also "
             "abort for argument lowering, and 3 will never fallback "
             "to SelectionDAG."));

static cl::opt<bool> EnableFastISelFallbackReport(
    "fast-isel-report-on-fallback", cl::Hidden,
    cl::desc("Emit a diagnostic when \"fast\" instruction selection "
             "falls back to SelectionDAG."));

static cl::opt<bool>
UseMBPI("use-mbpi",
        cl::desc("use Machine Branch Probability Info"),
        cl::init(true), cl::Hidden);

#ifndef NDEBUG
static cl::opt<std::string>
FilterDAGBasicBlockName("filter-view-dags", cl::Hidden,
                        cl::desc("Only display the basic block whose name "
                                 "matches this for all view-*-dags options"));
static cl::opt<bool>
ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
                cl::desc("Pop up a window to show dags before the first "
                         "dag combine pass"));
static cl::opt<bool>
ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
                      cl::desc("Pop up a window to show dags before legalize types"));
static cl::opt<bool>
    ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
                     cl::desc("Pop up a window to show dags before the post "
                              "legalize types dag combine pass"));
static cl::opt<bool>
    ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
                     cl::desc("Pop up a window to show dags before legalize"));
static cl::opt<bool>
ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
                cl::desc("Pop up a window to show dags before the second "
                         "dag combine pass"));
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
             cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
              cl::desc("Pop up a window to show sched dags as they are processed"));
static cl::opt<bool>
ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
              cl::desc("Pop up a window to show SUnit dags after they are processed"));
#else
static const bool ViewDAGCombine1 = false, ViewLegalizeTypesDAGs = false,
                  ViewDAGCombineLT = false, ViewLegalizeDAGs = false,
                  ViewDAGCombine2 = false, ViewISelDAGs = false,
                  ViewSchedDAGs = false, ViewSUnitDAGs = false;
#endif

#ifndef NDEBUG
#define ISEL_DUMP(X)                                                           \
  do {                                                                         \
    if (llvm::DebugFlag &&                                                     \
        (isCurrentDebugType(DEBUG_TYPE) ||                                     \
         (isCurrentDebugType(ISEL_DUMP_DEBUG_TYPE) && MatchFilterFuncName))) { \
      X;                                                                       \
    }                                                                          \
  } while (false)
#else
#define ISEL_DUMP(X) do { } while (false)
#endif

//===---------------------------------------------------------------------===//
///
/// RegisterScheduler class - Track the registration of instruction schedulers.
///
//===---------------------------------------------------------------------===//
MachinePassRegistry<RegisterScheduler::FunctionPassCtor>
    RegisterScheduler::Registry;

//===---------------------------------------------------------------------===//
///
/// ISHeuristic command line option for instruction schedulers.
///
//===---------------------------------------------------------------------===//
static cl::opt<RegisterScheduler::FunctionPassCtor, false,
               RegisterPassParser<RegisterScheduler>>
ISHeuristic("pre-RA-sched",
            cl::init(&createDefaultScheduler), cl::Hidden,
            cl::desc("Instruction schedulers available (before register"
                     " allocation):"));

static RegisterScheduler
defaultListDAGScheduler("default", "Best scheduler for the target",
                        createDefaultScheduler);

static bool dontUseFastISelFor(const Function &Fn) {
  // Don't enable FastISel for functions with swiftasync Arguments.
  // Debug info on those is reliant on good Argument lowering, and FastISel is
  // not capable of lowering the entire function. Mixing the two selectors
  // tends to result in poor lowering of Arguments.
  return any_of(Fn.args(), [](const Argument &Arg) {
    return Arg.hasAttribute(Attribute::AttrKind::SwiftAsync);
  });
}

namespace llvm {

  //===--------------------------------------------------------------------===//
  /// This class is used by SelectionDAGISel to temporarily override
  /// the optimization level on a per-function basis.
  class OptLevelChanger {
    SelectionDAGISel &IS;
    CodeGenOptLevel SavedOptLevel;
    bool SavedFastISel;

  public:
    OptLevelChanger(SelectionDAGISel &ISel, CodeGenOptLevel NewOptLevel)
        : IS(ISel) {
      SavedOptLevel = IS.OptLevel;
      SavedFastISel = IS.TM.Options.EnableFastISel;
      if (NewOptLevel != SavedOptLevel) {
        IS.OptLevel = NewOptLevel;
        IS.TM.setOptLevel(NewOptLevel);
        LLVM_DEBUG(dbgs() << "\nChanging optimization level for Function "
                          << IS.MF->getFunction().getName() << "\n");
        LLVM_DEBUG(dbgs() << "\tBefore: -O" << static_cast<int>(SavedOptLevel)
                          << " ; After: -O" << static_cast<int>(NewOptLevel)
                          << "\n");
        if (NewOptLevel == CodeGenOptLevel::None)
          IS.TM.setFastISel(IS.TM.getO0WantsFastISel());
      }
      if (dontUseFastISelFor(IS.MF->getFunction()))
        IS.TM.setFastISel(false);
      LLVM_DEBUG(
          dbgs() << "\tFastISel is "
                 << (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
                 << "\n");
    }

    ~OptLevelChanger() {
      if (IS.OptLevel == SavedOptLevel)
        return;
      LLVM_DEBUG(dbgs() << "\nRestoring optimization level for Function "
                        << IS.MF->getFunction().getName() << "\n");
      LLVM_DEBUG(dbgs() << "\tBefore: -O" << static_cast<int>(IS.OptLevel)
                        << " ; After: -O" << static_cast<int>(SavedOptLevel)
                        << "\n");
      IS.OptLevel = SavedOptLevel;
      IS.TM.setOptLevel(SavedOptLevel);
      IS.TM.setFastISel(SavedFastISel);
    }
  };

  //===--------------------------------------------------------------------===//
  /// createDefaultScheduler - This creates an instruction scheduler appropriate
  /// for the target.
  ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
                                             CodeGenOptLevel OptLevel) {
    const TargetLowering *TLI = IS->TLI;
    const TargetSubtargetInfo &ST = IS->MF->getSubtarget();

    // Try first to see if the Target has its own way of selecting a scheduler.
    if (auto *SchedulerCtor = ST.getDAGScheduler(OptLevel)) {
      return SchedulerCtor(IS, OptLevel);
    }

    if (OptLevel == CodeGenOptLevel::None ||
        (ST.enableMachineScheduler() && ST.enableMachineSchedDefaultSched()) ||
        TLI->getSchedulingPreference() == Sched::Source)
      return createSourceListDAGScheduler(IS, OptLevel);
    if (TLI->getSchedulingPreference() == Sched::RegPressure)
      return createBURRListDAGScheduler(IS, OptLevel);
    if (TLI->getSchedulingPreference() == Sched::Hybrid)
      return createHybridListDAGScheduler(IS, OptLevel);
    if (TLI->getSchedulingPreference() == Sched::VLIW)
      return createVLIWDAGScheduler(IS, OptLevel);
    if (TLI->getSchedulingPreference() == Sched::Fast)
      return createFastDAGScheduler(IS, OptLevel);
    if (TLI->getSchedulingPreference() == Sched::Linearize)
      return createDAGLinearizer(IS, OptLevel);
    assert(TLI->getSchedulingPreference() == Sched::ILP &&
           "Unknown sched type!");
    return createILPListDAGScheduler(IS, OptLevel);
  }

} // end namespace llvm

MachineBasicBlock *
TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                            MachineBasicBlock *MBB) const {
#ifndef NDEBUG
  dbgs() << "If a target marks an instruction with "
            "'usesCustomInserter', it must implement "
            "TargetLowering::EmitInstrWithCustomInserter!\n";
#endif
  llvm_unreachable(nullptr);
}

void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                   SDNode *Node) const {
  assert(!MI.hasPostISelHook() &&
         "If a target marks an instruction with 'hasPostISelHook', "
         "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
}

//===----------------------------------------------------------------------===//
// SelectionDAGISel code
//===----------------------------------------------------------------------===//

SelectionDAGISelLegacy::SelectionDAGISelLegacy(
    char &ID, std::unique_ptr<SelectionDAGISel> S)
    : MachineFunctionPass(ID), Selector(std::move(S)) {
  initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
  initializeBranchProbabilityInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
  initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool SelectionDAGISelLegacy::runOnMachineFunction(MachineFunction &MF) {
  // If we already selected that function, we do not need to run SDISel.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::Selected))
    return false;

  // Do some sanity-checking on the command-line options.
  if (EnableFastISelAbort && !Selector->TM.Options.EnableFastISel)
    report_fatal_error("-fast-isel-abort > 0 requires -fast-isel");

  // Decide what flavour of variable location debug-info will be used, before
  // we change the optimisation level.
  MF.setUseDebugInstrRef(MF.shouldUseDebugInstrRef());

  // Reset the target options before resetting the optimization
  // level below.
  // FIXME: This is a horrible hack and should be processed via
  // codegen looking at the optimization level explicitly when
  // it wants to look at it.
  Selector->TM.resetTargetOptions(MF.getFunction());
  // Reset OptLevel to None for optnone functions.
  CodeGenOptLevel NewOptLevel = skipFunction(MF.getFunction())
                                    ? CodeGenOptLevel::None
                                    : Selector->OptLevel;

  Selector->MF = &MF;
  OptLevelChanger OLC(*Selector, NewOptLevel);
  Selector->initializeAnalysisResults(*this);
  return Selector->runOnMachineFunction(MF);
}

SelectionDAGISel::SelectionDAGISel(TargetMachine &tm, CodeGenOptLevel OL)
    : TM(tm), FuncInfo(new FunctionLoweringInfo()),
      SwiftError(new SwiftErrorValueTracking()),
      CurDAG(new SelectionDAG(tm, OL)),
      SDB(std::make_unique<SelectionDAGBuilder>(*CurDAG, *FuncInfo, *SwiftError,
                                                OL)),
      OptLevel(OL) {
  initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
  initializeBranchProbabilityInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
  initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}

SelectionDAGISel::~SelectionDAGISel() {
  delete CurDAG;
  delete SwiftError;
}

void SelectionDAGISelLegacy::getAnalysisUsage(AnalysisUsage &AU) const {
  CodeGenOptLevel OptLevel = Selector->OptLevel;
  if (OptLevel != CodeGenOptLevel::None)
    AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<GCModuleInfo>();
  AU.addRequired<StackProtector>();
  AU.addPreserved<GCModuleInfo>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
#ifndef NDEBUG
  AU.addRequired<TargetTransformInfoWrapperPass>();
#endif
  AU.addRequired<AssumptionCacheTracker>();
  if (UseMBPI && OptLevel != CodeGenOptLevel::None)
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  // AssignmentTrackingAnalysis only runs if assignment tracking is enabled for
  // the module.
  AU.addRequired<AssignmentTrackingAnalysis>();
  AU.addPreserved<AssignmentTrackingAnalysis>();
  if (OptLevel != CodeGenOptLevel::None)
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

static void computeUsesMSVCFloatingPoint(const Triple &TT, const Function &F,
                                         MachineModuleInfo &MMI) {
  // Only needed for MSVC.
  if (!TT.isWindowsMSVCEnvironment())
    return;

  // If it's already set, nothing to do.
  if (MMI.usesMSVCFloatingPoint())
    return;

  for (const Instruction &I : instructions(F)) {
    if (I.getType()->isFPOrFPVectorTy()) {
      MMI.setUsesMSVCFloatingPoint(true);
      return;
    }
    for (const auto &Op : I.operands()) {
      if (Op->getType()->isFPOrFPVectorTy()) {
        MMI.setUsesMSVCFloatingPoint(true);
        return;
      }
    }
  }
}

PreservedAnalyses
SelectionDAGISelPass::run(MachineFunction &MF,
                          MachineFunctionAnalysisManager &MFAM) {
  // If we already selected that function, we do not need to run SDISel.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::Selected))
    return PreservedAnalyses::all();

  // Do some sanity-checking on the command-line options.
  if (EnableFastISelAbort && !Selector->TM.Options.EnableFastISel)
    report_fatal_error("-fast-isel-abort > 0 requires -fast-isel");

  // Decide what flavour of variable location debug-info will be used, before
  // we change the optimisation level.
  MF.setUseDebugInstrRef(MF.shouldUseDebugInstrRef());

  // Reset the target options before resetting the optimization
  // level below.
  // FIXME: This is a horrible hack and should be processed via
  // codegen looking at the optimization level explicitly when
  // it wants to look at it.
  Selector->TM.resetTargetOptions(MF.getFunction());
  // Reset OptLevel to None for optnone functions.
  // TODO: Add a function analysis to handle this.
  Selector->MF = &MF;
  CodeGenOptLevel NewOptLevel = MF.getFunction().hasOptNone()
                                    ? CodeGenOptLevel::None
                                    : Selector->OptLevel;

  OptLevelChanger OLC(*Selector, NewOptLevel);
  Selector->initializeAnalysisResults(MFAM);
  Selector->runOnMachineFunction(MF);

  return getMachineFunctionPassPreservedAnalyses();
}

void SelectionDAGISel::initializeAnalysisResults(
    MachineFunctionAnalysisManager &MFAM) {
  auto &FAM = MFAM.getResult<FunctionAnalysisManagerMachineFunctionProxy>(*MF)
                  .getManager();
  auto &MAMP = MFAM.getResult<ModuleAnalysisManagerMachineFunctionProxy>(*MF);
  Function &Fn = MF->getFunction();
#ifndef NDEBUG
  FuncName = Fn.getName();
  MatchFilterFuncName = isFunctionInPrintList(FuncName);
#else
  (void)MatchFilterFuncName;
#endif

  TII = MF->getSubtarget().getInstrInfo();
  TLI = MF->getSubtarget().getTargetLowering();
  RegInfo = &MF->getRegInfo();
  LibInfo = &FAM.getResult<TargetLibraryAnalysis>(Fn);
  GFI = Fn.hasGC() ? &FAM.getResult<GCFunctionAnalysis>(Fn) : nullptr;
  ORE = std::make_unique<OptimizationRemarkEmitter>(&Fn);
  AC = &FAM.getResult<AssumptionAnalysis>(Fn);
  auto *PSI = MAMP.getCachedResult<ProfileSummaryAnalysis>(*Fn.getParent());
  BlockFrequencyInfo *BFI = nullptr;
  FAM.getResult<BlockFrequencyAnalysis>(Fn);
  if (PSI && PSI->hasProfileSummary() && OptLevel != CodeGenOptLevel::None)
    BFI = &FAM.getResult<BlockFrequencyAnalysis>(Fn);

  FunctionVarLocs const *FnVarLocs = nullptr;
  if (isAssignmentTrackingEnabled(*Fn.getParent()))
    FnVarLocs = &FAM.getResult<DebugAssignmentTrackingAnalysis>(Fn);

  auto *UA = FAM.getCachedResult<UniformityInfoAnalysis>(Fn);
  CurDAG->init(*MF, *ORE, MFAM, LibInfo, UA, PSI, BFI, FnVarLocs);

  // Now get the optional analyses if we want to.
  // This is based on the possibly changed OptLevel (after optnone is taken
  // into account). That's unfortunate but OK because it just means we won't
  // ask for passes that have been required anyway.

  if (UseMBPI && OptLevel != CodeGenOptLevel::None)
    FuncInfo->BPI = &FAM.getResult<BranchProbabilityAnalysis>(Fn);
  else
    FuncInfo->BPI = nullptr;

  if (OptLevel != CodeGenOptLevel::None)
    AA = &FAM.getResult<AAManager>(Fn);
  else
    AA = nullptr;

  SP = &FAM.getResult<SSPLayoutAnalysis>(Fn);

#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
  TTI = &FAM.getResult<TargetIRAnalysis>(Fn);
#endif
}

void SelectionDAGISel::initializeAnalysisResults(MachineFunctionPass &MFP) {
  Function &Fn = MF->getFunction();
#ifndef NDEBUG
  FuncName = Fn.getName();
  MatchFilterFuncName = isFunctionInPrintList(FuncName);
#else
  (void)MatchFilterFuncName;
#endif

  TII = MF->getSubtarget().getInstrInfo();
  TLI = MF->getSubtarget().getTargetLowering();
  RegInfo = &MF->getRegInfo();
  LibInfo = &MFP.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(Fn);
  GFI = Fn.hasGC() ? &MFP.getAnalysis<GCModuleInfo>().getFunctionInfo(Fn)
                   : nullptr;
  ORE = std::make_unique<OptimizationRemarkEmitter>(&Fn);
  AC = &MFP.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(Fn);
  auto *PSI = &MFP.getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  BlockFrequencyInfo *BFI = nullptr;
  if (PSI && PSI->hasProfileSummary() && OptLevel != CodeGenOptLevel::None)
    BFI = &MFP.getAnalysis<LazyBlockFrequencyInfoPass>().getBFI();

  FunctionVarLocs const *FnVarLocs = nullptr;
  if (isAssignmentTrackingEnabled(*Fn.getParent()))
    FnVarLocs = MFP.getAnalysis<AssignmentTrackingAnalysis>().getResults();

  UniformityInfo *UA = nullptr;
  if (auto *UAPass = MFP.getAnalysisIfAvailable<UniformityInfoWrapperPass>())
    UA = &UAPass->getUniformityInfo();
  CurDAG->init(*MF, *ORE, &MFP, LibInfo, UA, PSI, BFI, FnVarLocs);

  // Now get the optional analyses if we want to.
  // This is based on the possibly changed OptLevel (after optnone is taken
  // into account). That's unfortunate but OK because it just means we won't
  // ask for passes that have been required anyway.

  if (UseMBPI && OptLevel != CodeGenOptLevel::None)
    FuncInfo->BPI =
        &MFP.getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
  else
    FuncInfo->BPI = nullptr;

  if (OptLevel != CodeGenOptLevel::None)
    AA = &MFP.getAnalysis<AAResultsWrapperPass>().getAAResults();
  else
    AA = nullptr;

  SP = &MFP.getAnalysis<StackProtector>().getLayoutInfo();

#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
  TTI = &MFP.getAnalysis<TargetTransformInfoWrapperPass>().getTTI(Fn);
#endif
}

bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
  SwiftError->setFunction(mf);
  const Function &Fn = mf.getFunction();

  bool InstrRef = mf.shouldUseDebugInstrRef();

  FuncInfo->set(MF->getFunction(), *MF, CurDAG);

  ISEL_DUMP(dbgs() << "\n\n\n=== " << FuncName << '\n');

  SDB->init(GFI, AA, AC, LibInfo);

  MF->setHasInlineAsm(false);

  FuncInfo->SplitCSR = false;

  // We split CSR if the target supports it for the given function
  // and the function has only return exits.
  if (OptLevel != CodeGenOptLevel::None && TLI->supportSplitCSR(MF)) {
    FuncInfo->SplitCSR = true;

    // Collect all the return blocks.
    for (const BasicBlock &BB : Fn) {
      if (!succ_empty(&BB))
        continue;

      const Instruction *Term = BB.getTerminator();
      if (isa<UnreachableInst>(Term) || isa<ReturnInst>(Term))
        continue;

      // Bail out if the exit block is not Return nor Unreachable.
      FuncInfo->SplitCSR = false;
      break;
    }
  }

  MachineBasicBlock *EntryMBB = &MF->front();
  if (FuncInfo->SplitCSR)
    // This performs initialization so lowering for SplitCSR will be correct.
    TLI->initializeSplitCSR(EntryMBB);

  SelectAllBasicBlocks(Fn);
  if (FastISelFailed && EnableFastISelFallbackReport) {
    DiagnosticInfoISelFallback DiagFallback(Fn);
    Fn.getContext().diagnose(DiagFallback);
  }

  // Replace forward-declared registers with the registers containing
  // the desired value.
  // Note: it is important that this happens **before** the call to
  // EmitLiveInCopies, since implementations can skip copies of unused
  // registers. If we don't apply the reg fixups before, some registers may
  // appear as unused and will be skipped, resulting in bad MI.
  MachineRegisterInfo &MRI = MF->getRegInfo();
  for (DenseMap<Register, Register>::iterator I = FuncInfo->RegFixups.begin(),
                                              E = FuncInfo->RegFixups.end();
       I != E; ++I) {
    Register From = I->first;
    Register To = I->second;
    // If To is also scheduled to be replaced, find what its ultimate
    // replacement is.
    while (true) {
      DenseMap<Register, Register>::iterator J = FuncInfo->RegFixups.find(To);
      if (J == E)
        break;
      To = J->second;
    }
    // Make sure the new register has a sufficiently constrained register
    // class.
    if (From.isVirtual() && To.isVirtual())
      MRI.constrainRegClass(To, MRI.getRegClass(From));
    // Replace it.

    // Replacing one register with another won't touch the kill flags.
    // We need to conservatively clear the kill flags as a kill on the old
    // register might dominate existing uses of the new register.
    if (!MRI.use_empty(To))
      MRI.clearKillFlags(From);
    MRI.replaceRegWith(From, To);
  }

  // If the first basic block in the function has live ins that need to be
  // copied into vregs, emit the copies into the top of the block before
  // emitting the code for the block.
  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  RegInfo->EmitLiveInCopies(EntryMBB, TRI, *TII);

  // Insert copies in the entry block and the return blocks.
  if (FuncInfo->SplitCSR) {
    SmallVector<MachineBasicBlock *, 4> Returns;
    // Collect all the return blocks.
    for (MachineBasicBlock &MBB : mf) {
      if (!MBB.succ_empty())
        continue;

      MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
      if (Term != MBB.end() && Term->isReturn()) {
        Returns.push_back(&MBB);
        continue;
      }
    }
    TLI->insertCopiesSplitCSR(EntryMBB, Returns);
  }

  DenseMap<unsigned, unsigned> LiveInMap;
  if (!FuncInfo->ArgDbgValues.empty())
    for (std::pair<unsigned, unsigned> LI : RegInfo->liveins())
      if (LI.second)
        LiveInMap.insert(LI);

  // Insert DBG_VALUE instructions for function arguments to the entry block.
  for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
    MachineInstr *MI = FuncInfo->ArgDbgValues[e - i - 1];
    assert(MI->getOpcode() != TargetOpcode::DBG_VALUE_LIST &&
           "Function parameters should not be described by DBG_VALUE_LIST.");
    bool hasFI = MI->getDebugOperand(0).isFI();
    Register Reg =
        hasFI ? TRI.getFrameRegister(*MF) : MI->getDebugOperand(0).getReg();
    if (Reg.isPhysical())
      EntryMBB->insert(EntryMBB->begin(), MI);
    else {
      MachineInstr *Def = RegInfo->getVRegDef(Reg);
      if (Def) {
        MachineBasicBlock::iterator InsertPos = Def;
        // FIXME: VR def may not be in entry block.
        Def->getParent()->insert(std::next(InsertPos), MI);
      } else
        LLVM_DEBUG(dbgs() << "Dropping debug info for dead vreg"
                          << Register::virtReg2Index(Reg) << "\n");
    }

    // Don't try and extend through copies in instruction referencing mode.
    if (InstrRef)
      continue;

    // If Reg is live-in then update debug info to track its copy in a vreg.
    DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
    if (LDI != LiveInMap.end()) {
      assert(!hasFI && "There's no handling of frame pointer updating here yet "
                       "- add if needed");
      MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
      MachineBasicBlock::iterator InsertPos = Def;
      const MDNode *Variable = MI->getDebugVariable();
      const MDNode *Expr = MI->getDebugExpression();
      DebugLoc DL = MI->getDebugLoc();
      bool IsIndirect = MI->isIndirectDebugValue();
      if (IsIndirect)
        assert(MI->getDebugOffset().getImm() == 0 &&
               "DBG_VALUE with nonzero offset");
      assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
             "Expected inlined-at fields to agree");
      assert(MI->getOpcode() != TargetOpcode::DBG_VALUE_LIST &&
             "Didn't expect to see a DBG_VALUE_LIST here");
      // Def is never a terminator here, so it is ok to increment InsertPos.
      BuildMI(*EntryMBB, ++InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE),
              IsIndirect, LDI->second, Variable, Expr);

      // If this vreg is directly copied into an exported register then the
      // COPY instruction also needs a DBG_VALUE, if it is the only user of
      // LDI->second.
      MachineInstr *CopyUseMI = nullptr;
      for (MachineInstr &UseMI : RegInfo->use_instructions(LDI->second)) {
        if (UseMI.isDebugValue())
          continue;
        if (UseMI.isCopy() && !CopyUseMI && UseMI.getParent() == EntryMBB) {
          CopyUseMI = &UseMI;
          continue;
        }
        // Otherwise this is another use or second copy use.
        CopyUseMI = nullptr;
        break;
      }
      if (CopyUseMI &&
          TRI.getRegSizeInBits(LDI->second, MRI) ==
              TRI.getRegSizeInBits(CopyUseMI->getOperand(0).getReg(), MRI)) {
        // Use MI's debug location, which describes where Variable was
        // declared, rather than whatever is attached to CopyUseMI.
        MachineInstr *NewMI =
            BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
                    CopyUseMI->getOperand(0).getReg(), Variable, Expr);
        MachineBasicBlock::iterator Pos = CopyUseMI;
        EntryMBB->insertAfter(Pos, NewMI);
      }
    }
  }

  // For debug-info, in instruction referencing mode, we need to perform some
  // post-isel maintenance.
  if (MF->useDebugInstrRef())
    MF->finalizeDebugInstrRefs();

  // Determine if there are any calls in this machine function.
  MachineFrameInfo &MFI = MF->getFrameInfo();
  for (const auto &MBB : *MF) {
    if (MFI.hasCalls() && MF->hasInlineAsm())
      break;

    for (const auto &MI : MBB) {
      const MCInstrDesc &MCID = TII->get(MI.getOpcode());
      if ((MCID.isCall() && !MCID.isReturn()) ||
          MI.isStackAligningInlineAsm()) {
        MFI.setHasCalls(true);
      }
      if (MI.isInlineAsm()) {
        MF->setHasInlineAsm(true);
      }
    }
  }

  // Determine if floating point is used for MSVC.
  computeUsesMSVCFloatingPoint(TM.getTargetTriple(), Fn, MF->getMMI());

  // Release function-specific state. SDB and CurDAG are already cleared
  // at this point.
  FuncInfo->clear();

  ISEL_DUMP(dbgs() << "*** MachineFunction at end of ISel ***\n");
  ISEL_DUMP(MF->print(dbgs()));

  return true;
}

static void reportFastISelFailure(MachineFunction &MF,
                                  OptimizationRemarkEmitter &ORE,
                                  OptimizationRemarkMissed &R,
                                  bool ShouldAbort) {
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || ShouldAbort)
    R << (" (in function: " + MF.getName() + ")").str();

  if (ShouldAbort)
    report_fatal_error(Twine(R.getMsg()));

  ORE.emit(R);
  LLVM_DEBUG(dbgs() << R.getMsg() << "\n");
}

void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
                                        BasicBlock::const_iterator End,
                                        bool &HadTailCall) {
  // Allow creating illegal types during DAG building for the basic block.
  CurDAG->NewNodesMustHaveLegalTypes = false;

  // Lower the instructions. If a call is emitted as a tail call, cease
  // emitting nodes for this block. If an instruction is elided, don't emit it,
  // but do handle any debug-info attached to it.
  for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall;
       ++I) {
    if (!ElidedArgCopyInstrs.count(&*I))
      SDB->visit(*I);
    else
      SDB->visitDbgInfo(*I);
  }

  // Make sure the root of the DAG is up-to-date.
  CurDAG->setRoot(SDB->getControlRoot());
  HadTailCall = SDB->HasTailCall;
  SDB->resolveOrClearDbgInfo();
  SDB->clear();

  // Final step, emit the lowered DAG as machine code.
  CodeGenAndEmitDAG();
}

void SelectionDAGISel::ComputeLiveOutVRegInfo() {
  SmallPtrSet<SDNode *, 16> Added;
  SmallVector<SDNode *, 128> Worklist;

  Worklist.push_back(CurDAG->getRoot().getNode());
  Added.insert(CurDAG->getRoot().getNode());

  KnownBits Known;

  do {
    SDNode *N = Worklist.pop_back_val();

    // Otherwise, add all chain operands to the worklist.
    for (const SDValue &Op : N->op_values())
      if (Op.getValueType() == MVT::Other && Added.insert(Op.getNode()).second)
        Worklist.push_back(Op.getNode());

    // If this is a CopyToReg with a vreg dest, process it.
    if (N->getOpcode() != ISD::CopyToReg)
      continue;

    unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
    if (!Register::isVirtualRegister(DestReg))
      continue;

    // Ignore non-integer values.
    SDValue Src = N->getOperand(2);
    EVT SrcVT = Src.getValueType();
    if (!SrcVT.isInteger())
      continue;

    unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
    Known = CurDAG->computeKnownBits(Src);
    FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, Known);
  } while (!Worklist.empty());
}

void SelectionDAGISel::CodeGenAndEmitDAG() {
  StringRef GroupName = "sdag";
  StringRef GroupDescription = "Instruction Selection and Scheduling";
  std::string BlockName;
  bool MatchFilterBB = false;
  (void)MatchFilterBB;

  // Pre-type legalization allows creation of any node types.
  CurDAG->NewNodesMustHaveLegalTypes = false;

#ifndef NDEBUG
  MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
                   FilterDAGBasicBlockName ==
                       FuncInfo->MBB->getBasicBlock()->getName());
#endif
#ifdef NDEBUG
  if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewDAGCombineLT ||
      ViewLegalizeDAGs || ViewDAGCombine2 || ViewISelDAGs || ViewSchedDAGs ||
      ViewSUnitDAGs)
#endif
  {
    BlockName =
        (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
  }
  ISEL_DUMP(dbgs() << "\nInitial selection DAG: "
                   << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                   << "'\n";
            CurDAG->dump());

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  if (TTI->hasBranchDivergence())
    CurDAG->VerifyDAGDivergence();
#endif

  if (ViewDAGCombine1 && MatchFilterBB)
    CurDAG->viewGraph("dag-combine1 input for " + BlockName);

  // Run the DAG combiner in pre-legalize mode.
  {
    NamedRegionTimer T("combine1", "DAG Combining 1", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    CurDAG->Combine(BeforeLegalizeTypes, AA, OptLevel);
  }

  ISEL_DUMP(dbgs() << "\nOptimized lowered selection DAG: "
                   << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                   << "'\n";
            CurDAG->dump());

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  if (TTI->hasBranchDivergence())
    CurDAG->VerifyDAGDivergence();
#endif

  // Second step, hack on the DAG until it only uses operations and types that
  // the target supports.
  if (ViewLegalizeTypesDAGs && MatchFilterBB)
    CurDAG->viewGraph("legalize-types input for " + BlockName);

  bool Changed;
  {
    NamedRegionTimer T("legalize_types", "Type Legalization", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    Changed = CurDAG->LegalizeTypes();
  }

  ISEL_DUMP(dbgs() << "\nType-legalized selection DAG: "
                   << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                   << "'\n";
            CurDAG->dump());

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  if (TTI->hasBranchDivergence())
    CurDAG->VerifyDAGDivergence();
#endif

  // Only allow creation of legal node types.
  CurDAG->NewNodesMustHaveLegalTypes = true;

  if (Changed) {
    if (ViewDAGCombineLT && MatchFilterBB)
      CurDAG->viewGraph("dag-combine-lt input for " + BlockName);

    // Run the DAG combiner in post-type-legalize mode.
    {
      NamedRegionTimer T("combine_lt", "DAG Combining after legalize types",
                         GroupName, GroupDescription, TimePassesIsEnabled);
      CurDAG->Combine(AfterLegalizeTypes, AA, OptLevel);
    }

    ISEL_DUMP(dbgs() << "\nOptimized type-legalized selection DAG: "
                     << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                     << "'\n";
              CurDAG->dump());

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    if (TTI->hasBranchDivergence())
      CurDAG->VerifyDAGDivergence();
#endif
  }

  {
    NamedRegionTimer T("legalize_vec", "Vector Legalization", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    Changed = CurDAG->LegalizeVectors();
  }

  if (Changed) {
    ISEL_DUMP(dbgs() << "\nVector-legalized selection DAG: "
                     << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                     << "'\n";
              CurDAG->dump());

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    if (TTI->hasBranchDivergence())
      CurDAG->VerifyDAGDivergence();
#endif

    {
      NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
                         GroupDescription, TimePassesIsEnabled);
      CurDAG->LegalizeTypes();
    }

    ISEL_DUMP(dbgs() << "\nVector/type-legalized selection DAG: "
                     << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                     << "'\n";
              CurDAG->dump());

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    if (TTI->hasBranchDivergence())
      CurDAG->VerifyDAGDivergence();
#endif

    if (ViewDAGCombineLT && MatchFilterBB)
      CurDAG->viewGraph("dag-combine-lv input for " + BlockName);

    // Run the DAG combiner in post-type-legalize mode.
    {
      NamedRegionTimer T("combine_lv", "DAG Combining after legalize vectors",
                         GroupName, GroupDescription, TimePassesIsEnabled);
      CurDAG->Combine(AfterLegalizeVectorOps, AA, OptLevel);
    }

    ISEL_DUMP(dbgs() << "\nOptimized vector-legalized selection DAG: "
                     << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                     << "'\n";
              CurDAG->dump());

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    if (TTI->hasBranchDivergence())
      CurDAG->VerifyDAGDivergence();
#endif
  }

  if (ViewLegalizeDAGs && MatchFilterBB)
    CurDAG->viewGraph("legalize input for " + BlockName);

  {
    NamedRegionTimer T("legalize", "DAG Legalization", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    CurDAG->Legalize();
  }

  ISEL_DUMP(dbgs() << "\nLegalized selection DAG: "
                   << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                   << "'\n";
            CurDAG->dump());

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  if (TTI->hasBranchDivergence())
    CurDAG->VerifyDAGDivergence();
#endif

  if (ViewDAGCombine2 && MatchFilterBB)
    CurDAG->viewGraph("dag-combine2 input for " + BlockName);

  // Run the DAG combiner in post-legalize mode.
  {
    NamedRegionTimer T("combine2", "DAG Combining 2", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    CurDAG->Combine(AfterLegalizeDAG, AA, OptLevel);
  }

  ISEL_DUMP(dbgs() << "\nOptimized legalized selection DAG: "
                   << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                   << "'\n";
            CurDAG->dump());

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  if (TTI->hasBranchDivergence())
    CurDAG->VerifyDAGDivergence();
#endif

  if (OptLevel != CodeGenOptLevel::None)
    ComputeLiveOutVRegInfo();

  if (ViewISelDAGs && MatchFilterBB)
    CurDAG->viewGraph("isel input for " + BlockName);

  // Third, instruction select all of the operations to machine code, adding
  // the code to the MachineBasicBlock.
  {
    NamedRegionTimer T("isel", "Instruction Selection", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    DoInstructionSelection();
  }

  ISEL_DUMP(dbgs() << "\nSelected selection DAG: "
                   << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
                   << "'\n";
            CurDAG->dump());

  if (ViewSchedDAGs && MatchFilterBB)
    CurDAG->viewGraph("scheduler input for " + BlockName);

  // Schedule machine code.
  ScheduleDAGSDNodes *Scheduler = CreateScheduler();
  {
    NamedRegionTimer T("sched", "Instruction Scheduling", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    Scheduler->Run(CurDAG, FuncInfo->MBB);
  }

  if (ViewSUnitDAGs && MatchFilterBB)
    Scheduler->viewGraph();

  // Emit machine code to BB. This can change 'BB' to the last block being
  // inserted into.
  MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
  {
    NamedRegionTimer T("emit", "Instruction Creation", GroupName,
                       GroupDescription, TimePassesIsEnabled);

    // FuncInfo->InsertPt is passed by reference and set to the end of the
    // scheduled instructions.
    LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(FuncInfo->InsertPt);
  }

  // If the block was split, make sure we update any references that are used
  // to update PHI nodes later on.
  if (FirstMBB != LastMBB)
    SDB->UpdateSplitBlock(FirstMBB, LastMBB);

  // Free the scheduler state.
  {
    NamedRegionTimer T("cleanup", "Instruction Scheduling Cleanup", GroupName,
                       GroupDescription, TimePassesIsEnabled);
    delete Scheduler;
  }

  // Free the SelectionDAG state, now that we're finished with it.
  CurDAG->clear();
}

namespace {

/// ISelUpdater - helper class to handle updates of the instruction selection
/// graph.
class ISelUpdater : public SelectionDAG::DAGUpdateListener {
  SelectionDAG::allnodes_iterator &ISelPosition;

public:
  ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
      : SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}

  /// NodeDeleted - Handle nodes deleted from the graph. If the node being
  /// deleted is the current ISelPosition node, update ISelPosition.
  ///
  void NodeDeleted(SDNode *N, SDNode *E) override {
    if (ISelPosition == SelectionDAG::allnodes_iterator(N))
      ++ISelPosition;
  }

  /// NodeInserted - Handle new nodes inserted into the graph: propagate
  /// metadata from root nodes that also applies to new nodes, in case the root
  /// is later deleted.
  void NodeInserted(SDNode *N) override {
    SDNode *CurNode = &*ISelPosition;
    if (MDNode *MD = DAG.getPCSections(CurNode))
      DAG.addPCSections(N, MD);
    if (MDNode *MMRA = DAG.getMMRAMetadata(CurNode))
      DAG.addMMRAMetadata(N, MMRA);
  }
};

} // end anonymous namespace

// This function is used to enforce the topological node id property
// leveraged during instruction selection. Before the selection process all
// nodes are given a non-negative id such that all nodes have a greater id than
// their operands. As this holds transitively we can prune checks that a node N
// is a predecessor of another node M by not recursively checking through M's
// operands if N's ID is larger than M's ID. This significantly improves
// performance of various legality checks (e.g. IsLegalToFold / UpdateChains).

// However, when we fuse multiple nodes into a single node during the
// selection we may induce a predecessor relationship between inputs and
// outputs of distinct nodes being merged, violating the topological property.
// Should a fused node have a successor which has yet to be selected,
// our legality checks would be incorrect. To avoid this we mark all unselected
// successor nodes, i.e. id != -1, as invalid for pruning by bit-negating (x =>
// (-(x+1))) the ids and modify our pruning check to ignore negative Ids of M.
// We use bit-negation to more clearly enforce that node id -1 can only be
// achieved by selected nodes. As the conversion is reversible to the original
// Id, topological pruning can still be leveraged when looking for unselected
// nodes. This method is called internally in all ISel replacement related
// functions.
void SelectionDAGISel::EnforceNodeIdInvariant(SDNode *Node) {
  SmallVector<SDNode *, 4> Nodes;
  Nodes.push_back(Node);

  while (!Nodes.empty()) {
    SDNode *N = Nodes.pop_back_val();
    for (auto *U : N->uses()) {
      auto UId = U->getNodeId();
      if (UId > 0) {
        InvalidateNodeId(U);
        Nodes.push_back(U);
      }
    }
  }
}

// InvalidateNodeId - As explained in EnforceNodeIdInvariant, mark a
// NodeId with the equivalent node id which is invalid for topological
// pruning.
void SelectionDAGISel::InvalidateNodeId(SDNode *N) {
  int InvalidId = -(N->getNodeId() + 1);
  N->setNodeId(InvalidId);
}

// getUninvalidatedNodeId - get original uninvalidated node id.
int SelectionDAGISel::getUninvalidatedNodeId(SDNode *N) {
  int Id = N->getNodeId();
  if (Id < -1)
    return -(Id + 1);
  return Id;
}

void SelectionDAGISel::DoInstructionSelection() {
  LLVM_DEBUG(dbgs() << "===== Instruction selection begins: "
                    << printMBBReference(*FuncInfo->MBB) << " '"
                    << FuncInfo->MBB->getName() << "'\n");

  PreprocessISelDAG();

  // Select target instructions for the DAG.
  {
    // Number all nodes with a topological order and set DAGSize.
    DAGSize = CurDAG->AssignTopologicalOrder();

    // Create a dummy node (which is not added to allnodes), that adds
    // a reference to the root node, preventing it from being deleted,
    // and tracking any changes of the root.
    HandleSDNode Dummy(CurDAG->getRoot());
    SelectionDAG::allnodes_iterator ISelPosition(CurDAG->getRoot().getNode());
    ++ISelPosition;

    // Make sure that ISelPosition gets properly updated when nodes are deleted
    // in calls made from this function. New nodes inherit relevant metadata.
    ISelUpdater ISU(*CurDAG, ISelPosition);

    // The AllNodes list is now topological-sorted. Visit the
    // nodes by starting at the end of the list (the root of the
    // graph) and proceeding back toward the beginning (the entry
    // node).
    while (ISelPosition != CurDAG->allnodes_begin()) {
      SDNode *Node = &*--ISelPosition;
      // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
      // but there are currently some corner cases that it misses. Also, this
      // makes it theoretically possible to disable the DAGCombiner.
      if (Node->use_empty())
        continue;

#ifndef NDEBUG
      SmallVector<SDNode *, 4> Nodes;
      Nodes.push_back(Node);

      while (!Nodes.empty()) {
        auto N = Nodes.pop_back_val();
        if (N->getOpcode() == ISD::TokenFactor || N->getNodeId() < 0)
          continue;
        for (const SDValue &Op : N->op_values()) {
          if (Op->getOpcode() == ISD::TokenFactor)
            Nodes.push_back(Op.getNode());
          else {
            // We rely on topological ordering of node ids for checking for
            // cycles when fusing nodes during selection. All unselected
            // successors of an already selected node should have a negative
            // id. This assertion will catch such cases. If this assertion
            // triggers it is likely you are using DAG-level Value/Node
            // replacement functions (versus equivalent ISEL replacement) in
            // backend-specific selections. See comment in
            // EnforceNodeIdInvariant for more details.
            assert(Op->getNodeId() != -1 &&
                   "Node has already selected predecessor node");
          }
        }
      }
#endif

      // When we are using non-default rounding modes or FP exception behavior,
      // FP operations are represented by StrictFP pseudo-operations. For
      // targets that do not (yet) understand strict FP operations directly,
      // we convert them to normal FP opcodes instead at this point. This
      // will allow them to be handled by existing target-specific instruction
      // selectors.
      if (!TLI->isStrictFPEnabled() && Node->isStrictFPOpcode()) {
        // For some opcodes, we need to call TLI->getOperationAction using
        // the first operand type instead of the result type. Note that this
        // must match what SelectionDAGLegalize::LegalizeOp is doing.
        EVT ActionVT;
        switch (Node->getOpcode()) {
        case ISD::STRICT_SINT_TO_FP:
        case ISD::STRICT_UINT_TO_FP:
        case ISD::STRICT_LRINT:
        case ISD::STRICT_LLRINT:
        case ISD::STRICT_LROUND:
        case ISD::STRICT_LLROUND:
        case ISD::STRICT_FSETCC:
        case ISD::STRICT_FSETCCS:
          ActionVT = Node->getOperand(1).getValueType();
          break;
        default:
          ActionVT = Node->getValueType(0);
          break;
        }
        if (TLI->getOperationAction(Node->getOpcode(), ActionVT)
            == TargetLowering::Expand)
          Node = CurDAG->mutateStrictFPToFP(Node);
      }

      LLVM_DEBUG(dbgs() << "\nISEL: Starting selection on root node: ";
                 Node->dump(CurDAG));

      Select(Node);
    }

    CurDAG->setRoot(Dummy.getValue());
  }

  LLVM_DEBUG(dbgs() << "\n===== Instruction selection ends:\n");

  PostprocessISelDAG();
}

static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI) {
  for (const User *U : CPI->users()) {
    if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(U)) {
      Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
      if (IID == Intrinsic::eh_exceptionpointer ||
          IID == Intrinsic::eh_exceptioncode)
        return true;
    }
  }
  return false;
}

// The wasm.landingpad.index intrinsic associates a landing pad index number
// with a catchpad instruction. Retrieve the landing pad index in the intrinsic
// and store the mapping in the function.
static void mapWasmLandingPadIndex(MachineBasicBlock *MBB,
                                   const CatchPadInst *CPI) {
  MachineFunction *MF = MBB->getParent();
  // In case of single catch (...), we don't emit LSDA, so we don't need
  // this information.
  bool IsSingleCatchAllClause =
      CPI->arg_size() == 1 &&
      cast<Constant>(CPI->getArgOperand(0))->isNullValue();
  // Catchpads for longjmp use an empty type list, e.g. catchpad within %0 [],
  // and they don't need LSDA info.
  bool IsCatchLongjmp = CPI->arg_size() == 0;
  if (!IsSingleCatchAllClause && !IsCatchLongjmp) {
    // Create a mapping from landing pad label to landing pad index.
    bool IntrFound = false;
    for (const User *U : CPI->users()) {
      if (const auto *Call = dyn_cast<IntrinsicInst>(U)) {
        Intrinsic::ID IID = Call->getIntrinsicID();
        if (IID == Intrinsic::wasm_landingpad_index) {
          Value *IndexArg = Call->getArgOperand(1);
          int Index = cast<ConstantInt>(IndexArg)->getZExtValue();
          MF->setWasmLandingPadIndex(MBB, Index);
          IntrFound = true;
          break;
        }
      }
    }
    assert(IntrFound && "wasm.landingpad.index intrinsic not found!");
    (void)IntrFound;
  }
}

/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
/// do other setup for EH landing-pad blocks.
bool SelectionDAGISel::PrepareEHLandingPad() {
  MachineBasicBlock *MBB = FuncInfo->MBB;
  const Constant *PersonalityFn = FuncInfo->Fn->getPersonalityFn();
  const BasicBlock *LLVMBB = MBB->getBasicBlock();
  const TargetRegisterClass *PtrRC =
      TLI->getRegClassFor(TLI->getPointerTy(CurDAG->getDataLayout()));

  auto Pers = classifyEHPersonality(PersonalityFn);

  // Catchpads have one live-in register, which typically holds the exception
  // pointer or code.
  if (isFuncletEHPersonality(Pers)) {
    if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI())) {
      if (hasExceptionPointerOrCodeUser(CPI)) {
        // Get or create the virtual register to hold the pointer or code. Mark
        // the live in physreg and copy into the vreg.
        MCPhysReg EHPhysReg = TLI->getExceptionPointerRegister(PersonalityFn);
        assert(EHPhysReg && "target lacks exception pointer register");
        MBB->addLiveIn(EHPhysReg);
        unsigned VReg = FuncInfo->getCatchPadExceptionPointerVReg(CPI, PtrRC);
        BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(),
                TII->get(TargetOpcode::COPY), VReg)
            .addReg(EHPhysReg, RegState::Kill);
      }
    }
    return true;
  }

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MCSymbol *Label = MF->addLandingPad(MBB);

  const MCInstrDesc &II = TII->get(TargetOpcode::EH_LABEL);
  BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
      .addSym(Label);

  // If the unwinder does not preserve all registers, ensure that the
  // function marks the clobbered registers as used.
  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
    MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);

  if (Pers == EHPersonality::Wasm_CXX) {
    if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI()))
      mapWasmLandingPadIndex(MBB, CPI);
  } else {
    // Assign the call site to the landing pad's begin label.
    MF->setCallSiteLandingPad(Label, SDB->LPadToCallSiteMap[MBB]);
    // Mark exception register as live in.
    if (unsigned Reg = TLI->getExceptionPointerRegister(PersonalityFn))
      FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(Reg, PtrRC);
    // Mark exception selector register as live in.
    if (unsigned Reg = TLI->getExceptionSelectorRegister(PersonalityFn))
      FuncInfo->ExceptionSelectorVirtReg = MBB->addLiveIn(Reg, PtrRC);
  }

  return true;
}

1444// Mark and Report IPToState for each Block under IsEHa
1445void SelectionDAGISel::reportIPToStateForBlocks(MachineFunction *MF) {
1446 MachineModuleInfo &MMI = MF->getMMI();
1447 llvm::WinEHFuncInfo *EHInfo = MF->getWinEHFuncInfo();
1448 if (!EHInfo)
1449 return;
1450 for (MachineBasicBlock &MBB : *MF) {
1451 const BasicBlock *BB = MBB.getBasicBlock();
1452 int State = EHInfo->BlockToStateMap[BB];
1453 if (BB->getFirstMayFaultInst()) {
1454 // Report IP range only for blocks with Faulty inst
1455 auto MBBb = MBB.getFirstNonPHI();
1456
1457 if (MBBb == MBB.end())
1458 continue;
1459
1460 MachineInstr *MIb = &*MBBb;
1461 if (MIb->isTerminator())
1462 continue;
1463
1464 // Insert EH Labels
1465 MCSymbol *BeginLabel = MMI.getContext().createTempSymbol();
1466 MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
1467 EHInfo->addIPToStateRange(State, InvokeBegin: BeginLabel, InvokeEnd: EndLabel);
1468 BuildMI(BB&: MBB, I: MBBb, MIMD: SDB->getCurDebugLoc(),
1469 MCID: TII->get(Opcode: TargetOpcode::EH_LABEL))
1470 .addSym(Sym: BeginLabel);
1471 auto MBBe = MBB.instr_end();
1472 MachineInstr *MIe = &*(--MBBe);
1473 // insert before (possible multiple) terminators
1474 while (MIe->isTerminator())
1475 MIe = &*(--MBBe);
1476 ++MBBe;
1477 BuildMI(BB&: MBB, I: MBBe, MIMD: SDB->getCurDebugLoc(),
1478 MCID: TII->get(Opcode: TargetOpcode::EH_LABEL))
1479 .addSym(Sym: EndLabel);
1480 }
1481 }
1482}
1483
1484/// isFoldedOrDeadInstruction - Return true if the specified instruction is
1485/// side-effect free and is either dead or folded into a generated instruction.
1486/// Return false if it needs to be emitted.
1487static bool isFoldedOrDeadInstruction(const Instruction *I,
1488 const FunctionLoweringInfo &FuncInfo) {
1489 return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
1490 !I->isTerminator() && // Terminators aren't folded.
1491 !isa<DbgInfoIntrinsic>(Val: I) && // Debug instructions aren't folded.
1492 !I->isEHPad() && // EH pad instructions aren't folded.
1493 !FuncInfo.isExportedInst(V: I); // Exported instrs must be computed.
1494}
1495
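/// Try to salvage a dbg.declare whose expression is an entry value referring
/// to a function argument: look up the argument's virtual register, find the
/// corresponding live-in physical register, append a DW_OP_deref (since a
/// dbg.declare describes an address), and record the variable against that
/// physreg. Returns true if the declare was handled this way.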
1496static bool processIfEntryValueDbgDeclare(FunctionLoweringInfo &FuncInfo,
1497 const Value *Arg, DIExpression *Expr,
1498 DILocalVariable *Var,
1499 DebugLoc DbgLoc) {
1500 if (!Expr->isEntryValue() || !isa<Argument>(Val: Arg))
1501 return false;
1502
1503 auto ArgIt = FuncInfo.ValueMap.find(Val: Arg);
1504 if (ArgIt == FuncInfo.ValueMap.end())
1505 return false;
1506 Register ArgVReg = ArgIt->getSecond();
1507
1508 // Find the corresponding livein physical register to this argument.
1509 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
1510 if (VirtReg == ArgVReg) {
1511 // Append an op deref to account for the fact that this is a dbg_declare.
1512 Expr = DIExpression::append(Expr, Ops: dwarf::DW_OP_deref);
1513 FuncInfo.MF->setVariableDbgInfo(Var, Expr, Reg: PhysReg, Loc: DbgLoc);
1514 LLVM_DEBUG(dbgs() << "processDbgDeclare: setVariableDbgInfo Var=" << *Var
1515 << ", Expr=" << *Expr << ", MCRegister=" << PhysReg
1516 << ", DbgLoc=" << DbgLoc << "\n");
1517 return true;
1518 }
1519 return false;
1520}
1521
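/// Try to lower a dbg.declare up front by binding the variable to a fixed
/// location: either an entry-value argument physreg (see above) or the frame
/// index of a static alloca / byval / inalloca argument passed in memory.
/// Returns false if the declare is not of that form and should instead be
/// handled during ISel like a dbg.value.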
1522static bool processDbgDeclare(FunctionLoweringInfo &FuncInfo,
1523 const Value *Address, DIExpression *Expr,
1524 DILocalVariable *Var, DebugLoc DbgLoc) {
1525 if (!Address) {
1526 LLVM_DEBUG(dbgs() << "processDbgDeclares skipping " << *Var
1527 << " (bad address)\n");
1528 return false;
1529 }
1530
1531 if (processIfEntryValueDbgDeclare(FuncInfo, Arg: Address, Expr, Var, DbgLoc))
1532 return true;
1533
1534 MachineFunction *MF = FuncInfo.MF;
1535 const DataLayout &DL = MF->getDataLayout();
1536
1537 assert(Var && "Missing variable");
1538 assert(DbgLoc && "Missing location");
1539
1540 // Look through casts and constant offset GEPs. These mostly come from
1541 // inalloca.
1542 APInt Offset(DL.getTypeSizeInBits(Ty: Address->getType()), 0);
1543 Address = Address->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
1544
1545 // Check if the variable is a static alloca or a byval or inalloca
1546 // argument passed in memory. If it is not, then we will ignore this
1547 // intrinsic and handle this during isel like dbg.value.
1548 int FI = std::numeric_limits<int>::max();
1549 if (const auto *AI = dyn_cast<AllocaInst>(Val: Address)) {
1550 auto SI = FuncInfo.StaticAllocaMap.find(Val: AI);
1551 if (SI != FuncInfo.StaticAllocaMap.end())
1552 FI = SI->second;
1553 } else if (const auto *Arg = dyn_cast<Argument>(Val: Address))
1554 FI = FuncInfo.getArgumentFrameIndex(A: Arg);
1555
1556 if (FI == std::numeric_limits<int>::max())
1557 return false;
1558
1559 if (Offset.getBoolValue())
1560 Expr = DIExpression::prepend(Expr, Flags: DIExpression::ApplyOffset,
1561 Offset: Offset.getZExtValue());
1562
1563 LLVM_DEBUG(dbgs() << "processDbgDeclare: setVariableDbgInfo Var=" << *Var
1564 << ", Expr=" << *Expr << ", FI=" << FI
1565 << ", DbgLoc=" << DbgLoc << "\n");
1566 MF->setVariableDbgInfo(Var, Expr, Slot: FI, Loc: DbgLoc);
1567 return true;
1568}
1569
1570/// Collect llvm.dbg.declare information. This is done after argument lowering
1571/// in case the declarations refer to arguments.
1572static void processDbgDeclares(FunctionLoweringInfo &FuncInfo) {
1573 for (const auto &I : instructions(F: *FuncInfo.Fn)) {
1574 const auto *DI = dyn_cast<DbgDeclareInst>(Val: &I);
1575 if (DI && processDbgDeclare(FuncInfo, Address: DI->getAddress(), Expr: DI->getExpression(),
1576 Var: DI->getVariable(), DbgLoc: DI->getDebugLoc()))
1577 FuncInfo.PreprocessedDbgDeclares.insert(Ptr: DI);
1578 for (const DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) {
1579 if (DVR.Type == DbgVariableRecord::LocationType::Declare &&
1580 processDbgDeclare(FuncInfo, Address: DVR.getVariableLocationOp(OpIdx: 0),
1581 Expr: DVR.getExpression(), Var: DVR.getVariable(),
1582 DbgLoc: DVR.getDebugLoc()))
1583 FuncInfo.PreprocessedDVRDeclares.insert(Ptr: &DVR);
1584 }
1585 }
1586}
1587
1588/// Collect single location variable information generated with assignment
1589/// tracking. This is done after argument lowering in case the declarations
1590/// refer to arguments.
1591static void processSingleLocVars(FunctionLoweringInfo &FuncInfo,
1592 FunctionVarLocs const *FnVarLocs) {
1593 for (auto It = FnVarLocs->single_locs_begin(),
1594 End = FnVarLocs->single_locs_end();
1595 It != End; ++It) {
1596 assert(!It->Values.hasArgList() && "Single loc variadic ops not supported");
1597 processDbgDeclare(FuncInfo, Address: It->Values.getVariableLocationOp(OpIdx: 0), Expr: It->Expr,
1598 Var: FnVarLocs->getDILocalVariable(ID: It->VariableID), DbgLoc: It->DL);
1599 }
1600}
1601
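/// Run instruction selection over every basic block in the function: lower the
/// incoming arguments (trying FastISel first if it is enabled), collect
/// dbg.declare / single-location variable information, then visit the blocks
/// in reverse post-order, selecting each instruction with FastISel where
/// possible and falling back to SelectionDAG for anything it cannot handle.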
1602void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
1603 FastISelFailed = false;
1604 // Initialize the Fast-ISel state, if needed.
1605 FastISel *FastIS = nullptr;
1606 if (TM.Options.EnableFastISel) {
1607 LLVM_DEBUG(dbgs() << "Enabling fast-isel\n");
1608 FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
1609 }
1610
1611 ReversePostOrderTraversal<const Function*> RPOT(&Fn);
1612
1613 // Lower arguments up front. An RPO iteration always visits the entry block
1614 // first.
1615 assert(*RPOT.begin() == &Fn.getEntryBlock());
1616 ++NumEntryBlocks;
1617
1618 // Set up FuncInfo for ISel. Entry blocks never have PHIs.
1619 FuncInfo->MBB = FuncInfo->MBBMap[&Fn.getEntryBlock()];
1620 FuncInfo->InsertPt = FuncInfo->MBB->begin();
1621
1622 CurDAG->setFunctionLoweringInfo(FuncInfo.get());
1623
1624 if (!FastIS) {
1625 LowerArguments(F: Fn);
1626 } else {
1627 // See if fast isel can lower the arguments.
1628 FastIS->startNewBlock();
1629 if (!FastIS->lowerArguments()) {
1630 FastISelFailed = true;
1631 // Fast isel failed to lower these arguments
1632 ++NumFastIselFailLowerArguments;
1633
1634 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1635 Fn.getSubprogram(),
1636 &Fn.getEntryBlock());
1637 R << "FastISel didn't lower all arguments: "
1638 << ore::NV("Prototype", Fn.getFunctionType());
1639 reportFastISelFailure(MF&: *MF, ORE&: *ORE, R, ShouldAbort: EnableFastISelAbort > 1);
1640
1641 // Use SelectionDAG argument lowering
1642 LowerArguments(F: Fn);
1643 CurDAG->setRoot(SDB->getControlRoot());
1644 SDB->clear();
1645 CodeGenAndEmitDAG();
1646 }
1647
1648 // If we inserted any instructions at the beginning, make a note of
1649 // where they are, so we can be sure to emit subsequent instructions
1650 // after them.
1651 if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
1652 FastIS->setLastLocalValue(&*std::prev(x: FuncInfo->InsertPt));
1653 else
1654 FastIS->setLastLocalValue(nullptr);
1655 }
1656
1657 bool Inserted = SwiftError->createEntriesInEntryBlock(DbgLoc: SDB->getCurDebugLoc());
1658
1659 if (FastIS && Inserted)
1660 FastIS->setLastLocalValue(&*std::prev(x: FuncInfo->InsertPt));
1661
1662 if (isAssignmentTrackingEnabled(M: *Fn.getParent())) {
1663 assert(CurDAG->getFunctionVarLocs() &&
1664 "expected AssignmentTrackingAnalysis pass results");
1665 processSingleLocVars(FuncInfo&: *FuncInfo, FnVarLocs: CurDAG->getFunctionVarLocs());
1666 } else {
1667 processDbgDeclares(FuncInfo&: *FuncInfo);
1668 }
1669
1670 // Iterate over all basic blocks in the function.
1671 for (const BasicBlock *LLVMBB : RPOT) {
1672 if (OptLevel != CodeGenOptLevel::None) {
1673 bool AllPredsVisited = true;
1674 for (const BasicBlock *Pred : predecessors(BB: LLVMBB)) {
1675 if (!FuncInfo->VisitedBBs.count(Ptr: Pred)) {
1676 AllPredsVisited = false;
1677 break;
1678 }
1679 }
1680
1681 if (AllPredsVisited) {
1682 for (const PHINode &PN : LLVMBB->phis())
1683 FuncInfo->ComputePHILiveOutRegInfo(&PN);
1684 } else {
1685 for (const PHINode &PN : LLVMBB->phis())
1686 FuncInfo->InvalidatePHILiveOutRegInfo(PN: &PN);
1687 }
1688
1689 FuncInfo->VisitedBBs.insert(Ptr: LLVMBB);
1690 }
1691
1692 BasicBlock::const_iterator const Begin =
1693 LLVMBB->getFirstNonPHI()->getIterator();
1694 BasicBlock::const_iterator const End = LLVMBB->end();
1695 BasicBlock::const_iterator BI = End;
1696
1697 FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
1698 if (!FuncInfo->MBB)
1699 continue; // Some blocks like catchpads have no code or MBB.
1700
1701 // Insert new instructions after any phi or argument setup code.
1702 FuncInfo->InsertPt = FuncInfo->MBB->end();
1703
1704 // Setup an EH landing-pad block.
1705 FuncInfo->ExceptionPointerVirtReg = 0;
1706 FuncInfo->ExceptionSelectorVirtReg = 0;
1707 if (LLVMBB->isEHPad())
1708 if (!PrepareEHLandingPad())
1709 continue;
1710
1711 // Before doing SelectionDAG ISel, see if FastISel has been requested.
1712 if (FastIS) {
1713 if (LLVMBB != &Fn.getEntryBlock())
1714 FastIS->startNewBlock();
1715
1716 unsigned NumFastIselRemaining = std::distance(first: Begin, last: End);
1717
1718 // Pre-assign swifterror vregs.
1719 SwiftError->preassignVRegs(MBB: FuncInfo->MBB, Begin, End);
1720
1721 // Do FastISel on as many instructions as possible.
1722 for (; BI != Begin; --BI) {
1723 const Instruction *Inst = &*std::prev(x: BI);
1724
1725 // If we no longer require this instruction, skip it.
1726 if (isFoldedOrDeadInstruction(I: Inst, FuncInfo: *FuncInfo) ||
1727 ElidedArgCopyInstrs.count(Ptr: Inst)) {
1728 --NumFastIselRemaining;
1729 FastIS->handleDbgInfo(II: Inst);
1730 continue;
1731 }
1732
1733 // Bottom-up: reset the insert pos at the top, after any local-value
1734 // instructions.
1735 FastIS->recomputeInsertPt();
1736
1737 // Try to select the instruction with FastISel.
1738 if (FastIS->selectInstruction(I: Inst)) {
1739 --NumFastIselRemaining;
1740 ++NumFastIselSuccess;
1741
1742 FastIS->handleDbgInfo(II: Inst);
1743 // If fast isel succeeded, skip over all the folded instructions, and
1744 // then see if there is a load right before the selected instructions.
1745 // Try to fold the load if so.
1746 const Instruction *BeforeInst = Inst;
1747 while (BeforeInst != &*Begin) {
1748 BeforeInst = &*std::prev(x: BasicBlock::const_iterator(BeforeInst));
1749 if (!isFoldedOrDeadInstruction(I: BeforeInst, FuncInfo: *FuncInfo))
1750 break;
1751 }
1752 if (BeforeInst != Inst && isa<LoadInst>(Val: BeforeInst) &&
1753 BeforeInst->hasOneUse() &&
1754 FastIS->tryToFoldLoad(LI: cast<LoadInst>(Val: BeforeInst), FoldInst: Inst)) {
1755 // If we succeeded, don't re-select the load.
1756 LLVM_DEBUG(dbgs()
1757 << "FastISel folded load: " << *BeforeInst << "\n");
1758 FastIS->handleDbgInfo(II: BeforeInst);
1759 BI = std::next(x: BasicBlock::const_iterator(BeforeInst));
1760 --NumFastIselRemaining;
1761 ++NumFastIselSuccess;
1762 }
1763 continue;
1764 }
1765
1766 FastISelFailed = true;
1767
1768 // Then handle certain instructions as single-LLVM-Instruction blocks.
1769 // We cannot separate out GCrelocates to their own blocks since we need
1770 // to keep track of gc-relocates for a particular gc-statepoint. This is
1771 // done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before
1772 // visitGCRelocate.
1773 if (isa<CallInst>(Val: Inst) && !isa<GCStatepointInst>(Val: Inst) &&
1774 !isa<GCRelocateInst>(Val: Inst) && !isa<GCResultInst>(Val: Inst)) {
1775 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1776 Inst->getDebugLoc(), LLVMBB);
1777
1778 R << "FastISel missed call";
1779
1780 if (R.isEnabled() || EnableFastISelAbort) {
1781 std::string InstStrStorage;
1782 raw_string_ostream InstStr(InstStrStorage);
1783 InstStr << *Inst;
1784
1785 R << ": " << InstStrStorage;
1786 }
1787
1788 reportFastISelFailure(MF&: *MF, ORE&: *ORE, R, ShouldAbort: EnableFastISelAbort > 2);
1789
1790 if (!Inst->getType()->isVoidTy() && !Inst->getType()->isTokenTy() &&
1791 !Inst->use_empty()) {
1792 Register &R = FuncInfo->ValueMap[Inst];
1793 if (!R)
1794 R = FuncInfo->CreateRegs(V: Inst);
1795 }
1796
1797 bool HadTailCall = false;
1798 MachineBasicBlock::iterator SavedInsertPt = FuncInfo->InsertPt;
1799 SelectBasicBlock(Begin: Inst->getIterator(), End: BI, HadTailCall);
1800
1801 // If the call was emitted as a tail call, we're done with the block.
1802 // We also need to delete any previously emitted instructions.
1803 if (HadTailCall) {
1804 FastIS->removeDeadCode(I: SavedInsertPt, E: FuncInfo->MBB->end());
1805 --BI;
1806 break;
1807 }
1808
1809 // Recompute NumFastIselRemaining as Selection DAG instruction
1810 // selection may have handled the call, input args, etc.
1811 unsigned RemainingNow = std::distance(first: Begin, last: BI);
1812 NumFastIselFailures += NumFastIselRemaining - RemainingNow;
1813 NumFastIselRemaining = RemainingNow;
1814 continue;
1815 }
1816
1817 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1818 Inst->getDebugLoc(), LLVMBB);
1819
1820 bool ShouldAbort = EnableFastISelAbort;
1821 if (Inst->isTerminator()) {
1822 // Use a different message for terminator misses.
1823 R << "FastISel missed terminator";
1824 // Don't abort for terminator unless the level is really high
1825 ShouldAbort = (EnableFastISelAbort > 2);
1826 } else {
1827 R << "FastISel missed";
1828 }
1829
1830 if (R.isEnabled() || EnableFastISelAbort) {
1831 std::string InstStrStorage;
1832 raw_string_ostream InstStr(InstStrStorage);
1833 InstStr << *Inst;
1834 R << ": " << InstStrStorage;
1835 }
1836
1837 reportFastISelFailure(MF&: *MF, ORE&: *ORE, R, ShouldAbort);
1838
1839 NumFastIselFailures += NumFastIselRemaining;
1840 break;
1841 }
1842
1843 FastIS->recomputeInsertPt();
1844 }
1845
1846 if (SP->shouldEmitSDCheck(BB: *LLVMBB)) {
1847 bool FunctionBasedInstrumentation =
1848 TLI->getSSPStackGuardCheck(M: *Fn.getParent());
1849 SDB->SPDescriptor.initialize(BB: LLVMBB, MBB: FuncInfo->MBBMap[LLVMBB],
1850 FunctionBasedInstrumentation);
1851 }
1852
1853 if (Begin != BI)
1854 ++NumDAGBlocks;
1855 else
1856 ++NumFastIselBlocks;
1857
1858 if (Begin != BI) {
1859 // Run SelectionDAG instruction selection on the remainder of the block
1860 // not handled by FastISel. If FastISel is not run, this is the entire
1861 // block.
1862 bool HadTailCall;
1863 SelectBasicBlock(Begin, End: BI, HadTailCall);
1864
1865 // But if FastISel was run, we already selected some of the block.
1866 // If we emitted a tail-call, we need to delete any previously emitted
1867 // instruction that follows it.
1868 if (FastIS && HadTailCall && FuncInfo->InsertPt != FuncInfo->MBB->end())
1869 FastIS->removeDeadCode(I: FuncInfo->InsertPt, E: FuncInfo->MBB->end());
1870 }
1871
1872 if (FastIS)
1873 FastIS->finishBasicBlock();
1874 FinishBasicBlock();
1875 FuncInfo->PHINodesToUpdate.clear();
1876 ElidedArgCopyInstrs.clear();
1877 }
1878
  // Asynchronous EH: report the per-block state when the "eh-asynch" module
  // flag is set.
1880 if (Fn.getParent()->getModuleFlag(Key: "eh-asynch"))
1881 reportIPToStateForBlocks(MF);
1882
1883 SP->copyToMachineFrameInfo(MFI&: MF->getFrameInfo());
1884
1885 SwiftError->propagateVRegs();
1886
1887 delete FastIS;
1888 SDB->clearDanglingDebugInfo();
1889 SDB->SPDescriptor.resetPerFunctionState();
1890}
1891
1892void
1893SelectionDAGISel::FinishBasicBlock() {
1894 LLVM_DEBUG(dbgs() << "Total amount of phi nodes to update: "
1895 << FuncInfo->PHINodesToUpdate.size() << "\n";
1896 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e;
1897 ++i) dbgs()
1898 << "Node " << i << " : (" << FuncInfo->PHINodesToUpdate[i].first
1899 << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
1900
  // Now that we know the last MBB into which the LLVM BB was expanded, update
  // the PHI nodes in the successors.
1903 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
1904 MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[i].first);
1905 assert(PHI->isPHI() &&
1906 "This is not a machine PHI node that we are updating!");
1907 if (!FuncInfo->MBB->isSuccessor(MBB: PHI->getParent()))
1908 continue;
1909 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[i].second).addMBB(MBB: FuncInfo->MBB);
1910 }
1911
1912 // Handle stack protector.
1913 if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
    // The target provides a guard check function. There is no need to
    // generate error handling code or to split the current basic block.
1916 MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1917
1918 // Add load and check to the basicblock.
1919 FuncInfo->MBB = ParentMBB;
1920 FuncInfo->InsertPt =
1921 findSplitPointForStackProtector(BB: ParentMBB, TII: *TII);
1922 SDB->visitSPDescriptorParent(SPD&: SDB->SPDescriptor, ParentBB: ParentMBB);
1923 CurDAG->setRoot(SDB->getRoot());
1924 SDB->clear();
1925 CodeGenAndEmitDAG();
1926
1927 // Clear the Per-BB State.
1928 SDB->SPDescriptor.resetPerBBState();
1929 } else if (SDB->SPDescriptor.shouldEmitStackProtector()) {
1930 MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1931 MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB();
1932
    // Find the split point to split the parent mbb. At the same time copy all
    // physical registers used in the tail of the parent mbb into virtual
    // registers before the split point and back into physical registers after
    // the split point. This saves us from having to deal with live-ins and
    // many other register allocation issues caused by splitting the parent
    // mbb. The register allocator will clean up these virtual copies later on.
1939 MachineBasicBlock::iterator SplitPoint =
1940 findSplitPointForStackProtector(BB: ParentMBB, TII: *TII);
1941
1942 // Splice the terminator of ParentMBB into SuccessMBB.
1943 SuccessMBB->splice(Where: SuccessMBB->end(), Other: ParentMBB,
1944 From: SplitPoint,
1945 To: ParentMBB->end());
1946
    // Add the compare, the branch taken on inequality, and the fall-through
    // jump to the parent BB.
1948 FuncInfo->MBB = ParentMBB;
1949 FuncInfo->InsertPt = ParentMBB->end();
1950 SDB->visitSPDescriptorParent(SPD&: SDB->SPDescriptor, ParentBB: ParentMBB);
1951 CurDAG->setRoot(SDB->getRoot());
1952 SDB->clear();
1953 CodeGenAndEmitDAG();
1954
    // Codegen the failure MBB if we have not codegened it yet.
1956 MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB();
1957 if (FailureMBB->empty()) {
1958 FuncInfo->MBB = FailureMBB;
1959 FuncInfo->InsertPt = FailureMBB->end();
1960 SDB->visitSPDescriptorFailure(SPD&: SDB->SPDescriptor);
1961 CurDAG->setRoot(SDB->getRoot());
1962 SDB->clear();
1963 CodeGenAndEmitDAG();
1964 }
1965
1966 // Clear the Per-BB State.
1967 SDB->SPDescriptor.resetPerBBState();
1968 }
1969
1970 // Lower each BitTestBlock.
1971 for (auto &BTB : SDB->SL->BitTestCases) {
1972 // Lower header first, if it wasn't already lowered
1973 if (!BTB.Emitted) {
1974 // Set the current basic block to the mbb we wish to insert the code into
1975 FuncInfo->MBB = BTB.Parent;
1976 FuncInfo->InsertPt = FuncInfo->MBB->end();
1977 // Emit the code
1978 SDB->visitBitTestHeader(B&: BTB, SwitchBB: FuncInfo->MBB);
1979 CurDAG->setRoot(SDB->getRoot());
1980 SDB->clear();
1981 CodeGenAndEmitDAG();
1982 }
1983
1984 BranchProbability UnhandledProb = BTB.Prob;
1985 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
1986 UnhandledProb -= BTB.Cases[j].ExtraProb;
1987 // Set the current basic block to the mbb we wish to insert the code into
1988 FuncInfo->MBB = BTB.Cases[j].ThisBB;
1989 FuncInfo->InsertPt = FuncInfo->MBB->end();
1990 // Emit the code
1991
1992 // If all cases cover a contiguous range, it is not necessary to jump to
1993 // the default block after the last bit test fails. This is because the
1994 // range check during bit test header creation has guaranteed that every
1995 // case here doesn't go outside the range. In this case, there is no need
1996 // to perform the last bit test, as it will always be true. Instead, make
1997 // the second-to-last bit-test fall through to the target of the last bit
1998 // test, and delete the last bit test.
1999
2000 MachineBasicBlock *NextMBB;
2001 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
2002 // Second-to-last bit-test with contiguous range or omitted range
2003 // check: fall through to the target of the final bit test.
2004 NextMBB = BTB.Cases[j + 1].TargetBB;
2005 } else if (j + 1 == ej) {
2006 // For the last bit test, fall through to Default.
2007 NextMBB = BTB.Default;
2008 } else {
2009 // Otherwise, fall through to the next bit test.
2010 NextMBB = BTB.Cases[j + 1].ThisBB;
2011 }
2012
2013 SDB->visitBitTestCase(BB&: BTB, NextMBB, BranchProbToNext: UnhandledProb, Reg: BTB.Reg, B&: BTB.Cases[j],
2014 SwitchBB: FuncInfo->MBB);
2015
2016 CurDAG->setRoot(SDB->getRoot());
2017 SDB->clear();
2018 CodeGenAndEmitDAG();
2019
2020 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
2021 // Since we're not going to use the final bit test, remove it.
2022 BTB.Cases.pop_back();
2023 break;
2024 }
2025 }
2026
2027 // Update PHI Nodes
2028 for (const std::pair<MachineInstr *, unsigned> &P :
2029 FuncInfo->PHINodesToUpdate) {
2030 MachineInstrBuilder PHI(*MF, P.first);
2031 MachineBasicBlock *PHIBB = PHI->getParent();
2032 assert(PHI->isPHI() &&
2033 "This is not a machine PHI node that we are updating!");
      // This is the "default" BB. There are two jumps to it: from the "header"
      // BB and from the last "case" BB, unless the latter was skipped.
2036 if (PHIBB == BTB.Default) {
2037 PHI.addReg(RegNo: P.second).addMBB(MBB: BTB.Parent);
2038 if (!BTB.ContiguousRange) {
2039 PHI.addReg(RegNo: P.second).addMBB(MBB: BTB.Cases.back().ThisBB);
2040 }
2041 }
2042 // One of "cases" BB.
2043 for (const SwitchCG::BitTestCase &BT : BTB.Cases) {
2044 MachineBasicBlock* cBB = BT.ThisBB;
2045 if (cBB->isSuccessor(MBB: PHIBB))
2046 PHI.addReg(RegNo: P.second).addMBB(MBB: cBB);
2047 }
2048 }
2049 }
2050 SDB->SL->BitTestCases.clear();
2051
  // If the JumpTable record is filled in, then we need to emit a jump table.
  // Updating the PHI nodes is tricky in this case, since we need to determine
  // whether the PHI is a successor of the range check MBB or of the jump
  // table MBB.
2055 for (unsigned i = 0, e = SDB->SL->JTCases.size(); i != e; ++i) {
2056 // Lower header first, if it wasn't already lowered
2057 if (!SDB->SL->JTCases[i].first.Emitted) {
2058 // Set the current basic block to the mbb we wish to insert the code into
2059 FuncInfo->MBB = SDB->SL->JTCases[i].first.HeaderBB;
2060 FuncInfo->InsertPt = FuncInfo->MBB->end();
2061 // Emit the code
2062 SDB->visitJumpTableHeader(JT&: SDB->SL->JTCases[i].second,
2063 JTH&: SDB->SL->JTCases[i].first, SwitchBB: FuncInfo->MBB);
2064 CurDAG->setRoot(SDB->getRoot());
2065 SDB->clear();
2066 CodeGenAndEmitDAG();
2067 }
2068
2069 // Set the current basic block to the mbb we wish to insert the code into
2070 FuncInfo->MBB = SDB->SL->JTCases[i].second.MBB;
2071 FuncInfo->InsertPt = FuncInfo->MBB->end();
2072 // Emit the code
2073 SDB->visitJumpTable(JT&: SDB->SL->JTCases[i].second);
2074 CurDAG->setRoot(SDB->getRoot());
2075 SDB->clear();
2076 CodeGenAndEmitDAG();
2077
2078 // Update PHI Nodes
2079 for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
2080 pi != pe; ++pi) {
2081 MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
2082 MachineBasicBlock *PHIBB = PHI->getParent();
2083 assert(PHI->isPHI() &&
2084 "This is not a machine PHI node that we are updating!");
      // The "default" BB. We can only reach it from the header BB.
2086 if (PHIBB == SDB->SL->JTCases[i].second.Default)
2087 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[pi].second)
2088 .addMBB(MBB: SDB->SL->JTCases[i].first.HeaderBB);
      // The jump table BB. Just iterate over the successors here.
2090 if (FuncInfo->MBB->isSuccessor(MBB: PHIBB))
2091 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[pi].second).addMBB(MBB: FuncInfo->MBB);
2092 }
2093 }
2094 SDB->SL->JTCases.clear();
2095
2096 // If we generated any switch lowering information, build and codegen any
2097 // additional DAGs necessary.
2098 for (unsigned i = 0, e = SDB->SL->SwitchCases.size(); i != e; ++i) {
2099 // Set the current basic block to the mbb we wish to insert the code into
2100 FuncInfo->MBB = SDB->SL->SwitchCases[i].ThisBB;
2101 FuncInfo->InsertPt = FuncInfo->MBB->end();
2102
2103 // Determine the unique successors.
2104 SmallVector<MachineBasicBlock *, 2> Succs;
2105 Succs.push_back(Elt: SDB->SL->SwitchCases[i].TrueBB);
2106 if (SDB->SL->SwitchCases[i].TrueBB != SDB->SL->SwitchCases[i].FalseBB)
2107 Succs.push_back(Elt: SDB->SL->SwitchCases[i].FalseBB);
2108
2109 // Emit the code. Note that this could result in FuncInfo->MBB being split.
2110 SDB->visitSwitchCase(CB&: SDB->SL->SwitchCases[i], SwitchBB: FuncInfo->MBB);
2111 CurDAG->setRoot(SDB->getRoot());
2112 SDB->clear();
2113 CodeGenAndEmitDAG();
2114
2115 // Remember the last block, now that any splitting is done, for use in
2116 // populating PHI nodes in successors.
2117 MachineBasicBlock *ThisBB = FuncInfo->MBB;
2118
2119 // Handle any PHI nodes in successors of this chunk, as if we were coming
2120 // from the original BB before switch expansion. Note that PHI nodes can
2121 // occur multiple times in PHINodesToUpdate. We have to be very careful to
2122 // handle them the right number of times.
2123 for (MachineBasicBlock *Succ : Succs) {
2124 FuncInfo->MBB = Succ;
2125 FuncInfo->InsertPt = FuncInfo->MBB->end();
2126 // FuncInfo->MBB may have been removed from the CFG if a branch was
2127 // constant folded.
2128 if (ThisBB->isSuccessor(MBB: FuncInfo->MBB)) {
2129 for (MachineBasicBlock::iterator
2130 MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end();
2131 MBBI != MBBE && MBBI->isPHI(); ++MBBI) {
2132 MachineInstrBuilder PHI(*MF, MBBI);
2133 // This value for this PHI node is recorded in PHINodesToUpdate.
2134 for (unsigned pn = 0; ; ++pn) {
2135 assert(pn != FuncInfo->PHINodesToUpdate.size() &&
2136 "Didn't find PHI entry!");
2137 if (FuncInfo->PHINodesToUpdate[pn].first == PHI) {
2138 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[pn].second).addMBB(MBB: ThisBB);
2139 break;
2140 }
2141 }
2142 }
2143 }
2144 }
2145 }
2146 SDB->SL->SwitchCases.clear();
2147}
2148
2149/// Create the scheduler. If a specific scheduler was specified
2150/// via the SchedulerRegistry, use it, otherwise select the
2151/// one preferred by the target.
2152///
2153ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
2154 return ISHeuristic(this, OptLevel);
2155}
2156
2157//===----------------------------------------------------------------------===//
2158// Helper functions used by the generated instruction selector.
2159//===----------------------------------------------------------------------===//
2160// Calls to these methods are generated by tblgen.
2161
2162/// CheckAndMask - The isel is trying to match something like (and X, 255). If
2163/// the dag combiner simplified the 255, we still want to match. RHS is the
2164/// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
2165/// specified in the .td file (e.g. 255).
2166bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
2167 int64_t DesiredMaskS) const {
2168 const APInt &ActualMask = RHS->getAPIntValue();
2169 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2170
2171 // If the actual mask exactly matches, success!
2172 if (ActualMask == DesiredMask)
2173 return true;
2174
  // If the actual AND mask allows bits that the desired mask does not, this
  // doesn't match.
2176 if (!ActualMask.isSubsetOf(RHS: DesiredMask))
2177 return false;
2178
2179 // Otherwise, the DAG Combiner may have proven that the value coming in is
2180 // either already zero or is not demanded. Check for known zero input bits.
2181 APInt NeededMask = DesiredMask & ~ActualMask;
2182 if (CurDAG->MaskedValueIsZero(Op: LHS, Mask: NeededMask))
2183 return true;
2184
2185 // TODO: check to see if missing bits are just not demanded.
2186
2187 // Otherwise, this pattern doesn't match.
2188 return false;
2189}
2190
2191/// CheckOrMask - The isel is trying to match something like (or X, 255). If
2192/// the dag combiner simplified the 255, we still want to match. RHS is the
2193/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
2194/// specified in the .td file (e.g. 255).
2195bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
2196 int64_t DesiredMaskS) const {
2197 const APInt &ActualMask = RHS->getAPIntValue();
2198 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2199
2200 // If the actual mask exactly matches, success!
2201 if (ActualMask == DesiredMask)
2202 return true;
2203
  // If the actual OR immediate sets bits that the desired mask does not allow,
  // this doesn't match.
2205 if (!ActualMask.isSubsetOf(RHS: DesiredMask))
2206 return false;
2207
2208 // Otherwise, the DAG Combiner may have proven that the value coming in is
2209 // either already zero or is not demanded. Check for known zero input bits.
2210 APInt NeededMask = DesiredMask & ~ActualMask;
2211 KnownBits Known = CurDAG->computeKnownBits(Op: LHS);
2212
2213 // If all the missing bits in the or are already known to be set, match!
2214 if (NeededMask.isSubsetOf(RHS: Known.One))
2215 return true;
2216
2217 // TODO: check to see if missing bits are just not demanded.
2218
2219 // Otherwise, this pattern doesn't match.
2220 return false;
2221}
2222
2223/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
2224/// by tblgen. Others should not call it.
2225void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
2226 const SDLoc &DL) {
  // Convert the vector of SDValues into a list of HandleSDNodes, because
  // targets such as X86 may call ReplaceAllUses while matching an address,
  // which would otherwise invalidate the raw SDValue operands.
2229
2230 std::list<HandleSDNode> Handles;
2231
2232 Handles.emplace_back(args&: Ops[InlineAsm::Op_InputChain]); // 0
2233 Handles.emplace_back(args&: Ops[InlineAsm::Op_AsmString]); // 1
2234 Handles.emplace_back(args&: Ops[InlineAsm::Op_MDNode]); // 2, !srcloc
2235 Handles.emplace_back(
2236 args&: Ops[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
2237
2238 unsigned i = InlineAsm::Op_FirstOperand, e = Ops.size();
2239 if (Ops[e - 1].getValueType() == MVT::Glue)
2240 --e; // Don't process a glue operand if it is here.
2241
2242 while (i != e) {
2243 InlineAsm::Flag Flags(Ops[i]->getAsZExtVal());
2244 if (!Flags.isMemKind() && !Flags.isFuncKind()) {
2245 // Just skip over this operand, copying the operands verbatim.
2246 Handles.insert(position: Handles.end(), first: Ops.begin() + i,
2247 last: Ops.begin() + i + Flags.getNumOperandRegisters() + 1);
2248 i += Flags.getNumOperandRegisters() + 1;
2249 } else {
2250 assert(Flags.getNumOperandRegisters() == 1 &&
2251 "Memory operand with multiple values?");
2252
2253 unsigned TiedToOperand;
2254 if (Flags.isUseOperandTiedToDef(Idx&: TiedToOperand)) {
2255 // We need the constraint ID from the operand this is tied to.
2256 unsigned CurOp = InlineAsm::Op_FirstOperand;
2257 Flags = InlineAsm::Flag(Ops[CurOp]->getAsZExtVal());
2258 for (; TiedToOperand; --TiedToOperand) {
2259 CurOp += Flags.getNumOperandRegisters() + 1;
2260 Flags = InlineAsm::Flag(Ops[CurOp]->getAsZExtVal());
2261 }
2262 }
2263
2264 // Otherwise, this is a memory operand. Ask the target to select it.
2265 std::vector<SDValue> SelOps;
2266 const InlineAsm::ConstraintCode ConstraintID =
2267 Flags.getMemoryConstraintID();
2268 if (SelectInlineAsmMemoryOperand(Op: Ops[i + 1], ConstraintID, OutOps&: SelOps))
2269 report_fatal_error(reason: "Could not match memory address. Inline asm"
2270 " failure!");
2271
2272 // Add this to the output node.
2273 Flags = InlineAsm::Flag(Flags.isMemKind() ? InlineAsm::Kind::Mem
2274 : InlineAsm::Kind::Func,
2275 SelOps.size());
2276 Flags.setMemConstraint(ConstraintID);
2277 Handles.emplace_back(args: CurDAG->getTargetConstant(Val: Flags, DL, VT: MVT::i32));
2278 Handles.insert(position: Handles.end(), first: SelOps.begin(), last: SelOps.end());
2279 i += 2;
2280 }
2281 }
2282
2283 // Add the glue input back if present.
2284 if (e != Ops.size())
2285 Handles.emplace_back(args&: Ops.back());
2286
2287 Ops.clear();
2288 for (auto &handle : Handles)
2289 Ops.push_back(x: handle.getValue());
2290}
2291
/// findGlueUse - Return the user of the MVT::Glue value produced by the
/// specified SDNode, or null if there is no such use.
///
2295static SDNode *findGlueUse(SDNode *N) {
2296 unsigned FlagResNo = N->getNumValues()-1;
2297 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
2298 SDUse &Use = I.getUse();
2299 if (Use.getResNo() == FlagResNo)
2300 return Use.getUser();
2301 }
2302 return nullptr;
2303}
2304
/// findNonImmUse - Return true if "Def" is a predecessor of "Root" via a path
/// that does not go through "ImmedUse". Chains may be ignored, as they are
/// checked separately.
2307static bool findNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
2308 bool IgnoreChains) {
2309 SmallPtrSet<const SDNode *, 16> Visited;
2310 SmallVector<const SDNode *, 16> WorkList;
2311 // Only check if we have non-immediate uses of Def.
2312 if (ImmedUse->isOnlyUserOf(N: Def))
2313 return false;
2314
  // We don't care about paths to Def that go through ImmedUse, so mark it
  // visited and seed the worklist with its non-Def operands.
2317 Visited.insert(Ptr: ImmedUse);
2318 for (const SDValue &Op : ImmedUse->op_values()) {
2319 SDNode *N = Op.getNode();
2320 // Ignore chain deps (they are validated by
2321 // HandleMergeInputChains) and immediate uses
2322 if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2323 continue;
2324 if (!Visited.insert(Ptr: N).second)
2325 continue;
2326 WorkList.push_back(Elt: N);
2327 }
2328
  // Also add the operands of Root to the worklist.
2330 if (Root != ImmedUse) {
2331 for (const SDValue &Op : Root->op_values()) {
2332 SDNode *N = Op.getNode();
2333 // Ignore chains (they are validated by HandleMergeInputChains)
2334 if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2335 continue;
2336 if (!Visited.insert(Ptr: N).second)
2337 continue;
2338 WorkList.push_back(Elt: N);
2339 }
2340 }
2341
2342 return SDNode::hasPredecessorHelper(N: Def, Visited, Worklist&: WorkList, MaxSteps: 0, TopologicalPrune: true);
2343}
2344
2345/// IsProfitableToFold - Returns true if it's profitable to fold the specific
2346/// operand node N of U during instruction selection that starts at Root.
2347bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
2348 SDNode *Root) const {
2349 if (OptLevel == CodeGenOptLevel::None)
2350 return false;
2351 return N.hasOneUse();
2352}
2353
2354/// IsLegalToFold - Returns true if the specific operand node N of
2355/// U can be folded during instruction selection that starts at Root.
2356bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
2357 CodeGenOptLevel OptLevel,
2358 bool IgnoreChains) {
2359 if (OptLevel == CodeGenOptLevel::None)
2360 return false;
2361
  // If Root can somehow reach N through a path that does not contain U, then
  // folding N would create a cycle. E.g., in the following diagram, Root can
  // reach N through X. If N is folded into Root, then X is both a predecessor
  // and a successor of U.
2366 //
2367 // [N*] //
2368 // ^ ^ //
2369 // / \ //
2370 // [U*] [X]? //
2371 // ^ ^ //
2372 // \ / //
2373 // \ / //
2374 // [Root*] //
2375 //
2376 // * indicates nodes to be folded together.
2377 //
2378 // If Root produces glue, then it gets (even more) interesting. Since it
2379 // will be "glued" together with its glue use in the scheduler, we need to
2380 // check if it might reach N.
2381 //
2382 // [N*] //
2383 // ^ ^ //
2384 // / \ //
2385 // [U*] [X]? //
2386 // ^ ^ //
2387 // \ \ //
2388 // \ | //
2389 // [Root*] | //
2390 // ^ | //
2391 // f | //
2392 // | / //
2393 // [Y] / //
2394 // ^ / //
2395 // f / //
2396 // | / //
2397 // [GU] //
2398 //
2399 // If GU (glue use) indirectly reaches N (the load), and Root folds N
2400 // (call it Fold), then X is a predecessor of GU and a successor of
2401 // Fold. But since Fold and GU are glued together, this will create
2402 // a cycle in the scheduling graph.
2403
  // If the node has glue, walk down the graph to the "lowest" node in the
  // glued set.
2406 EVT VT = Root->getValueType(ResNo: Root->getNumValues()-1);
2407 while (VT == MVT::Glue) {
2408 SDNode *GU = findGlueUse(N: Root);
2409 if (!GU)
2410 break;
2411 Root = GU;
2412 VT = Root->getValueType(ResNo: Root->getNumValues()-1);
2413
2414 // If our query node has a glue result with a use, we've walked up it. If
2415 // the user (which has already been selected) has a chain or indirectly uses
2416 // the chain, HandleMergeInputChains will not consider it. Because of
2417 // this, we cannot ignore chains in this predicate.
2418 IgnoreChains = false;
2419 }
2420
2421 return !findNonImmUse(Root, Def: N.getNode(), ImmedUse: U, IgnoreChains);
2422}
2423
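/// Select an inline-asm node: rewrite its memory operands via the target's
/// SelectInlineAsmMemoryOperand hook, then rebuild the node with the new
/// operand list, producing chain and glue results.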
2424void SelectionDAGISel::Select_INLINEASM(SDNode *N) {
2425 SDLoc DL(N);
2426
2427 std::vector<SDValue> Ops(N->op_begin(), N->op_end());
2428 SelectInlineAsmMemoryOperands(Ops, DL);
2429
2430 const EVT VTs[] = {MVT::Other, MVT::Glue};
2431 SDValue New = CurDAG->getNode(Opcode: N->getOpcode(), DL, ResultTys: VTs, Ops);
2432 New->setNodeId(-1);
2433 ReplaceUses(F: N, T: New.getNode());
2434 CurDAG->RemoveDeadNode(N);
2435}
2436
2437void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
2438 SDLoc dl(Op);
2439 MDNodeSDNode *MD = cast<MDNodeSDNode>(Val: Op->getOperand(Num: 1));
2440 const MDString *RegStr = cast<MDString>(Val: MD->getMD()->getOperand(I: 0));
2441
2442 EVT VT = Op->getValueType(ResNo: 0);
2443 LLT Ty = VT.isSimple() ? getLLTForMVT(Ty: VT.getSimpleVT()) : LLT();
2444 Register Reg =
2445 TLI->getRegisterByName(RegName: RegStr->getString().data(), Ty,
2446 MF: CurDAG->getMachineFunction());
2447 SDValue New = CurDAG->getCopyFromReg(
2448 Chain: Op->getOperand(Num: 0), dl, Reg, VT: Op->getValueType(ResNo: 0));
2449 New->setNodeId(-1);
2450 ReplaceUses(F: Op, T: New.getNode());
2451 CurDAG->RemoveDeadNode(N: Op);
2452}
2453
2454void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
2455 SDLoc dl(Op);
2456 MDNodeSDNode *MD = cast<MDNodeSDNode>(Val: Op->getOperand(Num: 1));
2457 const MDString *RegStr = cast<MDString>(Val: MD->getMD()->getOperand(I: 0));
2458
2459 EVT VT = Op->getOperand(Num: 2).getValueType();
2460 LLT Ty = VT.isSimple() ? getLLTForMVT(Ty: VT.getSimpleVT()) : LLT();
2461
2462 Register Reg = TLI->getRegisterByName(RegName: RegStr->getString().data(), Ty,
2463 MF: CurDAG->getMachineFunction());
2464 SDValue New = CurDAG->getCopyToReg(
2465 Chain: Op->getOperand(Num: 0), dl, Reg, N: Op->getOperand(Num: 2));
2466 New->setNodeId(-1);
2467 ReplaceUses(F: Op, T: New.getNode());
2468 CurDAG->RemoveDeadNode(N: Op);
2469}
2470
2471void SelectionDAGISel::Select_UNDEF(SDNode *N) {
2472 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::IMPLICIT_DEF, VT: N->getValueType(ResNo: 0));
2473}
2474
2475void SelectionDAGISel::Select_FREEZE(SDNode *N) {
  // TODO: There is no FREEZE pseudo-instruction at the MachineInstr level yet.
  // If a FREEZE instruction is added later, the code below must be changed as
  // well.
2479 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::COPY, VT: N->getValueType(ResNo: 0),
2480 Op1: N->getOperand(Num: 0));
2481}
2482
2483void SelectionDAGISel::Select_ARITH_FENCE(SDNode *N) {
2484 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::ARITH_FENCE, VT: N->getValueType(ResNo: 0),
2485 Op1: N->getOperand(Num: 0));
2486}
2487
2488void SelectionDAGISel::Select_MEMBARRIER(SDNode *N) {
2489 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::MEMBARRIER, VT: N->getValueType(ResNo: 0),
2490 Op1: N->getOperand(Num: 0));
2491}
2492
2493void SelectionDAGISel::Select_CONVERGENCECTRL_ANCHOR(SDNode *N) {
2494 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::CONVERGENCECTRL_ANCHOR,
2495 VT: N->getValueType(ResNo: 0));
2496}
2497
2498void SelectionDAGISel::Select_CONVERGENCECTRL_ENTRY(SDNode *N) {
2499 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::CONVERGENCECTRL_ENTRY,
2500 VT: N->getValueType(ResNo: 0));
2501}
2502
2503void SelectionDAGISel::Select_CONVERGENCECTRL_LOOP(SDNode *N) {
2504 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::CONVERGENCECTRL_LOOP,
2505 VT: N->getValueType(ResNo: 0), Op1: N->getOperand(Num: 0));
2506}
2507
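/// Append a stackmap/patchpoint live-variable operand to Ops. Constants are
/// lowered to a StackMaps::ConstantOp marker followed by the value so the
/// emitter can tell them apart from registers; all other operands are passed
/// through unchanged.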
2508void SelectionDAGISel::pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops,
2509 SDValue OpVal, SDLoc DL) {
2510 SDNode *OpNode = OpVal.getNode();
2511
  // FrameIndex nodes should have been lowered directly to TargetFrameIndex
  // nodes at DAG-construction time.
2514 assert(OpNode->getOpcode() != ISD::FrameIndex);
2515
2516 if (OpNode->getOpcode() == ISD::Constant) {
2517 Ops.push_back(
2518 Elt: CurDAG->getTargetConstant(Val: StackMaps::ConstantOp, DL, VT: MVT::i64));
2519 Ops.push_back(Elt: CurDAG->getTargetConstant(Val: OpNode->getAsZExtVal(), DL,
2520 VT: OpVal.getValueType()));
2521 } else {
2522 Ops.push_back(Elt: OpVal);
2523 }
2524}
2525
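/// Lower a STACKMAP node to the STACKMAP target pseudo: keep the <id> and
/// <numShadowBytes> operands first, append the live variables, and move the
/// chain and glue operands to the end of the operand list.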
2526void SelectionDAGISel::Select_STACKMAP(SDNode *N) {
2527 SmallVector<SDValue, 32> Ops;
2528 auto *It = N->op_begin();
2529 SDLoc DL(N);
2530
2531 // Stash the chain and glue operands so we can move them to the end.
2532 SDValue Chain = *It++;
2533 SDValue InGlue = *It++;
2534
2535 // <id> operand.
2536 SDValue ID = *It++;
2537 assert(ID.getValueType() == MVT::i64);
2538 Ops.push_back(Elt: ID);
2539
2540 // <numShadowBytes> operand.
2541 SDValue Shad = *It++;
2542 assert(Shad.getValueType() == MVT::i32);
2543 Ops.push_back(Elt: Shad);
2544
2545 // Live variable operands.
2546 for (; It != N->op_end(); It++)
2547 pushStackMapLiveVariable(Ops, OpVal: *It, DL);
2548
2549 Ops.push_back(Elt: Chain);
2550 Ops.push_back(Elt: InGlue);
2551
2552 SDVTList NodeTys = CurDAG->getVTList(VT1: MVT::Other, VT2: MVT::Glue);
2553 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::STACKMAP, VTs: NodeTys, Ops);
2554}
2555
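/// Lower a PATCHPOINT node to the PATCHPOINT target pseudo: keep the <id>,
/// <numShadowBytes>, callee, <numArgs>, calling-convention and call-argument
/// operands in order, append the live variables, and move the register mask,
/// chain and (optional) glue to the end.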
2556void SelectionDAGISel::Select_PATCHPOINT(SDNode *N) {
2557 SmallVector<SDValue, 32> Ops;
2558 auto *It = N->op_begin();
2559 SDLoc DL(N);
2560
2561 // Cache arguments that will be moved to the end in the target node.
2562 SDValue Chain = *It++;
2563 std::optional<SDValue> Glue;
2564 if (It->getValueType() == MVT::Glue)
2565 Glue = *It++;
2566 SDValue RegMask = *It++;
2567
2568 // <id> operand.
2569 SDValue ID = *It++;
2570 assert(ID.getValueType() == MVT::i64);
2571 Ops.push_back(Elt: ID);
2572
2573 // <numShadowBytes> operand.
2574 SDValue Shad = *It++;
2575 assert(Shad.getValueType() == MVT::i32);
2576 Ops.push_back(Elt: Shad);
2577
2578 // Add the callee.
2579 Ops.push_back(Elt: *It++);
2580
2581 // Add <numArgs>.
2582 SDValue NumArgs = *It++;
2583 assert(NumArgs.getValueType() == MVT::i32);
2584 Ops.push_back(Elt: NumArgs);
2585
2586 // Calling convention.
2587 Ops.push_back(Elt: *It++);
2588
2589 // Push the args for the call.
2590 for (uint64_t I = NumArgs->getAsZExtVal(); I != 0; I--)
2591 Ops.push_back(Elt: *It++);
2592
2593 // Now push the live variables.
2594 for (; It != N->op_end(); It++)
2595 pushStackMapLiveVariable(Ops, OpVal: *It, DL);
2596
2597 // Finally, the regmask, chain and (if present) glue are moved to the end.
2598 Ops.push_back(Elt: RegMask);
2599 Ops.push_back(Elt: Chain);
2600 if (Glue.has_value())
2601 Ops.push_back(Elt: *Glue);
2602
2603 SDVTList NodeTys = N->getVTList();
2604 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::PATCHPOINT, VTs: NodeTys, Ops);
2605}
2606
2607/// GetVBR - decode a vbr encoding whose top bit is set.
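/// Each byte carries seven payload bits, least-significant group first, with
/// the high bit set on every byte except the last. For example, the byte
/// sequence {0xAC, 0x02} decodes to 44 | (2 << 7) = 300.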
2608LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
2609GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
2610 assert(Val >= 128 && "Not a VBR");
2611 Val &= 127; // Remove first vbr bit.
2612
2613 unsigned Shift = 7;
2614 uint64_t NextBits;
2615 do {
2616 NextBits = MatcherTable[Idx++];
2617 Val |= (NextBits&127) << Shift;
2618 Shift += 7;
2619 } while (NextBits & 128);
2620
2621 return Val;
2622}
2623
2624void SelectionDAGISel::Select_JUMP_TABLE_DEBUG_INFO(SDNode *N) {
2625 SDLoc dl(N);
2626 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::JUMP_TABLE_DEBUG_INFO, VT: MVT::Glue,
2627 Op1: CurDAG->getTargetConstant(Val: N->getConstantOperandVal(Num: 1),
2628 DL: dl, VT: MVT::i64, isOpaque: true));
2629}
2630
2631/// When a match is complete, this method updates uses of interior chain results
2632/// to use the new results.
2633void SelectionDAGISel::UpdateChains(
2634 SDNode *NodeToMatch, SDValue InputChain,
2635 SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) {
2636 SmallVector<SDNode*, 4> NowDeadNodes;
2637
2638 // Now that all the normal results are replaced, we replace the chain and
2639 // glue results if present.
2640 if (!ChainNodesMatched.empty()) {
2641 assert(InputChain.getNode() &&
2642 "Matched input chains but didn't produce a chain");
2643 // Loop over all of the nodes we matched that produced a chain result.
2644 // Replace all the chain results with the final chain we ended up with.
2645 for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
2646 SDNode *ChainNode = ChainNodesMatched[i];
2647 // If ChainNode is null, it's because we replaced it on a previous
2648 // iteration and we cleared it out of the map. Just skip it.
2649 if (!ChainNode)
2650 continue;
2651
2652 assert(ChainNode->getOpcode() != ISD::DELETED_NODE &&
2653 "Deleted node left in chain");
2654
2655 // Don't replace the results of the root node if we're doing a
2656 // MorphNodeTo.
2657 if (ChainNode == NodeToMatch && isMorphNodeTo)
2658 continue;
2659
2660 SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
2661 if (ChainVal.getValueType() == MVT::Glue)
2662 ChainVal = ChainVal.getValue(R: ChainVal->getNumValues()-2);
2663 assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
2664 SelectionDAG::DAGNodeDeletedListener NDL(
2665 *CurDAG, [&](SDNode *N, SDNode *E) {
2666 std::replace(first: ChainNodesMatched.begin(), last: ChainNodesMatched.end(), old_value: N,
2667 new_value: static_cast<SDNode *>(nullptr));
2668 });
2669 if (ChainNode->getOpcode() != ISD::TokenFactor)
2670 ReplaceUses(F: ChainVal, T: InputChain);
2671
2672 // If the node became dead and we haven't already seen it, delete it.
2673 if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
2674 !llvm::is_contained(Range&: NowDeadNodes, Element: ChainNode))
2675 NowDeadNodes.push_back(Elt: ChainNode);
2676 }
2677 }
2678
2679 if (!NowDeadNodes.empty())
2680 CurDAG->RemoveDeadNodes(DeadNodes&: NowDeadNodes);
2681
2682 LLVM_DEBUG(dbgs() << "ISEL: Match complete!\n");
2683}
2684
/// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
/// operation for when the pattern matched at least one node with a chain. The
/// input vector contains a list of all of the chained nodes that we match. We
/// must determine if this is a valid thing to cover (i.e. matching it won't
/// induce cycles in the DAG) and if so, create a TokenFactor node that will
/// be used as the input node chain for the generated nodes.
2691static SDValue
2692HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
2693 SelectionDAG *CurDAG) {
2694
2695 SmallPtrSet<const SDNode *, 16> Visited;
2696 SmallVector<const SDNode *, 8> Worklist;
2697 SmallVector<SDValue, 3> InputChains;
2698 unsigned int Max = 8192;
2699
2700 // Quick exit on trivial merge.
2701 if (ChainNodesMatched.size() == 1)
2702 return ChainNodesMatched[0]->getOperand(Num: 0);
2703
  // Add input chains that aren't already accounted for; chains internal to the
  // matched set were pre-marked as visited. Peek through token factors.
2706 std::function<void(const SDValue)> AddChains = [&](const SDValue V) {
2707 if (V.getValueType() != MVT::Other)
2708 return;
2709 if (V->getOpcode() == ISD::EntryToken)
2710 return;
2711 if (!Visited.insert(Ptr: V.getNode()).second)
2712 return;
2713 if (V->getOpcode() == ISD::TokenFactor) {
2714 for (const SDValue &Op : V->op_values())
2715 AddChains(Op);
2716 } else
2717 InputChains.push_back(Elt: V);
2718 };
2719
2720 for (auto *N : ChainNodesMatched) {
2721 Worklist.push_back(Elt: N);
2722 Visited.insert(Ptr: N);
2723 }
2724
2725 while (!Worklist.empty())
2726 AddChains(Worklist.pop_back_val()->getOperand(Num: 0));
2727
2728 // Skip the search if there are no chain dependencies.
2729 if (InputChains.size() == 0)
2730 return CurDAG->getEntryNode();
2731
  // If one of these chains is a successor of the input, we must have a
  // node that is both a predecessor and a successor of the
  // to-be-merged nodes. Fail.
2735 Visited.clear();
2736 for (SDValue V : InputChains)
2737 Worklist.push_back(Elt: V.getNode());
2738
2739 for (auto *N : ChainNodesMatched)
2740 if (SDNode::hasPredecessorHelper(N, Visited, Worklist, MaxSteps: Max, TopologicalPrune: true))
2741 return SDValue();
2742
2743 // Return merged chain.
2744 if (InputChains.size() == 1)
2745 return InputChains[0];
2746 return CurDAG->getNode(Opcode: ISD::TokenFactor, DL: SDLoc(ChainNodesMatched[0]),
2747 VT: MVT::Other, Ops: InputChains);
2748}
2749
2750/// MorphNode - Handle morphing a node in place for the selector.
2751SDNode *SelectionDAGISel::
2752MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
2753 ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
2754 // It is possible we're using MorphNodeTo to replace a node with no
2755 // normal results with one that has a normal result (or we could be
2756 // adding a chain) and the input could have glue and chains as well.
2757 // In this case we need to shift the operands down.
2758 // FIXME: This is a horrible hack and broken in obscure cases, no worse
2759 // than the old isel though.
2760 int OldGlueResultNo = -1, OldChainResultNo = -1;
2761
2762 unsigned NTMNumResults = Node->getNumValues();
2763 if (Node->getValueType(ResNo: NTMNumResults-1) == MVT::Glue) {
2764 OldGlueResultNo = NTMNumResults-1;
2765 if (NTMNumResults != 1 &&
2766 Node->getValueType(ResNo: NTMNumResults-2) == MVT::Other)
2767 OldChainResultNo = NTMNumResults-2;
2768 } else if (Node->getValueType(ResNo: NTMNumResults-1) == MVT::Other)
2769 OldChainResultNo = NTMNumResults-1;
2770
2771 // Call the underlying SelectionDAG routine to do the transmogrification. Note
2772 // that this deletes operands of the old node that become dead.
2773 SDNode *Res = CurDAG->MorphNodeTo(N: Node, Opc: ~TargetOpc, VTs: VTList, Ops);
2774
2775 // MorphNodeTo can operate in two ways: if an existing node with the
2776 // specified operands exists, it can just return it. Otherwise, it
2777 // updates the node in place to have the requested operands.
2778 if (Res == Node) {
2779 // If we updated the node in place, reset the node ID. To the isel,
2780 // this should be just like a newly allocated machine node.
2781 Res->setNodeId(-1);
2782 }
2783
2784 unsigned ResNumResults = Res->getNumValues();
2785 // Move the glue if needed.
2786 if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
2787 static_cast<unsigned>(OldGlueResultNo) != ResNumResults - 1)
2788 ReplaceUses(F: SDValue(Node, OldGlueResultNo),
2789 T: SDValue(Res, ResNumResults - 1));
2790
2791 if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
2792 --ResNumResults;
2793
2794 // Move the chain reference if needed.
2795 if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
2796 static_cast<unsigned>(OldChainResultNo) != ResNumResults - 1)
2797 ReplaceUses(F: SDValue(Node, OldChainResultNo),
2798 T: SDValue(Res, ResNumResults - 1));
2799
  // If an equivalent node already existed, MorphNodeTo returned it instead of
  // updating Node in place; in that case replace uses of the old node with the
  // new one.
2802 if (Res != Node) {
2803 ReplaceNode(F: Node, T: Res);
2804 } else {
2805 EnforceNodeIdInvariant(Node: Res);
2806 }
2807
2808 return Res;
2809}
2810
2811/// CheckSame - Implements OP_CheckSame.
2812LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2813CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2814 const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes) {
2815 // Accept if it is exactly the same as a previously recorded node.
2816 unsigned RecNo = MatcherTable[MatcherIndex++];
2817 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2818 return N == RecordedNodes[RecNo].first;
2819}
2820
2821/// CheckChildSame - Implements OP_CheckChildXSame.
2822LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckChildSame(
2823 const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2824 const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes,
2825 unsigned ChildNo) {
2826 if (ChildNo >= N.getNumOperands())
2827 return false; // Match fails if out of range child #.
2828 return ::CheckSame(MatcherTable, MatcherIndex, N: N.getOperand(i: ChildNo),
2829 RecordedNodes);
2830}
2831
2832/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
2833LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2834CheckPatternPredicate(unsigned Opcode, const unsigned char *MatcherTable,
2835 unsigned &MatcherIndex, const SelectionDAGISel &SDISel) {
2836 bool TwoBytePredNo =
2837 Opcode == SelectionDAGISel::OPC_CheckPatternPredicateTwoByte;
2838 unsigned PredNo =
2839 TwoBytePredNo || Opcode == SelectionDAGISel::OPC_CheckPatternPredicate
2840 ? MatcherTable[MatcherIndex++]
2841 : Opcode - SelectionDAGISel::OPC_CheckPatternPredicate0;
2842 if (TwoBytePredNo)
2843 PredNo |= MatcherTable[MatcherIndex++] << 8;
2844 return SDISel.CheckPatternPredicate(PredNo);
2845}
2846
2847/// CheckNodePredicate - Implements OP_CheckNodePredicate.
2848LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2849CheckNodePredicate(unsigned Opcode, const unsigned char *MatcherTable,
2850 unsigned &MatcherIndex, const SelectionDAGISel &SDISel,
2851 SDNode *N) {
2852 unsigned PredNo = Opcode == SelectionDAGISel::OPC_CheckPredicate
2853 ? MatcherTable[MatcherIndex++]
2854 : Opcode - SelectionDAGISel::OPC_CheckPredicate0;
2855 return SDISel.CheckNodePredicate(N, PredNo);
2856}
2857
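/// CheckOpcode - Implements OP_CheckOpcode: read a 16-bit opcode from the
/// matcher table (low byte first) and compare it against N's opcode.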
2858LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2859CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2860 SDNode *N) {
2861 uint16_t Opc = MatcherTable[MatcherIndex++];
2862 Opc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
2863 return N->getOpcode() == Opc;
2864}
2865
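/// CheckType - Return true if N has the given simple value type, treating
/// MVT::iPTR as the target's pointer type.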
2866LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckType(MVT::SimpleValueType VT,
2867 SDValue N,
2868 const TargetLowering *TLI,
2869 const DataLayout &DL) {
2870 if (N.getValueType() == VT)
2871 return true;
2872
2873 // Handle the case when VT is iPTR.
2874 return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
2875}
2876
2877LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2878CheckChildType(MVT::SimpleValueType VT, SDValue N, const TargetLowering *TLI,
2879 const DataLayout &DL, unsigned ChildNo) {
2880 if (ChildNo >= N.getNumOperands())
2881 return false; // Match fails if out of range child #.
2882 return ::CheckType(VT, N: N.getOperand(i: ChildNo), TLI, DL);
2883}
2884
2885LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2886CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2887 SDValue N) {
2888 return cast<CondCodeSDNode>(Val&: N)->get() ==
2889 static_cast<ISD::CondCode>(MatcherTable[MatcherIndex++]);
2890}
2891
2892LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2893CheckChild2CondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2894 SDValue N) {
2895 if (2 >= N.getNumOperands())
2896 return false;
2897 return ::CheckCondCode(MatcherTable, MatcherIndex, N: N.getOperand(i: 2));
2898}
2899
2900LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2901CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2902 SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
2903 MVT::SimpleValueType VT =
2904 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
2905 if (cast<VTSDNode>(Val&: N)->getVT() == VT)
2906 return true;
2907
2908 // Handle the case when VT is iPTR.
2909 return VT == MVT::iPTR && cast<VTSDNode>(Val&: N)->getVT() == TLI->getPointerTy(DL);
2910}
2911
2912// Bit 0 stores the sign of the immediate. The upper bits contain the magnitude
2913// shifted left by 1.
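// For example, 6 decodes to 3, 7 decodes to -3, and the special value 1
// decodes to INT64_MIN (1 << 63).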
2914static uint64_t decodeSignRotatedValue(uint64_t V) {
2915 if ((V & 1) == 0)
2916 return V >> 1;
2917 if (V != 1)
2918 return -(V >> 1);
2919 // There is no such thing as -0 with integers. "-0" really means MININT.
2920 return 1ULL << 63;
2921}
2922
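/// CheckInteger - Implements OP_CheckInteger: decode a (possibly VBR-encoded)
/// sign-rotated immediate from the matcher table and compare it against the
/// value of the ConstantSDNode N.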
2923LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2924CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2925 SDValue N) {
2926 int64_t Val = MatcherTable[MatcherIndex++];
2927 if (Val & 128)
    Val = GetVBR(Val, MatcherTable, MatcherIndex);

  Val = decodeSignRotatedValue(Val);

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
2933 return C && C->getAPIntValue().trySExtValue() == Val;
2934}
2935
2936LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2937CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2938 SDValue N, unsigned ChildNo) {
2939 if (ChildNo >= N.getNumOperands())
2940 return false; // Match fails if out of range child #.
  return ::CheckInteger(MatcherTable, MatcherIndex, N.getOperand(ChildNo));
2942}
2943
2944LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2945CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2946 SDValue N, const SelectionDAGISel &SDISel) {
2947 int64_t Val = MatcherTable[MatcherIndex++];
2948 if (Val & 128)
    Val = GetVBR(Val, MatcherTable, MatcherIndex);

  if (N->getOpcode() != ISD::AND) return false;

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  return C && SDISel.CheckAndMask(N.getOperand(0), C, Val);
2955}
2956
2957LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2958CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2959 const SelectionDAGISel &SDISel) {
2960 int64_t Val = MatcherTable[MatcherIndex++];
2961 if (Val & 128)
    Val = GetVBR(Val, MatcherTable, MatcherIndex);

  if (N->getOpcode() != ISD::OR) return false;

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  return C && SDISel.CheckOrMask(N.getOperand(0), C, Val);
2968}
2969
/// IsPredicateKnownToFail - If we know how and can do so without pushing a
/// scope, evaluate the current node. If the current predicate is known to
/// fail, set Result=true and return anything. If the current predicate is
/// known to pass, set Result=false and return the MatcherIndex just past it.
/// If the predicate cannot be evaluated here, set Result=false and return the
/// index of the predicate itself so that the main interpreter loop evaluates
/// it after the scope has been pushed.
2976static unsigned IsPredicateKnownToFail(const unsigned char *Table,
2977 unsigned Index, SDValue N,
2978 bool &Result,
2979 const SelectionDAGISel &SDISel,
2980 SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
2981 unsigned Opcode = Table[Index++];
2982 switch (Opcode) {
2983 default:
2984 Result = false;
2985 return Index-1; // Could not evaluate this predicate.
2986 case SelectionDAGISel::OPC_CheckSame:
    Result = !::CheckSame(Table, Index, N, RecordedNodes);
2988 return Index;
2989 case SelectionDAGISel::OPC_CheckChild0Same:
2990 case SelectionDAGISel::OPC_CheckChild1Same:
2991 case SelectionDAGISel::OPC_CheckChild2Same:
2992 case SelectionDAGISel::OPC_CheckChild3Same:
    Result = !::CheckChildSame(Table, Index, N, RecordedNodes,
                        Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Same);
2995 return Index;
2996 case SelectionDAGISel::OPC_CheckPatternPredicate:
2997 case SelectionDAGISel::OPC_CheckPatternPredicate0:
2998 case SelectionDAGISel::OPC_CheckPatternPredicate1:
2999 case SelectionDAGISel::OPC_CheckPatternPredicate2:
3000 case SelectionDAGISel::OPC_CheckPatternPredicate3:
3001 case SelectionDAGISel::OPC_CheckPatternPredicate4:
3002 case SelectionDAGISel::OPC_CheckPatternPredicate5:
3003 case SelectionDAGISel::OPC_CheckPatternPredicate6:
3004 case SelectionDAGISel::OPC_CheckPatternPredicate7:
3005 case SelectionDAGISel::OPC_CheckPatternPredicateTwoByte:
    Result = !::CheckPatternPredicate(Opcode, Table, Index, SDISel);
3007 return Index;
3008 case SelectionDAGISel::OPC_CheckPredicate:
3009 case SelectionDAGISel::OPC_CheckPredicate0:
3010 case SelectionDAGISel::OPC_CheckPredicate1:
3011 case SelectionDAGISel::OPC_CheckPredicate2:
3012 case SelectionDAGISel::OPC_CheckPredicate3:
3013 case SelectionDAGISel::OPC_CheckPredicate4:
3014 case SelectionDAGISel::OPC_CheckPredicate5:
3015 case SelectionDAGISel::OPC_CheckPredicate6:
3016 case SelectionDAGISel::OPC_CheckPredicate7:
    Result = !::CheckNodePredicate(Opcode, Table, Index, SDISel, N.getNode());
3018 return Index;
3019 case SelectionDAGISel::OPC_CheckOpcode:
    Result = !::CheckOpcode(Table, Index, N.getNode());
3021 return Index;
3022 case SelectionDAGISel::OPC_CheckType:
3023 case SelectionDAGISel::OPC_CheckTypeI32:
3024 case SelectionDAGISel::OPC_CheckTypeI64: {
3025 MVT::SimpleValueType VT;
3026 switch (Opcode) {
3027 case SelectionDAGISel::OPC_CheckTypeI32:
3028 VT = MVT::i32;
3029 break;
3030 case SelectionDAGISel::OPC_CheckTypeI64:
3031 VT = MVT::i64;
3032 break;
3033 default:
3034 VT = static_cast<MVT::SimpleValueType>(Table[Index++]);
3035 break;
3036 }
    Result = !::CheckType(VT, N, SDISel.TLI, SDISel.CurDAG->getDataLayout());
3038 return Index;
3039 }
3040 case SelectionDAGISel::OPC_CheckTypeRes: {
3041 unsigned Res = Table[Index++];
    Result = !::CheckType(static_cast<MVT::SimpleValueType>(Table[Index++]),
                          N.getValue(Res), SDISel.TLI,
                          SDISel.CurDAG->getDataLayout());
3045 return Index;
3046 }
3047 case SelectionDAGISel::OPC_CheckChild0Type:
3048 case SelectionDAGISel::OPC_CheckChild1Type:
3049 case SelectionDAGISel::OPC_CheckChild2Type:
3050 case SelectionDAGISel::OPC_CheckChild3Type:
3051 case SelectionDAGISel::OPC_CheckChild4Type:
3052 case SelectionDAGISel::OPC_CheckChild5Type:
3053 case SelectionDAGISel::OPC_CheckChild6Type:
3054 case SelectionDAGISel::OPC_CheckChild7Type:
3055 case SelectionDAGISel::OPC_CheckChild0TypeI32:
3056 case SelectionDAGISel::OPC_CheckChild1TypeI32:
3057 case SelectionDAGISel::OPC_CheckChild2TypeI32:
3058 case SelectionDAGISel::OPC_CheckChild3TypeI32:
3059 case SelectionDAGISel::OPC_CheckChild4TypeI32:
3060 case SelectionDAGISel::OPC_CheckChild5TypeI32:
3061 case SelectionDAGISel::OPC_CheckChild6TypeI32:
3062 case SelectionDAGISel::OPC_CheckChild7TypeI32:
3063 case SelectionDAGISel::OPC_CheckChild0TypeI64:
3064 case SelectionDAGISel::OPC_CheckChild1TypeI64:
3065 case SelectionDAGISel::OPC_CheckChild2TypeI64:
3066 case SelectionDAGISel::OPC_CheckChild3TypeI64:
3067 case SelectionDAGISel::OPC_CheckChild4TypeI64:
3068 case SelectionDAGISel::OPC_CheckChild5TypeI64:
3069 case SelectionDAGISel::OPC_CheckChild6TypeI64:
3070 case SelectionDAGISel::OPC_CheckChild7TypeI64: {
3071 MVT::SimpleValueType VT;
3072 unsigned ChildNo;
3073 if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI32 &&
3074 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI32) {
3075 VT = MVT::i32;
3076 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI32;
3077 } else if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI64 &&
3078 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI64) {
3079 VT = MVT::i64;
3080 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI64;
3081 } else {
3082 VT = static_cast<MVT::SimpleValueType>(Table[Index++]);
3083 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0Type;
3084 }
    Result = !::CheckChildType(VT, N, SDISel.TLI,
                               SDISel.CurDAG->getDataLayout(), ChildNo);
3087 return Index;
3088 }
3089 case SelectionDAGISel::OPC_CheckCondCode:
    Result = !::CheckCondCode(Table, Index, N);
3091 return Index;
3092 case SelectionDAGISel::OPC_CheckChild2CondCode:
    Result = !::CheckChild2CondCode(Table, Index, N);
3094 return Index;
3095 case SelectionDAGISel::OPC_CheckValueType:
    Result = !::CheckValueType(Table, Index, N, SDISel.TLI,
                               SDISel.CurDAG->getDataLayout());
3098 return Index;
3099 case SelectionDAGISel::OPC_CheckInteger:
    Result = !::CheckInteger(Table, Index, N);
3101 return Index;
3102 case SelectionDAGISel::OPC_CheckChild0Integer:
3103 case SelectionDAGISel::OPC_CheckChild1Integer:
3104 case SelectionDAGISel::OPC_CheckChild2Integer:
3105 case SelectionDAGISel::OPC_CheckChild3Integer:
3106 case SelectionDAGISel::OPC_CheckChild4Integer:
    Result = !::CheckChildInteger(Table, Index, N,
                     Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer);
3109 return Index;
3110 case SelectionDAGISel::OPC_CheckAndImm:
    Result = !::CheckAndImm(Table, Index, N, SDISel);
3112 return Index;
3113 case SelectionDAGISel::OPC_CheckOrImm:
    Result = !::CheckOrImm(Table, Index, N, SDISel);
3115 return Index;
3116 }
3117}
3118
3119namespace {
3120
3121struct MatchScope {
3122 /// FailIndex - If this match fails, this is the index to continue with.
3123 unsigned FailIndex;
3124
3125 /// NodeStack - The node stack when the scope was formed.
3126 SmallVector<SDValue, 4> NodeStack;
3127
3128 /// NumRecordedNodes - The number of recorded nodes when the scope was formed.
3129 unsigned NumRecordedNodes;
3130
3131 /// NumMatchedMemRefs - The number of matched memref entries.
3132 unsigned NumMatchedMemRefs;
3133
3134 /// InputChain/InputGlue - The current chain/glue
3135 SDValue InputChain, InputGlue;
3136
3137 /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
3138 bool HasChainNodesMatched;
3139};
3140
/// A DAG update listener that keeps the matching state (i.e. RecordedNodes
/// and MatchScopes) up to date if the target is allowed to change the DAG
/// while matching. The X86 addressing-mode matcher is an example of this.
3145class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
3146{
3147 SDNode **NodeToMatch;
3148 SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes;
3149 SmallVectorImpl<MatchScope> &MatchScopes;
3150
3151public:
3152 MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
3153 SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
3154 SmallVectorImpl<MatchScope> &MS)
3155 : SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
3156 RecordedNodes(RN), MatchScopes(MS) {}
3157
3158 void NodeDeleted(SDNode *N, SDNode *E) override {
3159 // Some early-returns here to avoid the search if we deleted the node or
3160 // if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
3161 // do, so it's unnecessary to update matching state at that point).
    // Neither of these can occur currently because we only install this
    // update listener while matching a complex pattern.
3164 if (!E || E->isMachineOpcode())
3165 return;
3166 // Check if NodeToMatch was updated.
3167 if (N == *NodeToMatch)
3168 *NodeToMatch = E;
3169 // Performing linear search here does not matter because we almost never
3170 // run this code. You'd have to have a CSE during complex pattern
3171 // matching.
3172 for (auto &I : RecordedNodes)
3173 if (I.first.getNode() == N)
3174 I.first.setNode(E);
3175
3176 for (auto &I : MatchScopes)
3177 for (auto &J : I.NodeStack)
3178 if (J.getNode() == N)
3179 J.setNode(E);
3180 }
3181};
3182
3183} // end anonymous namespace
3184
3185void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
3186 const unsigned char *MatcherTable,
3187 unsigned TableSize) {
3188 // FIXME: Should these even be selected? Handle these cases in the caller?
3189 switch (NodeToMatch->getOpcode()) {
3190 default:
3191 break;
3192 case ISD::EntryToken: // These nodes remain the same.
3193 case ISD::BasicBlock:
3194 case ISD::Register:
3195 case ISD::RegisterMask:
3196 case ISD::HANDLENODE:
3197 case ISD::MDNODE_SDNODE:
3198 case ISD::TargetConstant:
3199 case ISD::TargetConstantFP:
3200 case ISD::TargetConstantPool:
3201 case ISD::TargetFrameIndex:
3202 case ISD::TargetExternalSymbol:
3203 case ISD::MCSymbol:
3204 case ISD::TargetBlockAddress:
3205 case ISD::TargetJumpTable:
3206 case ISD::TargetGlobalTLSAddress:
3207 case ISD::TargetGlobalAddress:
3208 case ISD::TokenFactor:
3209 case ISD::CopyFromReg:
3210 case ISD::CopyToReg:
3211 case ISD::EH_LABEL:
3212 case ISD::ANNOTATION_LABEL:
3213 case ISD::LIFETIME_START:
3214 case ISD::LIFETIME_END:
3215 case ISD::PSEUDO_PROBE:
3216 NodeToMatch->setNodeId(-1); // Mark selected.
3217 return;
3218 case ISD::AssertSext:
3219 case ISD::AssertZext:
3220 case ISD::AssertAlign:
    ReplaceUses(SDValue(NodeToMatch, 0), NodeToMatch->getOperand(0));
    CurDAG->RemoveDeadNode(NodeToMatch);
    return;
  case ISD::INLINEASM:
  case ISD::INLINEASM_BR:
    Select_INLINEASM(NodeToMatch);
    return;
  case ISD::READ_REGISTER:
    Select_READ_REGISTER(NodeToMatch);
    return;
  case ISD::WRITE_REGISTER:
    Select_WRITE_REGISTER(NodeToMatch);
    return;
  case ISD::UNDEF:
    Select_UNDEF(NodeToMatch);
    return;
  case ISD::FREEZE:
    Select_FREEZE(NodeToMatch);
    return;
  case ISD::ARITH_FENCE:
    Select_ARITH_FENCE(NodeToMatch);
    return;
  case ISD::MEMBARRIER:
    Select_MEMBARRIER(NodeToMatch);
    return;
  case ISD::STACKMAP:
    Select_STACKMAP(NodeToMatch);
    return;
  case ISD::PATCHPOINT:
    Select_PATCHPOINT(NodeToMatch);
    return;
  case ISD::JUMP_TABLE_DEBUG_INFO:
    Select_JUMP_TABLE_DEBUG_INFO(NodeToMatch);
    return;
  case ISD::CONVERGENCECTRL_ANCHOR:
    Select_CONVERGENCECTRL_ANCHOR(NodeToMatch);
    return;
  case ISD::CONVERGENCECTRL_ENTRY:
    Select_CONVERGENCECTRL_ENTRY(NodeToMatch);
    return;
  case ISD::CONVERGENCECTRL_LOOP:
    Select_CONVERGENCECTRL_LOOP(NodeToMatch);
3263 return;
3264 }
3265
3266 assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
3267
3268 // Set up the node stack with NodeToMatch as the only node on the stack.
3269 SmallVector<SDValue, 8> NodeStack;
3270 SDValue N = SDValue(NodeToMatch, 0);
  NodeStack.push_back(N);
3272
3273 // MatchScopes - Scopes used when matching, if a match failure happens, this
3274 // indicates where to continue checking.
3275 SmallVector<MatchScope, 8> MatchScopes;
3276
3277 // RecordedNodes - This is the set of nodes that have been recorded by the
3278 // state machine. The second value is the parent of the node, or null if the
3279 // root is recorded.
3280 SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
3281
3282 // MatchedMemRefs - This is the set of MemRef's we've seen in the input
3283 // pattern.
3284 SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
3285
3286 // These are the current input chain and glue for use when generating nodes.
3287 // Various Emit operations change these. For example, emitting a copytoreg
3288 // uses and updates these.
3289 SDValue InputChain, InputGlue;
3290
3291 // ChainNodesMatched - If a pattern matches nodes that have input/output
3292 // chains, the OPC_EmitMergeInputChains operation is emitted which indicates
3293 // which ones they are. The result is captured into this list so that we can
3294 // update the chain results when the pattern is complete.
3295 SmallVector<SDNode*, 3> ChainNodesMatched;
3296
3297 LLVM_DEBUG(dbgs() << "ISEL: Starting pattern match\n");
3298
3299 // Determine where to start the interpreter. Normally we start at opcode #0,
3300 // but if the state machine starts with an OPC_SwitchOpcode, then we
3301 // accelerate the first lookup (which is guaranteed to be hot) with the
3302 // OpcodeOffset table.
3303 unsigned MatcherIndex = 0;
3304
3305 if (!OpcodeOffset.empty()) {
3306 // Already computed the OpcodeOffset table, just index into it.
3307 if (N.getOpcode() < OpcodeOffset.size())
3308 MatcherIndex = OpcodeOffset[N.getOpcode()];
3309 LLVM_DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
3310
3311 } else if (MatcherTable[0] == OPC_SwitchOpcode) {
3312 // Otherwise, the table isn't computed, but the state machine does start
3313 // with an OPC_SwitchOpcode instruction. Populate the table now, since this
3314 // is the first time we're selecting an instruction.
3315 unsigned Idx = 1;
3316 while (true) {
3317 // Get the size of this case.
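      // Sizes and other large immediates in the matcher table use a VBR
      // encoding: bit 7 of a byte marks a continuation and the low seven bits
      // are payload, least-significant group first (see GetVBR).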
3318 unsigned CaseSize = MatcherTable[Idx++];
3319 if (CaseSize & 128)
        CaseSize = GetVBR(CaseSize, MatcherTable, Idx);
3321 if (CaseSize == 0) break;
3322
3323 // Get the opcode, add the index to the table.
3324 uint16_t Opc = MatcherTable[Idx++];
3325 Opc |= static_cast<uint16_t>(MatcherTable[Idx++]) << 8;
3326 if (Opc >= OpcodeOffset.size())
        OpcodeOffset.resize((Opc+1)*2);
3328 OpcodeOffset[Opc] = Idx;
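      // Idx now points at the body of this case; skipping CaseSize bytes
      // moves to the next case header.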
3329 Idx += CaseSize;
3330 }
3331
3332 // Okay, do the lookup for the first opcode.
3333 if (N.getOpcode() < OpcodeOffset.size())
3334 MatcherIndex = OpcodeOffset[N.getOpcode()];
3335 }
3336
3337 while (true) {
3338 assert(MatcherIndex < TableSize && "Invalid index");
3339#ifndef NDEBUG
3340 unsigned CurrentOpcodeIndex = MatcherIndex;
3341#endif
3342 BuiltinOpcodes Opcode =
3343 static_cast<BuiltinOpcodes>(MatcherTable[MatcherIndex++]);
3344 switch (Opcode) {
3345 case OPC_Scope: {
3346 // Okay, the semantics of this operation are that we should push a scope
3347 // then evaluate the first child. However, pushing a scope only to have
3348 // the first check fail (which then pops it) is inefficient. If we can
3349 // determine immediately that the first check (or first several) will
3350 // immediately fail, don't even bother pushing a scope for them.
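      // Each alternative inside the scope is prefixed with a VBR-encoded size;
      // a size of zero marks the end of the scope.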
3351 unsigned FailIndex;
3352
3353 while (true) {
3354 unsigned NumToSkip = MatcherTable[MatcherIndex++];
3355 if (NumToSkip & 128)
          NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
3357 // Found the end of the scope with no match.
3358 if (NumToSkip == 0) {
3359 FailIndex = 0;
3360 break;
3361 }
3362
3363 FailIndex = MatcherIndex+NumToSkip;
3364
3365 unsigned MatcherIndexOfPredicate = MatcherIndex;
3366 (void)MatcherIndexOfPredicate; // silence warning.
3367
3368 // If we can't evaluate this predicate without pushing a scope (e.g. if
3369 // it is a 'MoveParent') or if the predicate succeeds on this node, we
3370 // push the scope and evaluate the full predicate chain.
3371 bool Result;
        MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
                                              Result, *this, RecordedNodes);
3374 if (!Result)
3375 break;
3376
3377 LLVM_DEBUG(
3378 dbgs() << " Skipped scope entry (due to false predicate) at "
3379 << "index " << MatcherIndexOfPredicate << ", continuing at "
3380 << FailIndex << "\n");
3381 ++NumDAGIselRetries;
3382
3383 // Otherwise, we know that this case of the Scope is guaranteed to fail,
3384 // move to the next case.
3385 MatcherIndex = FailIndex;
3386 }
3387
3388 // If the whole scope failed to match, bail.
3389 if (FailIndex == 0) break;
3390
3391 // Push a MatchScope which indicates where to go if the first child fails
3392 // to match.
3393 MatchScope NewEntry;
3394 NewEntry.FailIndex = FailIndex;
      NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
3396 NewEntry.NumRecordedNodes = RecordedNodes.size();
3397 NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
3398 NewEntry.InputChain = InputChain;
3399 NewEntry.InputGlue = InputGlue;
3400 NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
      MatchScopes.push_back(NewEntry);
3402 continue;
3403 }
3404 case OPC_RecordNode: {
3405 // Remember this node, it may end up being an operand in the pattern.
3406 SDNode *Parent = nullptr;
3407 if (NodeStack.size() > 1)
3408 Parent = NodeStack[NodeStack.size()-2].getNode();
      RecordedNodes.push_back(std::make_pair(N, Parent));
3410 continue;
3411 }
3412
3413 case OPC_RecordChild0: case OPC_RecordChild1:
3414 case OPC_RecordChild2: case OPC_RecordChild3:
3415 case OPC_RecordChild4: case OPC_RecordChild5:
3416 case OPC_RecordChild6: case OPC_RecordChild7: {
3417 unsigned ChildNo = Opcode-OPC_RecordChild0;
3418 if (ChildNo >= N.getNumOperands())
3419 break; // Match fails if out of range child #.
3420
      RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
                                             N.getNode()));
3423 continue;
3424 }
3425 case OPC_RecordMemRef:
      if (auto *MN = dyn_cast<MemSDNode>(N))
        MatchedMemRefs.push_back(MN->getMemOperand());
3428 else {
3429 LLVM_DEBUG(dbgs() << "Expected MemSDNode "; N->dump(CurDAG);
3430 dbgs() << '\n');
3431 }
3432
3433 continue;
3434
3435 case OPC_CaptureGlueInput:
3436 // If the current node has an input glue, capture it in InputGlue.
3437 if (N->getNumOperands() != 0 &&
          N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
        InputGlue = N->getOperand(N->getNumOperands()-1);
3440 continue;
3441
3442 case OPC_MoveChild: {
3443 unsigned ChildNo = MatcherTable[MatcherIndex++];
3444 if (ChildNo >= N.getNumOperands())
3445 break; // Match fails if out of range child #.
      N = N.getOperand(ChildNo);
      NodeStack.push_back(N);
3448 continue;
3449 }
3450
3451 case OPC_MoveChild0: case OPC_MoveChild1:
3452 case OPC_MoveChild2: case OPC_MoveChild3:
3453 case OPC_MoveChild4: case OPC_MoveChild5:
3454 case OPC_MoveChild6: case OPC_MoveChild7: {
3455 unsigned ChildNo = Opcode-OPC_MoveChild0;
3456 if (ChildNo >= N.getNumOperands())
3457 break; // Match fails if out of range child #.
      N = N.getOperand(ChildNo);
      NodeStack.push_back(N);
3460 continue;
3461 }
3462
3463 case OPC_MoveSibling:
3464 case OPC_MoveSibling0:
3465 case OPC_MoveSibling1:
3466 case OPC_MoveSibling2:
3467 case OPC_MoveSibling3:
3468 case OPC_MoveSibling4:
3469 case OPC_MoveSibling5:
3470 case OPC_MoveSibling6:
3471 case OPC_MoveSibling7: {
3472 // Pop the current node off the NodeStack.
3473 NodeStack.pop_back();
3474 assert(!NodeStack.empty() && "Node stack imbalance!");
3475 N = NodeStack.back();
3476
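      // OPC_MoveSibling0..7 encode the sibling number in the opcode itself;
      // the generic OPC_MoveSibling form reads it from the next table byte.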
3477 unsigned SiblingNo = Opcode == OPC_MoveSibling
3478 ? MatcherTable[MatcherIndex++]
3479 : Opcode - OPC_MoveSibling0;
3480 if (SiblingNo >= N.getNumOperands())
3481 break; // Match fails if out of range sibling #.
      N = N.getOperand(SiblingNo);
      NodeStack.push_back(N);
3484 continue;
3485 }
3486 case OPC_MoveParent:
3487 // Pop the current node off the NodeStack.
3488 NodeStack.pop_back();
3489 assert(!NodeStack.empty() && "Node stack imbalance!");
3490 N = NodeStack.back();
3491 continue;
3492
3493 case OPC_CheckSame:
3494 if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
3495 continue;
3496
3497 case OPC_CheckChild0Same: case OPC_CheckChild1Same:
3498 case OPC_CheckChild2Same: case OPC_CheckChild3Same:
3499 if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
                            Opcode-OPC_CheckChild0Same))
3501 break;
3502 continue;
3503
3504 case OPC_CheckPatternPredicate:
3505 case OPC_CheckPatternPredicate0:
3506 case OPC_CheckPatternPredicate1:
3507 case OPC_CheckPatternPredicate2:
3508 case OPC_CheckPatternPredicate3:
3509 case OPC_CheckPatternPredicate4:
3510 case OPC_CheckPatternPredicate5:
3511 case OPC_CheckPatternPredicate6:
3512 case OPC_CheckPatternPredicate7:
3513 case OPC_CheckPatternPredicateTwoByte:
      if (!::CheckPatternPredicate(Opcode, MatcherTable, MatcherIndex, *this))
3515 break;
3516 continue;
3517 case SelectionDAGISel::OPC_CheckPredicate0:
3518 case SelectionDAGISel::OPC_CheckPredicate1:
3519 case SelectionDAGISel::OPC_CheckPredicate2:
3520 case SelectionDAGISel::OPC_CheckPredicate3:
3521 case SelectionDAGISel::OPC_CheckPredicate4:
3522 case SelectionDAGISel::OPC_CheckPredicate5:
3523 case SelectionDAGISel::OPC_CheckPredicate6:
3524 case SelectionDAGISel::OPC_CheckPredicate7:
3525 case OPC_CheckPredicate:
      if (!::CheckNodePredicate(Opcode, MatcherTable, MatcherIndex, *this,
                                N.getNode()))
3528 break;
3529 continue;
3530 case OPC_CheckPredicateWithOperands: {
3531 unsigned OpNum = MatcherTable[MatcherIndex++];
3532 SmallVector<SDValue, 8> Operands;
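      // The next OpNum table bytes are indices into RecordedNodes naming the
      // operands handed to the predicate.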
3533
3534 for (unsigned i = 0; i < OpNum; ++i)
        Operands.push_back(RecordedNodes[MatcherTable[MatcherIndex++]].first);

      unsigned PredNo = MatcherTable[MatcherIndex++];
      if (!CheckNodePredicateWithOperands(N.getNode(), PredNo, Operands))
3539 break;
3540 continue;
3541 }
3542 case OPC_CheckComplexPat:
3543 case OPC_CheckComplexPat0:
3544 case OPC_CheckComplexPat1:
3545 case OPC_CheckComplexPat2:
3546 case OPC_CheckComplexPat3:
3547 case OPC_CheckComplexPat4:
3548 case OPC_CheckComplexPat5:
3549 case OPC_CheckComplexPat6:
3550 case OPC_CheckComplexPat7: {
3551 unsigned CPNum = Opcode == OPC_CheckComplexPat
3552 ? MatcherTable[MatcherIndex++]
3553 : Opcode - OPC_CheckComplexPat0;
3554 unsigned RecNo = MatcherTable[MatcherIndex++];
3555 assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
3556
3557 // If target can modify DAG during matching, keep the matching state
3558 // consistent.
3559 std::unique_ptr<MatchStateUpdater> MSU;
3560 if (ComplexPatternFuncMutatesDAG())
        MSU.reset(new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
                                        MatchScopes));
3563
      if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
                               RecordedNodes[RecNo].first, CPNum,
                               RecordedNodes))
3567 break;
3568 continue;
3569 }
3570 case OPC_CheckOpcode:
      if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
3572 continue;
3573
3574 case OPC_CheckType:
3575 case OPC_CheckTypeI32:
3576 case OPC_CheckTypeI64:
3577 MVT::SimpleValueType VT;
3578 switch (Opcode) {
3579 case OPC_CheckTypeI32:
3580 VT = MVT::i32;
3581 break;
3582 case OPC_CheckTypeI64:
3583 VT = MVT::i64;
3584 break;
3585 default:
3586 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3587 break;
3588 }
      if (!::CheckType(VT, N, TLI, CurDAG->getDataLayout()))
3590 break;
3591 continue;
3592
3593 case OPC_CheckTypeRes: {
3594 unsigned Res = MatcherTable[MatcherIndex++];
      if (!::CheckType(
              static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]),
              N.getValue(Res), TLI, CurDAG->getDataLayout()))
3598 break;
3599 continue;
3600 }
3601
3602 case OPC_SwitchOpcode: {
3603 unsigned CurNodeOpcode = N.getOpcode();
3604 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3605 unsigned CaseSize;
3606 while (true) {
3607 // Get the size of this case.
3608 CaseSize = MatcherTable[MatcherIndex++];
3609 if (CaseSize & 128)
          CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3611 if (CaseSize == 0) break;
3612
3613 uint16_t Opc = MatcherTable[MatcherIndex++];
3614 Opc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
3615
3616 // If the opcode matches, then we will execute this case.
3617 if (CurNodeOpcode == Opc)
3618 break;
3619
3620 // Otherwise, skip over this case.
3621 MatcherIndex += CaseSize;
3622 }
3623
3624 // If no cases matched, bail out.
3625 if (CaseSize == 0) break;
3626
3627 // Otherwise, execute the case we found.
3628 LLVM_DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart << " to "
3629 << MatcherIndex << "\n");
3630 continue;
3631 }
3632
3633 case OPC_SwitchType: {
3634 MVT CurNodeVT = N.getSimpleValueType();
3635 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3636 unsigned CaseSize;
3637 while (true) {
3638 // Get the size of this case.
3639 CaseSize = MatcherTable[MatcherIndex++];
3640 if (CaseSize & 128)
          CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3642 if (CaseSize == 0) break;
3643
3644 MVT CaseVT =
3645 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3646 if (CaseVT == MVT::iPTR)
          CaseVT = TLI->getPointerTy(CurDAG->getDataLayout());
3648
3649 // If the VT matches, then we will execute this case.
3650 if (CurNodeVT == CaseVT)
3651 break;
3652
3653 // Otherwise, skip over this case.
3654 MatcherIndex += CaseSize;
3655 }
3656
3657 // If no cases matched, bail out.
3658 if (CaseSize == 0) break;
3659
3660 // Otherwise, execute the case we found.
3661 LLVM_DEBUG(dbgs() << " TypeSwitch[" << CurNodeVT
3662 << "] from " << SwitchStart << " to " << MatcherIndex
3663 << '\n');
3664 continue;
3665 }
3666 case OPC_CheckChild0Type:
3667 case OPC_CheckChild1Type:
3668 case OPC_CheckChild2Type:
3669 case OPC_CheckChild3Type:
3670 case OPC_CheckChild4Type:
3671 case OPC_CheckChild5Type:
3672 case OPC_CheckChild6Type:
3673 case OPC_CheckChild7Type:
3674 case OPC_CheckChild0TypeI32:
3675 case OPC_CheckChild1TypeI32:
3676 case OPC_CheckChild2TypeI32:
3677 case OPC_CheckChild3TypeI32:
3678 case OPC_CheckChild4TypeI32:
3679 case OPC_CheckChild5TypeI32:
3680 case OPC_CheckChild6TypeI32:
3681 case OPC_CheckChild7TypeI32:
3682 case OPC_CheckChild0TypeI64:
3683 case OPC_CheckChild1TypeI64:
3684 case OPC_CheckChild2TypeI64:
3685 case OPC_CheckChild3TypeI64:
3686 case OPC_CheckChild4TypeI64:
3687 case OPC_CheckChild5TypeI64:
3688 case OPC_CheckChild6TypeI64:
3689 case OPC_CheckChild7TypeI64: {
3690 MVT::SimpleValueType VT;
3691 unsigned ChildNo;
3692 if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI32 &&
3693 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI32) {
3694 VT = MVT::i32;
3695 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI32;
3696 } else if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI64 &&
3697 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI64) {
3698 VT = MVT::i64;
3699 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI64;
3700 } else {
3701 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3702 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0Type;
3703 }
      if (!::CheckChildType(VT, N, TLI, CurDAG->getDataLayout(), ChildNo))
3705 break;
3706 continue;
3707 }
3708 case OPC_CheckCondCode:
3709 if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
3710 continue;
3711 case OPC_CheckChild2CondCode:
3712 if (!::CheckChild2CondCode(MatcherTable, MatcherIndex, N)) break;
3713 continue;
3714 case OPC_CheckValueType:
      if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
                            CurDAG->getDataLayout()))
3717 break;
3718 continue;
3719 case OPC_CheckInteger:
3720 if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
3721 continue;
3722 case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
3723 case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
3724 case OPC_CheckChild4Integer:
3725 if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
                               Opcode-OPC_CheckChild0Integer)) break;
3727 continue;
3728 case OPC_CheckAndImm:
      if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
3730 continue;
3731 case OPC_CheckOrImm:
      if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
3733 continue;
3734 case OPC_CheckImmAllOnesV:
      if (!ISD::isConstantSplatVectorAllOnes(N.getNode()))
3736 break;
3737 continue;
3738 case OPC_CheckImmAllZerosV:
      if (!ISD::isConstantSplatVectorAllZeros(N.getNode()))
3740 break;
3741 continue;
3742
3743 case OPC_CheckFoldableChainNode: {
3744 assert(NodeStack.size() != 1 && "No parent node");
3745 // Verify that all intermediate nodes between the root and this one have
3746 // a single use (ignoring chains, which are handled in UpdateChains).
3747 bool HasMultipleUses = false;
3748 for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i) {
3749 unsigned NNonChainUses = 0;
3750 SDNode *NS = NodeStack[i].getNode();
3751 for (auto UI = NS->use_begin(), UE = NS->use_end(); UI != UE; ++UI)
3752 if (UI.getUse().getValueType() != MVT::Other)
3753 if (++NNonChainUses > 1) {
3754 HasMultipleUses = true;
3755 break;
3756 }
3757 if (HasMultipleUses) break;
3758 }
3759 if (HasMultipleUses) break;
3760
3761 // Check to see that the target thinks this is profitable to fold and that
3762 // we can fold it without inducing cycles in the graph.
      if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
                              NodeToMatch) ||
          !IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
                         NodeToMatch, OptLevel,
                         true/*We validate our own chains*/))
3768 break;
3769
3770 continue;
3771 }
3772 case OPC_EmitInteger:
3773 case OPC_EmitInteger8:
3774 case OPC_EmitInteger16:
3775 case OPC_EmitInteger32:
3776 case OPC_EmitInteger64:
3777 case OPC_EmitStringInteger:
3778 case OPC_EmitStringInteger32: {
3779 MVT::SimpleValueType VT;
3780 switch (Opcode) {
3781 case OPC_EmitInteger8:
3782 VT = MVT::i8;
3783 break;
3784 case OPC_EmitInteger16:
3785 VT = MVT::i16;
3786 break;
3787 case OPC_EmitInteger32:
3788 case OPC_EmitStringInteger32:
3789 VT = MVT::i32;
3790 break;
3791 case OPC_EmitInteger64:
3792 VT = MVT::i64;
3793 break;
3794 default:
3795 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3796 break;
3797 }
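      // The payload follows. OPC_EmitInteger* values are stored sign-rotated
      // (see decodeSignRotatedValue above); OPC_EmitStringInteger* values are
      // stored as plain unsigned values.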
3798 int64_t Val = MatcherTable[MatcherIndex++];
3799 if (Val & 128)
        Val = GetVBR(Val, MatcherTable, MatcherIndex);
      if (Opcode >= OPC_EmitInteger && Opcode <= OPC_EmitInteger64)
        Val = decodeSignRotatedValue(Val);
      RecordedNodes.push_back(std::pair<SDValue, SDNode *>(
          CurDAG->getTargetConstant(Val, SDLoc(NodeToMatch), VT), nullptr));
3805 continue;
3806 }
3807 case OPC_EmitRegister:
3808 case OPC_EmitRegisterI32:
3809 case OPC_EmitRegisterI64: {
3810 MVT::SimpleValueType VT;
3811 switch (Opcode) {
3812 case OPC_EmitRegisterI32:
3813 VT = MVT::i32;
3814 break;
3815 case OPC_EmitRegisterI64:
3816 VT = MVT::i64;
3817 break;
3818 default:
3819 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3820 break;
3821 }
3822 unsigned RegNo = MatcherTable[MatcherIndex++];
      RecordedNodes.push_back(std::pair<SDValue, SDNode *>(
          CurDAG->getRegister(RegNo, VT), nullptr));
3825 continue;
3826 }
3827 case OPC_EmitRegister2: {
3828 // For targets w/ more than 256 register names, the register enum
3829 // values are stored in two bytes in the matcher table (just like
3830 // opcodes).
3831 MVT::SimpleValueType VT =
3832 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3833 unsigned RegNo = MatcherTable[MatcherIndex++];
3834 RegNo |= MatcherTable[MatcherIndex++] << 8;
      RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
          CurDAG->getRegister(RegNo, VT), nullptr));
3837 continue;
3838 }
3839
3840 case OPC_EmitConvertToTarget:
3841 case OPC_EmitConvertToTarget0:
3842 case OPC_EmitConvertToTarget1:
3843 case OPC_EmitConvertToTarget2:
3844 case OPC_EmitConvertToTarget3:
3845 case OPC_EmitConvertToTarget4:
3846 case OPC_EmitConvertToTarget5:
3847 case OPC_EmitConvertToTarget6:
3848 case OPC_EmitConvertToTarget7: {
3849 // Convert from IMM/FPIMM to target version.
3850 unsigned RecNo = Opcode == OPC_EmitConvertToTarget
3851 ? MatcherTable[MatcherIndex++]
3852 : Opcode - OPC_EmitConvertToTarget0;
3853 assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
3854 SDValue Imm = RecordedNodes[RecNo].first;
3855
      if (Imm->getOpcode() == ISD::Constant) {
        const ConstantInt *Val=cast<ConstantSDNode>(Imm)->getConstantIntValue();
        Imm = CurDAG->getTargetConstant(*Val, SDLoc(NodeToMatch),
                                        Imm.getValueType());
      } else if (Imm->getOpcode() == ISD::ConstantFP) {
        const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
        Imm = CurDAG->getTargetConstantFP(*Val, SDLoc(NodeToMatch),
                                          Imm.getValueType());
      }

      RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
3867 continue;
3868 }
3869
3870 case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
3871 case OPC_EmitMergeInputChains1_1: // OPC_EmitMergeInputChains, 1, 1
3872 case OPC_EmitMergeInputChains1_2: { // OPC_EmitMergeInputChains, 1, 2
3873 // These are space-optimized forms of OPC_EmitMergeInputChains.
3874 assert(!InputChain.getNode() &&
3875 "EmitMergeInputChains should be the first chain producing node");
3876 assert(ChainNodesMatched.empty() &&
3877 "Should only have one EmitMergeInputChains per match");
3878
3879 // Read all of the chained nodes.
3880 unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0;
3881 assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
      ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3883
3884 // If the chained node is not the root, we can't fold it if it has
3885 // multiple uses.
3886 // FIXME: What if other value results of the node have uses not matched
3887 // by this pattern?
3888 if (ChainNodesMatched.back() != NodeToMatch &&
3889 !RecordedNodes[RecNo].first.hasOneUse()) {
3890 ChainNodesMatched.clear();
3891 break;
3892 }
3893
3894 // Merge the input chains if they are not intra-pattern references.
3895 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3896
3897 if (!InputChain.getNode())
3898 break; // Failed to merge.
3899 continue;
3900 }
3901
3902 case OPC_EmitMergeInputChains: {
3903 assert(!InputChain.getNode() &&
3904 "EmitMergeInputChains should be the first chain producing node");
3905 // This node gets a list of nodes we matched in the input that have
3906 // chains. We want to token factor all of the input chains to these nodes
3907 // together. However, if any of the input chains is actually one of the
3908 // nodes matched in this pattern, then we have an intra-match reference.
3909 // Ignore these because the newly token factored chain should not refer to
3910 // the old nodes.
3911 unsigned NumChains = MatcherTable[MatcherIndex++];
3912 assert(NumChains != 0 && "Can't TF zero chains");
3913
3914 assert(ChainNodesMatched.empty() &&
3915 "Should only have one EmitMergeInputChains per match");
3916
3917 // Read all of the chained nodes.
3918 for (unsigned i = 0; i != NumChains; ++i) {
3919 unsigned RecNo = MatcherTable[MatcherIndex++];
3920 assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
        ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3922
3923 // If the chained node is not the root, we can't fold it if it has
3924 // multiple uses.
3925 // FIXME: What if other value results of the node have uses not matched
3926 // by this pattern?
3927 if (ChainNodesMatched.back() != NodeToMatch &&
3928 !RecordedNodes[RecNo].first.hasOneUse()) {
3929 ChainNodesMatched.clear();
3930 break;
3931 }
3932 }
3933
3934 // If the inner loop broke out, the match fails.
3935 if (ChainNodesMatched.empty())
3936 break;
3937
3938 // Merge the input chains if they are not intra-pattern references.
3939 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3940
3941 if (!InputChain.getNode())
3942 break; // Failed to merge.
3943
3944 continue;
3945 }
3946
3947 case OPC_EmitCopyToReg:
3948 case OPC_EmitCopyToReg0:
3949 case OPC_EmitCopyToReg1:
3950 case OPC_EmitCopyToReg2:
3951 case OPC_EmitCopyToReg3:
3952 case OPC_EmitCopyToReg4:
3953 case OPC_EmitCopyToReg5:
3954 case OPC_EmitCopyToReg6:
3955 case OPC_EmitCopyToReg7:
3956 case OPC_EmitCopyToRegTwoByte: {
3957 unsigned RecNo =
3958 Opcode >= OPC_EmitCopyToReg0 && Opcode <= OPC_EmitCopyToReg7
3959 ? Opcode - OPC_EmitCopyToReg0
3960 : MatcherTable[MatcherIndex++];
3961 assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
3962 unsigned DestPhysReg = MatcherTable[MatcherIndex++];
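      // The two-byte form stores the destination register number low byte
      // first, mirroring OPC_EmitRegister2 for targets with more than 256
      // registers.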
3963 if (Opcode == OPC_EmitCopyToRegTwoByte)
3964 DestPhysReg |= MatcherTable[MatcherIndex++] << 8;
3965
3966 if (!InputChain.getNode())
3967 InputChain = CurDAG->getEntryNode();
3968
      InputChain = CurDAG->getCopyToReg(InputChain, SDLoc(NodeToMatch),
                                        DestPhysReg, RecordedNodes[RecNo].first,
                                        InputGlue);

      InputGlue = InputChain.getValue(1);
3974 continue;
3975 }
3976
3977 case OPC_EmitNodeXForm: {
3978 unsigned XFormNo = MatcherTable[MatcherIndex++];
3979 unsigned RecNo = MatcherTable[MatcherIndex++];
3980 assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
      SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
      RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, nullptr));
3983 continue;
3984 }
3985 case OPC_Coverage: {
3986 // This is emitted right before MorphNode/EmitNode.
3987 // So it should be safe to assume that this node has been selected
3988 unsigned index = MatcherTable[MatcherIndex++];
3989 index |= (MatcherTable[MatcherIndex++] << 8);
3990 dbgs() << "COVERED: " << getPatternForIndex(index) << "\n";
3991 dbgs() << "INCLUDED: " << getIncludePathForIndex(index) << "\n";
3992 continue;
3993 }
3994
3995 case OPC_EmitNode:
3996 case OPC_EmitNode0:
3997 case OPC_EmitNode1:
3998 case OPC_EmitNode2:
3999 case OPC_EmitNode0None:
4000 case OPC_EmitNode1None:
4001 case OPC_EmitNode2None:
4002 case OPC_EmitNode0Chain:
4003 case OPC_EmitNode1Chain:
4004 case OPC_EmitNode2Chain:
4005 case OPC_MorphNodeTo:
4006 case OPC_MorphNodeTo0:
4007 case OPC_MorphNodeTo1:
4008 case OPC_MorphNodeTo2:
4009 case OPC_MorphNodeTo0None:
4010 case OPC_MorphNodeTo1None:
4011 case OPC_MorphNodeTo2None:
4012 case OPC_MorphNodeTo0Chain:
4013 case OPC_MorphNodeTo1Chain:
4014 case OPC_MorphNodeTo2Chain:
4015 case OPC_MorphNodeTo0GlueInput:
4016 case OPC_MorphNodeTo1GlueInput:
4017 case OPC_MorphNodeTo2GlueInput:
4018 case OPC_MorphNodeTo0GlueOutput:
4019 case OPC_MorphNodeTo1GlueOutput:
4020 case OPC_MorphNodeTo2GlueOutput: {
4021 uint16_t TargetOpc = MatcherTable[MatcherIndex++];
4022 TargetOpc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
4023 unsigned EmitNodeInfo;
4024 if (Opcode >= OPC_EmitNode0None && Opcode <= OPC_EmitNode2Chain) {
4025 if (Opcode >= OPC_EmitNode0Chain && Opcode <= OPC_EmitNode2Chain)
4026 EmitNodeInfo = OPFL_Chain;
4027 else
4028 EmitNodeInfo = OPFL_None;
4029 } else if (Opcode >= OPC_MorphNodeTo0None &&
4030 Opcode <= OPC_MorphNodeTo2GlueOutput) {
4031 if (Opcode >= OPC_MorphNodeTo0Chain && Opcode <= OPC_MorphNodeTo2Chain)
4032 EmitNodeInfo = OPFL_Chain;
4033 else if (Opcode >= OPC_MorphNodeTo0GlueInput &&
4034 Opcode <= OPC_MorphNodeTo2GlueInput)
4035 EmitNodeInfo = OPFL_GlueInput;
4036 else if (Opcode >= OPC_MorphNodeTo0GlueOutput &&
4037 Opcode <= OPC_MorphNodeTo2GlueOutput)
4038 EmitNodeInfo = OPFL_GlueOutput;
4039 else
4040 EmitNodeInfo = OPFL_None;
4041 } else
4042 EmitNodeInfo = MatcherTable[MatcherIndex++];
4043 // Get the result VT list.
4044 unsigned NumVTs;
4045 // If this is one of the compressed forms, get the number of VTs based
4046 // on the Opcode. Otherwise read the next byte from the table.
4047 if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2)
4048 NumVTs = Opcode - OPC_MorphNodeTo0;
4049 else if (Opcode >= OPC_MorphNodeTo0None && Opcode <= OPC_MorphNodeTo2None)
4050 NumVTs = Opcode - OPC_MorphNodeTo0None;
4051 else if (Opcode >= OPC_MorphNodeTo0Chain &&
4052 Opcode <= OPC_MorphNodeTo2Chain)
4053 NumVTs = Opcode - OPC_MorphNodeTo0Chain;
4054 else if (Opcode >= OPC_MorphNodeTo0GlueInput &&
4055 Opcode <= OPC_MorphNodeTo2GlueInput)
4056 NumVTs = Opcode - OPC_MorphNodeTo0GlueInput;
4057 else if (Opcode >= OPC_MorphNodeTo0GlueOutput &&
4058 Opcode <= OPC_MorphNodeTo2GlueOutput)
4059 NumVTs = Opcode - OPC_MorphNodeTo0GlueOutput;
4060 else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2)
4061 NumVTs = Opcode - OPC_EmitNode0;
4062 else if (Opcode >= OPC_EmitNode0None && Opcode <= OPC_EmitNode2None)
4063 NumVTs = Opcode - OPC_EmitNode0None;
4064 else if (Opcode >= OPC_EmitNode0Chain && Opcode <= OPC_EmitNode2Chain)
4065 NumVTs = Opcode - OPC_EmitNode0Chain;
4066 else
4067 NumVTs = MatcherTable[MatcherIndex++];
4068 SmallVector<EVT, 4> VTs;
4069 for (unsigned i = 0; i != NumVTs; ++i) {
4070 MVT::SimpleValueType VT =
4071 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
4072 if (VT == MVT::iPTR)
          VT = TLI->getPointerTy(CurDAG->getDataLayout()).SimpleTy;
        VTs.push_back(VT);
4075 }
4076
4077 if (EmitNodeInfo & OPFL_Chain)
        VTs.push_back(MVT::Other);
      if (EmitNodeInfo & OPFL_GlueOutput)
        VTs.push_back(MVT::Glue);
4081
4082 // This is hot code, so optimize the two most common cases of 1 and 2
4083 // results.
4084 SDVTList VTList;
4085 if (VTs.size() == 1)
        VTList = CurDAG->getVTList(VTs[0]);
      else if (VTs.size() == 2)
        VTList = CurDAG->getVTList(VTs[0], VTs[1]);
4089 else
4090 VTList = CurDAG->getVTList(VTs);
4091
4092 // Get the operand list.
4093 unsigned NumOps = MatcherTable[MatcherIndex++];
4094 SmallVector<SDValue, 8> Ops;
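      // Each operand record is an index into RecordedNodes, VBR-encoded when
      // it does not fit in seven bits.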
4095 for (unsigned i = 0; i != NumOps; ++i) {
4096 unsigned RecNo = MatcherTable[MatcherIndex++];
4097 if (RecNo & 128)
          RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
4099
4100 assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
        Ops.push_back(RecordedNodes[RecNo].first);
4102 }
4103
4104 // If there are variadic operands to add, handle them now.
4105 if (EmitNodeInfo & OPFL_VariadicInfo) {
4106 // Determine the start index to copy from.
        unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
4108 FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
4109 assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
4110 "Invalid variadic node");
4111 // Copy all of the variadic operands, not including a potential glue
4112 // input.
4113 for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
4114 i != e; ++i) {
          SDValue V = NodeToMatch->getOperand(i);
          if (V.getValueType() == MVT::Glue) break;
          Ops.push_back(V);
4118 }
4119 }
4120
4121 // If this has chain/glue inputs, add them.
4122 if (EmitNodeInfo & OPFL_Chain)
        Ops.push_back(InputChain);
      if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
        Ops.push_back(InputGlue);
4126
4127 // Check whether any matched node could raise an FP exception. Since all
4128 // such nodes must have a chain, it suffices to check ChainNodesMatched.
4129 // We need to perform this check before potentially modifying one of the
4130 // nodes via MorphNode.
4131 bool MayRaiseFPException =
          llvm::any_of(ChainNodesMatched, [this](SDNode *N) {
            return mayRaiseFPException(N) && !N->getFlags().hasNoFPExcept();
4134 });
4135
4136 // Create the node.
4137 MachineSDNode *Res = nullptr;
4138 bool IsMorphNodeTo =
4139 Opcode == OPC_MorphNodeTo ||
4140 (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2GlueOutput);
4141 if (!IsMorphNodeTo) {
4142 // If this is a normal EmitNode command, just create the new node and
4143 // add the results to the RecordedNodes list.
        Res = CurDAG->getMachineNode(TargetOpc, SDLoc(NodeToMatch),
                                     VTList, Ops);
4146
4147 // Add all the non-glue/non-chain results to the RecordedNodes list.
4148 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
4149 if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
          RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
                                                             nullptr));
4152 }
4153 } else {
4154 assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE &&
4155 "NodeToMatch was removed partway through selection");
4156 SelectionDAG::DAGNodeDeletedListener NDL(*CurDAG, [&](SDNode *N,
4157 SDNode *E) {
          CurDAG->salvageDebugInfo(*N);
4159 auto &Chain = ChainNodesMatched;
4160 assert((!E || !is_contained(Chain, N)) &&
4161 "Chain node replaced during MorphNode");
          llvm::erase(Chain, N);
4163 });
        Res = cast<MachineSDNode>(MorphNode(NodeToMatch, TargetOpc, VTList,
                                            Ops, EmitNodeInfo));
4166 }
4167
4168 // Set the NoFPExcept flag when no original matched node could
4169 // raise an FP exception, but the new node potentially might.
      if (!MayRaiseFPException && mayRaiseFPException(Res)) {
4171 SDNodeFlags Flags = Res->getFlags();
4172 Flags.setNoFPExcept(true);
4173 Res->setFlags(Flags);
4174 }
4175
4176 // If the node had chain/glue results, update our notion of the current
4177 // chain and glue.
4178 if (EmitNodeInfo & OPFL_GlueOutput) {
4179 InputGlue = SDValue(Res, VTs.size()-1);
4180 if (EmitNodeInfo & OPFL_Chain)
4181 InputChain = SDValue(Res, VTs.size()-2);
4182 } else if (EmitNodeInfo & OPFL_Chain)
4183 InputChain = SDValue(Res, VTs.size()-1);
4184
4185 // If the OPFL_MemRefs glue is set on this node, slap all of the
4186 // accumulated memrefs onto it.
4187 //
      // FIXME: This is vastly incorrect for patterns with multiple output
      // instructions that access memory and for ComplexPatterns that match
      // loads.
4191 if (EmitNodeInfo & OPFL_MemRefs) {
4192 // Only attach load or store memory operands if the generated
4193 // instruction may load or store.
        const MCInstrDesc &MCID = TII->get(TargetOpc);
4195 bool mayLoad = MCID.mayLoad();
4196 bool mayStore = MCID.mayStore();
4197
4198 // We expect to have relatively few of these so just filter them into a
4199 // temporary buffer so that we can easily add them to the instruction.
4200 SmallVector<MachineMemOperand *, 4> FilteredMemRefs;
4201 for (MachineMemOperand *MMO : MatchedMemRefs) {
4202 if (MMO->isLoad()) {
4203 if (mayLoad)
              FilteredMemRefs.push_back(MMO);
4205 } else if (MMO->isStore()) {
4206 if (mayStore)
              FilteredMemRefs.push_back(MMO);
4208 } else {
            FilteredMemRefs.push_back(MMO);
4210 }
4211 }
4212
        CurDAG->setNodeMemRefs(Res, FilteredMemRefs);
4214 }
4215
4216 LLVM_DEBUG(if (!MatchedMemRefs.empty() && Res->memoperands_empty()) dbgs()
4217 << " Dropping mem operands\n";
4218 dbgs() << " " << (IsMorphNodeTo ? "Morphed" : "Created")
4219 << " node: ";
4220 Res->dump(CurDAG););
4221
4222 // If this was a MorphNodeTo then we're completely done!
4223 if (IsMorphNodeTo) {
4224 // Update chain uses.
        UpdateChains(Res, InputChain, ChainNodesMatched, true);
4226 return;
4227 }
4228 continue;
4229 }
4230
4231 case OPC_CompleteMatch: {
4232 // The match has been completed, and any new nodes (if any) have been
4233 // created. Patch up references to the matched dag to use the newly
4234 // created nodes.
4235 unsigned NumResults = MatcherTable[MatcherIndex++];
4236
4237 for (unsigned i = 0; i != NumResults; ++i) {
4238 unsigned ResSlot = MatcherTable[MatcherIndex++];
4239 if (ResSlot & 128)
          ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
4241
4242 assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
4243 SDValue Res = RecordedNodes[ResSlot].first;
4244
4245 assert(i < NodeToMatch->getNumValues() &&
4246 NodeToMatch->getValueType(i) != MVT::Other &&
4247 NodeToMatch->getValueType(i) != MVT::Glue &&
4248 "Invalid number of results to complete!");
4249 assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
4250 NodeToMatch->getValueType(i) == MVT::iPTR ||
4251 Res.getValueType() == MVT::iPTR ||
4252 NodeToMatch->getValueType(i).getSizeInBits() ==
4253 Res.getValueSizeInBits()) &&
4254 "invalid replacement");
        ReplaceUses(SDValue(NodeToMatch, i), Res);
4256 }
4257
4258 // Update chain uses.
      UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, false);
4260
4261 // If the root node defines glue, we need to update it to the glue result.
      // TODO: This never happens in our tests and I think it can be removed /
      // replaced with an assert, but keeping it this way makes the change NFC.
      if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) ==
              MVT::Glue &&
          InputGlue.getNode())
        ReplaceUses(SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1),
                    InputGlue);
4270
4271 assert(NodeToMatch->use_empty() &&
4272 "Didn't replace all uses of the node?");
      CurDAG->RemoveDeadNode(NodeToMatch);
4274
4275 return;
4276 }
4277 }
4278
4279 // If the code reached this point, then the match failed. See if there is
4280 // another child to try in the current 'Scope', otherwise pop it until we
4281 // find a case to check.
4282 LLVM_DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex
4283 << "\n");
4284 ++NumDAGIselRetries;
4285 while (true) {
4286 if (MatchScopes.empty()) {
        CannotYetSelect(NodeToMatch);
4288 return;
4289 }
4290
4291 // Restore the interpreter state back to the point where the scope was
4292 // formed.
4293 MatchScope &LastScope = MatchScopes.back();
      RecordedNodes.resize(LastScope.NumRecordedNodes);
      NodeStack.clear();
      NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
4297 N = NodeStack.back();
4298
4299 if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
        MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
4301 MatcherIndex = LastScope.FailIndex;
4302
4303 LLVM_DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
4304
4305 InputChain = LastScope.InputChain;
4306 InputGlue = LastScope.InputGlue;
4307 if (!LastScope.HasChainNodesMatched)
4308 ChainNodesMatched.clear();
4309
4310 // Check to see what the offset is at the new MatcherIndex. If it is zero
4311 // we have reached the end of this scope, otherwise we have another child
4312 // in the current scope to try.
4313 unsigned NumToSkip = MatcherTable[MatcherIndex++];
4314 if (NumToSkip & 128)
        NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
4316
4317 // If we have another child in this scope to match, update FailIndex and
4318 // try it.
4319 if (NumToSkip != 0) {
4320 LastScope.FailIndex = MatcherIndex+NumToSkip;
4321 break;
4322 }
4323
4324 // End of this scope, pop it and try the next child in the containing
4325 // scope.
4326 MatchScopes.pop_back();
4327 }
4328 }
4329}
4330
4331/// Return whether the node may raise an FP exception.
4332bool SelectionDAGISel::mayRaiseFPException(SDNode *N) const {
4333 // For machine opcodes, consult the MCID flag.
4334 if (N->isMachineOpcode()) {
    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
4336 return MCID.mayRaiseFPException();
4337 }
4338
4339 // For ISD opcodes, only StrictFP opcodes may raise an FP
4340 // exception.
4341 if (N->isTargetOpcode())
4342 return N->isTargetStrictFPOpcode();
4343 return N->isStrictFPOpcode();
4344}
4345
4346bool SelectionDAGISel::isOrEquivalentToAdd(const SDNode *N) const {
4347 assert(N->getOpcode() == ISD::OR && "Unexpected opcode");
  auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
4349 if (!C)
4350 return false;
4351
4352 // Detect when "or" is used to add an offset to a stack object.
  if (auto *FN = dyn_cast<FrameIndexSDNode>(N->getOperand(0))) {
    MachineFrameInfo &MFI = MF->getFrameInfo();
    Align A = MFI.getObjectAlign(FN->getIndex());
4356 int32_t Off = C->getSExtValue();
4357 // If the alleged offset fits in the zero bits guaranteed by
4358 // the alignment, then this or is really an add.
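    // For example, a 16-byte-aligned object has its low four address bits
    // clear, so OR-ing in any offset in [0, 15] behaves exactly like an add.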
4359 return (Off >= 0) && (((A.value() - 1) & Off) == unsigned(Off));
4360 }
4361 return false;
4362}
4363
4364void SelectionDAGISel::CannotYetSelect(SDNode *N) {
4365 std::string msg;
4366 raw_string_ostream Msg(msg);
4367 Msg << "Cannot select: ";
4368
4369 if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
4370 N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
4371 N->getOpcode() != ISD::INTRINSIC_VOID) {
    N->printrFull(Msg, CurDAG);
4373 Msg << "\nIn function: " << MF->getName();
4374 } else {
    bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
    unsigned iid = N->getConstantOperandVal(HasInputChain);
4377 if (iid < Intrinsic::num_intrinsics)
      Msg << "intrinsic %" << Intrinsic::getBaseName((Intrinsic::ID)iid);
4379 else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
      Msg << "target intrinsic %" << TII->getName(iid);
4381 else
4382 Msg << "unknown intrinsic #" << iid;
4383 }
  report_fatal_error(Twine(msg));
4385}
4386