//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "ARMTargetMachine.h"
#include "ARM.h"
#include "ARMLatencyMutations.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMMacroFusion.h"
#include "ARMSubtarget.h"
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "TargetInfo/ARMTargetInfo.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ExecutionDomainFix.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/ARMTargetParser.h"
#include "llvm/TargetParser/TargetParser.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/CFGuard.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
#include <cassert>
#include <memory>
#include <optional>
#include <string>

using namespace llvm;

static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                         cl::desc("Inhibit optimization of S->D register accesses on A15"),
                         cl::init(false));

static cl::opt<bool>
EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
                      cl::desc("Enable ARM load/store optimization pass"),
                      cl::init(true));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("arm-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

namespace llvm {
  void initializeARMExecutionDomainFixPass(PassRegistry&);
}

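// Entry point for target registration: registers the ARM and Thumb target
// machines and initializes the ARM-specific backend passes with the pass
// registry.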
extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMTarget() {
  // Register the target.
  RegisterTargetMachine<ARMLETargetMachine> X(getTheARMLETarget());
  RegisterTargetMachine<ARMLETargetMachine> A(getTheThumbLETarget());
  RegisterTargetMachine<ARMBETargetMachine> Y(getTheARMBETarget());
  RegisterTargetMachine<ARMBETargetMachine> B(getTheThumbBETarget());

  PassRegistry &Registry = *PassRegistry::getPassRegistry();
  initializeGlobalISel(Registry);
  initializeARMAsmPrinterPass(Registry);
  initializeARMLoadStoreOptPass(Registry);
  initializeARMPreAllocLoadStoreOptPass(Registry);
  initializeARMParallelDSPPass(Registry);
  initializeARMBranchTargetsPass(Registry);
  initializeARMConstantIslandsPass(Registry);
  initializeARMExecutionDomainFixPass(Registry);
  initializeARMExpandPseudoPass(Registry);
  initializeThumb2SizeReducePass(Registry);
  initializeMVEVPTBlockPass(Registry);
  initializeMVETPAndVPTOptimisationsPass(Registry);
  initializeMVETailPredicationPass(Registry);
  initializeARMLowOverheadLoopsPass(Registry);
  initializeARMBlockPlacementPass(Registry);
  initializeMVEGatherScatterLoweringPass(Registry);
  initializeARMSLSHardeningPass(Registry);
  initializeMVELaneInterleavingPass(Registry);
  initializeARMFixCortexA57AES1742098Pass(Registry);
  initializeARMDAGToDAGISelLegacyPass(Registry);
  initializeKCFIPass(Registry);
}

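/// Select the lowering object file implementation that matches the object
/// format of the target triple: Mach-O, COFF on Windows, or the ARM ELF
/// variant otherwise.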
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<TargetLoweringObjectFileMachO>();
  if (TT.isOSWindows())
    return std::make_unique<TargetLoweringObjectFileCOFF>();
  return std::make_unique<ARMElfTargetObjectFile>();
}

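/// Resolve the requested relocation model to one the target supports. With no
/// explicit model, default to PIC on Mach-O and static elsewhere; fall back to
/// static when DynamicNoPIC is requested on a non-Darwin target.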
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           std::optional<Reloc::Model> RM) {
  if (!RM)
    // Default relocation model on Darwin is PIC.
    return TT.isOSBinFormatMachO() ? Reloc::PIC_ : Reloc::Static;

  if (*RM == Reloc::ROPI || *RM == Reloc::RWPI || *RM == Reloc::ROPI_RWPI)
    assert(TT.isOSBinFormatELF() &&
           "ROPI/RWPI currently only supported for ELF");

  // DynamicNoPIC is only used on Darwin.
  if (*RM == Reloc::DynamicNoPIC && !TT.isOSDarwin())
    return Reloc::Static;

  return *RM;
}

/// Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           std::optional<Reloc::Model> RM,
                                           std::optional<CodeModel::Model> CM,
                                           CodeGenOptLevel OL)
    : CodeGenTargetMachineImpl(
          T, TT.computeDataLayout(Options.MCOptions.ABIName), TT, CPU, FS,
          Options, getEffectiveRelocModel(TT, RM),
          getEffectiveCodeModel(CM, CodeModel::Small), OL),
      TargetABI(ARM::computeTargetABI(TT, Options.MCOptions.ABIName)),
      TLOF(createTLOF(getTargetTriple())), isLittle(TT.isLittleEndian()) {

  // Default to triple-appropriate float ABI
  if (Options.FloatABIType == FloatABI::Default) {
    if (isTargetHardFloat())
      this->Options.FloatABIType = FloatABI::Hard;
    else
      this->Options.FloatABIType = FloatABI::Soft;
  }

  // Default to triple-appropriate EABI
  if (Options.EABIVersion == EABI::Default ||
      Options.EABIVersion == EABI::Unknown) {
    // musl is compatible with glibc with regard to EABI version
    if ((TargetTriple.getEnvironment() == Triple::GNUEABI ||
         TargetTriple.getEnvironment() == Triple::GNUEABIT64 ||
         TargetTriple.getEnvironment() == Triple::GNUEABIHF ||
         TargetTriple.getEnvironment() == Triple::GNUEABIHFT64 ||
         TargetTriple.getEnvironment() == Triple::MuslEABI ||
         TargetTriple.getEnvironment() == Triple::MuslEABIHF ||
         TargetTriple.getEnvironment() == Triple::OpenHOS) &&
        !(TargetTriple.isOSWindows() || TargetTriple.isOSDarwin()))
      this->Options.EABIVersion = EABI::GNU;
    else
      this->Options.EABIVersion = EABI::EABI5;
  }

  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  // ARM supports the debug entry values.
  setSupportsDebugEntryValues(true);

  initAsmInfo();

  // ARM supports the MachineOutliner.
  setMachineOutliner(true);
  setSupportsDefaultOutlining(true);
}

ARMBaseTargetMachine::~ARMBaseTargetMachine() = default;

MachineFunctionInfo *ARMBaseTargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return ARMFunctionInfo::create<ARMFunctionInfo>(
      Allocator, F, static_cast<const ARMSubtarget *>(STI));
}

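/// Return the subtarget to use for the given function, creating and caching
/// it on first use. The cache key combines the CPU, the feature string, and
/// the per-function attributes (soft-float, minsize, denormal mode) that
/// affect code generation.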
const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU =
      CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
  std::string FS =
      FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;

  // FIXME: This is related to the code below to reset the target options;
  // we need to know whether or not the soft float flag is set on the
  // function before we can generate a subtarget. We also need to use
  // it as a key for the subtarget since that can be the only difference
  // between two functions.
  bool SoftFloat = F.getFnAttribute("use-soft-float").getValueAsBool();
  // If the soft float attribute is set on the function, turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";

  // Use the optminsize to identify the subtarget, but don't use it in the
  // feature string.
  std::string Key = CPU + FS;
  if (F.hasMinSize())
    Key += "+minsize";

  DenormalMode DM = F.getDenormalModeRaw();
  if (DM != DenormalMode::getIEEE())
    Key += "denormal-fp-math=" + DM.str();

  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle,
                                       F.hasMinSize(), DM);

    if (!I->isThumb() && !I->hasARMOps())
      F.getContext().emitError("Function '" + F.getName() + "' uses ARM "
          "instructions, but the target does not support ARM mode execution.");
  }

  return I.get();
}

TargetTransformInfo
ARMBaseTargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(std::make_unique<ARMTTIImpl>(this, F));
}

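/// Create the pre-RA and post-RA machine schedulers and attach the
/// ARM-specific DAG mutations (macro fusion on subtargets that support
/// instruction fusion, plus latency mutations post-RA).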
ScheduleDAGInstrs *
ARMBaseTargetMachine::createMachineScheduler(MachineSchedContext *C) const {
  ScheduleDAGMILive *DAG = createSchedLive(C);
  // add DAG Mutations here.
  const ARMSubtarget &ST = C->MF->getSubtarget<ARMSubtarget>();
  if (ST.hasFusion())
    DAG->addMutation(createARMMacroFusionDAGMutation());
  return DAG;
}

ScheduleDAGInstrs *
ARMBaseTargetMachine::createPostMachineScheduler(MachineSchedContext *C) const {
  ScheduleDAGMI *DAG = createSchedPostRA(C);
  // add DAG Mutations here.
  const ARMSubtarget &ST = C->MF->getSubtarget<ARMSubtarget>();
  if (ST.hasFusion())
    DAG->addMutation(createARMMacroFusionDAGMutation());
  if (auto Mutation = createARMLatencyMutations(ST, C->AA))
    DAG->addMutation(std::move(Mutation));
  return DAG;
}

ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       std::optional<Reloc::Model> RM,
                                       std::optional<CodeModel::Model> CM,
                                       CodeGenOptLevel OL, bool JIT)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       std::optional<Reloc::Model> RM,
                                       std::optional<CodeModel::Model> CM,
                                       CodeGenOptLevel OL, bool JIT)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

namespace {

/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
  void addPreEmitPass2() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

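/// ExecutionDomainFix specialised to ARM's D-register class, used to resolve
/// execution-domain conflicts on D registers.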
class ARMExecutionDomainFix : public ExecutionDomainFix {
public:
  static char ID;
  ARMExecutionDomainFix() : ExecutionDomainFix(ID, ARM::DPRRegClass) {}
  StringRef getPassName() const override {
    return "ARM Execution Domain Fix";
  }
};
char ARMExecutionDomainFix::ID;

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(ARMExecutionDomainFix, "arm-execution-domain-fix",
                      "ARM Execution Domain Fix", false, false)
INITIALIZE_PASS_DEPENDENCY(ReachingDefInfoWrapperPass)
INITIALIZE_PASS_END(ARMExecutionDomainFix, "arm-execution-domain-fix",
                    "ARM Execution Domain Fix", false, false)

TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> ARMPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

void ARMPassConfig::addIRPasses() {
  if (TM->Options.ThreadModel == ThreadModel::Single)
    addPass(createLowerAtomicPass());
  else
    addPass(createAtomicExpandLegacyPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(
        SimplifyCFGOptions().hoistCommonInsts(true).sinkCommonInsts(true),
        [this](const Function &F) {
          const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
          return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
        }));

  addPass(createMVEGatherScatterLoweringPass());
  addPass(createMVELaneInterleavingPass());

  TargetPassConfig::addIRPasses();

  // Run the parallel DSP pass.
  if (getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(createARMParallelDSPPass());

  // Match complex arithmetic patterns.
  if (TM->getOptLevel() >= CodeGenOptLevel::Default)
    addPass(createComplexDeinterleavingPass(TM));

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createInterleavedAccessPass());

  // Add Control Flow Guard checks.
  if (TM->getTargetTriple().isOSWindows())
    addPass(createCFGuardCheckPass());

  if (TM->Options.JMCInstrument)
    addPass(createJMCInstrumenterPass());
}

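// Run the ARM type promotion pass ahead of the generic CodeGenPrepare
// pipeline when optimizing.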
void ARMPassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createTypePromotionLegacyPass());
  TargetPassConfig::addCodeGenPrepare();
}

bool ARMPassConfig::addPreISel() {
  if ((TM->getOptLevel() != CodeGenOptLevel::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: This is using the thumb1 only constant value for
    // maximal global offset for merging globals. We may want
    // to look into using the old value for non-thumb1 code of
    // 4095 based on the TargetMachine, but this starts to become
    // tricky when doing code gen per function.
    bool OnlyOptimizeForSize =
        (TM->getOptLevel() < CodeGenOptLevel::Aggressive) &&
        (EnableGlobalMerge == cl::BOU_UNSET);
    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    addPass(createHardwareLoopsLegacyPass());
    addPass(createMVETailPredicationPass());
    // FIXME: IR passes can delete address-taken basic blocks, deleting
    // corresponding blockaddresses. ARMConstantPoolConstant holds references
    // to address-taken basic blocks which can be invalidated if the function
    // containing the blockaddress has already been codegen'd and the basic
    // block is removed. Work around this by forcing all IR passes to run
    // before any ISel takes place. We should have a more principled way of
    // handling this. See D99707 for more details.
    addPass(createBarrierNoopPass());
  }

  return false;
}

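// Instruction selection hooks: the SelectionDAG ISel pass, followed by the
// GlobalISel stages (IRTranslator, Legalizer, RegBankSelect,
// InstructionSelect).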
bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
  return false;
}

bool ARMPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

bool ARMPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool ARMPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool ARMPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}

void ARMPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOptLevel::None) {
    if (getOptLevel() == CodeGenOptLevel::Aggressive)
      addPass(&MachinePipelinerID);

    addPass(createMVETPAndVPTOptimisationsPass());

    addPass(createMLxExpansionPass());

    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */ true));

    if (!DisableA15SDOptimization)
      addPass(createA15SDOptimizerPass());
  }
}

void ARMPassConfig::addPreSched2() {
  if (getOptLevel() != CodeGenOptLevel::None) {
    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass());

    addPass(new ARMExecutionDomainFix());
    addPass(createBreakFalseDeps());
  }

  // Expand some pseudo instructions into multiple instructions to allow
  // proper scheduling.
  addPass(createARMExpandPseudoPass());

  // Emit KCFI checks for indirect calls.
  addPass(createKCFIPass());

  if (getOptLevel() != CodeGenOptLevel::None) {
    // When optimising for size, always run the Thumb2SizeReduction pass before
    // IfConversion. Otherwise, check whether IT blocks are restricted
    // (e.g. in v8, IfConversion depends on Thumb instruction widths).
    addPass(createThumb2SizeReductionPass([this](const Function &F) {
      return this->TM->getSubtarget<ARMSubtarget>(F).hasMinSize() ||
             this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
    }));

    addPass(createIfConverter([](const MachineFunction &MF) {
      return !MF.getSubtarget<ARMSubtarget>().isThumb1Only();
    }));
  }
  addPass(createThumb2ITBlockPass());

  // Add both scheduling passes to give the subtarget an opportunity to pick
  // between them.
  if (getOptLevel() != CodeGenOptLevel::None) {
    addPass(&PostMachineSchedulerID);
    addPass(&PostRASchedulerID);
  }

  addPass(createMVEVPTBlockPass());
  addPass(createARMIndirectThunks());
  addPass(createARMSLSHardeningPass());
}

void ARMPassConfig::addPreEmitPass() {
  addPass(createThumb2SizeReductionPass());

  // Unpack bundles for:
  // - Thumb2: Constant island pass requires unbundled instructions
  // - KCFI: KCFI_CHECK pseudo instructions need to be unbundled for AsmPrinter
  addPass(createUnpackMachineBundles([](const MachineFunction &MF) {
    return MF.getSubtarget<ARMSubtarget>().isThumb2() ||
           MF.getFunction().getParent()->getModuleFlag("kcfi");
  }));

  // Don't optimize barriers or block placement at -O0.
  if (getOptLevel() != CodeGenOptLevel::None) {
    addPass(createARMBlockPlacementPass());
    addPass(createARMOptimizeBarriersPass());
  }
}

void ARMPassConfig::addPreEmitPass2() {
  // Inserts fixup instructions before unsafe AES operations. Instructions may
  // be inserted at the start of blocks and within blocks, so this pass has to
  // come before those below.
  addPass(createARMFixCortexA57AES1742098Pass());
  // Inserts BTIs at the start of functions and indirectly-called basic blocks,
  // so passes cannot add to the start of basic blocks once this has run.
  addPass(createARMBranchTargetsPass());
  // Inserts Constant Islands. Block sizes cannot be increased after this
  // point, as this may push the branch ranges and the load offsets used to
  // access constant pools out of range.
  addPass(createARMConstantIslandPass());
  // Finalises Low-Overhead Loops. This replaces pseudo instructions with real
  // instructions, but the pseudos all have conservative sizes so that block
  // sizes will only be decreased by this pass.
  addPass(createARMLowOverheadLoopsPass());

  if (TM->getTargetTriple().isOSWindows()) {
    // Identify valid longjmp targets for Windows Control Flow Guard.
    addPass(createCFGuardLongjmpPass());
    // Identify valid eh continuation targets for Windows EHCont Guard.
    addPass(createEHContGuardTargetsPass());
  }
}

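// MIR serialization hooks: create the default ARM machine function info,
// convert it to YAML, and parse it back when reading MIR.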
yaml::MachineFunctionInfo *
ARMBaseTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::ARMFunctionInfo();
}

yaml::MachineFunctionInfo *
ARMBaseTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<ARMFunctionInfo>();
  return new yaml::ARMFunctionInfo(*MFI);
}

bool ARMBaseTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const auto &YamlMFI = static_cast<const yaml::ARMFunctionInfo &>(MFI);
  MachineFunction &MF = PFS.MF;
  MF.getInfo<ARMFunctionInfo>()->initializeBaseYamlFields(YamlMFI);
  return false;
}

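// Drop all cached per-function subtargets.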
void ARMBaseTargetMachine::reset() { SubtargetMap.clear(); }