1//===-- ARMSubtarget.cpp - ARM Subtarget Information ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the ARM specific subclass of TargetSubtargetInfo.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ARM.h"
14
15#include "ARMCallLowering.h"
16#include "ARMFrameLowering.h"
17#include "ARMInstrInfo.h"
18#include "ARMLegalizerInfo.h"
19#include "ARMRegisterBankInfo.h"
20#include "ARMSubtarget.h"
21#include "ARMTargetMachine.h"
22#include "MCTargetDesc/ARMMCTargetDesc.h"
23#include "Thumb1FrameLowering.h"
24#include "Thumb1InstrInfo.h"
25#include "Thumb2InstrInfo.h"
26#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
29#include "llvm/CodeGen/MachineFrameInfo.h"
30#include "llvm/CodeGen/MachineFunction.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalValue.h"
33#include "llvm/MC/MCAsmInfo.h"
34#include "llvm/MC/MCTargetOptions.h"
35#include "llvm/Support/CodeGen.h"
36#include "llvm/Support/CommandLine.h"
37#include "llvm/Target/TargetOptions.h"
38#include "llvm/TargetParser/ARMTargetParser.h"
39#include "llvm/TargetParser/Triple.h"
40
41using namespace llvm;
42
43#define DEBUG_TYPE "arm-subtarget"
44
45#define GET_SUBTARGETINFO_TARGET_DESC
46#define GET_SUBTARGETINFO_CTOR
47#include "ARMGenSubtargetInfo.inc"
48
// When enabled (the default), instruction selection may use the fused
// multiply-accumulate forms; can be disabled via -arm-use-mulops=false for
// debugging or benchmarking.
static cl::opt<bool>
UseFusedMulOps("arm-use-mulops",
               cl::init(Val: true), cl::Hidden);

// How much freedom Thumb2 IT-block formation is given.
enum ITMode {
  DefaultIT,    // Any type of IT block may be generated.
  RestrictedIT  // Complex (multi-instruction) IT blocks are disallowed.
};

// Selects the IT-block mode above via -arm-default-it / -arm-restrict-it.
static cl::opt<ITMode>
    IT(cl::desc("IT block support"), cl::Hidden, cl::init(Val: DefaultIT),
       cl::values(clEnumValN(DefaultIT, "arm-default-it",
                             "Generate any type of IT block"),
                  clEnumValN(RestrictedIT, "arm-restrict-it",
                             "Disallow complex IT blocks")));

/// ForceFastISel - Use the fast-isel, even for subtargets where it is not
/// currently supported (for testing only).
static cl::opt<bool>
ForceFastISel("arm-force-fast-isel",
              cl::init(Val: false), cl::Hidden);
70
71/// initializeSubtargetDependencies - Initializes using a CPU and feature string
72/// so that we can use initializer lists for subtarget initialization.
73ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
74 StringRef FS) {
75 initializeEnvironment();
76 initSubtargetFeatures(CPU, FS);
77 return *this;
78}
79
80ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU,
81 StringRef FS) {
82 ARMSubtarget &STI = initializeSubtargetDependencies(CPU, FS);
83 if (STI.isThumb1Only())
84 return (ARMFrameLowering *)new Thumb1FrameLowering(STI);
85
86 return new ARMFrameLowering(STI);
87}
88
// Construct an ARM subtarget for the given triple/CPU/feature string.
// NOTE: member-initialization order is load-bearing here: the FrameLowering
// initializer runs initializeSubtargetDependencies(), so later initializers
// (InstrInfo, TLInfo) and the constructor body may query the parsed feature
// bits directly.
ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU,
                           const std::string &FS,
                           const ARMBaseTargetMachine &TM, bool IsLittle,
                           bool MinSize)
    : ARMGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
      UseMulOps(UseFusedMulOps), CPUString(CPU), OptMinSize(MinSize),
      IsLittle(IsLittle), TargetTriple(TT), Options(TM.Options), TM(TM),
      FrameLowering(initializeFrameLowering(CPU, FS)),
      // At this point initializeSubtargetDependencies has been called so
      // we can query directly.
      InstrInfo(isThumb1Only()
                    ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this)
                    : !isThumb()
                          ? (ARMBaseInstrInfo *)new ARMInstrInfo(*this)
                          : (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)),
      TLInfo(TM, *this) {

  // GlobalISel support objects; all owned by this subtarget.
  CallLoweringInfo.reset(p: new ARMCallLowering(*getTargetLowering()));
  Legalizer.reset(p: new ARMLegalizerInfo(*this));

  auto *RBI = new ARMRegisterBankInfo(*getRegisterInfo());

  // FIXME: At this point, we can't rely on Subtarget having RBI.
  // It's awkward to mix passing RBI and the Subtarget; should we pass
  // TII/TRI as well?
  InstSelector.reset(p: createARMInstructionSelector(TM, STI: *this, RBI: *RBI));

  // Ownership of RBI transfers to RegBankInfo only after the selector has
  // been constructed with a reference to it.
  RegBankInfo.reset(p: RBI);
}
118
// GlobalISel hook: returns the ARM call lowering created in the constructor.
const CallLowering *ARMSubtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}
122
// GlobalISel hook: returns the instruction selector created in the
// constructor.
InstructionSelector *ARMSubtarget::getInstructionSelector() const {
  return InstSelector.get();
}
126
// GlobalISel hook: returns the legalization rules created in the constructor.
const LegalizerInfo *ARMSubtarget::getLegalizerInfo() const {
  return Legalizer.get();
}
130
// GlobalISel hook: returns the register bank info created in the constructor.
const RegisterBankInfo *ARMSubtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}
134
// XRay instrumentation requires ARM-mode sleds on at least ARMv6.
bool ARMSubtarget::isXRaySupported() const {
  // We don't currently support Thumb, but Windows requires Thumb.
  return hasV6Ops() && hasARMOps() && !isTargetWindows();
}
139
// Derive environment-dependent settings (currently only the SjLj EH choice)
// from the triple and the target options.
void ARMSubtarget::initializeEnvironment() {
  // MCAsmInfo isn't always present (e.g. in opt) so we can't initialize this
  // directly from it, but we can try to make sure they're consistent when both
  // available.
  // SjLj is used when explicitly requested, or by default on Darwin targets
  // that do not use the watch ABI and have no explicit exception model.
  UseSjLjEH = (isTargetDarwin() && !isTargetWatchABI() &&
               Options.ExceptionModel == ExceptionHandling::None) ||
              Options.ExceptionModel == ExceptionHandling::SjLj;
  assert((!TM.getMCAsmInfo() ||
          (TM.getMCAsmInfo()->getExceptionHandlingType() ==
           ExceptionHandling::SjLj) == UseSjLjEH) &&
         "inconsistent sjlj choice between CodeGen and MC");
}
152
// initSubtargetFeatures - Parse the CPU name and feature string, resolve
// triple-derived defaults, and configure the per-CPU tuning knobs
// (scheduling model, itineraries, stack alignment, interleave factors, ...).
void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
  // No CPU explicitly given: fall back to "generic", unless the Darwin
  // sub-arch implies a better default.
  if (CPUString.empty()) {
    CPUString = "generic";

    if (isTargetDarwin()) {
      StringRef ArchName = TargetTriple.getArchName();
      ARM::ArchKind AK = ARM::parseArch(Arch: ArchName);
      if (AK == ARM::ArchKind::ARMV7S)
        // Default to the Swift CPU when targeting armv7s/thumbv7s.
        CPUString = "swift";
      else if (AK == ARM::ArchKind::ARMV7K)
        // Default to the Cortex-a7 CPU when targeting armv7k/thumbv7k.
        // ARMv7k does not use SjLj exception handling.
        CPUString = "cortex-a7";
    }
  }

  // Insert the architecture feature derived from the target triple into the
  // feature string. This is important for setting features that are implied
  // based on the architecture version.
  std::string ArchFS = ARM_MC::ParseARMTriple(TT: TargetTriple, CPU: CPUString);
  if (!FS.empty()) {
    // User-provided features are appended so they take precedence over the
    // triple-derived ones.
    if (!ArchFS.empty())
      ArchFS = (Twine(ArchFS) + "," + FS).str();
    else
      ArchFS = std::string(FS);
  }
  ParseSubtargetFeatures(CPU: CPUString, /*TuneCPU*/ CPUString, FS: ArchFS);

  // FIXME: This used to enable V6T2 support implicitly for Thumb2 mode.
  // Assert this for now to make the change obvious.
  assert(hasV6T2Ops() || !hasThumb2());

  if (genExecuteOnly()) {
    // Execute only support for >= v8-M Baseline requires movt support
    if (hasV8MBaselineOps())
      NoMovt = false;
    if (!hasV6MOps())
      report_fatal_error(reason: "Cannot generate execute-only code for this target");
  }

  // Keep a pointer to static instruction cost data for the specified CPU.
  SchedModel = getSchedModelForCPU(CPU: CPUString);

  // Initialize scheduling itinerary for the specified CPU.
  InstrItins = getInstrItineraryForCPU(CPU: CPUString);

  // FIXME: this is invalid for WindowsCE
  if (isTargetWindows())
    NoARM = true;

  // AAPCS requires 8-byte stack alignment; NaCl and AAPCS16 require 16.
  if (TM.isAAPCS_ABI())
    stackAlignment = Align(8);
  if (isTargetNaCl() || TM.isAAPCS16_ABI())
    stackAlignment = Align(16);

  // FIXME: Completely disable sibcall for Thumb1 since ThumbRegisterInfo::
  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
  // support in the assembler and linker to be used. This would need to be
  // fixed to fully support tail calls in Thumb1.
  //
  // For ARMv8-M, we /do/ implement tail calls. Doing this is tricky for v8-M
  // baseline, since the LDM/POP instruction on Thumb doesn't take LR. This
  // means if we need to reload LR, it takes extra instructions, which outweighs
  // the value of the tail call; but here we don't know yet whether LR is going
  // to be used. We take the optimistic approach of generating the tail call and
  // perhaps taking a hit if we need to restore the LR.

  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters. We don't currently do this
  // case.

  SupportsTailCall = !isThumb1Only() || hasV8MBaselineOps();

  // Translate the -arm-default-it/-arm-restrict-it command-line choice.
  switch (IT) {
  case DefaultIT:
    RestrictIT = false;
    break;
  case RestrictedIT:
    RestrictIT = true;
    break;
  }

  // NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default.
  const FeatureBitset &Bits = getFeatureBits();
  if ((Bits[ARM::ProcA5] || Bits[ARM::ProcA8]) && // Where this matters
      (Options.UnsafeFPMath || isTargetDarwin()))
    HasNEONForFP = true;

  // RWPI dedicates R9 as the static base register, so keep it out of the
  // allocatable set.
  if (isRWPI())
    ReserveR9 = true;

  // If MVEVectorCostFactor is still 0 (has not been set to anything else), default it to 2
  if (MVEVectorCostFactor == 0)
    MVEVectorCostFactor = 2;

  // Per-CPU tuning parameters that TableGen doesn't model yet.
  // FIXME: Teach TableGen to deal with these instead of doing it manually here.
  switch (ARMProcFamily) {
  case Others:
  case CortexA5:
    break;
  case CortexA7:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA8:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA9:
    LdStMultipleTiming = DoubleIssueCheckUnalignedAccess;
    PreISelOperandLatencyAdjustment = 1;
    break;
  case CortexA12:
    break;
  case CortexA15:
    MaxInterleaveFactor = 2;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  case CortexA17:
  case CortexA32:
  case CortexA35:
  case CortexA53:
  case CortexA55:
  case CortexA57:
  case CortexA72:
  case CortexA73:
  case CortexA75:
  case CortexA76:
  case CortexA77:
  case CortexA78:
  case CortexA78AE:
  case CortexA78C:
  case CortexA510:
  case CortexA710:
  case CortexR4:
  case CortexR5:
  case CortexR7:
  case CortexM3:
  case CortexM55:
  case CortexM7:
  case CortexM85:
  case CortexR52:
  case CortexR52plus:
  case CortexX1:
  case CortexX1C:
    break;
  case Exynos:
    LdStMultipleTiming = SingleIssuePlusExtras;
    MaxInterleaveFactor = 4;
    if (!isThumb())
      PreferBranchLogAlignment = 3;
    break;
  case Kryo:
    break;
  case Krait:
    PreISelOperandLatencyAdjustment = 1;
    break;
  case NeoverseV1:
    break;
  case Swift:
    MaxInterleaveFactor = 2;
    LdStMultipleTiming = SingleIssuePlusExtras;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  }
}
322
323bool ARMSubtarget::isROPI() const {
324 return TM.getRelocationModel() == Reloc::ROPI ||
325 TM.getRelocationModel() == Reloc::ROPI_RWPI;
326}
327bool ARMSubtarget::isRWPI() const {
328 return TM.getRelocationModel() == Reloc::RWPI ||
329 TM.getRelocationModel() == Reloc::ROPI_RWPI;
330}
331
// Returns true if the global must be accessed through an indirection (e.g. a
// non-lazy-pointer load) rather than directly.
bool ARMSubtarget::isGVIndirectSymbol(const GlobalValue *GV) const {
  // Anything not known to resolve within this DSO needs the indirection.
  if (!TM.shouldAssumeDSOLocal(GV))
    return true;

  // 32 bit macho has no relocation for a-b if a is undefined, even if b is in
  // the section that is being relocated. This means we have to use a load even
  // for GVs that are known to be local to the dso.
  if (isTargetMachO() && TM.isPositionIndependent() &&
      (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
    return true;

  return false;
}
345
346bool ARMSubtarget::isGVInGOT(const GlobalValue *GV) const {
347 return isTargetELF() && TM.isPositionIndependent() && !GV->isDSOLocal();
348}
349
// Branch misprediction penalty (in cycles) from the active per-CPU
// scheduling model.
unsigned ARMSubtarget::getMispredictionPenalty() const {
  return SchedModel.MispredictPenalty;
}
353
354bool ARMSubtarget::enableMachineScheduler() const {
355 // The MachineScheduler can increase register usage, so we use more high
356 // registers and end up with more T2 instructions that cannot be converted to
357 // T1 instructions. At least until we do better at converting to thumb1
358 // instructions, on cortex-m at Oz where we are size-paranoid, don't use the
359 // Machine scheduler, relying on the DAG register pressure scheduler instead.
360 if (isMClass() && hasMinSize())
361 return false;
362 // Enable the MachineScheduler before register allocation for subtargets
363 // with the use-misched feature.
364 return useMachineScheduler();
365}
366
// Subregister liveness tracking is only enabled when MVE is available.
bool ARMSubtarget::enableSubRegLiveness() const {
  // Enable SubRegLiveness for MVE to better optimize s subregs for mqpr regs
  // and q subregs for qqqqpr regs.
  return hasMVEIntegerOps();
}
372
bool ARMSubtarget::enableMachinePipeliner() const {
  // Enable the MachinePipeliner before register allocation for subtargets
  // with the use-mipipeliner feature; an instruction scheduling model is
  // also required for the pipeliner to reason about resources.
  return getSchedModel().hasInstrSchedModel() && useMachinePipeliner();
}
378
// Software pipelining resource checking: ARM never uses the DFA-based
// checker (enableMachinePipeliner above requires an InstrSchedModel instead).
bool ARMSubtarget::useDFAforSMS() const { return false; }
380
381// This overrides the PostRAScheduler bit in the SchedModel for any CPU.
382bool ARMSubtarget::enablePostRAScheduler() const {
383 if (enableMachineScheduler())
384 return false;
385 if (disablePostRAScheduler())
386 return false;
387 // Thumb1 cores will generally not benefit from post-ra scheduling
388 return !isThumb1Only();
389}
390
391bool ARMSubtarget::enablePostRAMachineScheduler() const {
392 if (!enableMachineScheduler())
393 return false;
394 if (disablePostRAScheduler())
395 return false;
396 return !isThumb1Only();
397}
398
399bool ARMSubtarget::useStride4VFPs() const {
400 // For general targets, the prologue can grow when VFPs are allocated with
401 // stride 4 (more vpush instructions). But WatchOS uses a compact unwind
402 // format which it's more important to get right.
403 return isTargetWatchABI() ||
404 (useWideStrideVFP() && !OptMinSize);
405}
406
407bool ARMSubtarget::useMovt() const {
408 // NOTE Windows on ARM needs to use mov.w/mov.t pairs to materialise 32-bit
409 // immediates as it is inherently position independent, and may be out of
410 // range otherwise.
411 return !NoMovt && hasV8MBaselineOps() &&
412 (isTargetWindows() || !OptMinSize || genExecuteOnly());
413}
414
415bool ARMSubtarget::useFastISel() const {
416 // Enable fast-isel for any target, for testing only.
417 if (ForceFastISel)
418 return true;
419
420 // Limit fast-isel to the targets that are or have been tested.
421 if (!hasV6Ops())
422 return false;
423
424 // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
425 return TM.Options.EnableFastISel &&
426 ((isTargetMachO() && !isThumb1Only()) ||
427 (isTargetLinux() && !isThumb()) || (isTargetNaCl() && !isThumb()));
428}
429
430unsigned ARMSubtarget::getGPRAllocationOrder(const MachineFunction &MF) const {
431 // The GPR register class has multiple possible allocation orders, with
432 // tradeoffs preferred by different sub-architectures and optimisation goals.
433 // The allocation orders are:
434 // 0: (the default tablegen order, not used)
435 // 1: r14, r0-r13
436 // 2: r0-r7
437 // 3: r0-r7, r12, lr, r8-r11
438 // Note that the register allocator will change this order so that
439 // callee-saved registers are used later, as they require extra work in the
440 // prologue/epilogue (though we sometimes override that).
441
442 // For thumb1-only targets, only the low registers are allocatable.
443 if (isThumb1Only())
444 return 2;
445
446 // Allocate low registers first, so we can select more 16-bit instructions.
447 // We also (in ignoreCSRForAllocationOrder) override the default behaviour
448 // with regards to callee-saved registers, because pushing extra registers is
449 // much cheaper (in terms of code size) than using high registers. After
450 // that, we allocate r12 (doesn't need to be saved), lr (saving it means we
451 // can return with the pop, don't need an extra "bx lr") and then the rest of
452 // the high registers.
453 if (isThumb2() && MF.getFunction().hasMinSize())
454 return 3;
455
456 // Otherwise, allocate in the default order, using LR first because saving it
457 // allows a shorter epilogue sequence.
458 return 1;
459}
460
461bool ARMSubtarget::ignoreCSRForAllocationOrder(const MachineFunction &MF,
462 MCRegister PhysReg) const {
463 // To minimize code size in Thumb2, we prefer the usage of low regs (lower
464 // cost per use) so we can use narrow encoding. By default, caller-saved
465 // registers (e.g. lr, r12) are always allocated first, regardless of
466 // their cost per use. When optForMinSize, we prefer the low regs even if
467 // they are CSR because usually push/pop can be folded into existing ones.
468 return isThumb2() && MF.getFunction().hasMinSize() &&
469 ARM::GPRRegClass.contains(Reg: PhysReg);
470}
471
472ARMSubtarget::PushPopSplitVariation
473ARMSubtarget::getPushPopSplitVariation(const MachineFunction &MF) const {
474 const Function &F = MF.getFunction();
475 const MachineFrameInfo &MFI = MF.getFrameInfo();
476
477 // Thumb1 always splits the pushes at R7, because the Thumb1 push instruction
478 // cannot use high registers except for lr.
479 if (isThumb1Only())
480 return SplitR7;
481
482 // If R7 is the frame pointer, we must split at R7 to ensure that the
483 // previous frame pointer (R7) and return address (LR) are adjacent on the
484 // stack, to form a valid frame record.
485 if (getFramePointerReg() == ARM::R7 &&
486 MF.getTarget().Options.FramePointerIsReserved(MF))
487 return SplitR7;
488
489 // Returns SplitR11WindowsSEH when the stack pointer needs to be
490 // restored from the frame pointer r11 + an offset and Windows CFI is enabled.
491 // This stack unwinding cannot be expressed with SEH unwind opcodes when done
492 // with a single push, making it necessary to split the push into r4-r10, and
493 // another containing r11+lr.
494 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
495 F.needsUnwindTableEntry() &&
496 (MFI.hasVarSizedObjects() || getRegisterInfo()->hasStackRealignment(MF)))
497 return SplitR11WindowsSEH;
498
499 // Returns SplitR11AAPCSSignRA when the frame pointer is R11, requiring R11
500 // and LR to be adjacent on the stack, and branch signing is enabled,
501 // requiring R12 to be on the stack.
502 if (MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress() &&
503 getFramePointerReg() == ARM::R11 &&
504 MF.getTarget().Options.FramePointerIsReserved(MF))
505 return SplitR11AAPCSSignRA;
506 return NoSplit;
507}
508