1//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements AArch64 TargetInfo objects.
10//
11//===----------------------------------------------------------------------===//
12
13#include "AArch64.h"
14#include "clang/Basic/Diagnostic.h"
15#include "clang/Basic/LangOptions.h"
16#include "clang/Basic/TargetBuiltins.h"
17#include "clang/Basic/TargetInfo.h"
18#include "llvm/ADT/APSInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/StringSwitch.h"
21#include "llvm/TargetParser/AArch64TargetParser.h"
22#include "llvm/TargetParser/ARMTargetParserCommon.h"
23#include <optional>
24
25using namespace clang;
26using namespace clang::targets;
27
// Sizes of the contiguous sub-ranges that make up the AArch64 builtin ID
// space. The IDs are laid out back-to-back in this order:
//   NEON | NEON FP16 | SVE | SVE/NEON bridge | SME | AArch64-specific
// Each count is the distance between consecutive range boundaries.
static constexpr int NumNeonBuiltins =
    NEON::FirstFp16Builtin - Builtin::FirstTSBuiltin;
static constexpr int NumFp16Builtins =
    NEON::FirstTSBuiltin - NEON::FirstFp16Builtin;
static constexpr int NumSVEBuiltins =
    SVE::FirstNeonBridgeBuiltin - NEON::FirstTSBuiltin;
static constexpr int NumSVENeonBridgeBuiltins =
    SVE::FirstTSBuiltin - SVE::FirstNeonBridgeBuiltin;
static constexpr int NumSMEBuiltins = SME::FirstTSBuiltin - SVE::FirstTSBuiltin;
static constexpr int NumAArch64Builtins =
    AArch64::LastTSBuiltin - SME::FirstTSBuiltin;
static constexpr int NumBuiltins =
    AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin;
// The sub-ranges above must exactly tile the whole AArch64 builtin ID range,
// with no gaps and no overlap.
static_assert(NumBuiltins ==
              (NumNeonBuiltins + NumFp16Builtins + NumSVEBuiltins +
               NumSVENeonBridgeBuiltins + NumSMEBuiltins + NumAArch64Builtins));
44
// Tablegen-produced builtin string tables and Builtin::Info arrays for each
// builtin family. Each nested namespace pulls in a GET_*_STR_TABLE /
// GET_*_INFOS pair from the corresponding .inc file; the static_asserts (here
// and above) keep the array sizes in sync with the builtin ID ranges.
namespace clang {
namespace AArch64 {
#define GET_BUILTIN_STR_TABLE
#include "clang/Basic/BuiltinsAArch64.inc"
#undef GET_BUILTIN_STR_TABLE

// AArch64-specific builtins whose full names live in the string table.
static constexpr Builtin::Info BuiltinInfos[] = {
#define GET_BUILTIN_INFOS
#include "clang/Basic/BuiltinsAArch64.inc"
#undef GET_BUILTIN_INFOS
};

// AArch64-specific builtins registered under the "__builtin_arm_" prefix
// (see getTargetBuiltins() below for how the prefix is applied).
static constexpr Builtin::Info PrefixedBuiltinInfos[] = {
#define GET_BUILTIN_PREFIXED_INFOS
#include "clang/Basic/BuiltinsAArch64.inc"
#undef GET_BUILTIN_PREFIXED_INFOS
};
static_assert((std::size(BuiltinInfos) + std::size(PrefixedBuiltinInfos)) ==
              NumAArch64Builtins);
} // namespace AArch64

namespace NEON {
#define GET_NEON_BUILTIN_STR_TABLE
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_BUILTIN_STR_TABLE

static constexpr std::array<Builtin::Info, NumNeonBuiltins> BuiltinInfos = {
#define GET_NEON_BUILTIN_INFOS
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_BUILTIN_INFOS
};

// The FP16 NEON intrinsics use the same tablegen emitter as arm_neon but come
// from a separate .inc, so they get their own shard nested under NEON.
namespace FP16 {
#define GET_NEON_BUILTIN_STR_TABLE
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_BUILTIN_STR_TABLE

static constexpr std::array<Builtin::Info, NumFp16Builtins> BuiltinInfos = {
#define GET_NEON_BUILTIN_INFOS
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_BUILTIN_INFOS
};
} // namespace FP16
} // namespace NEON

namespace SVE {
#define GET_SVE_BUILTIN_STR_TABLE
#include "clang/Basic/arm_sve_builtins.inc"
#undef GET_SVE_BUILTIN_STR_TABLE

static constexpr std::array<Builtin::Info, NumSVEBuiltins> BuiltinInfos = {
#define GET_SVE_BUILTIN_INFOS
#include "clang/Basic/arm_sve_builtins.inc"
#undef GET_SVE_BUILTIN_INFOS
};
} // namespace SVE

namespace SME {
#define GET_SME_BUILTIN_STR_TABLE
#include "clang/Basic/arm_sme_builtins.inc"
#undef GET_SME_BUILTIN_STR_TABLE

static constexpr std::array<Builtin::Info, NumSMEBuiltins> BuiltinInfos = {
#define GET_SME_BUILTIN_INFOS
#include "clang/Basic/arm_sme_builtins.inc"
#undef GET_SME_BUILTIN_INFOS
};
} // namespace SME
} // namespace clang
114
// The SVE/NEON bridge builtins still come from a legacy .def file rather than
// tablegen, so their string table and info array are assembled here with the
// CLANG_*_STR_TABLE / CLANG_TARGET_BUILTIN_ENTRY helper macros.
static constexpr llvm::StringTable BuiltinSVENeonBridgeStrings =
    CLANG_BUILTIN_STR_TABLE_START
#define TARGET_BUILTIN CLANG_TARGET_BUILTIN_STR_TABLE
#define GET_SVE_BUILTINS
#include "clang/Basic/BuiltinsAArch64NeonSVEBridge.def"
#undef GET_SVE_BUILTINS
#undef TARGET_BUILTIN
    ;

static constexpr auto BuiltinSVENeonBridgeInfos =
    Builtin::MakeInfos<NumSVENeonBridgeBuiltins>(Infos: {
#define TARGET_BUILTIN CLANG_TARGET_BUILTIN_ENTRY
#define GET_SVE_BUILTINS
#include "clang/Basic/BuiltinsAArch64NeonSVEBridge.def"
#undef GET_SVE_BUILTINS
#undef TARGET_BUILTIN
    });
132
/// Construct the AArch64 target description: fundamental type sizes and
/// formats, atomics widths, bitfield/ABI layout rules, and OS-specific
/// deviations (OpenBSD, Darwin, NetBSD, Linux mcount naming).
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  if (getTriple().isOSOpenBSD()) {
    // OpenBSD spells int64_t as "long long" even on LP64.
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    // AAPCS64 wchar_t is unsigned; Darwin and NetBSD keep the default
    // (signed int) instead.
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  AddrSpaceMap = &ARM64AddrSpaceMap;

  // All AArch64 implementations support ARMv8 FP, which makes half a legal
  // type.
  HasFastHalfType = true;
  HalfArgsAndReturns = true;
  HasFloat16 = true;
  HasStrictFP = true;

  // ILP32 (arm64_32 / -mabi=ilp32) narrows long and pointers to 32 bits.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  BitIntMaxAlign = 128;
  MaxVectorAlign = 128;
  // LSE2/CASP allow 128-bit atomics to be inlined.
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE quad precision (not x87 80-bit or double-double).
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the Neon ACLE and SVE types available. Note that this deliberately
  // doesn't depend on SveMode, since in principle it should be possible to
  // turn SVE on and off within a translation unit. It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64ACLETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AAPCS64 allows any "fundamental integer data type" to be used for
  // over-sized bitfields, which includes 128-bit integers.
  LargestOverSizedBitfieldContainer = 128;

  HasUnalignedAccess = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling entry-point name differs per OS/EABI convention; "\01" keeps
  // the assembler from prepending a user-label prefix.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
210
211StringRef AArch64TargetInfo::getABI() const { return ABI; }
212
213bool AArch64TargetInfo::setABI(const std::string &Name) {
214 if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs")
215 return false;
216
217 ABI = Name;
218 return true;
219}
220
/// Final consistency check after features and ABI have been resolved.
/// Diagnoses and rejects the aapcs-soft ABI on any target that has an FPU.
bool AArch64TargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
  if (hasFeature(Feature: "fp") && ABI == "aapcs-soft") {
    // aapcs-soft is not allowed for targets with an FPU, to avoid there being
    // two incompatible ABIs.
    Diags.Report(DiagID: diag::err_target_unsupported_abi_with_fpu) << ABI;
    return false;
  }
  return true;
}
230
/// Check whether \p RegName may be used for a global register variable of
/// \p RegSize bits. Returns true for "sp" and for reserved "wN"/"xN"
/// registers; \p HasSizeMismatch reports whether the variable's size matches
/// the register's width (32 for w-regs, 64 for sp/x-regs).
bool AArch64TargetInfo::validateGlobalRegisterVariable(
    StringRef RegName, unsigned RegSize, bool &HasSizeMismatch) const {
  if (RegName == "sp") {
    HasSizeMismatch = RegSize != 64;
    return true;
  }
  if (RegName.starts_with(Prefix: "w"))
    HasSizeMismatch = RegSize != 32;
  else if (RegName.starts_with(Prefix: "x"))
    HasSizeMismatch = RegSize != 64;
  else
    return false;
  // Strip the "w"/"x" to get the register number.
  StringRef RegNum = RegName.drop_front();
  // Check if the register is reserved. See also
  // AArch64TargetLowering::getRegisterByName().
  // x0 is always allowed; x18 only where the platform reserves it; any other
  // xN/wN only when explicitly reserved via -ffixed-xN (reserve-xN feature).
  return RegNum == "0" ||
         (RegNum == "18" &&
          llvm::AArch64::isX18ReservedByDefault(TT: getTriple())) ||
         getTargetOpts().FeatureMap.lookup(Key: ("reserve-x" + RegNum).str());
}
251
/// Parse an -mbranch-protection= specification and fill in \p BPI.
/// Returns false (with \p Err set by the parser, where applicable) if the
/// spec is malformed or conflicts with ptrauth-returns.
bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
                                                 BranchProtectionInfo &BPI,
                                                 const LangOptions &LO,
                                                 StringRef &Err) const {
  llvm::ARM::ParsedBranchProtection PBP;
  if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err, EnablePAuthLR: HasPAuthLR))
    return false;

  // GCS is currently untested with ptrauth-returns, but enabling this could be
  // allowed in future after testing with a suitable system.
  if (LO.PointerAuthReturns &&
      (PBP.Scope != "none" || PBP.BranchProtectionPAuthLR ||
       PBP.GuardedControlStack))
    return false;

  // Map the textual scope onto the LangOptions enum; anything other than
  // "non-leaf"/"all" means no return-address signing.
  BPI.SignReturnAddr =
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
          .Case(S: "non-leaf", Value: LangOptions::SignReturnAddressScopeKind::NonLeaf)
          .Case(S: "all", Value: LangOptions::SignReturnAddressScopeKind::All)
          .Default(Value: LangOptions::SignReturnAddressScopeKind::None);

  // B-key is the default unless "a_key" was requested explicitly.
  if (PBP.Key == "a_key")
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
  else
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;

  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
  BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
  BPI.GuardedControlStack = PBP.GuardedControlStack;
  return true;
}
283
284bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
285 return llvm::AArch64::parseCpu(Name).has_value();
286}
287
288bool AArch64TargetInfo::setCPU(const std::string &Name) {
289 return isValidCPUName(Name);
290}
291
/// Populate \p Values with every CPU/arch name accepted by -mcpu, for use in
/// diagnostics and --print-supported-cpus.
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
296
// Macros implied by -march=armv8.1-a. Later architecture levels chain back
// through this function, so the macro applies to v8.1-A and up.
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro(Name: "__ARM_FEATURE_QRDMX", Value: "1");
}
301
// Macros implied by -march=armv8.2-a (adds nothing of its own beyond v8.1-A).
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
307
// Macros implied by -march=armv8.3-a: complex-number arithmetic and the
// FJCVTZS (JavaScript convert) instruction.
void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro(Name: "__ARM_FEATURE_COMPLEX", Value: "1");
  Builder.defineMacro(Name: "__ARM_FEATURE_JCVT", Value: "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}
315
// Macros implied by -march=armv8.4-a (adds nothing of its own beyond v8.3-A).
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}
321
// Macros implied by -march=armv8.5-a: the FRINT32/FRINT64 rounding
// intrinsics.
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro(Name: "__ARM_FEATURE_FRINT", Value: "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
328
// Macros implied by -march=armv8.6-a.
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
338
// Macros implied by -march=armv8.7-a (adds nothing of its own beyond v8.6-A).
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}
344
// Macros implied by -march=armv8.8-a (adds nothing of its own beyond v8.7-A).
void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}
350
// Macros implied by -march=armv8.9-a (adds nothing of its own beyond v8.8-A).
void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.8 defines
  getTargetDefinesARMV88A(Opts, Builder);
}
356
// Macros implied by -march=armv9-a, which is based on Armv8.5-A.
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}
362
// Macros implied by -march=armv9.1-a, which is based on Armv8.6-A.
void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}
368
// Macros implied by -march=armv9.2-a, which is based on Armv8.7-A.
void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}
374
// Macros implied by -march=armv9.3-a, which is based on Armv8.8-A.
void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}
380
// Macros implied by -march=armv9.4-a, which is based on Armv8.9-A.
void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.4-A maps to Armv8.9-A
  getTargetDefinesARMV89A(Opts, Builder);
}
386
// Macros implied by -march=armv9.5-a.
void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
  getTargetDefinesARMV94A(Opts, Builder);
}
392
// Macros implied by -march=armv9.6-a.
void AArch64TargetInfo::getTargetDefinesARMV96A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.6-A does not have a v8.* equivalent, but is a superset of v9.5-A.
  getTargetDefinesARMV95A(Opts, Builder);
}
398
// Macros implied by -march=armv9.7-a.
void AArch64TargetInfo::getTargetDefinesARMV97A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.7-A does not have a v8.* equivalent, but is a superset of v9.6-A.
  getTargetDefinesARMV96A(Opts, Builder);
}
404
/// Emit the AArch64 predefined macros: target identification, the code-model
/// macro, the ACLE feature-test macros for every enabled extension, and
/// (via the chained getTargetDefinesARMVxxA helpers) the per-architecture
/// macro sets. Feature flags (HasSVE2, HasSME, ...) are assumed to have been
/// resolved by handleTargetFeatures before this is called.
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  if (getTriple().isWindowsArm64EC()) {
    // Define the same set of macros as would be defined on x86_64 to ensure
    // that ARM64EC datatype layouts match those of x86_64 compiled code
    Builder.defineMacro(Name: "__amd64__");
    Builder.defineMacro(Name: "__amd64");
    Builder.defineMacro(Name: "__x86_64");
    Builder.defineMacro(Name: "__x86_64__");
    Builder.defineMacro(Name: "__arm64ec__");
  } else {
    Builder.defineMacro(Name: "__aarch64__");
  }

  if (getTriple().isLFI())
    Builder.defineMacro(Name: "__LFI__");

  // Inline assembly supports AArch64 flag outputs.
  Builder.defineMacro(Name: "__GCC_ASM_FLAG_OUTPUTS__");

  // __AARCH64_CMODEL_SMALL__ etc.; "default" is treated as the small model.
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c: c);
  Builder.defineMacro(Name: "__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro(Name: "__ARM_ACLE_VERSION(year, quarter, patch)",
                      Value: "(100 * (year) + 10 * (quarter) + (patch))");
#define ARM_ACLE_VERSION(Y, Q, P) (100 * (Y) + 10 * (Q) + (P))
  Builder.defineMacro(Name: "__ARM_ACLE", Value: Twine(ARM_ACLE_VERSION(2024, 2, 0)));
  Builder.defineMacro(Name: "__FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL",
                      Value: Twine(ARM_ACLE_VERSION(2024, 3, 0)));
#undef ARM_ACLE_VERSION
  Builder.defineMacro(Name: "__ARM_ARCH",
                      Value: std::to_string(val: ArchInfo->Version.getMajor()));
  // Profile is a single character, e.g. 'A', quoted for the macro value.
  Builder.defineMacro(Name: "__ARM_ARCH_PROFILE",
                      Value: std::string("'") + (char)ArchInfo->Profile + "'");

  Builder.defineMacro(Name: "__ARM_64BIT_STATE", Value: "1");
  Builder.defineMacro(Name: "__ARM_PCS_AAPCS64", Value: "1");
  Builder.defineMacro(Name: "__ARM_ARCH_ISA_A64", Value: "1");

  Builder.defineMacro(Name: "__ARM_FEATURE_CLZ", Value: "1");
  Builder.defineMacro(Name: "__ARM_FEATURE_FMA", Value: "1");
  Builder.defineMacro(Name: "__ARM_FEATURE_LDREX", Value: "0xF");
  Builder.defineMacro(Name: "__ARM_FEATURE_IDIV", Value: "1"); // As specified in ACLE
  Builder.defineMacro(Name: "__ARM_FEATURE_DIV"); // For backwards compatibility
  Builder.defineMacro(Name: "__ARM_FEATURE_NUMERIC_MAXMIN", Value: "1");
  Builder.defineMacro(Name: "__ARM_FEATURE_DIRECTED_ROUNDING", Value: "1");

  Builder.defineMacro(Name: "__ARM_ALIGN_MAX_STACK_PWR", Value: "4");

  // These macros are set when Clang can parse declarations with these
  // attributes.
  Builder.defineMacro(Name: "__ARM_STATE_ZA", Value: "1");
  Builder.defineMacro(Name: "__ARM_STATE_ZT0", Value: "1");

  // 0xe implies support for half, single and double precision operations.
  if (FPU & FPUMode)
    Builder.defineMacro(Name: "__ARM_FP", Value: "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro(Name: "__ARM_FP16_FORMAT_IEEE", Value: "1");
  Builder.defineMacro(Name: "__ARM_FP16_ARGS", Value: "1");

  // Clang supports arm_neon_sve_bridge.h
  Builder.defineMacro(Name: "__ARM_NEON_SVE_BRIDGE", Value: "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro(Name: "__ARM_FP_FAST", Value: "1");

  Builder.defineMacro(Name: "__ARM_SIZEOF_WCHAR_T",
                      Value: Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro(Name: "__ARM_SIZEOF_MINIMAL_ENUM", Value: Opts.ShortEnums ? "1" : "4");

  // Clang supports range prefetch intrinsics
  Builder.defineMacro(Name: "__ARM_PREFETCH_RANGE", Value: "1");

  if (FPU & NeonMode) {
    Builder.defineMacro(Name: "__ARM_NEON", Value: "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro(Name: "__ARM_NEON_FP", Value: "0xE");
  }

  // SVE/SVE2/SME feature-test macros, gated on the corresponding feature
  // flags resolved from the target feature list.
  if (FPU & SveMode)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE", Value: "1");

  if (HasSVE2)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE2", Value: "1");

  if (HasSVE2p1)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE2p1", Value: "1");

  if (HasSVE2 && HasSVEAES)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_AES", Value: "1");

  if (HasSVE2 && HasSVEBitPerm)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_BITPERM", Value: "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_SHA3", Value: "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE2_SM4", Value: "1");

  if (HasSVEB16B16)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE_B16B16", Value: "1");

  if (HasSME) {
    Builder.defineMacro(Name: "__ARM_FEATURE_SME");
    Builder.defineMacro(Name: "__ARM_FEATURE_LOCALLY_STREAMING", Value: "1");
  }

  if (HasSME2)
    Builder.defineMacro(Name: "__ARM_FEATURE_SME2", Value: "1");

  if (HasSME2p1)
    Builder.defineMacro(Name: "__ARM_FEATURE_SME2p1", Value: "1");

  if (HasSMEF16F16)
    Builder.defineMacro(Name: "__ARM_FEATURE_SME_F16F16", Value: "1");

  if (HasSMEB16B16)
    Builder.defineMacro(Name: "__ARM_FEATURE_SME_B16B16", Value: "1");

  if (HasFP8)
    Builder.defineMacro(Name: "__ARM_FEATURE_FP8", Value: "1");

  if (HasFP8FMA)
    Builder.defineMacro(Name: "__ARM_FEATURE_FP8FMA", Value: "1");

  if (HasFP8DOT2)
    Builder.defineMacro(Name: "__ARM_FEATURE_FP8DOT2", Value: "1");

  if (HasFP8DOT4)
    Builder.defineMacro(Name: "__ARM_FEATURE_FP8DOT4", Value: "1");

  if (HasSSVE_FP8DOT2)
    Builder.defineMacro(Name: "__ARM_FEATURE_SSVE_FP8DOT2", Value: "1");

  if (HasSSVE_FP8DOT4)
    Builder.defineMacro(Name: "__ARM_FEATURE_SSVE_FP8DOT4", Value: "1");

  if (HasSSVE_FP8FMA)
    Builder.defineMacro(Name: "__ARM_FEATURE_SSVE_FP8FMA", Value: "1");

  if (HasSME_F8F32)
    Builder.defineMacro(Name: "__ARM_FEATURE_SME_F8F32", Value: "1");

  if (HasSME_F8F16)
    Builder.defineMacro(Name: "__ARM_FEATURE_SME_F8F16", Value: "1");

  if (HasCRC)
    Builder.defineMacro(Name: "__ARM_FEATURE_CRC32", Value: "1");

  if (HasCSSC)
    Builder.defineMacro(Name: "__ARM_FEATURE_CSSC", Value: "1");

  // RCPC3 subsumes RCPC; report the highest level available.
  if (HasRCPC3)
    Builder.defineMacro(Name: "__ARM_FEATURE_RCPC", Value: "3");
  else if (HasRCPC)
    Builder.defineMacro(Name: "__ARM_FEATURE_RCPC", Value: "1");

  if (HasFPRCVT)
    Builder.defineMacro(Name: "__ARM_FEATURE_FPRCVT", Value: "1");

  if (HasF8F16MM)
    Builder.defineMacro(Name: "__ARM_FEATURE_F8F16MM", Value: "1");

  if (HasF8F32MM)
    Builder.defineMacro(Name: "__ARM_FEATURE_F8F32MM", Value: "1");

  if (HasSVE_F16F32MM)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE_F16F32MM", Value: "1");

  if (HasSVE_BFSCALE)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE_BFSCALE", Value: "1");

  if (HasSVE_AES2)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE_AES2", Value: "1");

  if (HasSSVE_AES)
    Builder.defineMacro(Name: "__ARM_FEATURE_SSVE_AES", Value: "1");

  if (HasSVE2p2)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE2p2", Value: "1");

  if (HasSME2p2)
    Builder.defineMacro(Name: "__ARM_FEATURE_SME2p2", Value: "1");

  if (HasFMV)
    Builder.defineMacro(Name: "__HAVE_FUNCTION_MULTI_VERSIONING", Value: "1");

  // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
  // macros for AES, SHA2, SHA3 and SM4
  if (HasAES && HasSHA2)
    Builder.defineMacro(Name: "__ARM_FEATURE_CRYPTO", Value: "1");

  if (HasAES)
    Builder.defineMacro(Name: "__ARM_FEATURE_AES", Value: "1");

  if (HasSHA2)
    Builder.defineMacro(Name: "__ARM_FEATURE_SHA2", Value: "1");

  if (HasSHA3) {
    Builder.defineMacro(Name: "__ARM_FEATURE_SHA3", Value: "1");
    Builder.defineMacro(Name: "__ARM_FEATURE_SHA512", Value: "1");
  }

  if (HasSM4) {
    Builder.defineMacro(Name: "__ARM_FEATURE_SM3", Value: "1");
    Builder.defineMacro(Name: "__ARM_FEATURE_SM4", Value: "1");
  }

  if (HasPAuth)
    Builder.defineMacro(Name: "__ARM_FEATURE_PAUTH", Value: "1");

  if (HasPAuthLR)
    Builder.defineMacro(Name: "__ARM_FEATURE_PAUTH_LR", Value: "1");

  if (HasBTI)
    Builder.defineMacro(Name: "__ARM_FEATURE_BTI", Value: "1");

  if (HasUnalignedAccess)
    Builder.defineMacro(Name: "__ARM_FEATURE_UNALIGNED", Value: "1");

  // FP16 vector arithmetic additionally requires NEON; scalar does not.
  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro(Name: "__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", Value: "1");
  if (HasFullFP16)
    Builder.defineMacro(Name: "__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", Value: "1");

  if (HasDotProd)
    Builder.defineMacro(Name: "__ARM_FEATURE_DOTPROD", Value: "1");

  if (HasMTE)
    Builder.defineMacro(Name: "__ARM_FEATURE_MEMORY_TAGGING", Value: "1");

  if (HasMatMul)
    Builder.defineMacro(Name: "__ARM_FEATURE_MATMUL_INT8", Value: "1");

  if (HasLSE)
    Builder.defineMacro(Name: "__ARM_FEATURE_ATOMICS", Value: "1");

  if (HasBFloat16) {
    Builder.defineMacro(Name: "__ARM_FEATURE_BF16", Value: "1");
    Builder.defineMacro(Name: "__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", Value: "1");
    Builder.defineMacro(Name: "__ARM_BF16_FORMAT_ALTERNATIVE", Value: "1");
    Builder.defineMacro(Name: "__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", Value: "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE_BF16", Value: "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_FP64", Value: "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_FP32", Value: "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE_MATMUL_INT8", Value: "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro(Name: "__ARM_FEATURE_FP16_FML", Value: "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    // 3: Protection using PC as a diversifier
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    if (Opts.BranchProtectionPAuthLR)
      Value |= (1 << 3);

    Builder.defineMacro(Name: "__ARM_FEATURE_PAC_DEFAULT", Value: std::to_string(val: Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro(Name: "__ARM_FEATURE_BTI_DEFAULT", Value: "1");

  if (Opts.GuardedControlStack)
    Builder.defineMacro(Name: "__ARM_FEATURE_GCS_DEFAULT", Value: "1");

  if (HasLS64)
    Builder.defineMacro(Name: "__ARM_FEATURE_LS64", Value: "1");

  if (HasRandGen)
    Builder.defineMacro(Name: "__ARM_FEATURE_RNG", Value: "1");

  if (HasMOPS)
    Builder.defineMacro(Name: "__ARM_FEATURE_MOPS", Value: "1");

  if (HasD128)
    Builder.defineMacro(Name: "__ARM_FEATURE_SYSREG128", Value: "1");

  if (HasGCS)
    Builder.defineMacro(Name: "__ARM_FEATURE_GCS", Value: "1");

  // Dispatch to the matching per-architecture macro set; each helper chains
  // down through the older levels it implies.
  if (*ArchInfo == llvm::AArch64::ARMV8_1A)
    getTargetDefinesARMV81A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
    getTargetDefinesARMV82A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
    getTargetDefinesARMV83A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
    getTargetDefinesARMV84A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
    getTargetDefinesARMV85A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
    getTargetDefinesARMV86A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
    getTargetDefinesARMV87A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
    getTargetDefinesARMV88A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
    getTargetDefinesARMV89A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9A)
    getTargetDefinesARMV9A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
    getTargetDefinesARMV91A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
    getTargetDefinesARMV92A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
    getTargetDefinesARMV93A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
    getTargetDefinesARMV94A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
    getTargetDefinesARMV95A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_6A)
    getTargetDefinesARMV96A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_7A)
    getTargetDefinesARMV97A(Opts, Builder);

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
  Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
  Builder.defineMacro(Name: "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");

  // Allow detection of fast FMA support.
  Builder.defineMacro(Name: "__FP_FAST_FMA", Value: "1");
  Builder.defineMacro(Name: "__FP_FAST_FMAF", Value: "1");

  // C/C++ operators work on both VLS and VLA SVE types
  if (FPU & SveMode)
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE_VECTOR_OPERATORS", Value: "2");

  // With -msve-vector-bits=<N> (fixed vscale), expose the vector length in
  // bits so arm_sve.h can enable fixed-length types.
  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro(Name: "__ARM_FEATURE_SVE_BITS", Value: Twine(Opts.VScaleMin * 128));
  }
}
773
/// Return the builtin tables for this target as a sequence of shards.
/// The shard order must match the builtin ID layout asserted at the top of
/// this file: NEON, NEON FP16, SVE, SVE/NEON bridge, SME, then the
/// AArch64-specific tables (unprefixed and "__builtin_arm_"-prefixed).
llvm::SmallVector<Builtin::InfosShard>
AArch64TargetInfo::getTargetBuiltins() const {
  return {
      {.Strings: &NEON::BuiltinStrings, .Infos: NEON::BuiltinInfos, .NamePrefix: "__builtin_neon_"},
      {.Strings: &NEON::FP16::BuiltinStrings, .Infos: NEON::FP16::BuiltinInfos,
       .NamePrefix: "__builtin_neon_"},
      {.Strings: &SVE::BuiltinStrings, .Infos: SVE::BuiltinInfos, .NamePrefix: "__builtin_sve_"},
      {.Strings: &BuiltinSVENeonBridgeStrings, .Infos: BuiltinSVENeonBridgeInfos},
      {.Strings: &SME::BuiltinStrings, .Infos: SME::BuiltinInfos, .NamePrefix: "__builtin_sme_"},
      {.Strings: &AArch64::BuiltinStrings, .Infos: AArch64::BuiltinInfos},
      {.Strings: &AArch64::BuiltinStrings, .Infos: AArch64::PrefixedBuiltinInfos,
       .NamePrefix: "__builtin_arm_"},
  };
}
788
/// Compute the [min, max] vscale range for a function in the given streaming
/// mode. Explicit -mvscale-{min,max} / -mvscale-streaming-{min,max} options
/// take precedence over the architectural default of [1, 16] implied by
/// SVE (or by SME when in streaming mode). Returns std::nullopt when no
/// range can be established.
std::optional<std::pair<unsigned, unsigned>>
AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts,
                                  ArmStreamingKind Mode,
                                  llvm::StringMap<bool> *FeatureMap) const {
  // Non-streaming code: honour the plain vscale options (unset halves
  // default to 1 / 16).
  if (Mode == ArmStreamingKind::NotStreaming &&
      (LangOpts.VScaleMin || LangOpts.VScaleMax))
    return std::pair<unsigned, unsigned>(
        LangOpts.VScaleMin ? LangOpts.VScaleMin : 1,
        LangOpts.VScaleMax ? LangOpts.VScaleMax : 16);

  // Streaming code: honour the streaming-specific vscale options.
  if (Mode == ArmStreamingKind::Streaming &&
      (LangOpts.VScaleStreamingMin || LangOpts.VScaleStreamingMax))
    return std::pair<unsigned, unsigned>(
        LangOpts.VScaleStreamingMin ? LangOpts.VScaleStreamingMin : 1,
        LangOpts.VScaleStreamingMax ? LangOpts.VScaleStreamingMax : 16);

  // Streaming-compatible code must be valid in both modes, so take the union
  // of the two ranges — and only when both modes constrain the same bound.
  if (Mode == ArmStreamingKind::StreamingCompatible &&
      ((LangOpts.VScaleMin && LangOpts.VScaleStreamingMin) ||
       (LangOpts.VScaleMax && LangOpts.VScaleStreamingMax))) {
    unsigned Min =
        std::min(a: LangOpts.VScaleMin ? LangOpts.VScaleMin : 1,
                 b: LangOpts.VScaleStreamingMin ? LangOpts.VScaleStreamingMin : 1);
    unsigned Max = std::max(
        a: LangOpts.VScaleMax ? LangOpts.VScaleMax : 16,
        b: LangOpts.VScaleStreamingMax ? LangOpts.VScaleStreamingMax : 16);
    return std::pair(Min, Max);
  }

  // No explicit options: SVE guarantees vscale in [1, 16].
  if (hasFeature(Feature: "sve") || (FeatureMap && (FeatureMap->lookup(Key: "sve"))))
    return std::pair<unsigned, unsigned>(1, 16);

  // In streaming mode, SME provides the same architectural guarantee.
  if (Mode == ArmStreamingKind::Streaming &&
      (hasFeature(Feature: "sme") || (FeatureMap && (FeatureMap->lookup(Key: "sme")))))
    return std::pair<unsigned, unsigned>(1, 16);

  return std::nullopt;
}
826
/// Compute the function-multi-versioning selection priority for a feature
/// set; higher values are preferred at dispatch time. Delegates to the
/// shared TargetParser implementation.
llvm::APInt
AArch64TargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const {
  return llvm::AArch64::getFMVPriority(Features);
}
831
/// Whether the named FMV feature changes generated code.
/// Returns true only for recognized FMV extensions that map to a backend
/// feature (Ext->ID); unknown names return false.
bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
  // FMV extensions which imply no backend features do not affect codegen.
  if (auto Ext = llvm::AArch64::parseFMVExtension(Extension: Name))
    return Ext->ID.has_value();
  return false;
}
838
839bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
840 // CPU features might be separated by '+', extract them and check
841 llvm::SmallVector<StringRef, 8> Features;
842 FeatureStr.split(A&: Features, Separator: "+");
843 for (auto &Feature : Features)
844 if (!llvm::AArch64::parseFMVExtension(Extension: Feature.trim()).has_value())
845 return false;
846 return true;
847}
848
/// A helper class for "hasFeature" lookups (mimicking a StringSwitch).
/// Instead of answering one query, it populates an AArch64FeatureSet with the
/// names of all features that are currently enabled, so later lookups are a
/// set membership test.
struct FeatureLookupBuilder {
  // Takes ownership of (re)filling \p Features; any previous contents are
  // discarded.
  FeatureLookupBuilder(AArch64FeatureSet &Features) : Features(Features) {
    Features.clear();
  }

  /// Record \p Feat as available when \p HasFeature is true.
  FeatureLookupBuilder &Case(StringRef Feat, bool HasFeature) {
    if (HasFeature)
      Features.insert(V: Feat);
    return *this;
  }

  /// Record every name in \p Feats as available when \p HasFeature is true.
  FeatureLookupBuilder &Cases(ArrayRef<StringRef> Feats, bool HasFeature) {
    if (HasFeature)
      Features.insert_range(R&: Feats);
    return *this;
  }

private:
  AArch64FeatureSet &Features;
};
870
// Rebuild HasFeatureLookup, the set of names hasFeature() reports as
// available, from the current FPU/flag state. Called at the end of
// handleTargetFeatures() once all target features have been processed.
void AArch64TargetInfo::computeFeatureLookup() {
  FeatureLookupBuilder(HasFeatureLookup)
      // Target identifiers that are always present.
      .Cases(Feats: {"aarch64", "arm64", "arm"}, HasFeature: true)
      .Case(Feat: "fmv", HasFeature: HasFMV)
      // Scalar FP / Advanced SIMD.
      .Case(Feat: "fp", HasFeature: FPU & FPUMode)
      .Cases(Feats: {"neon", "simd"}, HasFeature: FPU & NeonMode)
      .Case(Feat: "jscvt", HasFeature: HasJSCVT)
      .Case(Feat: "fcma", HasFeature: HasFCMA)
      .Case(Feat: "rng", HasFeature: HasRandGen)
      .Case(Feat: "flagm", HasFeature: HasFlagM)
      .Case(Feat: "flagm2", HasFeature: HasAlternativeNZCV)
      .Case(Feat: "fp16fml", HasFeature: HasFP16FML)
      .Case(Feat: "dotprod", HasFeature: HasDotProd)
      .Case(Feat: "sm4", HasFeature: HasSM4)
      .Case(Feat: "rdm", HasFeature: HasRDM)
      .Case(Feat: "lse", HasFeature: HasLSE)
      .Case(Feat: "crc", HasFeature: HasCRC)
      .Case(Feat: "cssc", HasFeature: HasCSSC)
      .Case(Feat: "sha2", HasFeature: HasSHA2)
      .Case(Feat: "sha3", HasFeature: HasSHA3)
      .Cases(Feats: {"aes", "pmull"}, HasFeature: HasAES)
      .Cases(Feats: {"fp16", "fullfp16"}, HasFeature: HasFullFP16)
      .Case(Feat: "dit", HasFeature: HasDIT)
      .Case(Feat: "dpb", HasFeature: HasCCPP)
      .Case(Feat: "dpb2", HasFeature: HasCCDP)
      .Case(Feat: "rcpc", HasFeature: HasRCPC)
      .Case(Feat: "frintts", HasFeature: HasFRInt3264)
      .Case(Feat: "i8mm", HasFeature: HasMatMul)
      .Case(Feat: "bf16", HasFeature: HasBFloat16)
      // SVE and its extensions; most require SveMode in addition to the flag.
      .Case(Feat: "sve", HasFeature: FPU & SveMode)
      .Case(Feat: "sve-b16b16", HasFeature: HasSVEB16B16)
      .Case(Feat: "f32mm", HasFeature: FPU & SveMode && HasMatmulFP32)
      .Case(Feat: "f64mm", HasFeature: FPU & SveMode && HasMatmulFP64)
      .Case(Feat: "sve2", HasFeature: FPU & SveMode && HasSVE2)
      .Case(Feat: "sve-aes", HasFeature: HasSVEAES)
      // NOTE(review): `FPU & HasSVEBitPerm` bitwise-ANDs FPU with a bool,
      // unlike the `FPU & SveMode && Has...` pattern of neighboring entries.
      // Presumably intended as either `HasSVEBitPerm` alone or
      // `FPU & SveMode && HasSVEBitPerm` — confirm against upstream.
      .Case(Feat: "sve-bitperm", HasFeature: FPU & HasSVEBitPerm)
      .Case(Feat: "sve2-sha3", HasFeature: FPU & SveMode && HasSVE2SHA3)
      .Case(Feat: "sve2-sm4", HasFeature: FPU & SveMode && HasSVE2SM4)
      .Case(Feat: "sve2p1", HasFeature: FPU & SveMode && HasSVE2p1)
      // SME and its extensions.
      .Case(Feat: "sme", HasFeature: HasSME)
      .Case(Feat: "sme2", HasFeature: HasSME2)
      .Case(Feat: "sme2p1", HasFeature: HasSME2p1)
      .Case(Feat: "sme-f64f64", HasFeature: HasSMEF64F64)
      .Case(Feat: "sme-i16i64", HasFeature: HasSMEI16I64)
      .Case(Feat: "sme-fa64", HasFeature: HasSMEFA64)
      .Case(Feat: "sme-f16f16", HasFeature: HasSMEF16F16)
      .Case(Feat: "sme-b16b16", HasFeature: HasSMEB16B16)
      // Miscellaneous architecture extensions.
      .Case(Feat: "memtag", HasFeature: HasMTE)
      .Case(Feat: "sb", HasFeature: HasSB)
      .Case(Feat: "predres", HasFeature: HasPredRes)
      .Cases(Feats: {"ssbs", "ssbs2"}, HasFeature: HasSSBS)
      .Case(Feat: "bti", HasFeature: HasBTI)
      .Cases(Feats: {"ls64", "ls64_v", "ls64_accdata"}, HasFeature: HasLS64)
      .Case(Feat: "wfxt", HasFeature: HasWFxT)
      .Case(Feat: "rcpc3", HasFeature: HasRCPC3)
      // FP8 family.
      .Case(Feat: "fp8", HasFeature: HasFP8)
      .Case(Feat: "fp8fma", HasFeature: HasFP8FMA)
      .Case(Feat: "fp8dot2", HasFeature: HasFP8DOT2)
      .Case(Feat: "fp8dot4", HasFeature: HasFP8DOT4)
      .Case(Feat: "ssve-fp8dot2", HasFeature: HasSSVE_FP8DOT2)
      .Case(Feat: "ssve-fp8dot4", HasFeature: HasSSVE_FP8DOT4)
      .Case(Feat: "ssve-fp8fma", HasFeature: HasSSVE_FP8FMA)
      .Case(Feat: "sme-f8f32", HasFeature: HasSME_F8F32)
      .Case(Feat: "sme-f8f16", HasFeature: HasSME_F8F16)
      .Case(Feat: "fprcvt", HasFeature: HasFPRCVT)
      .Case(Feat: "f8f16mm", HasFeature: HasF8F16MM)
      .Case(Feat: "f8f32mm", HasFeature: HasF8F32MM)
      .Case(Feat: "sve-f16f32mm", HasFeature: HasSVE_F16F32MM)
      .Case(Feat: "sve-bfscale", HasFeature: HasSVE_BFSCALE)
      .Case(Feat: "sve-aes2", HasFeature: HasSVE_AES2)
      .Case(Feat: "ssve-aes", HasFeature: HasSSVE_AES)
      .Case(Feat: "sve2p2", HasFeature: FPU & SveMode && HasSVE2p2)
      .Case(Feat: "sme2p2", HasFeature: HasSME2p2);
}
945
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  // Answered from the set precomputed by computeFeatureLookup().
  return HasFeatureLookup.contains(V: Feature);
}
949
950void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
951 StringRef Name, bool Enabled) const {
952 Features[Name] = Enabled;
953 // If the feature is an architecture feature (like v8.2a), add all previous
954 // architecture versions and any dependant target features.
955 const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
956 llvm::AArch64::ArchInfo::findBySubArch(SubArch: Name);
957
958 if (!ArchInfo)
959 return; // Not an architecture, nothing more to do.
960
961 // Disabling an architecture feature does not affect dependent features
962 if (!Enabled)
963 return;
964
965 for (const auto *OtherArch : llvm::AArch64::ArchInfos)
966 if (ArchInfo->implies(Other: *OtherArch))
967 Features[OtherArch->getSubArch()] = true;
968
969 // Set any features implied by the architecture
970 std::vector<StringRef> CPUFeats;
971 if (llvm::AArch64::getExtensionFeatures(Extensions: ArchInfo->DefaultExts, Features&: CPUFeats)) {
972 for (auto F : CPUFeats) {
973 assert(F[0] == '+' && "Expected + in target feature!");
974 Features[F.drop_front(N: 1)] = true;
975 }
976 }
977}
978
979bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
980 DiagnosticsEngine &Diags) {
981 for (const auto &Feature : Features) {
982 if (Feature == "-fp-armv8")
983 HasNoFP = true;
984 if (Feature == "-neon")
985 HasNoNeon = true;
986 if (Feature == "-sve")
987 HasNoSVE = true;
988
989 if (Feature == "+neon" || Feature == "+fp-armv8")
990 FPU |= NeonMode;
991 if (Feature == "+jscvt") {
992 HasJSCVT = true;
993 FPU |= NeonMode;
994 }
995 if (Feature == "+fcma") {
996 HasFCMA = true;
997 FPU |= NeonMode;
998 }
999
1000 if (Feature == "+sve") {
1001 FPU |= NeonMode;
1002 FPU |= SveMode;
1003 HasFullFP16 = true;
1004 }
1005 if (Feature == "+sve2") {
1006 FPU |= NeonMode;
1007 FPU |= SveMode;
1008 HasFullFP16 = true;
1009 HasSVE2 = true;
1010 }
1011 if (Feature == "+sve2p1") {
1012 FPU |= NeonMode;
1013 FPU |= SveMode;
1014 HasFullFP16 = true;
1015 HasSVE2 = true;
1016 HasSVE2p1 = true;
1017 }
1018 if (Feature == "+sve-aes") {
1019 FPU |= NeonMode;
1020 HasFullFP16 = true;
1021 HasSVEAES = true;
1022 }
1023 if (Feature == "+sve2-sha3") {
1024 FPU |= NeonMode;
1025 FPU |= SveMode;
1026 HasFullFP16 = true;
1027 HasSVE2 = true;
1028 HasSVE2SHA3 = true;
1029 }
1030 if (Feature == "+sve2-sm4") {
1031 FPU |= NeonMode;
1032 FPU |= SveMode;
1033 HasFullFP16 = true;
1034 HasSVE2 = true;
1035 HasSVE2SM4 = true;
1036 }
1037 if (Feature == "+sve-b16b16")
1038 HasSVEB16B16 = true;
1039 if (Feature == "+sve-bitperm") {
1040 FPU |= NeonMode;
1041 HasFullFP16 = true;
1042 HasSVEBitPerm = true;
1043 }
1044 if (Feature == "+f32mm") {
1045 FPU |= NeonMode;
1046 FPU |= SveMode;
1047 HasFullFP16 = true;
1048 HasMatmulFP32 = true;
1049 }
1050 if (Feature == "+f64mm") {
1051 FPU |= NeonMode;
1052 FPU |= SveMode;
1053 HasFullFP16 = true;
1054 HasMatmulFP64 = true;
1055 }
1056 if (Feature == "+sme") {
1057 HasSME = true;
1058 HasBFloat16 = true;
1059 HasFullFP16 = true;
1060 }
1061 if (Feature == "+sme2") {
1062 HasSME = true;
1063 HasSME2 = true;
1064 HasBFloat16 = true;
1065 HasFullFP16 = true;
1066 }
1067 if (Feature == "+sme2p1") {
1068 HasSME = true;
1069 HasSME2 = true;
1070 HasSME2p1 = true;
1071 HasBFloat16 = true;
1072 HasFullFP16 = true;
1073 }
1074 if (Feature == "+sme-f64f64") {
1075 HasSME = true;
1076 HasSMEF64F64 = true;
1077 HasBFloat16 = true;
1078 HasFullFP16 = true;
1079 }
1080 if (Feature == "+sme-i16i64") {
1081 HasSME = true;
1082 HasSMEI16I64 = true;
1083 HasBFloat16 = true;
1084 HasFullFP16 = true;
1085 }
1086 if (Feature == "+sme-fa64") {
1087 FPU |= NeonMode;
1088 FPU |= SveMode;
1089 HasSME = true;
1090 HasSVE2 = true;
1091 HasSMEFA64 = true;
1092 }
1093 if (Feature == "+sme-f16f16") {
1094 HasSME = true;
1095 HasSME2 = true;
1096 HasBFloat16 = true;
1097 HasFullFP16 = true;
1098 HasSMEF16F16 = true;
1099 }
1100 if (Feature == "+sme-b16b16") {
1101 HasSME = true;
1102 HasSME2 = true;
1103 HasBFloat16 = true;
1104 HasFullFP16 = true;
1105 HasSVEB16B16 = true;
1106 HasSMEB16B16 = true;
1107 }
1108
1109 if (Feature == "+fp8")
1110 HasFP8 = true;
1111 if (Feature == "+fp8fma")
1112 HasFP8FMA = true;
1113 if (Feature == "+fp8dot2")
1114 HasFP8DOT2 = true;
1115 if (Feature == "+fp8dot4")
1116 HasFP8DOT4 = true;
1117 if (Feature == "+ssve-fp8dot2")
1118 HasSSVE_FP8DOT2 = true;
1119 if (Feature == "+ssve-fp8dot4")
1120 HasSSVE_FP8DOT4 = true;
1121 if (Feature == "+ssve-fp8fma")
1122 HasSSVE_FP8FMA = true;
1123 if (Feature == "+sme-f8f32")
1124 HasSME_F8F32 = true;
1125 if (Feature == "+sme-f8f16")
1126 HasSME_F8F16 = true;
1127 if (Feature == "+sb")
1128 HasSB = true;
1129 if (Feature == "+predres")
1130 HasPredRes = true;
1131 if (Feature == "+ssbs")
1132 HasSSBS = true;
1133 if (Feature == "+bti")
1134 HasBTI = true;
1135 if (Feature == "+wfxt")
1136 HasWFxT = true;
1137 if (Feature == "-fmv")
1138 HasFMV = false;
1139 if (Feature == "+crc")
1140 HasCRC = true;
1141 if (Feature == "+rcpc")
1142 HasRCPC = true;
1143 if (Feature == "+aes") {
1144 FPU |= NeonMode;
1145 HasAES = true;
1146 }
1147 if (Feature == "+sha2") {
1148 FPU |= NeonMode;
1149 HasSHA2 = true;
1150 }
1151 if (Feature == "+sha3") {
1152 FPU |= NeonMode;
1153 HasSHA2 = true;
1154 HasSHA3 = true;
1155 }
1156 if (Feature == "+rdm") {
1157 FPU |= NeonMode;
1158 HasRDM = true;
1159 }
1160 if (Feature == "+dit")
1161 HasDIT = true;
1162 if (Feature == "+cccp")
1163 HasCCPP = true;
1164 if (Feature == "+ccdp") {
1165 HasCCPP = true;
1166 HasCCDP = true;
1167 }
1168 if (Feature == "+fptoint")
1169 HasFRInt3264 = true;
1170 if (Feature == "+sm4") {
1171 FPU |= NeonMode;
1172 HasSM4 = true;
1173 }
1174 if (Feature == "+strict-align")
1175 HasUnalignedAccess = false;
1176 if (Feature == "+fprcvt")
1177 HasFPRCVT = true;
1178 if (Feature == "+f8f16mm")
1179 HasF8F16MM = true;
1180 if (Feature == "+f8f32mm")
1181 HasF8F32MM = true;
1182 if (Feature == "+sve-f16f32mm")
1183 HasSVE_F16F32MM = true;
1184 if (Feature == "+sve-bfscale")
1185 HasSVE_BFSCALE = true;
1186 if (Feature == "+sve-aes2")
1187 HasSVE_AES2 = true;
1188 if (Feature == "+ssve-aes")
1189 HasSSVE_AES = true;
1190 if (Feature == "+sve2p2")
1191 HasSVE2p2 = true;
1192 if (Feature == "+sme2p2")
1193 HasSME2p2 = true;
1194
1195 // All predecessor archs are added but select the latest one for ArchKind.
1196 if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
1197 ArchInfo = &llvm::AArch64::ARMV8A;
1198 if (Feature == "+v8.1a" &&
1199 ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
1200 ArchInfo = &llvm::AArch64::ARMV8_1A;
1201 if (Feature == "+v8.2a" &&
1202 ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
1203 ArchInfo = &llvm::AArch64::ARMV8_2A;
1204 if (Feature == "+v8.3a" &&
1205 ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
1206 ArchInfo = &llvm::AArch64::ARMV8_3A;
1207 if (Feature == "+v8.4a" &&
1208 ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
1209 ArchInfo = &llvm::AArch64::ARMV8_4A;
1210 if (Feature == "+v8.5a" &&
1211 ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
1212 ArchInfo = &llvm::AArch64::ARMV8_5A;
1213 if (Feature == "+v8.6a" &&
1214 ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
1215 ArchInfo = &llvm::AArch64::ARMV8_6A;
1216 if (Feature == "+v8.7a" &&
1217 ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
1218 ArchInfo = &llvm::AArch64::ARMV8_7A;
1219 if (Feature == "+v8.8a" &&
1220 ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
1221 ArchInfo = &llvm::AArch64::ARMV8_8A;
1222 if (Feature == "+v8.9a" &&
1223 ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
1224 ArchInfo = &llvm::AArch64::ARMV8_9A;
1225 if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
1226 ArchInfo = &llvm::AArch64::ARMV9A;
1227 if (Feature == "+v9.1a" &&
1228 ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
1229 ArchInfo = &llvm::AArch64::ARMV9_1A;
1230 if (Feature == "+v9.2a" &&
1231 ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
1232 ArchInfo = &llvm::AArch64::ARMV9_2A;
1233 if (Feature == "+v9.3a" &&
1234 ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
1235 ArchInfo = &llvm::AArch64::ARMV9_3A;
1236 if (Feature == "+v9.4a" &&
1237 ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
1238 ArchInfo = &llvm::AArch64::ARMV9_4A;
1239 if (Feature == "+v9.5a" &&
1240 ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
1241 ArchInfo = &llvm::AArch64::ARMV9_5A;
1242 if (Feature == "+v9.6a" &&
1243 ArchInfo->Version < llvm::AArch64::ARMV9_6A.Version)
1244 ArchInfo = &llvm::AArch64::ARMV9_6A;
1245 if (Feature == "+v9.7a" &&
1246 ArchInfo->Version < llvm::AArch64::ARMV9_7A.Version)
1247 ArchInfo = &llvm::AArch64::ARMV9_7A;
1248 if (Feature == "+v8r")
1249 ArchInfo = &llvm::AArch64::ARMV8R;
1250 if (Feature == "+fullfp16") {
1251 FPU |= NeonMode;
1252 HasFullFP16 = true;
1253 }
1254 if (Feature == "+dotprod") {
1255 FPU |= NeonMode;
1256 HasDotProd = true;
1257 }
1258 if (Feature == "+fp16fml") {
1259 FPU |= NeonMode;
1260 HasFullFP16 = true;
1261 HasFP16FML = true;
1262 }
1263 if (Feature == "+mte")
1264 HasMTE = true;
1265 if (Feature == "+pauth")
1266 HasPAuth = true;
1267 if (Feature == "+i8mm")
1268 HasMatMul = true;
1269 if (Feature == "+bf16")
1270 HasBFloat16 = true;
1271 if (Feature == "+lse")
1272 HasLSE = true;
1273 if (Feature == "+ls64")
1274 HasLS64 = true;
1275 if (Feature == "+rand")
1276 HasRandGen = true;
1277 if (Feature == "+flagm")
1278 HasFlagM = true;
1279 if (Feature == "+altnzcv") {
1280 HasFlagM = true;
1281 HasAlternativeNZCV = true;
1282 }
1283 if (Feature == "+mops")
1284 HasMOPS = true;
1285 if (Feature == "+d128")
1286 HasD128 = true;
1287 if (Feature == "+gcs")
1288 HasGCS = true;
1289 if (Feature == "+rcpc3")
1290 HasRCPC3 = true;
1291 if (Feature == "+pauth-lr") {
1292 HasPAuthLR = true;
1293 HasPAuth = true;
1294 }
1295 if (Feature == "+cssc")
1296 HasCSSC = true;
1297 }
1298
1299 // Check features that are manually disabled by command line options.
1300 // This needs to be checked after architecture-related features are handled,
1301 // making sure they are properly disabled when required.
1302 for (const auto &Feature : Features) {
1303 if (Feature == "-d128")
1304 HasD128 = false;
1305 }
1306
1307 resetDataLayout();
1308
1309 if (HasNoFP) {
1310 FPU &= ~FPUMode;
1311 FPU &= ~NeonMode;
1312 FPU &= ~SveMode;
1313 }
1314 if (HasNoNeon) {
1315 FPU &= ~NeonMode;
1316 FPU &= ~SveMode;
1317 }
1318 if (HasNoSVE)
1319 FPU &= ~SveMode;
1320
1321 computeFeatureLookup();
1322 return true;
1323}
1324
1325// Parse AArch64 Target attributes, which are a comma separated list of:
1326// "arch=<arch>" - parsed to features as per -march=..
1327// "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
1328// "tune=<cpu>" - TuneCPU set to <cpu>
1329// "feature", "no-feature" - Add (or remove) feature.
1330// "+feature", "+nofeature" - Add (or remove) feature.
1331//
1332// A feature may correspond to an Extension (anything with a corresponding
1333// AEK_), in which case an ExtensionSet is used to parse it and expand its
1334// dependencies. If the feature does not yield a successful parse then it
1335// is passed through.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  // "default" marks the fallback version in FMV; nothing to parse.
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(A&: AttrFeatures, Separator: ",");
  bool FoundArch = false;

  // Parses a "+feat1+feat2..." suffix: extensions go into FeatureBits,
  // anything unparseable is passed through verbatim into Features.
  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features,
                                llvm::AArch64::ExtensionSet &FeatureBits) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(A&: SplitFeatures, Separator: StringRef("+"), MaxSplit: -1, KeepEmpty: false);
    for (StringRef Feature : SplitFeatures) {
      if (FeatureBits.parseModifier(Modifier: Feature))
        continue;
      // Pass through anything that failed to parse so that we can emit
      // diagnostics, as well as valid internal feature names.
      //
      // FIXME: We should consider rejecting internal feature names like
      // neon, v8a, etc.
      // FIXME: We should consider emitting diagnostics here.
      if (Feature.starts_with(Prefix: "no"))
        Features.push_back(x: "-" + Feature.drop_front(N: 2).str());
      else
        Features.push_back(x: "+" + Feature.str());
    }
  };

  llvm::AArch64::ExtensionSet FeatureBits;
  // Reconstruct the bitset from the command line option features, so that
  // attribute features are merged on top of what -march/-mcpu established.
  FeatureBits.reconstructFromParsedFeatures(Features: getTargetOpts().FeaturesAsWritten,
                                            NonExtensions&: Ret.Features);

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    // "fpmath=" is accepted but has no effect on AArch64.
    if (Feature.starts_with(Prefix: "fpmath="))
      continue;

    if (Feature.starts_with(Prefix: "branch-protection=")) {
      Ret.BranchProtection = Feature.split(Separator: '=').second.trim();
      continue;
    }

    if (Feature.starts_with(Prefix: "arch=")) {
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split(Separator: "=").second.trim().split(Separator: "+");
      const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Arch: Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      FeatureBits.addArchDefaults(Arch: *AI);
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
    } else if (Feature.starts_with(Prefix: "cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split(Separator: "=").second.trim().split(Separator: "+");
        Ret.CPU = Split.first;
        if (auto CpuInfo = llvm::AArch64::parseCpu(Name: Ret.CPU)) {
          FeatureBits.addCPUDefaults(CPU: *CpuInfo);
          SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
        }
      }
    } else if (Feature.starts_with(Prefix: "tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split(Separator: "=").second.trim();
    } else if (Feature.starts_with(Prefix: "+")) {
      SplitAndAddFeatures(Feature, Ret.Features, FeatureBits);
    } else {
      // Bare "feature" / "no-feature" spellings (no leading '+').
      if (FeatureBits.parseModifier(Modifier: Feature, /* AllowNoDashForm = */ true))
        continue;
      // Pass through anything that failed to parse so that we can emit
      // diagnostics, as well as valid internal feature names.
      //
      // FIXME: We should consider rejecting internal feature names like
      // neon, v8a, etc.
      // FIXME: We should consider emitting diagnostics here.
      if (Feature.starts_with(Prefix: "no-"))
        Ret.Features.push_back(x: "-" + Feature.drop_front(N: 3).str());
      else
        Ret.Features.push_back(x: "+" + Feature.str());
    }
  }
  // Lower the accumulated extension bits into the final feature list.
  FeatureBits.toLLVMFeatureList(Features&: Ret.Features);
  return Ret;
}
1434
bool AArch64TargetInfo::hasBFloat16Type() const {
  // The __bf16 type is always available on AArch64.
  return true;
}
1438
1439TargetInfo::CallingConvCheckResult
1440AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
1441 switch (CC) {
1442 case CC_C:
1443 case CC_Swift:
1444 case CC_SwiftAsync:
1445 case CC_PreserveMost:
1446 case CC_PreserveAll:
1447 case CC_PreserveNone:
1448 case CC_DeviceKernel:
1449 case CC_AArch64VectorCall:
1450 case CC_AArch64SVEPCS:
1451 case CC_Win64:
1452 return CCCR_OK;
1453 default:
1454 return CCCR_Warning;
1455 }
1456}
1457
// Returning false means __builtin_clz(0) has a defined result on this target.
bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1459
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  // AAPCS64 defines its own va_list struct, so use the dedicated kind.
  return TargetInfo::AArch64ABIBuiltinVaList;
}
1463
// Register names accepted in GCC-style inline assembly, grouped by class.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // clang-format off

    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
    "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",

    // SME registers
    "za", "zt0",

    // clang-format on
};
1510
1511ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
1512 return llvm::ArrayRef(GCCRegNames);
1513}
1514
// Alternate spellings accepted in inline-assembly register constraints; each
// entry maps the alias names to the canonical register name above.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {.Aliases: {"w31"}, .Register: "wsp"},
    {.Aliases: {"x31"}, .Register: "sp"},
    // GCC rN registers are aliases of xN registers.
    {.Aliases: {"r0"}, .Register: "x0"},
    {.Aliases: {"r1"}, .Register: "x1"},
    {.Aliases: {"r2"}, .Register: "x2"},
    {.Aliases: {"r3"}, .Register: "x3"},
    {.Aliases: {"r4"}, .Register: "x4"},
    {.Aliases: {"r5"}, .Register: "x5"},
    {.Aliases: {"r6"}, .Register: "x6"},
    {.Aliases: {"r7"}, .Register: "x7"},
    {.Aliases: {"r8"}, .Register: "x8"},
    {.Aliases: {"r9"}, .Register: "x9"},
    {.Aliases: {"r10"}, .Register: "x10"},
    {.Aliases: {"r11"}, .Register: "x11"},
    {.Aliases: {"r12"}, .Register: "x12"},
    {.Aliases: {"r13"}, .Register: "x13"},
    {.Aliases: {"r14"}, .Register: "x14"},
    {.Aliases: {"r15"}, .Register: "x15"},
    {.Aliases: {"r16"}, .Register: "x16"},
    {.Aliases: {"r17"}, .Register: "x17"},
    {.Aliases: {"r18"}, .Register: "x18"},
    {.Aliases: {"r19"}, .Register: "x19"},
    {.Aliases: {"r20"}, .Register: "x20"},
    {.Aliases: {"r21"}, .Register: "x21"},
    {.Aliases: {"r22"}, .Register: "x22"},
    {.Aliases: {"r23"}, .Register: "x23"},
    {.Aliases: {"r24"}, .Register: "x24"},
    {.Aliases: {"r25"}, .Register: "x25"},
    {.Aliases: {"r26"}, .Register: "x26"},
    {.Aliases: {"r27"}, .Register: "x27"},
    {.Aliases: {"r28"}, .Register: "x28"},
    {.Aliases: {"r29", "x29"}, .Register: "fp"},
    {.Aliases: {"r30", "x30"}, .Register: "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
1553
1554ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
1555 return llvm::ArrayRef(GCCRegAliases);
1556}
1557
1558// Returns the length of cc constraint.
1559static unsigned matchAsmCCConstraint(const char *Name) {
1560 constexpr unsigned len = 5;
1561 auto RV = llvm::StringSwitch<unsigned>(Name)
1562 .Case(S: "@cceq", Value: len)
1563 .Case(S: "@ccne", Value: len)
1564 .Case(S: "@cchs", Value: len)
1565 .Case(S: "@cccs", Value: len)
1566 .Case(S: "@cccc", Value: len)
1567 .Case(S: "@cclo", Value: len)
1568 .Case(S: "@ccmi", Value: len)
1569 .Case(S: "@ccpl", Value: len)
1570 .Case(S: "@ccvs", Value: len)
1571 .Case(S: "@ccvc", Value: len)
1572 .Case(S: "@cchi", Value: len)
1573 .Case(S: "@ccls", Value: len)
1574 .Case(S: "@ccge", Value: len)
1575 .Case(S: "@cclt", Value: len)
1576 .Case(S: "@ccgt", Value: len)
1577 .Case(S: "@ccle", Value: len)
1578 .Default(Value: 0);
1579 return RV;
1580}
1581
std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    // Leave Constraint on the last consumed character; the caller advances
    // past it.
    Constraint += 2;
    break;
  case '@':
    // "@cc<cond>" condition-code outputs are wrapped as "{@cc<cond>}".
    if (const unsigned Len = matchAsmCCConstraint(Name: Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1; // Again, stop on the last consumed character.
      return Converted;
    }
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}
1603
1604bool AArch64TargetInfo::validateAsmConstraint(
1605 const char *&Name, TargetInfo::ConstraintInfo &Info) const {
1606 switch (*Name) {
1607 default:
1608 return false;
1609 case 'w': // Floating point and SIMD registers (V0-V31)
1610 Info.setAllowsRegister();
1611 return true;
1612 case 'I': // Constant that can be used with an ADD instruction
1613 case 'J': // Constant that can be used with a SUB instruction
1614 case 'K': // Constant that can be used with a 32-bit logical instruction
1615 case 'L': // Constant that can be used with a 64-bit logical instruction
1616 case 'M': // Constant that can be used as a 32-bit MOV immediate
1617 case 'N': // Constant that can be used as a 64-bit MOV immediate
1618 case 'Y': // Floating point constant zero
1619 case 'Z': // Integer constant zero
1620 return true;
1621 case 'Q': // A memory reference with base register and no offset
1622 Info.setAllowsMemory();
1623 return true;
1624 case 'S': // A symbolic address
1625 Info.setAllowsRegister();
1626 return true;
1627 case 'U':
1628 if (Name[1] == 'p' &&
1629 (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
1630 // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
1631 Info.setAllowsRegister();
1632 Name += 2;
1633 return true;
1634 }
1635 if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
1636 // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
1637 Info.setAllowsRegister();
1638 Name += 2;
1639 return true;
1640 }
1641 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
1642 // Utf: A memory address suitable for ldp/stp in TF mode.
1643 // Usa: An absolute symbolic address.
1644 // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
1645
1646 // Better to return an error saying that it's an unrecognised constraint
1647 // even if this is a valid constraint in gcc.
1648 return false;
1649 case 'z': // Zero register, wzr or xzr
1650 Info.setAllowsRegister();
1651 return true;
1652 case 'x': // Floating point and SIMD registers (V0-V15)
1653 Info.setAllowsRegister();
1654 return true;
1655 case 'y': // SVE registers (V0-V7)
1656 Info.setAllowsRegister();
1657 return true;
1658 case '@':
1659 // CC condition
1660 if (const unsigned Len = matchAsmCCConstraint(Name)) {
1661 Name += Len - 1;
1662 Info.setAllowsRegister();
1663 Info.setOutputOperandBounds(Min: 0, Max: 2);
1664 return true;
1665 }
1666 }
1667 return false;
1668}
1669
1670bool AArch64TargetInfo::validateConstraintModifier(
1671 StringRef Constraint, char Modifier, unsigned Size,
1672 std::string &SuggestedModifier) const {
1673 // Strip off constraint modifiers.
1674 Constraint = Constraint.ltrim(Chars: "=+&");
1675
1676 switch (Constraint[0]) {
1677 default:
1678 return true;
1679 case 'z':
1680 case 'r': {
1681 switch (Modifier) {
1682 case 'x':
1683 case 'w':
1684 // For now assume that the person knows what they're
1685 // doing with the modifier.
1686 return true;
1687 default:
1688 // By default an 'r' constraint will be in the 'x'
1689 // registers.
1690 if (Size == 64)
1691 return true;
1692
1693 if (Size == 512)
1694 return HasLS64;
1695
1696 SuggestedModifier = "w";
1697 return false;
1698 }
1699 }
1700 }
1701}
1702
// No target-wide implicit clobber list is needed for AArch64 inline assembly.
std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1704
1705int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1706 if (RegNo == 0)
1707 return 0;
1708 if (RegNo == 1)
1709 return 1;
1710 return -1;
1711}
1712
1713bool AArch64TargetInfo::validatePointerAuthKey(
1714 const llvm::APSInt &value) const {
1715 return 0 <= value && value <= 3;
1716}
1717
// __int128 is always available on AArch64.
bool AArch64TargetInfo::hasInt128Type() const { return true; }
1719
// Little-endian AArch64: all configuration is inherited from the base class.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1723
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Little-endian marker, then the common AArch64 macros.
  Builder.defineMacro(Name: "__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1729
// Big-endian AArch64: all configuration is inherited from the base class.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1733
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Big-endian markers, then the common AArch64 macros.
  Builder.defineMacro(Name: "__AARCH64EB__");
  Builder.defineMacro(Name: "__AARCH_BIG_ENDIAN");
  Builder.defineMacro(Name: "__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1741
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // 'long double' has the same 64-bit IEEE format as 'double' on Windows.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // Pointer-width and maximum-width integer types are all 'long long'.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
1759
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  // Windows uses a plain char* va_list, not the AAPCS64 struct.
  return TargetInfo::CharPtrBuiltinVaList;
}
1764
1765TargetInfo::CallingConvCheckResult
1766WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
1767 switch (CC) {
1768 case CC_X86VectorCall:
1769 if (getTriple().isWindowsArm64EC())
1770 return CCCR_OK;
1771 return CCCR_Ignore;
1772 case CC_X86StdCall:
1773 case CC_X86ThisCall:
1774 case CC_X86FastCall:
1775 return CCCR_Ignore;
1776 case CC_C:
1777 case CC_DeviceKernel:
1778 case CC_PreserveMost:
1779 case CC_PreserveAll:
1780 case CC_PreserveNone:
1781 case CC_Swift:
1782 case CC_SwiftAsync:
1783 case CC_Win64:
1784 return CCCR_OK;
1785 default:
1786 return CCCR_Warning;
1787 }
1788}
1789
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  // MSVC environments use the Microsoft C++ ABI.
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
1795
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    // Arm64EC defines the x64 compatibility macros alongside _M_ARM64EC.
    Builder.defineMacro(Name: "_M_X64", Value: "100");
    Builder.defineMacro(Name: "_M_AMD64", Value: "100");
    Builder.defineMacro(Name: "_M_ARM64EC", Value: "1");
  } else {
    Builder.defineMacro(Name: "_M_ARM64", Value: "1");
  }
}
1807
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  // The MSVC Win64 convention is used unconditionally; the -fclang-abi-compat
  // setting (ClangABICompat4) has no effect here.
  return CCK_MicrosoftWin64;
}
1812
1813unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize,
1814 bool HasNonWeakDef) const {
1815 unsigned Align =
1816 WindowsARM64TargetInfo::getMinGlobalAlign(Size: TypeSize, HasNonWeakDef);
1817
1818 // MSVC does size based alignment for arm64 based on alignment section in
1819 // below document, replicate that to keep alignment consistent with object
1820 // files compiled by MSVC.
1821 // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
1822 if (TypeSize >= 512) { // TypeSize >= 64 bytes
1823 Align = std::max(a: Align, b: 128u); // align type at least 16 bytes
1824 } else if (TypeSize >= 64) { // TypeSize >= 8 bytes
1825 Align = std::max(a: Align, b: 64u); // align type at least 8 butes
1826 } else if (TypeSize >= 16) { // TypeSize >= 2 bytes
1827 Align = std::max(a: Align, b: 32u); // align type at least 4 bytes
1828 }
1829 return Align;
1830}
1831
1832MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
1833 const TargetOptions &Opts)
1834 : WindowsARM64TargetInfo(Triple, Opts) {
1835 TheCXXABI.set(TargetCXXABI::GenericAArch64);
1836}
1837
1838AppleMachOAArch64TargetInfo::AppleMachOAArch64TargetInfo(
1839 const llvm::Triple &Triple, const TargetOptions &Opts)
1840 : AppleMachOTargetInfo<AArch64leTargetInfo>(Triple, Opts) {}
1841
1842DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
1843 const TargetOptions &Opts)
1844 : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
1845 Int64Type = SignedLongLong;
1846 if (getTriple().isArch32Bit())
1847 IntMaxType = SignedLongLong;
1848
1849 WCharType = SignedInt;
1850 UseSignedCharForObjCBool = false;
1851
1852 LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
1853 LongDoubleFormat = &llvm::APFloat::IEEEdouble();
1854
1855 UseZeroLengthBitfieldAlignment = false;
1856
1857 if (getTriple().isArch32Bit()) {
1858 UseBitFieldTypeAlignment = false;
1859 ZeroLengthBitfieldBoundary = 32;
1860 UseZeroLengthBitfieldAlignment = true;
1861 TheCXXABI.set(TargetCXXABI::WatchOS);
1862 } else
1863 TheCXXABI.set(TargetCXXABI::AppleARM64);
1864}
1865
1866void clang::targets::getAppleMachOAArch64Defines(MacroBuilder &Builder,
1867 const LangOptions &Opts,
1868 const llvm::Triple &Triple) {
1869 Builder.defineMacro(Name: "__AARCH64_SIMD__");
1870 if (Triple.isArch32Bit())
1871 Builder.defineMacro(Name: "__ARM64_ARCH_8_32__");
1872 else
1873 Builder.defineMacro(Name: "__ARM64_ARCH_8__");
1874 Builder.defineMacro(Name: "__ARM_NEON__");
1875 Builder.defineMacro(Name: "__REGISTER_PREFIX__", Value: "");
1876 Builder.defineMacro(Name: "__arm64", Value: "1");
1877 Builder.defineMacro(Name: "__arm64__", Value: "1");
1878
1879 if (Triple.isArm64e()) {
1880 Builder.defineMacro(Name: "__arm64e__", Value: "1");
1881 Builder.defineMacro(Name: "__PTRAUTH_INTRINSICS__", Value: "1");
1882 }
1883}
1884
1885void AppleMachOAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
1886 const llvm::Triple &Triple,
1887 MacroBuilder &Builder) const {
1888 getAppleMachOAArch64Defines(Builder, Opts, Triple);
1889 AppleMachOTargetInfo<AArch64leTargetInfo>::getOSDefines(Opts, Triple,
1890 Builder);
1891}
1892
1893void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
1894 const llvm::Triple &Triple,
1895 MacroBuilder &Builder) const {
1896 getAppleMachOAArch64Defines(Builder, Opts, Triple);
1897 DarwinTargetInfo<AArch64leTargetInfo>::getOSDefines(Opts, Triple, Builder);
1898}
1899
1900TargetInfo::BuiltinVaListKind
1901DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
1902 return TargetInfo::CharPtrBuiltinVaList;
1903}
1904