//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements AArch64 TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/ARMTargetParserCommon.h"
#include <optional>

using namespace clang;
using namespace clang::targets;

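// Builtin IDs for each extension are allocated as contiguous ranges within
// the target's builtin ID space, so the per-extension counts below can be
// computed by subtracting the first ID of one range from the first ID of the
// next. The static_assert checks that the ranges tile the whole space.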
static constexpr int NumNeonBuiltins =
    NEON::FirstFp16Builtin - Builtin::FirstTSBuiltin;
static constexpr int NumFp16Builtins =
    NEON::FirstTSBuiltin - NEON::FirstFp16Builtin;
static constexpr int NumSVEBuiltins =
    SVE::FirstNeonBridgeBuiltin - NEON::FirstTSBuiltin;
static constexpr int NumSVENeonBridgeBuiltins =
    SVE::FirstTSBuiltin - SVE::FirstNeonBridgeBuiltin;
static constexpr int NumSMEBuiltins = SME::FirstTSBuiltin - SVE::FirstTSBuiltin;
static constexpr int NumAArch64Builtins =
    AArch64::LastTSBuiltin - SME::FirstTSBuiltin;
static constexpr int NumBuiltins =
    AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin;
static_assert(NumBuiltins ==
              (NumNeonBuiltins + NumFp16Builtins + NumSVEBuiltins +
               NumSVENeonBridgeBuiltins + NumSMEBuiltins + NumAArch64Builtins));

namespace clang {
namespace NEON {
#define GET_NEON_BUILTIN_STR_TABLE
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_BUILTIN_STR_TABLE

static constexpr std::array<Builtin::Info, NumNeonBuiltins> BuiltinInfos = {
#define GET_NEON_BUILTIN_INFOS
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_BUILTIN_INFOS
};

namespace FP16 {
#define GET_NEON_BUILTIN_STR_TABLE
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_BUILTIN_STR_TABLE

static constexpr std::array<Builtin::Info, NumFp16Builtins> BuiltinInfos = {
#define GET_NEON_BUILTIN_INFOS
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_BUILTIN_INFOS
};
} // namespace FP16
} // namespace NEON

namespace SVE {
#define GET_SVE_BUILTIN_STR_TABLE
#include "clang/Basic/arm_sve_builtins.inc"
#undef GET_SVE_BUILTIN_STR_TABLE

static constexpr std::array<Builtin::Info, NumSVEBuiltins> BuiltinInfos = {
#define GET_SVE_BUILTIN_INFOS
#include "clang/Basic/arm_sve_builtins.inc"
#undef GET_SVE_BUILTIN_INFOS
};
} // namespace SVE

namespace SME {
#define GET_SME_BUILTIN_STR_TABLE
#include "clang/Basic/arm_sme_builtins.inc"
#undef GET_SME_BUILTIN_STR_TABLE

static constexpr std::array<Builtin::Info, NumSMEBuiltins> BuiltinInfos = {
#define GET_SME_BUILTIN_INFOS
#include "clang/Basic/arm_sme_builtins.inc"
#undef GET_SME_BUILTIN_INFOS
};
} // namespace SME
} // namespace clang

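// The NEON/SVE bridge and the generic AArch64 builtins have no tablegen-built
// string tables, so their tables and Builtin::Info arrays are assembled here
// from the corresponding .def files instead.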
static constexpr llvm::StringTable BuiltinSVENeonBridgeStrings =
    CLANG_BUILTIN_STR_TABLE_START
#define TARGET_BUILTIN CLANG_TARGET_BUILTIN_STR_TABLE
#define GET_SVE_BUILTINS
#include "clang/Basic/BuiltinsAArch64NeonSVEBridge.def"
#undef GET_SVE_BUILTINS
#undef TARGET_BUILTIN
    ;
static constexpr llvm::StringTable BuiltinAArch64Strings =
    CLANG_BUILTIN_STR_TABLE_START
#define BUILTIN CLANG_BUILTIN_STR_TABLE
#define TARGET_BUILTIN CLANG_TARGET_BUILTIN_STR_TABLE
#define TARGET_HEADER_BUILTIN CLANG_TARGET_HEADER_BUILTIN_STR_TABLE
#include "clang/Basic/BuiltinsAArch64.def"
    ;

static constexpr auto BuiltinSVENeonBridgeInfos =
    Builtin::MakeInfos<NumSVENeonBridgeBuiltins>({
#define TARGET_BUILTIN CLANG_TARGET_BUILTIN_ENTRY
#define GET_SVE_BUILTINS
#include "clang/Basic/BuiltinsAArch64NeonSVEBridge.def"
#undef GET_SVE_BUILTINS
#undef TARGET_BUILTIN
    });
static constexpr auto BuiltinAArch64Infos =
    Builtin::MakeInfos<NumAArch64Builtins>({
#define BUILTIN CLANG_BUILTIN_ENTRY
#define TARGET_BUILTIN CLANG_TARGET_BUILTIN_ENTRY
#define LANGBUILTIN CLANG_LANGBUILTIN_ENTRY
#define TARGET_HEADER_BUILTIN CLANG_TARGET_HEADER_BUILTIN_ENTRY
#include "clang/Basic/BuiltinsAArch64.def"
    });

AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  AddrSpaceMap = &ARM64AddrSpaceMap;

  // All AArch64 implementations support ARMv8 FP, which makes half a legal
  // type.
  HasFastHalfType = true;
  HalfArgsAndReturns = true;
  HasFloat16 = true;
  HasStrictFP = true;

  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  BitIntMaxAlign = 128;
  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the Neon ACLE and SVE types available. Note that this deliberately
  // doesn't depend on SveMode, since in principle it should be possible to
  // turn SVE on and off within a translation unit. It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64ACLETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AAPCS64 allows any "fundamental integer data type" to be used for
  // over-sized bitfields, which includes 128-bit integers.
  LargestOverSizedBitfieldContainer = 128;

  HasUnalignedAccess = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}

StringRef AArch64TargetInfo::getABI() const { return ABI; }

bool AArch64TargetInfo::setABI(const std::string &Name) {
  if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs")
    return false;

  ABI = Name;
  return true;
}

bool AArch64TargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
  if (hasFeature("fp") && ABI == "aapcs-soft") {
    // aapcs-soft is not allowed for targets with an FPU, to avoid there being
    // two incompatible ABIs.
    Diags.Report(diag::err_target_unsupported_abi_with_fpu) << ABI;
    return false;
  }
  return true;
}

bool AArch64TargetInfo::validateGlobalRegisterVariable(
    StringRef RegName, unsigned RegSize, bool &HasSizeMismatch) const {
  if (RegName == "sp") {
    HasSizeMismatch = RegSize != 64;
    return true;
  }
  if (RegName.starts_with("w"))
    HasSizeMismatch = RegSize != 32;
  else if (RegName.starts_with("x"))
    HasSizeMismatch = RegSize != 64;
  else
    return false;
  StringRef RegNum = RegName.drop_front();
  // Check if the register is reserved. See also
  // AArch64TargetLowering::getRegisterByName().
  return RegNum == "0" ||
         (RegNum == "18" &&
          llvm::AArch64::isX18ReservedByDefault(getTriple())) ||
         getTargetOpts().FeatureMap.lookup(("reserve-x" + RegNum).str());
}

bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
                                                 BranchProtectionInfo &BPI,
                                                 const LangOptions &LO,
                                                 StringRef &Err) const {
  llvm::ARM::ParsedBranchProtection PBP;
  if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err, HasPAuthLR))
    return false;

  // GCS is currently untested with ptrauth-returns, but enabling this could be
  // allowed in future after testing with a suitable system.
  if (LO.PointerAuthReturns &&
      (PBP.Scope != "none" || PBP.BranchProtectionPAuthLR ||
       PBP.GuardedControlStack))
    return false;

  BPI.SignReturnAddr =
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
          .Default(LangOptions::SignReturnAddressScopeKind::None);

  if (PBP.Key == "a_key")
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
  else
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;

  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
  BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
  BPI.GuardedControlStack = PBP.GuardedControlStack;
  return true;
}

bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
  return llvm::AArch64::parseCpu(Name).has_value();
}

bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}

void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}

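// Each getTargetDefinesARMV<N>A method below defines only the macros new to
// that architecture version and then delegates to its predecessor, so the
// defines accumulate down the chain. The Armv9.0-9.4 entry points map onto
// the Armv8.5-8.9 feature sets they are architecturally based on.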
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}

void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.8 defines
  getTargetDefinesARMV88A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.4-A maps to Armv8.9-A
  getTargetDefinesARMV89A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
  getTargetDefinesARMV94A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV96A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.6-A does not have a v8.* equivalent, but is a superset of v9.5-A.
  getTargetDefinesARMV95A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV97A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.7-A does not have a v8.* equivalent, but is a superset of v9.6-A.
  getTargetDefinesARMV96A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  if (getTriple().isWindowsArm64EC()) {
    // Define the same set of macros as would be defined on x86_64 so that
    // ARM64EC data type layouts match those of x86_64-compiled code.
    Builder.defineMacro("__amd64__");
    Builder.defineMacro("__amd64");
    Builder.defineMacro("__x86_64");
    Builder.defineMacro("__x86_64__");
    Builder.defineMacro("__arm64ec__");
  } else {
    Builder.defineMacro("__aarch64__");
  }

  if (getTriple().isLFI())
    Builder.defineMacro("__LFI__");

  // Inline assembly supports AArch64 flag outputs.
  Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");

  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE_VERSION(year, quarter, patch)",
                      "(100 * (year) + 10 * (quarter) + (patch))");
#define ARM_ACLE_VERSION(Y, Q, P) (100 * (Y) + 10 * (Q) + (P))
  Builder.defineMacro("__ARM_ACLE", Twine(ARM_ACLE_VERSION(2024, 2, 0)));
  Builder.defineMacro("__FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL",
                      Twine(ARM_ACLE_VERSION(2024, 3, 0)));
#undef ARM_ACLE_VERSION
  Builder.defineMacro("__ARM_ARCH",
                      std::to_string(ArchInfo->Version.getMajor()));
  Builder.defineMacro("__ARM_ARCH_PROFILE",
                      std::string("'") + (char)ArchInfo->Profile + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // These macros are set when Clang can parse declarations with these
  // attributes.
  Builder.defineMacro("__ARM_STATE_ZA", "1");
  Builder.defineMacro("__ARM_STATE_ZT0", "1");

  // 0xE implies support for half, single and double precision operations.
  if (FPU & FPUMode)
    Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  // Clang supports arm_neon_sve_bridge.h
  Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  // Clang supports range prefetch intrinsics.
  Builder.defineMacro("__ARM_PREFETCH_RANGE", "1");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2p1)
    Builder.defineMacro("__ARM_FEATURE_SVE2p1", "1");

  if (HasSVE2 && HasSVEAES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVEBitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasSVEB16B16)
    Builder.defineMacro("__ARM_FEATURE_SVE_B16B16", "1");

  if (HasSME) {
    Builder.defineMacro("__ARM_FEATURE_SME");
    Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
  }

  if (HasSME2)
    Builder.defineMacro("__ARM_FEATURE_SME2", "1");

  if (HasSME2p1)
    Builder.defineMacro("__ARM_FEATURE_SME2p1", "1");

  if (HasSMEF16F16)
    Builder.defineMacro("__ARM_FEATURE_SME_F16F16", "1");

  if (HasSMEB16B16)
    Builder.defineMacro("__ARM_FEATURE_SME_B16B16", "1");

  if (HasFP8)
    Builder.defineMacro("__ARM_FEATURE_FP8", "1");

  if (HasFP8FMA)
    Builder.defineMacro("__ARM_FEATURE_FP8FMA", "1");

  if (HasFP8DOT2)
    Builder.defineMacro("__ARM_FEATURE_FP8DOT2", "1");

  if (HasFP8DOT4)
    Builder.defineMacro("__ARM_FEATURE_FP8DOT4", "1");

  if (HasSSVE_FP8DOT2)
    Builder.defineMacro("__ARM_FEATURE_SSVE_FP8DOT2", "1");

  if (HasSSVE_FP8DOT4)
    Builder.defineMacro("__ARM_FEATURE_SSVE_FP8DOT4", "1");

  if (HasSSVE_FP8FMA)
    Builder.defineMacro("__ARM_FEATURE_SSVE_FP8FMA", "1");

  if (HasSME_F8F32)
    Builder.defineMacro("__ARM_FEATURE_SME_F8F32", "1");

  if (HasSME_F8F16)
    Builder.defineMacro("__ARM_FEATURE_SME_F8F16", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  if (HasCSSC)
    Builder.defineMacro("__ARM_FEATURE_CSSC", "1");

  if (HasRCPC3)
    Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
  else if (HasRCPC)
    Builder.defineMacro("__ARM_FEATURE_RCPC", "1");

  if (HasFPRCVT)
    Builder.defineMacro("__ARM_FEATURE_FPRCVT", "1");

  if (HasF8F16MM)
    Builder.defineMacro("__ARM_FEATURE_F8F16MM", "1");

  if (HasF8F32MM)
    Builder.defineMacro("__ARM_FEATURE_F8F32MM", "1");

  if (HasSVE_F16F32MM)
    Builder.defineMacro("__ARM_FEATURE_SVE_F16F32MM", "1");

  if (HasSVE_BFSCALE)
    Builder.defineMacro("__ARM_FEATURE_SVE_BFSCALE", "1");

  if (HasSVE_AES2)
    Builder.defineMacro("__ARM_FEATURE_SVE_AES2", "1");

  if (HasSSVE_AES)
    Builder.defineMacro("__ARM_FEATURE_SSVE_AES", "1");

  if (HasSVE2p2)
    Builder.defineMacro("__ARM_FEATURE_SVE2p2", "1");

  if (HasSME2p2)
    Builder.defineMacro("__ARM_FEATURE_SME2p2", "1");

  if (HasFMV)
    Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");

  // __ARM_FEATURE_CRYPTO is deprecated in favor of the finer-grained feature
  // macros for AES, SHA2, SHA3 and SM4.
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasPAuth)
    Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");

  if (HasPAuthLR)
    Builder.defineMacro("__ARM_FEATURE_PAUTH_LR", "1");

  if (HasBTI)
    Builder.defineMacro("__ARM_FEATURE_BTI", "1");

  if (HasUnalignedAccess)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    // 3: Protection using PC as a diversifier
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    if (Opts.BranchProtectionPAuthLR)
      Value |= (1 << 3);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (Opts.GuardedControlStack)
    Builder.defineMacro("__ARM_FEATURE_GCS_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  if (HasMOPS)
    Builder.defineMacro("__ARM_FEATURE_MOPS", "1");

  if (HasD128)
    Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");

  if (HasGCS)
    Builder.defineMacro("__ARM_FEATURE_GCS", "1");

  if (*ArchInfo == llvm::AArch64::ARMV8_1A)
    getTargetDefinesARMV81A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
    getTargetDefinesARMV82A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
    getTargetDefinesARMV83A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
    getTargetDefinesARMV84A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
    getTargetDefinesARMV85A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
    getTargetDefinesARMV86A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
    getTargetDefinesARMV87A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
    getTargetDefinesARMV88A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
    getTargetDefinesARMV89A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9A)
    getTargetDefinesARMV9A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
    getTargetDefinesARMV91A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
    getTargetDefinesARMV92A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
    getTargetDefinesARMV93A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
    getTargetDefinesARMV94A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
    getTargetDefinesARMV95A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_6A)
    getTargetDefinesARMV96A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_7A)
    getTargetDefinesARMV97A(Opts, Builder);

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");

  // Allow detection of fast FMA support.
  Builder.defineMacro("__FP_FAST_FMA", "1");
  Builder.defineMacro("__FP_FAST_FMAF", "1");

  // C/C++ operators work on both VLS and VLA SVE types.
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");

  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
  }
}

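// Builtins are exposed as shards, each pairing a string table with its
// Builtin::Info array and an optional common name prefix. The NEON and FP16
// shards share the "__builtin_neon_" prefix; the SVE/NEON bridge and generic
// AArch64 shards store unprefixed names.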
llvm::SmallVector<Builtin::InfosShard>
AArch64TargetInfo::getTargetBuiltins() const {
  return {
      {&NEON::BuiltinStrings, NEON::BuiltinInfos, "__builtin_neon_"},
      {&NEON::FP16::BuiltinStrings, NEON::FP16::BuiltinInfos,
       "__builtin_neon_"},
      {&SVE::BuiltinStrings, SVE::BuiltinInfos, "__builtin_sve_"},
      {&BuiltinSVENeonBridgeStrings, BuiltinSVENeonBridgeInfos},
      {&SME::BuiltinStrings, SME::BuiltinInfos, "__builtin_sme_"},
      {&BuiltinAArch64Strings, BuiltinAArch64Infos},
  };
}

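// Returns the [min, max] range of "vscale", where one unit of vscale is 128
// bits of SVE/SME vector length. Explicit vscale bounds from the language
// options (typically set via the -mvscale-min/-mvscale-max cc1 options) take
// precedence; otherwise SVE, or SME in streaming mode, implies the full
// architectural range of 1-16 (128 to 2048 bits).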
std::optional<std::pair<unsigned, unsigned>>
AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts,
                                  ArmStreamingKind Mode,
                                  llvm::StringMap<bool> *FeatureMap) const {
  if (Mode == ArmStreamingKind::NotStreaming &&
      (LangOpts.VScaleMin || LangOpts.VScaleMax))
    return std::pair<unsigned, unsigned>(
        LangOpts.VScaleMin ? LangOpts.VScaleMin : 1,
        LangOpts.VScaleMax ? LangOpts.VScaleMax : 16);

  if (Mode == ArmStreamingKind::Streaming &&
      (LangOpts.VScaleStreamingMin || LangOpts.VScaleStreamingMax))
    return std::pair<unsigned, unsigned>(
        LangOpts.VScaleStreamingMin ? LangOpts.VScaleStreamingMin : 1,
        LangOpts.VScaleStreamingMax ? LangOpts.VScaleStreamingMax : 16);

  if (Mode == ArmStreamingKind::StreamingCompatible &&
      ((LangOpts.VScaleMin && LangOpts.VScaleStreamingMin) ||
       (LangOpts.VScaleMax && LangOpts.VScaleStreamingMax))) {
    unsigned Min =
        std::min(LangOpts.VScaleMin ? LangOpts.VScaleMin : 1,
                 LangOpts.VScaleStreamingMin ? LangOpts.VScaleStreamingMin : 1);
    unsigned Max =
        std::max(LangOpts.VScaleMax ? LangOpts.VScaleMax : 16,
                 LangOpts.VScaleStreamingMax ? LangOpts.VScaleStreamingMax
                                             : 16);
    return std::pair(Min, Max);
  }

  if (hasFeature("sve") || (FeatureMap && FeatureMap->lookup("sve")))
    return std::pair<unsigned, unsigned>(1, 16);

  if (Mode == ArmStreamingKind::Streaming &&
      (hasFeature("sme") || (FeatureMap && FeatureMap->lookup("sme"))))
    return std::pair<unsigned, unsigned>(1, 16);

  return std::nullopt;
}

llvm::APInt
AArch64TargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const {
  return llvm::AArch64::getFMVPriority(Features);
}

bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
  // FMV extensions which imply no backend features do not affect codegen.
  if (auto Ext = llvm::AArch64::parseFMVExtension(Name))
    return Ext->ID.has_value();
  return false;
}

bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
  // CPU features might be separated by '+'; extract them and check each one.
  llvm::SmallVector<StringRef, 8> Features;
  FeatureStr.split(Features, "+");
  for (auto &Feature : Features)
    if (!llvm::AArch64::parseFMVExtension(Feature.trim()).has_value())
      return false;
  return true;
}

bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases({"aarch64", "arm64", "arm"}, true)
      .Case("fmv", HasFMV)
      .Case("fp", FPU & FPUMode)
      .Cases({"neon", "simd"}, FPU & NeonMode)
      .Case("jscvt", HasJSCVT)
      .Case("fcma", HasFCMA)
      .Case("rng", HasRandGen)
      .Case("flagm", HasFlagM)
      .Case("flagm2", HasAlternativeNZCV)
      .Case("fp16fml", HasFP16FML)
      .Case("dotprod", HasDotProd)
      .Case("sm4", HasSM4)
      .Case("rdm", HasRDM)
      .Case("lse", HasLSE)
      .Case("crc", HasCRC)
      .Case("cssc", HasCSSC)
      .Case("sha2", HasSHA2)
      .Case("sha3", HasSHA3)
      .Cases({"aes", "pmull"}, HasAES)
      .Cases({"fp16", "fullfp16"}, HasFullFP16)
      .Case("dit", HasDIT)
      .Case("dpb", HasCCPP)
      .Case("dpb2", HasCCDP)
      .Case("rcpc", HasRCPC)
      .Case("frintts", HasFRInt3264)
      .Case("i8mm", HasMatMul)
      .Case("bf16", HasBFloat16)
      .Case("sve", FPU & SveMode)
      .Case("sve-b16b16", HasSVEB16B16)
      .Case("f32mm", FPU & SveMode && HasMatmulFP32)
      .Case("f64mm", FPU & SveMode && HasMatmulFP64)
      .Case("sve2", FPU & SveMode && HasSVE2)
      .Case("sve-aes", HasSVEAES)
      .Case("sve-bitperm", HasSVEBitPerm)
      .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
      .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
      .Case("sve2p1", FPU & SveMode && HasSVE2p1)
      .Case("sme", HasSME)
      .Case("sme2", HasSME2)
      .Case("sme2p1", HasSME2p1)
      .Case("sme-f64f64", HasSMEF64F64)
      .Case("sme-i16i64", HasSMEI16I64)
      .Case("sme-fa64", HasSMEFA64)
      .Case("sme-f16f16", HasSMEF16F16)
      .Case("sme-b16b16", HasSMEB16B16)
      .Case("memtag", HasMTE)
      .Case("sb", HasSB)
      .Case("predres", HasPredRes)
      .Cases({"ssbs", "ssbs2"}, HasSSBS)
      .Case("bti", HasBTI)
      .Cases({"ls64", "ls64_v", "ls64_accdata"}, HasLS64)
      .Case("wfxt", HasWFxT)
      .Case("rcpc3", HasRCPC3)
      .Case("fp8", HasFP8)
      .Case("fp8fma", HasFP8FMA)
      .Case("fp8dot2", HasFP8DOT2)
      .Case("fp8dot4", HasFP8DOT4)
      .Case("ssve-fp8dot2", HasSSVE_FP8DOT2)
      .Case("ssve-fp8dot4", HasSSVE_FP8DOT4)
      .Case("ssve-fp8fma", HasSSVE_FP8FMA)
      .Case("sme-f8f32", HasSME_F8F32)
      .Case("sme-f8f16", HasSME_F8F16)
      .Case("fprcvt", HasFPRCVT)
      .Case("f8f16mm", HasF8F16MM)
      .Case("f8f32mm", HasF8F32MM)
      .Case("sve-f16f32mm", HasSVE_F16F32MM)
      .Case("sve-bfscale", HasSVE_BFSCALE)
      .Case("sve-aes2", HasSVE_AES2)
      .Case("ssve-aes", HasSSVE_AES)
      .Case("sve2p2", FPU & SveMode && HasSVE2p2)
      .Case("sme2p2", HasSME2p2)
      .Default(false);
}

void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
                                          StringRef Name, bool Enabled) const {
  Features[Name] = Enabled;
  // If the feature is an architecture feature (like v8.2a), add all previous
  // architecture versions and any dependent target features.
  const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
      llvm::AArch64::ArchInfo::findBySubArch(Name);

  if (!ArchInfo)
    return; // Not an architecture, nothing more to do.

  // Disabling an architecture feature does not affect dependent features.
  if (!Enabled)
    return;

  for (const auto *OtherArch : llvm::AArch64::ArchInfos)
    if (ArchInfo->implies(*OtherArch))
      Features[OtherArch->getSubArch()] = true;

  // Set any features implied by the architecture.
  std::vector<StringRef> CPUFeats;
  if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
    for (auto F : CPUFeats) {
      assert(F[0] == '+' && "Expected + in target feature!");
      Features[F.drop_front(1)] = true;
    }
  }
}

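// Features are applied in the order they appear. The "-fp-armv8", "-neon" and
// "-sve" negatives are only recorded inside the loop and applied after it, so
// an explicit disable wins over any later feature that would otherwise imply
// FP, NEON or SVE.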
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  for (const auto &Feature : Features) {
    if (Feature == "-fp-armv8")
      HasNoFP = true;
    if (Feature == "-neon")
      HasNoNeon = true;
    if (Feature == "-sve")
      HasNoSVE = true;

    if (Feature == "+neon" || Feature == "+fp-armv8")
      FPU |= NeonMode;
    if (Feature == "+jscvt") {
      HasJSCVT = true;
      FPU |= NeonMode;
    }
    if (Feature == "+fcma") {
      HasFCMA = true;
      FPU |= NeonMode;
    }

    if (Feature == "+sve") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
    }
    if (Feature == "+sve2") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
    }
    if (Feature == "+sve2p1") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2p1 = true;
    }
    if (Feature == "+sve-aes") {
      FPU |= NeonMode;
      HasFullFP16 = true;
      HasSVEAES = true;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SHA3 = true;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SM4 = true;
    }
    if (Feature == "+sve-b16b16")
      HasSVEB16B16 = true;
    if (Feature == "+sve-bitperm") {
      FPU |= NeonMode;
      HasFullFP16 = true;
      HasSVEBitPerm = true;
    }
    if (Feature == "+f32mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP64 = true;
    }
    if (Feature == "+sme") {
      HasSME = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme2") {
      HasSME = true;
      HasSME2 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme2p1") {
      HasSME = true;
      HasSME2 = true;
      HasSME2p1 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-f64f64") {
      HasSME = true;
      HasSMEF64F64 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-i16i64") {
      HasSME = true;
      HasSMEI16I64 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-fa64") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasSME = true;
      HasSVE2 = true;
      HasSMEFA64 = true;
    }
    if (Feature == "+sme-f16f16") {
      HasSME = true;
      HasSME2 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
      HasSMEF16F16 = true;
    }
    if (Feature == "+sme-b16b16") {
      HasSME = true;
      HasSME2 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
      HasSVEB16B16 = true;
      HasSMEB16B16 = true;
    }

    if (Feature == "+fp8")
      HasFP8 = true;
    if (Feature == "+fp8fma")
      HasFP8FMA = true;
    if (Feature == "+fp8dot2")
      HasFP8DOT2 = true;
    if (Feature == "+fp8dot4")
      HasFP8DOT4 = true;
    if (Feature == "+ssve-fp8dot2")
      HasSSVE_FP8DOT2 = true;
    if (Feature == "+ssve-fp8dot4")
      HasSSVE_FP8DOT4 = true;
    if (Feature == "+ssve-fp8fma")
      HasSSVE_FP8FMA = true;
    if (Feature == "+sme-f8f32")
      HasSME_F8F32 = true;
    if (Feature == "+sme-f8f16")
      HasSME_F8F16 = true;
    if (Feature == "+sb")
      HasSB = true;
    if (Feature == "+predres")
      HasPredRes = true;
    if (Feature == "+ssbs")
      HasSSBS = true;
    if (Feature == "+bti")
      HasBTI = true;
    if (Feature == "+wfxt")
      HasWFxT = true;
    if (Feature == "-fmv")
      HasFMV = false;
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+rcpc")
      HasRCPC = true;
    if (Feature == "+aes") {
      FPU |= NeonMode;
      HasAES = true;
    }
    if (Feature == "+sha2") {
      FPU |= NeonMode;
      HasSHA2 = true;
    }
    if (Feature == "+sha3") {
      FPU |= NeonMode;
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+rdm") {
      FPU |= NeonMode;
      HasRDM = true;
    }
    if (Feature == "+dit")
      HasDIT = true;
    if (Feature == "+ccpp")
      HasCCPP = true;
    if (Feature == "+ccdp") {
      HasCCPP = true;
      HasCCDP = true;
    }
    if (Feature == "+fptoint")
      HasFRInt3264 = true;
    if (Feature == "+sm4") {
      FPU |= NeonMode;
      HasSM4 = true;
    }
    if (Feature == "+strict-align")
      HasUnalignedAccess = false;
    if (Feature == "+fprcvt")
      HasFPRCVT = true;
    if (Feature == "+f8f16mm")
      HasF8F16MM = true;
    if (Feature == "+f8f32mm")
      HasF8F32MM = true;
    if (Feature == "+sve-f16f32mm")
      HasSVE_F16F32MM = true;
    if (Feature == "+sve-bfscale")
      HasSVE_BFSCALE = true;
    if (Feature == "+sve-aes2")
      HasSVE_AES2 = true;
    if (Feature == "+ssve-aes")
      HasSSVE_AES = true;
    if (Feature == "+sve2p2")
      HasSVE2p2 = true;
    if (Feature == "+sme2p2")
      HasSME2p2 = true;

    // All predecessor archs are added but select the latest one for ArchKind.
    if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8A;
    if (Feature == "+v8.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_1A;
    if (Feature == "+v8.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_2A;
    if (Feature == "+v8.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_3A;
    if (Feature == "+v8.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_4A;
    if (Feature == "+v8.5a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_5A;
    if (Feature == "+v8.6a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_6A;
    if (Feature == "+v8.7a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_7A;
    if (Feature == "+v8.8a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_8A;
    if (Feature == "+v8.9a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_9A;
    if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
      ArchInfo = &llvm::AArch64::ARMV9A;
    if (Feature == "+v9.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_1A;
    if (Feature == "+v9.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_2A;
    if (Feature == "+v9.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_3A;
    if (Feature == "+v9.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_4A;
    if (Feature == "+v9.5a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_5A;
    if (Feature == "+v9.6a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_6A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_6A;
    if (Feature == "+v9.7a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_7A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_7A;
    if (Feature == "+v8r")
      ArchInfo = &llvm::AArch64::ARMV8R;
    if (Feature == "+fullfp16") {
      FPU |= NeonMode;
      HasFullFP16 = true;
    }
    if (Feature == "+dotprod") {
      FPU |= NeonMode;
      HasDotProd = true;
    }
    if (Feature == "+fp16fml") {
      FPU |= NeonMode;
      HasFullFP16 = true;
      HasFP16FML = true;
    }
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
    if (Feature == "+altnzcv") {
      HasFlagM = true;
      HasAlternativeNZCV = true;
    }
    if (Feature == "+mops")
      HasMOPS = true;
    if (Feature == "+d128")
      HasD128 = true;
    if (Feature == "+gcs")
      HasGCS = true;
    if (Feature == "+rcpc3")
      HasRCPC3 = true;
    if (Feature == "+pauth-lr") {
      HasPAuthLR = true;
      HasPAuth = true;
    }
    if (Feature == "+cssc")
      HasCSSC = true;
  }

  // Check features that are manually disabled by command line options.
  // This needs to be checked after architecture-related features are handled,
  // making sure they are properly disabled when required.
  for (const auto &Feature : Features) {
    if (Feature == "-d128")
      HasD128 = false;
  }

  resetDataLayout();

  if (HasNoFP) {
    FPU &= ~FPUMode;
    FPU &= ~NeonMode;
    FPU &= ~SveMode;
  }
  if (HasNoNeon) {
    FPU &= ~NeonMode;
    FPU &= ~SveMode;
  }
  if (HasNoSVE)
    FPU &= ~SveMode;

  return true;
}

// Parse AArch64 Target attributes, which are a comma-separated list of:
// "arch=<arch>" - parsed to features as per -march=..
// "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
// "tune=<cpu>" - TuneCPU set to <cpu>
// "feature", "no-feature" - Add (or remove) feature.
// "+feature", "+nofeature" - Add (or remove) feature.
//
// A feature may correspond to an Extension (anything with a corresponding
// AEK_), in which case an ExtensionSet is used to parse it and expand its
// dependencies. If the feature does not yield a successful parse then it
// is passed through.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(AttrFeatures, ",");
  bool FoundArch = false;

  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features,
                                llvm::AArch64::ExtensionSet &FeatureBits) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(SplitFeatures, StringRef("+"), -1, false);
    for (StringRef Feature : SplitFeatures) {
      if (FeatureBits.parseModifier(Feature))
        continue;
      // Pass through anything that failed to parse so that we can emit
      // diagnostics, as well as valid internal feature names.
      //
      // FIXME: We should consider rejecting internal feature names like
      // neon, v8a, etc.
      // FIXME: We should consider emitting diagnostics here.
      if (Feature.starts_with("no"))
        Features.push_back("-" + Feature.drop_front(2).str());
      else
        Features.push_back("+" + Feature.str());
    }
  };

  llvm::AArch64::ExtensionSet FeatureBits;
  // Reconstruct the bitset from the command line option features.
  FeatureBits.reconstructFromParsedFeatures(getTargetOpts().FeaturesAsWritten,
                                            Ret.Features);

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    if (Feature.starts_with("fpmath="))
      continue;

    if (Feature.starts_with("branch-protection=")) {
      Ret.BranchProtection = Feature.split('=').second.trim();
      continue;
    }

    if (Feature.starts_with("arch=")) {
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split("=").second.trim().split("+");
      const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      FeatureBits.addArchDefaults(*AI);
      // Add any extra features, after the "+".
      SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
    } else if (Feature.starts_with("cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split("=").second.trim().split("+");
        Ret.CPU = Split.first;
        if (auto CpuInfo = llvm::AArch64::parseCpu(Ret.CPU)) {
          FeatureBits.addCPUDefaults(*CpuInfo);
          SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
        }
      }
    } else if (Feature.starts_with("tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split("=").second.trim();
    } else if (Feature.starts_with("+")) {
      SplitAndAddFeatures(Feature, Ret.Features, FeatureBits);
    } else {
      if (FeatureBits.parseModifier(Feature, /*AllowNoDashForm=*/true))
        continue;
      // Pass through anything that failed to parse so that we can emit
      // diagnostics, as well as valid internal feature names.
      //
      // FIXME: We should consider rejecting internal feature names like
      // neon, v8a, etc.
      // FIXME: We should consider emitting diagnostics here.
      if (Feature.starts_with("no-"))
        Ret.Features.push_back("-" + Feature.drop_front(3).str());
      else
        Ret.Features.push_back("+" + Feature.str());
    }
  }
  FeatureBits.toLLVMFeatureList(Ret.Features);
  return Ret;
}

bool AArch64TargetInfo::hasBFloat16Type() const { return true; }

TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_C:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_PreserveNone:
  case CC_DeviceKernel:
  case CC_AArch64VectorCall:
  case CC_AArch64SVEPCS:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

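// AArch64's CLZ instruction is well defined for a zero operand (it returns
// the operand width), so __builtin_clz and friends need not treat zero as
// undefined here.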
bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }

TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}

const char *const AArch64TargetInfo::GCCRegNames[] = {
    // clang-format off

    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
    "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",

    // SME registers
    "za", "zt0",

    // clang-format on
};

ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::ArrayRef(GCCRegNames);
}

const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};

ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::ArrayRef(GCCRegAliases);
}

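// "@cc<cond>" asm constraints request a flag output tied to one of the
// AArch64 condition codes (eq, ne, hs/cs, lo/cc, mi, pl, vs, vc, hi, ls, ge,
// lt, gt, le); matchAsmCCConstraint recognizes them by name.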
1525// Returns the length of cc constraint.
1526static unsigned matchAsmCCConstraint(const char *Name) {
1527 constexpr unsigned len = 5;
1528 auto RV = llvm::StringSwitch<unsigned>(Name)
1529 .Case(S: "@cceq", Value: len)
1530 .Case(S: "@ccne", Value: len)
1531 .Case(S: "@cchs", Value: len)
1532 .Case(S: "@cccs", Value: len)
1533 .Case(S: "@cccc", Value: len)
1534 .Case(S: "@cclo", Value: len)
1535 .Case(S: "@ccmi", Value: len)
1536 .Case(S: "@ccpl", Value: len)
1537 .Case(S: "@ccvs", Value: len)
1538 .Case(S: "@ccvc", Value: len)
1539 .Case(S: "@cchi", Value: len)
1540 .Case(S: "@ccls", Value: len)
1541 .Case(S: "@ccge", Value: len)
1542 .Case(S: "@cclt", Value: len)
1543 .Case(S: "@ccgt", Value: len)
1544 .Case(S: "@ccle", Value: len)
1545 .Default(Value: 0);
1546 return RV;
1547}
1548
1549std::string
1550AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
1551 std::string R;
1552 switch (*Constraint) {
1553 case 'U': // Three-character constraint; add "@3" hint for later parsing.
1554 R = std::string("@3") + std::string(Constraint, 3);
1555 Constraint += 2;
1556 break;
1557 case '@':
1558 if (const unsigned Len = matchAsmCCConstraint(Name: Constraint)) {
1559 std::string Converted = "{" + std::string(Constraint, Len) + "}";
1560 Constraint += Len - 1;
1561 return Converted;
1562 }
1563 return std::string(1, *Constraint);
1564 default:
1565 R = TargetInfo::convertConstraint(Constraint);
1566 break;
1567 }
1568 return R;
1569}

bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' &&
        (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
      // SVE predicate registers ("Upa"=P0-P15, "Upl"=P0-P7, "Uph"=P8-P15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
      // GPRs ("Uci"=w8-w11, "Ucj"=w12-w15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      Info.setOutputOperandBounds(0, 2);
      return true;
    }
  }
  return false;
}
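
// Illustrative use (hypothetical user code) of constraints accepted above:
//   asm("fmov %s0, %s1" : "=w"(out) : "w"(in)); // 'w': any of V0-V31
//   asm("ptrue %0.b" : "=Upa"(pg));             // SVE predicate P0-P15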

bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  Constraint = Constraint.ltrim("=+&");

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now, assume the user knows what they're doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x' registers.
      if (Size == 64)
        return true;

      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
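
// Illustrative use: the 'w' modifier selects the 32-bit view of an 'r'
// operand, which is what SuggestedModifier recommends above for Size < 64:
//   int a, b;
//   asm("add %w0, %w1, %w1" : "=r"(a) : "r"(b));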

std::string_view AArch64TargetInfo::getClobbers() const { return ""; }

int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
  if (RegNo == 0)
    return 0;
  if (RegNo == 1)
    return 1;
  return -1;
}

bool AArch64TargetInfo::validatePointerAuthKey(
    const llvm::APSInt &value) const {
  return 0 <= value && value <= 3;
}
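
// Illustrative use (hypothetical user code): keys 0-3 are the AArch64
// pointer-authentication keys IA, IB, DA, and DB, e.g.
//   void *s = __builtin_ptrauth_sign_unauthenticated(p, /*key=*/2, /*disc=*/0);
// Any key outside [0, 3] is rejected by this hook.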

bool AArch64TargetInfo::hasInt128Type() const { return true; }

AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
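
// Illustrative check (hypothetical user code) keying off the macros above:
//   #if defined(__ARM_BIG_ENDIAN)
//   // big-endian target: byte-swap before little-endian wire formats, etc.
//   #endif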

WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
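
// Under this LLP64 layout, hypothetical user code would observe:
//   static_assert(sizeof(long) == 4 && sizeof(long long) == 8, "LLP64");
//   static_assert(sizeof(long double) == sizeof(double), "64-bit long double");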

TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86VectorCall:
    if (getTriple().isWindowsArm64EC())
      return CCCR_OK;
    return CCCR_Ignore;
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_DeviceKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_PreserveNone:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}

void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    Builder.defineMacro("_M_X64", "100");
    Builder.defineMacro("_M_AMD64", "100");
    Builder.defineMacro("_M_ARM64EC", "1");
  } else {
    Builder.defineMacro("_M_ARM64", "1");
  }
}
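
// Illustrative check (hypothetical user code): ARM64EC deliberately defines
// the x64 macros alongside its own, so detection should test _M_ARM64EC first:
//   #if defined(_M_ARM64EC)
//   // x64-compatible ABI running on AArch64
//   #elif defined(_M_X64)
//   // native x64
//   #endif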

TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}

unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize,
                                                     bool HasNonWeakDef) const {
  unsigned Align =
      WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize, HasNonWeakDef);

  // MSVC does size-based alignment for arm64 per the alignment section of the
  // document below; replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {           // TypeSize >= 64 bytes
    Align = std::max(Align, 128u); // align type at least 16 bytes
  } else if (TypeSize >= 64) {     // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);  // align type at least 8 bytes
  } else if (TypeSize >= 16) {     // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);  // align type at least 4 bytes
  }
  return Align;
}
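
// Illustrative effect (a sketch): a 64-byte global such as
//   char buf[64]; // TypeSize == 512 bits
// is raised to at least 16-byte alignment here, matching MSVC object layout.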

MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}

AppleMachOAArch64TargetInfo::AppleMachOAArch64TargetInfo(
    const llvm::Triple &Triple, const TargetOptions &Opts)
    : AppleMachOTargetInfo<AArch64leTargetInfo>(Triple, Opts) {}

DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}

void clang::targets::getAppleMachOAArch64Defines(MacroBuilder &Builder,
                                                 const LangOptions &Opts,
                                                 const llvm::Triple &Triple) {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");
}
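
// Illustrative check (hypothetical user code):
//   #if defined(__arm64e__)
//   // pointer-authenticating arm64e slice
//   #endif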

void AppleMachOAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                               const llvm::Triple &Triple,
                                               MacroBuilder &Builder) const {
  getAppleMachOAArch64Defines(Builder, Opts, Triple);
  AppleMachOTargetInfo<AArch64leTargetInfo>::getOSDefines(Opts, Triple,
                                                          Builder);
}

void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  getAppleMachOAArch64Defines(Builder, Opts, Triple);
  DarwinTargetInfo<AArch64leTargetInfo>::getOSDefines(Opts, Triple, Builder);
}

TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}