//===- ARM.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public ABIInfo {
  ARMABIKind Kind;
  bool IsFloatABISoftFP;

public:
  ARMABIInfo(CodeGenTypes &CGT, ARMABIKind Kind) : ABIInfo(CGT), Kind(Kind) {
    setCCs();
    IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
                       CGT.getCodeGenOpts().FloatABI == ""; // default
  }

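  // Returns true for the EABI-style environments (including Android and the
  // OpenHarmony family). Among other things, this selects the ARM EHABI
  // unwinder and its larger unwind exception object; see
  // ARMTargetCodeGenInfo::getSizeOfUnwindException below.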
  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return getTarget().getTriple().isOHOSFamily();
    }
  }

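  // Hard-float EABI variants pass floating-point arguments and results in VFP
  // registers by default (AAPCS-VFP); see getLLVMDefaultCC below.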
  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  ARMABIKind getABIKind() const { return Kind; }

  bool allowBFloatArgsAndRet() const override {
    return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
  }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
                                unsigned functionCallConv) const;
  ABIArgInfo classifyArgumentType(QualType Ty, bool isVariadic,
                                  unsigned functionCallConv) const;
  ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                          uint64_t Members) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isIllegalVectorType(QualType Ty) const;
  bool containsAnyFP16Vectors(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();
};

class ARMSwiftABIInfo : public SwiftABIInfo {
public:
  explicit ARMSwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {
    SwiftInfo = std::make_unique<ARMSwiftABIInfo>(CGT);
  }

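  // r13 is the stack pointer in the ARM DWARF register numbering.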
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

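  // The ARM EHABI unwind control block (_Unwind_Control_Block) is 88 bytes,
  // larger than the Itanium-style _Unwind_Exception used by the generic
  // implementation.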
  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo<ARMABIInfo>().isEABI())
      return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
    auto *Fn = cast<llvm::Function>(GV);

    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        TargetInfo::BranchProtectionInfo BPI{};
        StringRef DiagMsg;
        StringRef Arch =
            Attr.CPU.empty() ? CGM.getTarget().getTargetOpts().CPU : Attr.CPU;
        if (!CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                      Arch, BPI, DiagMsg)) {
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Arch;
        } else
          setBranchProtectionFnAttributes(BPI, (*Fn));
      } else if (CGM.getLangOpts().BranchTargetEnforcement ||
                 CGM.getLangOpts().hasSignReturnAddress()) {
        // If the branch protection attribute is missing, validate the target
        // architecture attribute against the branch protection command line
        // settings.
        if (!CGM.getTarget().isBranchProtectionSupportedArch(Attr.CPU))
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Attr.CPU;
      }
    } else if (CGM.getTarget().isBranchProtectionSupportedArch(
                   CGM.getTarget().getTargetOpts().CPU)) {
      TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());
      setBranchProtectionFnAttributes(BPI, (*Fn));
    }

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI: Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
    }

    Fn->addFnAttr("interrupt", Kind);

    ARMABIKind ABI = getABIInfo<ARMABIInfo>().getABIKind();
    if (ABI == ARMABIKind::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B(Fn->getContext());
    B.addStackAlignmentAttr(8);
    Fn->addFnAttrs(B);
  }
};

class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // namespace

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!::classifyReturnType(getCXXABI(), FI, *this))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
                                            FI.getCallingConvention());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic(),
                                  FI.getCallingConvention());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}

/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case ARMABIKind::APCS:
    return llvm::CallingConv::ARM_APCS;
  case ARMABIKind::AAPCS:
    return llvm::CallingConv::ARM_AAPCS;
  case ARMABIKind::AAPCS_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  case ARMABIKind::AAPCS16_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}

void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
}

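// Coerce an "illegal" vector (one the backend cannot take directly) to an
// integer type of the same overall size. For example, assuming Clang's usual
// rounding of vector sizes to a power of two, a <2 x i8> (16 bits) becomes a
// plain i32 and a <3 x float> (rounded up to 128 bits) becomes <4 x i32>;
// larger vectors that are not exactly 64 or 128 bits wide go indirect.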
ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64 || Size == 128) {
    auto *ResType = llvm::FixedVectorType::get(
        llvm::Type::getInt32Ty(getVMContext()), Size / 32);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

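// For example, struct S { float x, y, z; } is a homogeneous aggregate with
// Base = float and Members = 3; under AAPCS-VFP it is passed in consecutive
// single-precision VFP registers when enough of them are free.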
ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
                                                    const Type *Base,
                                                    uint64_t Members) const {
  assert(Base && "Base class should be set for homogeneous aggregate");
  // Base can be a floating-point or a vector.
  if (const VectorType *VT = Base->getAs<VectorType>()) {
    // FP16 vectors should be converted to integer vectors.
    if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
      uint64_t Size = getContext().getTypeSize(VT);
      auto *NewVecTy = llvm::FixedVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), Size / 32);
      llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }
  unsigned Align = 0;
  if (getABIKind() == ARMABIKind::AAPCS ||
      getABIKind() == ARMABIKind::AAPCS_VFP) {
    // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
    // default otherwise.
    Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
    Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
  }
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            unsigned functionCallConv) const {
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv,
                                            /* AAPCS16 */ false);

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members))
      return classifyHomogeneousAggregate(Ty, Base, Members);
  } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
    // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }

  if (getABIKind() == ARMABIKind::AAPCS16_VFP &&
      getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
    // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
    // bigger than 128 bits, they get placed in space allocated by the caller,
    // and a pointer is passed.
    return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4 bytes and for AAPCS at least 4 bytes and
  // at most 8 bytes. We realign the indirect argument if type alignment is
  // bigger than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign;
  if (getABIKind() == ARMABIKind::AAPCS_VFP ||
      getABIKind() == ARMABIKind::AAPCS) {
    TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    ABIAlign = std::clamp(TyAlign, (uint64_t)4, (uint64_t)8);
  } else {
    TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
  }
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    assert(getABIKind() != ARMABIKind::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
  // same size and alignment.
  if (getTarget().isRenderScriptTarget()) {
    return coerceToIntArray(Ty, getContext(), getVMContext());
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type *ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (TyAlign <= 4) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}

static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.
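  //
  // For example, struct { int x; } and struct { void *p; } are integer-like
  // (and so are returned in r0), while struct { short s; char c; } is not,
  // because c sits at a nonzero offset.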

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}

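// For example, under AAPCS a struct { char c; short s; } (32 bits) comes back
// directly in r0, while a struct of five ints is returned indirectly through
// a caller-allocated buffer (sret).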
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
                                          unsigned functionCallConv) const {

  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv,
                                            /* AAPCS16 */ true);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // Large vector types should be returned via memory.
    if (getContext().getTypeSize(RetTy) > 128)
      return getNaturalAlignIndirect(RetTy);
    // TODO: FP16/BF16 vectors should be converted to integer vectors.
    // This check is similar to isIllegalVectorType - refactor?
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP && VT->getElementType()->isBFloat16Type()))
      return coerceIllegalVector(RetTy);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

    return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                                : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == ARMABIKind::APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members))
      return classifyHomogeneousAggregate(RetTy, Base, Members);
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR; AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == ARMABIKind::AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether Ty is an illegal vector type.
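/// For example, <2 x i8> (16 bits) and <3 x float> are both illegal under the
/// default rules, but both are accepted under the legacy Android vector ABI
/// handled below.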
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On targets that don't support half, fp16 or bfloat, they are expanded
    // into float, and we don't want the ABI to depend on whether or not they
    // are supported in hardware. Thus return false to coerce vectors of these
    // types into integer vectors.
    // We do not depend on hasLegalHalfType for bfloat as it is a
    // separate IR type.
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP && VT->getElementType()->isBFloat16Type()))
      return true;
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub-32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be a power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be a power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Vectors of 32 bits or fewer are illegal; a legal vector must be wider
      // than a word.
      return Size <= 32;
    }
  }
  return false;
}

/// Return true if a type contains any 16-bit floating point vectors
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getZExtSize();
    if (NElements == 0)
      return false;
    return containsAnyFP16Vectors(AT->getElementType());
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
            return containsAnyFP16Vectors(B.getType());
          }))
        return true;

    if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
          return FD && containsAnyFP16Vectors(FD->getType());
        }))
      return true;

    return false;
  } else {
    if (const VectorType *VT = Ty->getAs<VectorType>())
      return (VT->getElementType()->isFloat16Type() ||
              VT->getElementType()->isBFloat16Type() ||
              VT->getElementType()->isHalfType());
    return false;
  }
}

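// Swift-specific legality check: accept only power-of-two element counts
// whose total size is 8 or 16 bytes (a NEON D or Q register), with elements
// no wider than 64 bits.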
bool ARMSwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                                        unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  unsigned size = CGT.getDataLayout().getTypeStoreSizeInBits(EltTy);
  if (size > 64)
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}

bool ARMABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
  // AAPCS32 says that the rule for whether something is a homogeneous
  // aggregate is applied to the output of the data layout decision. So
  // anything that doesn't affect the data layout also does not affect
  // homogeneity. In particular, zero-length bitfields don't stop a struct
  // being homogeneous.
  return true;
}

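// Decide whether the AAPCS-VFP rules apply to a call: an explicit aapcs-vfp
// calling convention always wins; otherwise fall back to the target's default
// ABI (optionally treating watchOS's AAPCS16-VFP as VFP when acceptHalf is
// set).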
bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
                                        bool acceptHalf) const {
  // Give precedence to user-specified calling conventions.
  if (callConvention != llvm::CallingConv::C)
    return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
  else
    return (getABIKind() == ARMABIKind::AAPCS_VFP) ||
           (acceptHalf && (getABIKind() == ARMABIKind::AAPCS16_VFP));
}

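// Lower va_arg for ARM: each slot in the va_list is 4 bytes wide; over-aligned
// types may be fetched at a higher (ABI-bounded) alignment, and oversized
// illegal vectors or large AAPCS16 composites are read indirectly through a
// pointer stored in the slot.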
RValue ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty, AggValueSlot Slot) const {
  CharUnits SlotSize = CharUnits::fromQuantity(4);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true))
    return Slot.asRValue();

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);

  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  bool IsIndirect = false;
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
    IsIndirect = true;

    // ARMv7k passes structs bigger than 16 bytes indirectly, in space
    // allocated by the caller.
  } else if (TySize > CharUnits::fromQuantity(16) &&
             getABIKind() == ARMABIKind::AAPCS16_VFP &&
             !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;

    // Otherwise, bound the type's ABI alignment.
    // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
    // APCS. For AAPCS, the ABI alignment is at least 4 bytes and at most 8
    // bytes. Our callers should be prepared to handle an under-aligned
    // address.
  } else if (getABIKind() == ARMABIKind::AAPCS_VFP ||
             getABIKind() == ARMABIKind::AAPCS) {
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
  } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
    // ARMv7k allows type alignment up to 16 bytes.
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }

  TypeInfoChars TyInfo(TySize, TyAlignForABI, AlignRequirementKind::None);
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign*/ true, Slot);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind Kind) {
  return std::make_unique<ARMTargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind K) {
  return std::make_unique<WindowsARMTargetCodeGenInfo>(CGM.getTypes(), K);
}