//===- ABIInfo.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

9#include "ABIInfo.h"
10#include "ABIInfoImpl.h"
11
12using namespace clang;
13using namespace clang::CodeGen;
14
// Out-of-line defaulted destructor: pins ABIInfo's vtable (and RTTI) to this
// translation unit so it is emitted exactly once.
ABIInfo::~ABIInfo() = default;
17
18CGCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); }
19
20ASTContext &ABIInfo::getContext() const { return CGT.getContext(); }
21
22llvm::LLVMContext &ABIInfo::getVMContext() const {
23 return CGT.getLLVMContext();
24}
25
26const llvm::DataLayout &ABIInfo::getDataLayout() const {
27 return CGT.getDataLayout();
28}
29
30const TargetInfo &ABIInfo::getTarget() const { return CGT.getTarget(); }
31
32const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
33 return CGT.getCodeGenOpts();
34}
35
36bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
37
38bool ABIInfo::isOHOSFamily() const {
39 return getTarget().getTriple().isOHOSFamily();
40}
41
// Base implementation of the Microsoft-ABI va_arg hook: performs no work and
// returns an ignored RValue. NOTE(review): targets that support MS varargs
// presumably override this — confirm against the target-specific ABIInfos.
RValue ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                            QualType Ty, AggValueSlot Slot) const {
  return RValue::getIgnored();
}
46
47bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
48 return false;
49}
50
51bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
52 uint64_t Members) const {
53 return false;
54}
55
56bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
57 // For compatibility with GCC, ignore empty bitfields in C++ mode.
58 return getContext().getLangOpts().CPlusPlus;
59}
60
/// Determine whether \p Ty is a homogeneous aggregate: a type whose flattened
/// elements all share a single base type (as accepted by
/// isHomogeneousAggregateBaseType). On success, \p Base holds that common base
/// type and \p Members the total flattened element count. Both out-parameters
/// are threaded through the recursive calls, so \p Base also acts as "the base
/// type seen so far" during recursion.
bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                     uint64_t &Members) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(T: Ty)) {
    // Arrays: classify the element type, then scale the member count by the
    // array extent. Zero-length arrays are not homogeneous aggregates.
    uint64_t NElements = AT->getZExtSize();
    if (NElements == 0)
      return false;
    if (!isHomogeneousAggregate(Ty: AT->getElementType(), Base, Members))
      return false;
    Members *= NElements;
  } else if (const auto *RD = Ty->getAsRecordDecl()) {
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;

    // If this is a C++ record, check the properties of the record such as
    // bases and ABI specific restrictions
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
      if (!getCXXABI().isPermittedToBeHomogeneousAggregate(RD: CXXRD))
        return false;

      for (const auto &I : CXXRD->bases()) {
        // Ignore empty records.
        if (isEmptyRecord(Context&: getContext(), T: I.getType(), AllowArrays: true))
          continue;

        uint64_t FldMembers;
        if (!isHomogeneousAggregate(Ty: I.getType(), Base, Members&: FldMembers))
          return false;

        Members += FldMembers;
      }
    }

    for (const auto *FD : RD->fields()) {
      // Ignore (non-zero arrays of) empty records.
      QualType FT = FD->getType();
      while (const ConstantArrayType *AT =
                 getContext().getAsConstantArrayType(T: FT)) {
        if (AT->isZeroSize())
          return false;
        FT = AT->getElementType();
      }
      if (isEmptyRecord(Context&: getContext(), T: FT, AllowArrays: true))
        continue;

      // Zero-width bitfields may be skipped entirely when the ABI permits it
      // (see isZeroLengthBitfieldPermittedInHomogeneousAggregate).
      if (isZeroLengthBitfieldPermittedInHomogeneousAggregate() &&
          FD->isZeroLengthBitField())
        continue;

      uint64_t FldMembers;
      if (!isHomogeneousAggregate(Ty: FD->getType(), Base, Members&: FldMembers))
        return false;

      // Union members overlay each other, so take the widest alternative;
      // struct fields accumulate.
      Members = (RD->isUnion() ?
                 std::max(a: Members, b: FldMembers) : Members + FldMembers);
    }

    // An all-empty record never set Base and is not a homogeneous aggregate.
    if (!Base)
      return false;

    // Ensure there is no padding.
    if (getContext().getTypeSize(T: Base) * Members !=
        getContext().getTypeSize(T: Ty))
      return false;
  } else {
    // Scalar leaf. A complex value counts as two members of its element type.
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Most ABIs only support float, double, and some vector type widths.
    if (!isHomogeneousAggregateBaseType(Ty))
      return false;

    // The base type must be the same for all members. Types that
    // agree in both total size and mode (float vs. vector) are
    // treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base) {
      Base = TyPtr;
      // If it's a non-power-of-2 vector, its size is already a power-of-2,
      // so make sure to widen it explicitly.
      if (const VectorType *VT = Base->getAs<VectorType>()) {
        QualType EltTy = VT->getElementType();
        unsigned NumElements =
            getContext().getTypeSize(T: VT) / getContext().getTypeSize(T: EltTy);
        Base = getContext()
                   .getVectorType(VectorType: EltTy, NumElts: NumElements, VecKind: VT->getVectorKind())
                   .getTypePtr();
      }
    }

    if (Base->isVectorType() != TyPtr->isVectorType() ||
        getContext().getTypeSize(T: Base) != getContext().getTypeSize(T: TyPtr))
      return false;
  }
  // The target gets the final say on whether the aggregate fits in registers.
  return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
}
161
/// Whether \p Ty should be promoted to (at least) 'int' when passed or
/// returned, following the C integer-promotion rules.
bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
  if (getContext().isPromotableIntegerType(T: Ty))
    return true;

  // A _BitInt narrower than 'int' is promoted like any other small integer.
  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() < getContext().getTypeSize(T: getContext().IntTy))
      return true;

  return false;
}
172
/// Build an indirect ABIArgInfo using the type's natural (ASTContext)
/// alignment for the backing slot, forwarding the byval/realign/padding
/// options unchanged.
ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, unsigned AddrSpace,
                                            bool ByVal, bool Realign,
                                            llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(Alignment: getContext().getTypeAlignInChars(T: Ty),
                                 AddrSpace, ByVal, Realign, Padding);
}
179
/// Like getNaturalAlignIndirect, but for the indirect-in-register convention;
/// always non-byval.
ABIArgInfo ABIInfo::getNaturalAlignIndirectInReg(QualType Ty,
                                                 bool Realign) const {
  return ABIArgInfo::getIndirectInReg(Alignment: getContext().getTypeAlignInChars(T: Ty),
                                      /*ByVal*/ false, Realign);
}
185
/// Append the multiversioning mangling for a 'target' attribute. The default
/// version contributes nothing to the mangled name.
void ABIInfo::appendAttributeMangling(TargetAttr *Attr,
                                      raw_ostream &Out) const {
  if (Attr->isDefaultVersion())
    return;
  appendAttributeMangling(AttrStr: Attr->getFeaturesStr(), Out);
}
192
/// Append the multiversioning mangling for a 'target_version' attribute by
/// delegating to the string-based overload.
void ABIInfo::appendAttributeMangling(TargetVersionAttr *Attr,
                                      raw_ostream &Out) const {
  appendAttributeMangling(AttrStr: Attr->getNamesStr(), Out);
}
197
/// Append the multiversioning mangling for one clone of a 'target_clones'
/// attribute: the feature string for \p Index, followed by '.' and the
/// clone's mangled index.
void ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                                      raw_ostream &Out) const {
  appendAttributeMangling(AttrStr: Attr->getFeatureStr(Index), Out);
  Out << '.' << Attr->getMangledIndex(Index);
}
203
/// Append the mangling suffix for a raw multiversioning attribute string:
/// ".default" for the default version, otherwise "." followed by an optional
/// "arch_<CPU>" and the features, highest FMV priority first, joined by '_'.
void ABIInfo::appendAttributeMangling(StringRef AttrStr,
                                      raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << '.';
  const TargetInfo &TI = CGT.getTarget();
  ParsedTargetAttr Info = TI.parseTargetAttr(Str: AttrStr);

  // Sort by descending FMV priority so the mangled name is canonical
  // regardless of the order features were written in the attribute.
  llvm::sort(C&: Info.Features, Comp: [&TI](StringRef LHS, StringRef RHS) {
    // Multiversioning doesn't allow "no-${feature}", so we can
    // only have "+" prefixes here.
    assert(LHS.starts_with("+") && RHS.starts_with("+") &&
           "Features should always have a prefix.");
    return TI.getFMVPriority(Features: {LHS.substr(Start: 1)})
        .ugt(RHS: TI.getFMVPriority(Features: {RHS.substr(Start: 1)}));
  });

  bool IsFirst = true;
  if (!Info.CPU.empty()) {
    IsFirst = false;
    Out << "arch_" << Info.CPU;
  }

  for (StringRef Feat : Info.Features) {
    if (!IsFirst)
      Out << '_';
    IsFirst = false;
    // Drop the '+' prefix when emitting the feature name.
    Out << Feat.substr(Start: 1);
  }
}
237
238llvm::FixedVectorType *
239ABIInfo::getOptimalVectorMemoryType(llvm::FixedVectorType *T,
240 const LangOptions &Opt) const {
241 if (T->getNumElements() == 3 && !Opt.PreserveVec3Type)
242 return llvm::FixedVectorType::get(ElementType: T->getElementType(), NumElts: 4);
243 return T;
244}
245
// Base implementation: no target-specific coerced load; returns nullptr.
// NOTE(review): callers presumably fall back to the generic coercion path on
// nullptr — confirm against the call sites in CGCall.
llvm::Value *ABIInfo::createCoercedLoad(Address SrcAddr, const ABIArgInfo &AI,
                                        CodeGenFunction &CGF) const {
  return nullptr;
}
250
// Base implementation: no target-specific coerced store; intentionally a
// no-op so targets may override with custom coercion logic.
void ABIInfo::createCoercedStore(llvm::Value *Val, Address DstAddr,
                                 const ABIArgInfo &AI, bool DestIsVolatile,
                                 CodeGenFunction &CGF) const {}
254
// Arm64EC varargs classification is only meaningful for the x86 ABIInfo;
// reaching this base implementation is a programming error.
ABIArgInfo ABIInfo::classifyArgForArm64ECVarArg(QualType Ty) const {
  llvm_unreachable("Only implemented for x86");
}
258
// Out-of-line defaulted destructor: pins SwiftABIInfo's vtable to this
// translation unit.
SwiftABIInfo::~SwiftABIInfo() = default;
261
262/// Does the given lowering require more than the given number of
263/// registers when expanded?
264///
265/// This is intended to be the basis of a reasonable basic implementation
266/// of should{Pass,Return}Indirectly.
267///
268/// For most targets, a limit of four total registers is reasonable; this
269/// limits the amount of code required in order to move around the value
270/// in case it wasn't produced immediately prior to the call by the caller
271/// (or wasn't produced in exactly the right registers) or isn't used
272/// immediately within the callee. But some targets may need to further
273/// limit the register count due to an inability to support that many
274/// return registers.
275bool SwiftABIInfo::occupiesMoreThan(ArrayRef<llvm::Type *> scalarTypes,
276 unsigned maxAllRegisters) const {
277 unsigned intCount = 0, fpCount = 0;
278 for (llvm::Type *type : scalarTypes) {
279 if (type->isPointerTy()) {
280 intCount++;
281 } else if (auto intTy = dyn_cast<llvm::IntegerType>(Val: type)) {
282 auto ptrWidth = CGT.getTarget().getPointerWidth(AddrSpace: LangAS::Default);
283 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
284 } else {
285 assert(type->isVectorTy() || type->isFloatingPointTy());
286 fpCount++;
287 }
288 }
289
290 return (intCount + fpCount > maxAllRegisters);
291}
292
293bool SwiftABIInfo::shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
294 bool AsReturnValue) const {
295 return occupiesMoreThan(scalarTypes: ComponentTys, /*total=*/maxAllRegisters: 4);
296}
297
298bool SwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
299 unsigned NumElts) const {
300 // The default implementation of this assumes that the target guarantees
301 // 128-bit SIMD support but nothing more.
302 return (VectorSize.getQuantity() > 8 && VectorSize.getQuantity() <= 16);
303}
304