| 1 | //===- ABIInfo.cpp --------------------------------------------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | |
| 9 | #include "ABIInfo.h" |
| 10 | #include "ABIInfoImpl.h" |
| 11 | |
| 12 | using namespace clang; |
| 13 | using namespace clang::CodeGen; |
| 14 | |
// Pin the vtable to this file by defining the key function here.
ABIInfo::~ABIInfo() = default;
| 17 | |
| 18 | CGCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); } |
| 19 | |
| 20 | ASTContext &ABIInfo::getContext() const { return CGT.getContext(); } |
| 21 | |
/// Returns the LLVM IR context used for code generation.
llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}
| 25 | |
/// Returns the target data layout used for code generation.
const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}
| 29 | |
| 30 | const TargetInfo &ABIInfo::getTarget() const { return CGT.getTarget(); } |
| 31 | |
/// Returns the active code generation options.
const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}
| 35 | |
| 36 | bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); } |
| 37 | |
/// Whether the target triple belongs to the OpenHarmony OS family.
bool ABIInfo::isOHOSFamily() const {
  return getTarget().getTriple().isOHOSFamily();
}
| 41 | |
/// Emit a va_arg access under the Microsoft C ABI.  This base implementation
/// just produces an ignored RValue (all parameters unused); targets that
/// support the MS ABI are expected to override it with real lowering.
RValue ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                            QualType Ty, AggValueSlot Slot) const {
  return RValue::getIgnored();
}
| 46 | |
/// Whether \p Ty may serve as the base element type of a homogeneous
/// aggregate.  Conservatively false by default; targets with HFA/HVA
/// support are expected to override this.
bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}
| 50 | |
/// Whether a homogeneous aggregate of \p Members elements of type \p Base
/// is small enough to be passed as such.  Conservatively false by default;
/// targets with HFA/HVA support are expected to override this.
bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}
| 55 | |
/// Whether a zero-length bitfield member may be skipped when classifying a
/// record as a homogeneous aggregate.
bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
  // For compatibility with GCC, ignore empty bitfields in C++ mode.
  return getContext().getLangOpts().CPlusPlus;
}
| 60 | |
| 61 | bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, |
| 62 | uint64_t &Members) const { |
| 63 | if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(T: Ty)) { |
| 64 | uint64_t NElements = AT->getZExtSize(); |
| 65 | if (NElements == 0) |
| 66 | return false; |
| 67 | if (!isHomogeneousAggregate(Ty: AT->getElementType(), Base, Members)) |
| 68 | return false; |
| 69 | Members *= NElements; |
| 70 | } else if (const RecordType *RT = Ty->getAs<RecordType>()) { |
| 71 | const RecordDecl *RD = RT->getDecl(); |
| 72 | if (RD->hasFlexibleArrayMember()) |
| 73 | return false; |
| 74 | |
| 75 | Members = 0; |
| 76 | |
| 77 | // If this is a C++ record, check the properties of the record such as |
| 78 | // bases and ABI specific restrictions |
| 79 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) { |
| 80 | if (!getCXXABI().isPermittedToBeHomogeneousAggregate(RD: CXXRD)) |
| 81 | return false; |
| 82 | |
| 83 | for (const auto &I : CXXRD->bases()) { |
| 84 | // Ignore empty records. |
| 85 | if (isEmptyRecord(Context&: getContext(), T: I.getType(), AllowArrays: true)) |
| 86 | continue; |
| 87 | |
| 88 | uint64_t FldMembers; |
| 89 | if (!isHomogeneousAggregate(Ty: I.getType(), Base, Members&: FldMembers)) |
| 90 | return false; |
| 91 | |
| 92 | Members += FldMembers; |
| 93 | } |
| 94 | } |
| 95 | |
| 96 | for (const auto *FD : RD->fields()) { |
| 97 | // Ignore (non-zero arrays of) empty records. |
| 98 | QualType FT = FD->getType(); |
| 99 | while (const ConstantArrayType *AT = |
| 100 | getContext().getAsConstantArrayType(T: FT)) { |
| 101 | if (AT->isZeroSize()) |
| 102 | return false; |
| 103 | FT = AT->getElementType(); |
| 104 | } |
| 105 | if (isEmptyRecord(Context&: getContext(), T: FT, AllowArrays: true)) |
| 106 | continue; |
| 107 | |
| 108 | if (isZeroLengthBitfieldPermittedInHomogeneousAggregate() && |
| 109 | FD->isZeroLengthBitField()) |
| 110 | continue; |
| 111 | |
| 112 | uint64_t FldMembers; |
| 113 | if (!isHomogeneousAggregate(Ty: FD->getType(), Base, Members&: FldMembers)) |
| 114 | return false; |
| 115 | |
| 116 | Members = (RD->isUnion() ? |
| 117 | std::max(a: Members, b: FldMembers) : Members + FldMembers); |
| 118 | } |
| 119 | |
| 120 | if (!Base) |
| 121 | return false; |
| 122 | |
| 123 | // Ensure there is no padding. |
| 124 | if (getContext().getTypeSize(T: Base) * Members != |
| 125 | getContext().getTypeSize(T: Ty)) |
| 126 | return false; |
| 127 | } else { |
| 128 | Members = 1; |
| 129 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { |
| 130 | Members = 2; |
| 131 | Ty = CT->getElementType(); |
| 132 | } |
| 133 | |
| 134 | // Most ABIs only support float, double, and some vector type widths. |
| 135 | if (!isHomogeneousAggregateBaseType(Ty)) |
| 136 | return false; |
| 137 | |
| 138 | // The base type must be the same for all members. Types that |
| 139 | // agree in both total size and mode (float vs. vector) are |
| 140 | // treated as being equivalent here. |
| 141 | const Type *TyPtr = Ty.getTypePtr(); |
| 142 | if (!Base) { |
| 143 | Base = TyPtr; |
| 144 | // If it's a non-power-of-2 vector, its size is already a power-of-2, |
| 145 | // so make sure to widen it explicitly. |
| 146 | if (const VectorType *VT = Base->getAs<VectorType>()) { |
| 147 | QualType EltTy = VT->getElementType(); |
| 148 | unsigned NumElements = |
| 149 | getContext().getTypeSize(T: VT) / getContext().getTypeSize(T: EltTy); |
| 150 | Base = getContext() |
| 151 | .getVectorType(VectorType: EltTy, NumElts: NumElements, VecKind: VT->getVectorKind()) |
| 152 | .getTypePtr(); |
| 153 | } |
| 154 | } |
| 155 | |
| 156 | if (Base->isVectorType() != TyPtr->isVectorType() || |
| 157 | getContext().getTypeSize(T: Base) != getContext().getTypeSize(T: TyPtr)) |
| 158 | return false; |
| 159 | } |
| 160 | return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); |
| 161 | } |
| 162 | |
| 163 | bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { |
| 164 | if (getContext().isPromotableIntegerType(T: Ty)) |
| 165 | return true; |
| 166 | |
| 167 | if (const auto *EIT = Ty->getAs<BitIntType>()) |
| 168 | if (EIT->getNumBits() < getContext().getTypeSize(T: getContext().IntTy)) |
| 169 | return true; |
| 170 | |
| 171 | return false; |
| 172 | } |
| 173 | |
| 174 | ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, unsigned AddrSpace, |
| 175 | bool ByVal, bool Realign, |
| 176 | llvm::Type *Padding) const { |
| 177 | return ABIArgInfo::getIndirect(Alignment: getContext().getTypeAlignInChars(T: Ty), |
| 178 | AddrSpace, ByVal, Realign, Padding); |
| 179 | } |
| 180 | |
| 181 | ABIArgInfo ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, |
| 182 | bool Realign) const { |
| 183 | return ABIArgInfo::getIndirectInReg(Alignment: getContext().getTypeAlignInChars(T: Ty), |
| 184 | /*ByVal*/ false, Realign); |
| 185 | } |
| 186 | |
| 187 | void ABIInfo::appendAttributeMangling(TargetAttr *Attr, |
| 188 | raw_ostream &Out) const { |
| 189 | if (Attr->isDefaultVersion()) |
| 190 | return; |
| 191 | appendAttributeMangling(AttrStr: Attr->getFeaturesStr(), Out); |
| 192 | } |
| 193 | |
| 194 | void ABIInfo::appendAttributeMangling(TargetVersionAttr *Attr, |
| 195 | raw_ostream &Out) const { |
| 196 | appendAttributeMangling(AttrStr: Attr->getNamesStr(), Out); |
| 197 | } |
| 198 | |
| 199 | void ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index, |
| 200 | raw_ostream &Out) const { |
| 201 | appendAttributeMangling(AttrStr: Attr->getFeatureStr(Index), Out); |
| 202 | Out << '.' << Attr->getMangledIndex(Index); |
| 203 | } |
| 204 | |
| 205 | void ABIInfo::appendAttributeMangling(StringRef AttrStr, |
| 206 | raw_ostream &Out) const { |
| 207 | if (AttrStr == "default" ) { |
| 208 | Out << ".default" ; |
| 209 | return; |
| 210 | } |
| 211 | |
| 212 | Out << '.'; |
| 213 | const TargetInfo &TI = CGT.getTarget(); |
| 214 | ParsedTargetAttr Info = TI.parseTargetAttr(Str: AttrStr); |
| 215 | |
| 216 | llvm::sort(C&: Info.Features, Comp: [&TI](StringRef LHS, StringRef RHS) { |
| 217 | // Multiversioning doesn't allow "no-${feature}", so we can |
| 218 | // only have "+" prefixes here. |
| 219 | assert(LHS.starts_with("+" ) && RHS.starts_with("+" ) && |
| 220 | "Features should always have a prefix." ); |
| 221 | return TI.getFMVPriority(Features: {LHS.substr(Start: 1)}) > |
| 222 | TI.getFMVPriority(Features: {RHS.substr(Start: 1)}); |
| 223 | }); |
| 224 | |
| 225 | bool IsFirst = true; |
| 226 | if (!Info.CPU.empty()) { |
| 227 | IsFirst = false; |
| 228 | Out << "arch_" << Info.CPU; |
| 229 | } |
| 230 | |
| 231 | for (StringRef Feat : Info.Features) { |
| 232 | if (!IsFirst) |
| 233 | Out << '_'; |
| 234 | IsFirst = false; |
| 235 | Out << Feat.substr(Start: 1); |
| 236 | } |
| 237 | } |
| 238 | |
| 239 | llvm::FixedVectorType * |
| 240 | ABIInfo::getOptimalVectorMemoryType(llvm::FixedVectorType *T, |
| 241 | const LangOptions &Opt) const { |
| 242 | if (T->getNumElements() == 3 && !Opt.PreserveVec3Type) |
| 243 | return llvm::FixedVectorType::get(ElementType: T->getElementType(), NumElts: 4); |
| 244 | return T; |
| 245 | } |
| 246 | |
// Pin the vtable to this file by defining the key function here.
SwiftABIInfo::~SwiftABIInfo() = default;
| 249 | |
| 250 | /// Does the given lowering require more than the given number of |
| 251 | /// registers when expanded? |
| 252 | /// |
| 253 | /// This is intended to be the basis of a reasonable basic implementation |
| 254 | /// of should{Pass,Return}Indirectly. |
| 255 | /// |
| 256 | /// For most targets, a limit of four total registers is reasonable; this |
| 257 | /// limits the amount of code required in order to move around the value |
| 258 | /// in case it wasn't produced immediately prior to the call by the caller |
| 259 | /// (or wasn't produced in exactly the right registers) or isn't used |
| 260 | /// immediately within the callee. But some targets may need to further |
| 261 | /// limit the register count due to an inability to support that many |
| 262 | /// return registers. |
| 263 | bool SwiftABIInfo::occupiesMoreThan(ArrayRef<llvm::Type *> scalarTypes, |
| 264 | unsigned maxAllRegisters) const { |
| 265 | unsigned intCount = 0, fpCount = 0; |
| 266 | for (llvm::Type *type : scalarTypes) { |
| 267 | if (type->isPointerTy()) { |
| 268 | intCount++; |
| 269 | } else if (auto intTy = dyn_cast<llvm::IntegerType>(Val: type)) { |
| 270 | auto ptrWidth = CGT.getTarget().getPointerWidth(AddrSpace: LangAS::Default); |
| 271 | intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth; |
| 272 | } else { |
| 273 | assert(type->isVectorTy() || type->isFloatingPointTy()); |
| 274 | fpCount++; |
| 275 | } |
| 276 | } |
| 277 | |
| 278 | return (intCount + fpCount > maxAllRegisters); |
| 279 | } |
| 280 | |
| 281 | bool SwiftABIInfo::shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys, |
| 282 | bool AsReturnValue) const { |
| 283 | return occupiesMoreThan(scalarTypes: ComponentTys, /*total=*/maxAllRegisters: 4); |
| 284 | } |
| 285 | |
/// Whether a vector occupying \p VectorSize bytes, with \p NumElts elements
/// of type \p EltTy, may be passed directly under the Swift ABI.
bool SwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                                     unsigned NumElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.  Accept sizes strictly larger
  // than 64 bits and no larger than 128 bits.
  return (VectorSize.getQuantity() > 8 && VectorSize.getQuantity() <= 16);
}
| 292 | |