| 1 | //===- ARMTargetStreamer.cpp - ARMTargetStreamer class --*- C++ -*---------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file implements the ARMTargetStreamer class. |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | |
| 13 | #include "MCTargetDesc/ARMMCTargetDesc.h" |
| 14 | #include "llvm/MC/ConstantPools.h" |
| 15 | #include "llvm/MC/MCAsmInfo.h" |
| 16 | #include "llvm/MC/MCContext.h" |
| 17 | #include "llvm/MC/MCExpr.h" |
| 18 | #include "llvm/MC/MCStreamer.h" |
| 19 | #include "llvm/MC/MCSubtargetInfo.h" |
| 20 | #include "llvm/Support/ARMBuildAttributes.h" |
| 21 | |
| 22 | using namespace llvm; |
| 23 | |
| 24 | // |
// ARMTargetStreamer Implementation
| 26 | // |
| 27 | |
// Every target streamer gets its own constant-pool bookkeeping; the pool
// machinery itself is shared across all ARM streamer implementations.
ARMTargetStreamer::ARMTargetStreamer(MCStreamer &S)
    : MCTargetStreamer(S), ConstantPools(new AssemblerConstantPools()) {}
| 30 | |
| 31 | ARMTargetStreamer::~ARMTargetStreamer() = default; |
| 32 | |
| 33 | // The constant pool handling is shared by all ARMTargetStreamer |
| 34 | // implementations. |
| 35 | const MCExpr *ARMTargetStreamer::addConstantPoolEntry(const MCExpr *Expr, SMLoc Loc) { |
| 36 | return ConstantPools->addEntry(Streamer, Expr, Size: 4, Loc); |
| 37 | } |
| 38 | |
// Flush the pending constant-pool entries for the current section, then drop
// the cached entries so a later pool in the same section starts fresh.
void ARMTargetStreamer::emitCurrentConstantPool() {
  ConstantPools->emitForCurrentSection(Streamer);
  ConstantPools->clearCacheForCurrentSection(Streamer);
}
| 43 | |
// emitConstantPools() - write out any non-empty assembler constant pools.
void ARMTargetStreamer::emitConstantPools() {
  // Emit every remaining non-empty pool, regardless of section.
  ConstantPools->emitAll(Streamer);
}
| 48 | |
| 49 | // reset() - Reset any state |
| 50 | void ARMTargetStreamer::reset() {} |
| 51 | |
| 52 | void ARMTargetStreamer::emitInst(uint32_t Inst, char Suffix) { |
| 53 | unsigned Size; |
| 54 | char Buffer[4]; |
| 55 | const bool LittleEndian = getContext().getAsmInfo()->isLittleEndian(); |
| 56 | |
| 57 | switch (Suffix) { |
| 58 | case '\0': |
| 59 | Size = 4; |
| 60 | |
| 61 | for (unsigned II = 0, IE = Size; II != IE; II++) { |
| 62 | const unsigned I = LittleEndian ? (Size - II - 1) : II; |
| 63 | Buffer[Size - II - 1] = uint8_t(Inst >> I * CHAR_BIT); |
| 64 | } |
| 65 | |
| 66 | break; |
| 67 | case 'n': |
| 68 | case 'w': |
| 69 | Size = (Suffix == 'n' ? 2 : 4); |
| 70 | |
| 71 | // Thumb wide instructions are emitted as a pair of 16-bit words of the |
| 72 | // appropriate endianness. |
| 73 | for (unsigned II = 0, IE = Size; II != IE; II = II + 2) { |
| 74 | const unsigned I0 = LittleEndian ? II + 0 : II + 1; |
| 75 | const unsigned I1 = LittleEndian ? II + 1 : II + 0; |
| 76 | Buffer[Size - II - 2] = uint8_t(Inst >> I0 * CHAR_BIT); |
| 77 | Buffer[Size - II - 1] = uint8_t(Inst >> I1 * CHAR_BIT); |
| 78 | } |
| 79 | |
| 80 | break; |
| 81 | default: |
| 82 | llvm_unreachable("Invalid Suffix" ); |
| 83 | } |
| 84 | getStreamer().emitBytes(Data: StringRef(Buffer, Size)); |
| 85 | } |
| 86 | |
| 87 | // The remaining callbacks should be handled separately by each |
| 88 | // streamer. |
// Unwind-table callbacks: default to no-ops; concrete streamers override.
void ARMTargetStreamer::emitFnStart() {}
void ARMTargetStreamer::emitFnEnd() {}
void ARMTargetStreamer::emitCantUnwind() {}
void ARMTargetStreamer::emitPersonality(const MCSymbol *Personality) {}
void ARMTargetStreamer::emitPersonalityIndex(unsigned Index) {}
void ARMTargetStreamer::emitHandlerData() {}
void ARMTargetStreamer::emitSetFP(MCRegister FpReg, MCRegister SpReg,
                                  int64_t Offset) {}
void ARMTargetStreamer::emitMovSP(MCRegister Reg, int64_t Offset) {}
void ARMTargetStreamer::emitPad(int64_t Offset) {}
void ARMTargetStreamer::emitRegSave(const SmallVectorImpl<MCRegister> &RegList,
                                    bool isVector) {}
void ARMTargetStreamer::emitUnwindRaw(int64_t StackOffset,
                                      const SmallVectorImpl<uint8_t> &Opcodes) {
}
// Build-attribute and arch/FPU callbacks: no-ops in the base class.
void ARMTargetStreamer::switchVendor(StringRef Vendor) {}
void ARMTargetStreamer::emitAttribute(unsigned Attribute, unsigned Value) {}
void ARMTargetStreamer::emitTextAttribute(unsigned Attribute,
                                          StringRef String) {}
void ARMTargetStreamer::emitIntTextAttribute(unsigned Attribute,
                                             unsigned IntValue,
                                             StringRef StringValue) {}
void ARMTargetStreamer::emitArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitArchExtension(uint64_t ArchExt) {}
void ARMTargetStreamer::emitObjectArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitFPU(ARM::FPUKind FPU) {}
void ARMTargetStreamer::finishAttributeSection() {}
void ARMTargetStreamer::annotateTLSDescriptorSequence(
    const MCSymbolRefExpr *SRE) {}
// Mode/syntax and Thumb symbol callbacks.
void ARMTargetStreamer::emitSyntaxUnified() {}
void ARMTargetStreamer::emitCode16() {}
void ARMTargetStreamer::emitCode32() {}
void ARMTargetStreamer::emitThumbFunc(MCSymbol *Symbol) {}
void ARMTargetStreamer::emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) {}

// Windows CFI callbacks: no-ops here; the COFF streamer provides behavior.
void ARMTargetStreamer::emitARMWinCFIAllocStack(unsigned Size, bool Wide) {}
void ARMTargetStreamer::emitARMWinCFISaveRegMask(unsigned Mask, bool Wide) {}
void ARMTargetStreamer::emitARMWinCFISaveSP(unsigned Reg) {}
void ARMTargetStreamer::emitARMWinCFISaveFRegs(unsigned First, unsigned Last) {}
void ARMTargetStreamer::emitARMWinCFISaveLR(unsigned Offset) {}
void ARMTargetStreamer::emitARMWinCFINop(bool Wide) {}
void ARMTargetStreamer::emitARMWinCFIPrologEnd(bool Fragment) {}
void ARMTargetStreamer::emitARMWinCFIEpilogStart(unsigned Condition) {}
void ARMTargetStreamer::emitARMWinCFIEpilogEnd() {}
void ARMTargetStreamer::emitARMWinCFICustom(unsigned Opcode) {}
| 134 | |
| 135 | static ARMBuildAttrs::CPUArch getArchForCPU(const MCSubtargetInfo &STI) { |
| 136 | if (STI.getCPU() == "xscale" ) |
| 137 | return ARMBuildAttrs::v5TEJ; |
| 138 | |
| 139 | if (STI.hasFeature(Feature: ARM::HasV9_0aOps)) |
| 140 | return ARMBuildAttrs::v9_A; |
| 141 | else if (STI.hasFeature(Feature: ARM::HasV8Ops)) { |
| 142 | if (STI.hasFeature(Feature: ARM::FeatureRClass)) |
| 143 | return ARMBuildAttrs::v8_R; |
| 144 | return ARMBuildAttrs::v8_A; |
| 145 | } else if (STI.hasFeature(Feature: ARM::HasV8_1MMainlineOps)) |
| 146 | return ARMBuildAttrs::v8_1_M_Main; |
| 147 | else if (STI.hasFeature(Feature: ARM::HasV8MMainlineOps)) |
| 148 | return ARMBuildAttrs::v8_M_Main; |
| 149 | else if (STI.hasFeature(Feature: ARM::HasV7Ops)) { |
| 150 | if (STI.hasFeature(Feature: ARM::FeatureMClass) && STI.hasFeature(Feature: ARM::FeatureDSP)) |
| 151 | return ARMBuildAttrs::v7E_M; |
| 152 | return ARMBuildAttrs::v7; |
| 153 | } else if (STI.hasFeature(Feature: ARM::HasV6T2Ops)) |
| 154 | return ARMBuildAttrs::v6T2; |
| 155 | else if (STI.hasFeature(Feature: ARM::HasV8MBaselineOps)) |
| 156 | return ARMBuildAttrs::v8_M_Base; |
| 157 | else if (STI.hasFeature(Feature: ARM::HasV6MOps)) |
| 158 | return ARMBuildAttrs::v6S_M; |
| 159 | else if (STI.hasFeature(Feature: ARM::HasV6Ops)) |
| 160 | return ARMBuildAttrs::v6; |
| 161 | else if (STI.hasFeature(Feature: ARM::HasV5TEOps)) |
| 162 | return ARMBuildAttrs::v5TE; |
| 163 | else if (STI.hasFeature(Feature: ARM::HasV5TOps)) |
| 164 | return ARMBuildAttrs::v5T; |
| 165 | else if (STI.hasFeature(Feature: ARM::HasV4TOps)) |
| 166 | return ARMBuildAttrs::v4T; |
| 167 | else |
| 168 | return ARMBuildAttrs::v4; |
| 169 | } |
| 170 | |
| 171 | static bool isV8M(const MCSubtargetInfo &STI) { |
| 172 | // Note that v8M Baseline is a subset of v6T2! |
| 173 | return (STI.hasFeature(Feature: ARM::HasV8MBaselineOps) && |
| 174 | !STI.hasFeature(Feature: ARM::HasV6T2Ops)) || |
| 175 | STI.hasFeature(Feature: ARM::HasV8MMainlineOps); |
| 176 | } |
| 177 | |
/// Emit the build attributes that only depend on the hardware that we expect
/// to be available, and not on the ABI, or any source-language choices.
| 180 | void ARMTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI) { |
| 181 | switchVendor(Vendor: "aeabi" ); |
| 182 | |
| 183 | const StringRef CPUString = STI.getCPU(); |
| 184 | if (!CPUString.empty() && !CPUString.starts_with(Prefix: "generic" )) { |
| 185 | // FIXME: remove krait check when GNU tools support krait cpu |
| 186 | if (STI.hasFeature(Feature: ARM::ProcKrait)) { |
| 187 | emitTextAttribute(Attribute: ARMBuildAttrs::CPU_name, String: "cortex-a9" ); |
| 188 | // We consider krait as a "cortex-a9" + hwdiv CPU |
| 189 | // Enable hwdiv through ".arch_extension idiv" |
| 190 | if (STI.hasFeature(Feature: ARM::FeatureHWDivThumb) || |
| 191 | STI.hasFeature(Feature: ARM::FeatureHWDivARM)) |
| 192 | emitArchExtension(ArchExt: ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM); |
| 193 | } else { |
| 194 | emitTextAttribute(Attribute: ARMBuildAttrs::CPU_name, String: CPUString); |
| 195 | } |
| 196 | } |
| 197 | |
| 198 | emitAttribute(Attribute: ARMBuildAttrs::CPU_arch, Value: getArchForCPU(STI)); |
| 199 | |
| 200 | if (STI.hasFeature(Feature: ARM::FeatureAClass)) { |
| 201 | emitAttribute(Attribute: ARMBuildAttrs::CPU_arch_profile, |
| 202 | Value: ARMBuildAttrs::ApplicationProfile); |
| 203 | } else if (STI.hasFeature(Feature: ARM::FeatureRClass)) { |
| 204 | emitAttribute(Attribute: ARMBuildAttrs::CPU_arch_profile, |
| 205 | Value: ARMBuildAttrs::RealTimeProfile); |
| 206 | } else if (STI.hasFeature(Feature: ARM::FeatureMClass)) { |
| 207 | emitAttribute(Attribute: ARMBuildAttrs::CPU_arch_profile, |
| 208 | Value: ARMBuildAttrs::MicroControllerProfile); |
| 209 | } |
| 210 | |
| 211 | emitAttribute(Attribute: ARMBuildAttrs::ARM_ISA_use, Value: STI.hasFeature(Feature: ARM::FeatureNoARM) |
| 212 | ? ARMBuildAttrs::Not_Allowed |
| 213 | : ARMBuildAttrs::Allowed); |
| 214 | |
| 215 | if (isV8M(STI)) { |
| 216 | emitAttribute(Attribute: ARMBuildAttrs::THUMB_ISA_use, |
| 217 | Value: ARMBuildAttrs::AllowThumbDerived); |
| 218 | } else if (STI.hasFeature(Feature: ARM::FeatureThumb2)) { |
| 219 | emitAttribute(Attribute: ARMBuildAttrs::THUMB_ISA_use, |
| 220 | Value: ARMBuildAttrs::AllowThumb32); |
| 221 | } else if (STI.hasFeature(Feature: ARM::HasV4TOps)) { |
| 222 | emitAttribute(Attribute: ARMBuildAttrs::THUMB_ISA_use, Value: ARMBuildAttrs::Allowed); |
| 223 | } |
| 224 | |
| 225 | if (STI.hasFeature(Feature: ARM::FeatureNEON)) { |
| 226 | /* NEON is not exactly a VFP architecture, but GAS emit one of |
| 227 | * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */ |
| 228 | if (STI.hasFeature(Feature: ARM::FeatureFPARMv8)) { |
| 229 | if (STI.hasFeature(Feature: ARM::FeatureCrypto)) |
| 230 | emitFPU(FPU: ARM::FK_CRYPTO_NEON_FP_ARMV8); |
| 231 | else |
| 232 | emitFPU(FPU: ARM::FK_NEON_FP_ARMV8); |
| 233 | } else if (STI.hasFeature(Feature: ARM::FeatureVFP4)) |
| 234 | emitFPU(FPU: ARM::FK_NEON_VFPV4); |
| 235 | else |
| 236 | emitFPU(FPU: STI.hasFeature(Feature: ARM::FeatureFP16) ? ARM::FK_NEON_FP16 |
| 237 | : ARM::FK_NEON); |
| 238 | // Emit Tag_Advanced_SIMD_arch for ARMv8 architecture |
| 239 | if (STI.hasFeature(Feature: ARM::HasV8Ops)) |
| 240 | emitAttribute(Attribute: ARMBuildAttrs::Advanced_SIMD_arch, |
| 241 | Value: STI.hasFeature(Feature: ARM::HasV8_1aOps) |
| 242 | ? ARMBuildAttrs::AllowNeonARMv8_1a |
| 243 | : ARMBuildAttrs::AllowNeonARMv8); |
| 244 | } else { |
| 245 | if (STI.hasFeature(Feature: ARM::FeatureFPARMv8_D16_SP)) { |
| 246 | // FPv5 and FP-ARMv8 have the same instructions, so are modeled as one |
| 247 | // FPU, but there are two different names for it depending on the CPU. |
| 248 | if (STI.hasFeature(Feature: ARM::FeatureD32)) |
| 249 | emitFPU(FPU: ARM::FK_FP_ARMV8); |
| 250 | else { |
| 251 | emitFPU(FPU: STI.hasFeature(Feature: ARM::FeatureFP64) ? ARM::FK_FPV5_D16 |
| 252 | : ARM::FK_FPV5_SP_D16); |
| 253 | if (STI.hasFeature(Feature: ARM::HasMVEFloatOps)) |
| 254 | emitArchExtension(ArchExt: ARM::AEK_MVE | ARM::AEK_DSP | ARM::AEK_FP); |
| 255 | } |
| 256 | } else if (STI.hasFeature(Feature: ARM::FeatureVFP4_D16_SP)) |
| 257 | emitFPU(FPU: STI.hasFeature(Feature: ARM::FeatureD32) |
| 258 | ? ARM::FK_VFPV4 |
| 259 | : (STI.hasFeature(Feature: ARM::FeatureFP64) ? ARM::FK_VFPV4_D16 |
| 260 | : ARM::FK_FPV4_SP_D16)); |
| 261 | else if (STI.hasFeature(Feature: ARM::FeatureVFP3_D16_SP)) |
| 262 | emitFPU( |
| 263 | FPU: STI.hasFeature(Feature: ARM::FeatureD32) |
| 264 | // +d32 |
| 265 | ? (STI.hasFeature(Feature: ARM::FeatureFP16) ? ARM::FK_VFPV3_FP16 |
| 266 | : ARM::FK_VFPV3) |
| 267 | // -d32 |
| 268 | : (STI.hasFeature(Feature: ARM::FeatureFP64) |
| 269 | ? (STI.hasFeature(Feature: ARM::FeatureFP16) |
| 270 | ? ARM::FK_VFPV3_D16_FP16 |
| 271 | : ARM::FK_VFPV3_D16) |
| 272 | : (STI.hasFeature(Feature: ARM::FeatureFP16) ? ARM::FK_VFPV3XD_FP16 |
| 273 | : ARM::FK_VFPV3XD))); |
| 274 | else if (STI.hasFeature(Feature: ARM::FeatureVFP2_SP)) |
| 275 | emitFPU(FPU: ARM::FK_VFPV2); |
| 276 | } |
| 277 | |
| 278 | // ABI_HardFP_use attribute to indicate single precision FP. |
| 279 | if (STI.hasFeature(Feature: ARM::FeatureVFP2_SP) && !STI.hasFeature(Feature: ARM::FeatureFP64)) |
| 280 | emitAttribute(Attribute: ARMBuildAttrs::ABI_HardFP_use, |
| 281 | Value: ARMBuildAttrs::HardFPSinglePrecision); |
| 282 | |
| 283 | if (STI.hasFeature(Feature: ARM::FeatureFP16)) |
| 284 | emitAttribute(Attribute: ARMBuildAttrs::FP_HP_extension, Value: ARMBuildAttrs::AllowHPFP); |
| 285 | |
| 286 | if (STI.hasFeature(Feature: ARM::FeatureMP)) |
| 287 | emitAttribute(Attribute: ARMBuildAttrs::MPextension_use, Value: ARMBuildAttrs::AllowMP); |
| 288 | |
| 289 | if (STI.hasFeature(Feature: ARM::HasMVEFloatOps)) |
| 290 | emitAttribute(Attribute: ARMBuildAttrs::MVE_arch, Value: ARMBuildAttrs::AllowMVEIntegerAndFloat); |
| 291 | else if (STI.hasFeature(Feature: ARM::HasMVEIntegerOps)) |
| 292 | emitAttribute(Attribute: ARMBuildAttrs::MVE_arch, Value: ARMBuildAttrs::AllowMVEInteger); |
| 293 | |
| 294 | // Hardware divide in ARM mode is part of base arch, starting from ARMv8. |
| 295 | // If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M). |
| 296 | // It is not possible to produce DisallowDIV: if hwdiv is present in the base |
| 297 | // arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits. |
| 298 | // AllowDIVExt is only emitted if hwdiv isn't available in the base arch; |
| 299 | // otherwise, the default value (AllowDIVIfExists) applies. |
| 300 | if (STI.hasFeature(Feature: ARM::FeatureHWDivARM) && !STI.hasFeature(Feature: ARM::HasV8Ops)) |
| 301 | emitAttribute(Attribute: ARMBuildAttrs::DIV_use, Value: ARMBuildAttrs::AllowDIVExt); |
| 302 | |
| 303 | if (STI.hasFeature(Feature: ARM::FeatureDSP) && isV8M(STI)) |
| 304 | emitAttribute(Attribute: ARMBuildAttrs::DSP_extension, Value: ARMBuildAttrs::Allowed); |
| 305 | |
| 306 | if (STI.hasFeature(Feature: ARM::FeatureStrictAlign)) |
| 307 | emitAttribute(Attribute: ARMBuildAttrs::CPU_unaligned_access, |
| 308 | Value: ARMBuildAttrs::Not_Allowed); |
| 309 | else |
| 310 | emitAttribute(Attribute: ARMBuildAttrs::CPU_unaligned_access, |
| 311 | Value: ARMBuildAttrs::Allowed); |
| 312 | |
| 313 | if (STI.hasFeature(Feature: ARM::FeatureTrustZone) && |
| 314 | STI.hasFeature(Feature: ARM::FeatureVirtualization)) |
| 315 | emitAttribute(Attribute: ARMBuildAttrs::Virtualization_use, |
| 316 | Value: ARMBuildAttrs::AllowTZVirtualization); |
| 317 | else if (STI.hasFeature(Feature: ARM::FeatureTrustZone)) |
| 318 | emitAttribute(Attribute: ARMBuildAttrs::Virtualization_use, Value: ARMBuildAttrs::AllowTZ); |
| 319 | else if (STI.hasFeature(Feature: ARM::FeatureVirtualization)) |
| 320 | emitAttribute(Attribute: ARMBuildAttrs::Virtualization_use, |
| 321 | Value: ARMBuildAttrs::AllowVirtualization); |
| 322 | |
| 323 | if (STI.hasFeature(Feature: ARM::FeaturePACBTI)) { |
| 324 | emitAttribute(Attribute: ARMBuildAttrs::PAC_extension, Value: ARMBuildAttrs::AllowPAC); |
| 325 | emitAttribute(Attribute: ARMBuildAttrs::BTI_extension, Value: ARMBuildAttrs::AllowBTI); |
| 326 | } |
| 327 | } |
| 328 | |
| 329 | MCTargetStreamer * |
| 330 | llvm::createARMObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) { |
| 331 | const Triple &TT = STI.getTargetTriple(); |
| 332 | if (TT.isOSBinFormatELF()) |
| 333 | return createARMObjectTargetELFStreamer(S); |
| 334 | if (TT.isOSBinFormatCOFF()) |
| 335 | return createARMObjectTargetWinCOFFStreamer(S); |
| 336 | if (TT.isOSBinFormatMachO()) |
| 337 | return createARMObjectTargetMachOStreamer(S); |
| 338 | return new ARMTargetStreamer(S); |
| 339 | } |
| 340 | |