//===-- LoongArchAsmBackend.cpp - LoongArch Assembler Backend -*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LoongArchAsmBackend class.
//
//===----------------------------------------------------------------------===//
| 12 | |
| 13 | #include "LoongArchAsmBackend.h" |
| 14 | #include "LoongArchFixupKinds.h" |
| 15 | #include "llvm/BinaryFormat/ELF.h" |
| 16 | #include "llvm/MC/MCAsmInfo.h" |
| 17 | #include "llvm/MC/MCAssembler.h" |
| 18 | #include "llvm/MC/MCContext.h" |
| 19 | #include "llvm/MC/MCELFObjectWriter.h" |
| 20 | #include "llvm/MC/MCExpr.h" |
| 21 | #include "llvm/MC/MCSection.h" |
| 22 | #include "llvm/MC/MCValue.h" |
| 23 | #include "llvm/Support/EndianStream.h" |
| 24 | #include "llvm/Support/LEB128.h" |
| 25 | #include "llvm/Support/MathExtras.h" |
| 26 | |
| 27 | #define DEBUG_TYPE "loongarch-asmbackend" |
| 28 | |
| 29 | using namespace llvm; |
| 30 | |
| 31 | LoongArchAsmBackend::LoongArchAsmBackend(const MCSubtargetInfo &STI, |
| 32 | uint8_t OSABI, bool Is64Bit, |
| 33 | const MCTargetOptions &Options) |
| 34 | : MCAsmBackend(llvm::endianness::little), STI(STI), OSABI(OSABI), |
| 35 | Is64Bit(Is64Bit), TargetOptions(Options) {} |
| 36 | |
| 37 | std::optional<MCFixupKind> |
| 38 | LoongArchAsmBackend::getFixupKind(StringRef Name) const { |
| 39 | if (STI.getTargetTriple().isOSBinFormatELF()) { |
| 40 | auto Type = llvm::StringSwitch<unsigned>(Name) |
| 41 | #define ELF_RELOC(X, Y) .Case(#X, Y) |
| 42 | #include "llvm/BinaryFormat/ELFRelocs/LoongArch.def" |
| 43 | #undef ELF_RELOC |
| 44 | .Case(S: "BFD_RELOC_NONE" , Value: ELF::R_LARCH_NONE) |
| 45 | .Case(S: "BFD_RELOC_32" , Value: ELF::R_LARCH_32) |
| 46 | .Case(S: "BFD_RELOC_64" , Value: ELF::R_LARCH_64) |
| 47 | .Default(Value: -1u); |
| 48 | if (Type != -1u) |
| 49 | return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type); |
| 50 | } |
| 51 | return std::nullopt; |
| 52 | } |
| 53 | |
| 54 | MCFixupKindInfo LoongArchAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { |
| 55 | const static MCFixupKindInfo Infos[] = { |
| 56 | // This table *must* be in the order that the fixup_* kinds are defined in |
| 57 | // LoongArchFixupKinds.h. |
| 58 | // |
| 59 | // {name, offset, bits, flags} |
| 60 | {.Name: "fixup_loongarch_b16" , .TargetOffset: 10, .TargetSize: 16, .Flags: 0}, |
| 61 | {.Name: "fixup_loongarch_b21" , .TargetOffset: 0, .TargetSize: 26, .Flags: 0}, |
| 62 | {.Name: "fixup_loongarch_b26" , .TargetOffset: 0, .TargetSize: 26, .Flags: 0}, |
| 63 | {.Name: "fixup_loongarch_abs_hi20" , .TargetOffset: 5, .TargetSize: 20, .Flags: 0}, |
| 64 | {.Name: "fixup_loongarch_abs_lo12" , .TargetOffset: 10, .TargetSize: 12, .Flags: 0}, |
| 65 | {.Name: "fixup_loongarch_abs64_lo20" , .TargetOffset: 5, .TargetSize: 20, .Flags: 0}, |
| 66 | {.Name: "fixup_loongarch_abs64_hi12" , .TargetOffset: 10, .TargetSize: 12, .Flags: 0}, |
| 67 | }; |
| 68 | |
| 69 | static_assert((std::size(Infos)) == LoongArch::NumTargetFixupKinds, |
| 70 | "Not all fixup kinds added to Infos array" ); |
| 71 | |
| 72 | // Fixup kinds from .reloc directive are like R_LARCH_NONE. They |
| 73 | // do not require any extra processing. |
| 74 | if (mc::isRelocation(FixupKind: Kind)) |
| 75 | return {}; |
| 76 | |
| 77 | if (Kind < FirstTargetFixupKind) |
| 78 | return MCAsmBackend::getFixupKindInfo(Kind); |
| 79 | |
| 80 | assert(unsigned(Kind - FirstTargetFixupKind) < |
| 81 | LoongArch::NumTargetFixupKinds && |
| 82 | "Invalid kind!" ); |
| 83 | return Infos[Kind - FirstTargetFixupKind]; |
| 84 | } |
| 85 | |
| 86 | static void reportOutOfRangeError(MCContext &Ctx, SMLoc Loc, unsigned N) { |
| 87 | Ctx.reportError(L: Loc, Msg: "fixup value out of range [" + Twine(llvm::minIntN(N)) + |
| 88 | ", " + Twine(llvm::maxIntN(N)) + "]" ); |
| 89 | } |
| 90 | |
| 91 | static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value, |
| 92 | MCContext &Ctx) { |
| 93 | switch (Fixup.getKind()) { |
| 94 | default: |
| 95 | llvm_unreachable("Unknown fixup kind" ); |
| 96 | case FK_Data_1: |
| 97 | case FK_Data_2: |
| 98 | case FK_Data_4: |
| 99 | case FK_Data_8: |
| 100 | case FK_Data_leb128: |
| 101 | return Value; |
| 102 | case LoongArch::fixup_loongarch_b16: { |
| 103 | if (!isInt<18>(x: Value)) |
| 104 | reportOutOfRangeError(Ctx, Loc: Fixup.getLoc(), N: 18); |
| 105 | if (Value % 4) |
| 106 | Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value must be 4-byte aligned" ); |
| 107 | return (Value >> 2) & 0xffff; |
| 108 | } |
| 109 | case LoongArch::fixup_loongarch_b21: { |
| 110 | if (!isInt<23>(x: Value)) |
| 111 | reportOutOfRangeError(Ctx, Loc: Fixup.getLoc(), N: 23); |
| 112 | if (Value % 4) |
| 113 | Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value must be 4-byte aligned" ); |
| 114 | return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x1f); |
| 115 | } |
| 116 | case LoongArch::fixup_loongarch_b26: { |
| 117 | if (!isInt<28>(x: Value)) |
| 118 | reportOutOfRangeError(Ctx, Loc: Fixup.getLoc(), N: 28); |
| 119 | if (Value % 4) |
| 120 | Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value must be 4-byte aligned" ); |
| 121 | return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x3ff); |
| 122 | } |
| 123 | case LoongArch::fixup_loongarch_abs_hi20: |
| 124 | return (Value >> 12) & 0xfffff; |
| 125 | case LoongArch::fixup_loongarch_abs_lo12: |
| 126 | return Value & 0xfff; |
| 127 | case LoongArch::fixup_loongarch_abs64_lo20: |
| 128 | return (Value >> 32) & 0xfffff; |
| 129 | case LoongArch::fixup_loongarch_abs64_hi12: |
| 130 | return (Value >> 52) & 0xfff; |
| 131 | } |
| 132 | } |
| 133 | |
| 134 | static void fixupLeb128(MCContext &Ctx, const MCFixup &Fixup, uint8_t *Data, |
| 135 | uint64_t Value) { |
| 136 | unsigned I; |
| 137 | for (I = 0; Value; ++I, Value >>= 7) |
| 138 | Data[I] |= uint8_t(Value & 0x7f); |
| 139 | } |
| 140 | |
| 141 | void LoongArchAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup, |
| 142 | const MCValue &Target, uint8_t *Data, |
| 143 | uint64_t Value, bool IsResolved) { |
| 144 | IsResolved = addReloc(F, Fixup, Target, FixedValue&: Value, IsResolved); |
| 145 | if (!Value) |
| 146 | return; // Doesn't change encoding. |
| 147 | |
| 148 | auto Kind = Fixup.getKind(); |
| 149 | if (mc::isRelocation(FixupKind: Kind)) |
| 150 | return; |
| 151 | MCFixupKindInfo Info = getFixupKindInfo(Kind); |
| 152 | MCContext &Ctx = getContext(); |
| 153 | |
| 154 | // Fixup leb128 separately. |
| 155 | if (Fixup.getKind() == FK_Data_leb128) |
| 156 | return fixupLeb128(Ctx, Fixup, Data, Value); |
| 157 | |
| 158 | // Apply any target-specific value adjustments. |
| 159 | Value = adjustFixupValue(Fixup, Value, Ctx); |
| 160 | |
| 161 | // Shift the value into position. |
| 162 | Value <<= Info.TargetOffset; |
| 163 | |
| 164 | unsigned NumBytes = alignTo(Value: Info.TargetSize + Info.TargetOffset, Align: 8) / 8; |
| 165 | |
| 166 | assert(Fixup.getOffset() + NumBytes <= F.getSize() && |
| 167 | "Invalid fixup offset!" ); |
| 168 | // For each byte of the fragment that the fixup touches, mask in the |
| 169 | // bits from the fixup value. |
| 170 | for (unsigned I = 0; I != NumBytes; ++I) { |
| 171 | Data[I] |= uint8_t((Value >> (I * 8)) & 0xff); |
| 172 | } |
| 173 | } |
| 174 | |
| 175 | static inline std::pair<MCFixupKind, MCFixupKind> |
| 176 | getRelocPairForSize(unsigned Size) { |
| 177 | switch (Size) { |
| 178 | default: |
| 179 | llvm_unreachable("unsupported fixup size" ); |
| 180 | case 6: |
| 181 | return std::make_pair(x: ELF::R_LARCH_ADD6, y: ELF::R_LARCH_SUB6); |
| 182 | case 8: |
| 183 | return std::make_pair(x: ELF::R_LARCH_ADD8, y: ELF::R_LARCH_SUB8); |
| 184 | case 16: |
| 185 | return std::make_pair(x: ELF::R_LARCH_ADD16, y: ELF::R_LARCH_SUB16); |
| 186 | case 32: |
| 187 | return std::make_pair(x: ELF::R_LARCH_ADD32, y: ELF::R_LARCH_SUB32); |
| 188 | case 64: |
| 189 | return std::make_pair(x: ELF::R_LARCH_ADD64, y: ELF::R_LARCH_SUB64); |
| 190 | case 128: |
| 191 | return std::make_pair(x: ELF::R_LARCH_ADD_ULEB128, y: ELF::R_LARCH_SUB_ULEB128); |
| 192 | } |
| 193 | } |
| 194 | |
| 195 | // Check if an R_LARCH_ALIGN relocation is needed for an alignment directive. |
| 196 | // If conditions are met, compute the padding size and create a fixup encoding |
| 197 | // the padding size in the addend. If MaxBytesToEmit is smaller than the padding |
| 198 | // size, the fixup encodes MaxBytesToEmit in the higher bits and references a |
| 199 | // per-section marker symbol. |
| 200 | bool LoongArchAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) { |
| 201 | // Alignments before the first linker-relaxable instruction have fixed sizes |
| 202 | // and do not require relocations. Alignments after a linker-relaxable |
| 203 | // instruction require a relocation, even if the STI specifies norelax. |
| 204 | // |
| 205 | // firstLinkerRelaxable is the layout order within the subsection, which may |
| 206 | // be smaller than the section's order. Therefore, alignments in a |
| 207 | // lower-numbered subsection may be unnecessarily treated as linker-relaxable. |
| 208 | auto *Sec = F.getParent(); |
| 209 | if (F.getLayoutOrder() <= Sec->firstLinkerRelaxable()) |
| 210 | return false; |
| 211 | |
| 212 | // Use default handling unless linker relaxation is enabled and the |
| 213 | // MaxBytesToEmit >= the nop size. |
| 214 | const unsigned MinNopLen = 4; |
| 215 | unsigned MaxBytesToEmit = F.getAlignMaxBytesToEmit(); |
| 216 | if (MaxBytesToEmit < MinNopLen) |
| 217 | return false; |
| 218 | |
| 219 | Size = F.getAlignment().value() - MinNopLen; |
| 220 | if (F.getAlignment() <= MinNopLen) |
| 221 | return false; |
| 222 | |
| 223 | MCContext &Ctx = getContext(); |
| 224 | const MCExpr *Expr = nullptr; |
| 225 | if (MaxBytesToEmit >= Size) { |
| 226 | Expr = MCConstantExpr::create(Value: Size, Ctx&: getContext()); |
| 227 | } else { |
| 228 | MCSection *Sec = F.getParent(); |
| 229 | const MCSymbolRefExpr *SymRef = getSecToAlignSym()[Sec]; |
| 230 | if (SymRef == nullptr) { |
| 231 | // Define a marker symbol at the section with an offset of 0. |
| 232 | MCSymbol *Sym = Ctx.createNamedTempSymbol(Name: "la-relax-align" ); |
| 233 | Sym->setFragment(&*Sec->getBeginSymbol()->getFragment()); |
| 234 | Asm->registerSymbol(Symbol: *Sym); |
| 235 | SymRef = MCSymbolRefExpr::create(Symbol: Sym, Ctx); |
| 236 | getSecToAlignSym()[Sec] = SymRef; |
| 237 | } |
| 238 | Expr = MCBinaryExpr::createAdd( |
| 239 | LHS: SymRef, |
| 240 | RHS: MCConstantExpr::create(Value: (MaxBytesToEmit << 8) | Log2(A: F.getAlignment()), |
| 241 | Ctx), |
| 242 | Ctx); |
| 243 | } |
| 244 | MCFixup Fixup = |
| 245 | MCFixup::create(Offset: 0, Value: Expr, Kind: FirstLiteralRelocationKind + ELF::R_LARCH_ALIGN); |
| 246 | F.setVarFixups({Fixup}); |
| 247 | F.setLinkerRelaxable(); |
| 248 | return true; |
| 249 | } |
| 250 | |
| 251 | std::pair<bool, bool> LoongArchAsmBackend::relaxLEB128(MCFragment &F, |
| 252 | int64_t &Value) const { |
| 253 | const MCExpr &Expr = F.getLEBValue(); |
| 254 | if (F.isLEBSigned() || !Expr.evaluateKnownAbsolute(Res&: Value, Asm: *Asm)) |
| 255 | return std::make_pair(x: false, y: false); |
| 256 | F.setVarFixups({MCFixup::create(Offset: 0, Value: &Expr, Kind: FK_Data_leb128)}); |
| 257 | return std::make_pair(x: true, y: true); |
| 258 | } |
| 259 | |
| 260 | bool LoongArchAsmBackend::relaxDwarfLineAddr(MCFragment &F) const { |
| 261 | MCContext &C = getContext(); |
| 262 | int64_t LineDelta = F.getDwarfLineDelta(); |
| 263 | const MCExpr &AddrDelta = F.getDwarfAddrDelta(); |
| 264 | int64_t Value; |
| 265 | if (AddrDelta.evaluateAsAbsolute(Res&: Value, Asm: *Asm)) |
| 266 | return false; |
| 267 | [[maybe_unused]] bool IsAbsolute = |
| 268 | AddrDelta.evaluateKnownAbsolute(Res&: Value, Asm: *Asm); |
| 269 | assert(IsAbsolute); |
| 270 | |
| 271 | SmallVector<char> Data; |
| 272 | raw_svector_ostream OS(Data); |
| 273 | |
| 274 | // INT64_MAX is a signal that this is actually a DW_LNE_end_sequence. |
| 275 | if (LineDelta != INT64_MAX) { |
| 276 | OS << uint8_t(dwarf::DW_LNS_advance_line); |
| 277 | encodeSLEB128(Value: LineDelta, OS); |
| 278 | } |
| 279 | |
| 280 | // According to the DWARF specification, the `DW_LNS_fixed_advance_pc` opcode |
| 281 | // takes a single unsigned half (unencoded) operand. The maximum encodable |
| 282 | // value is therefore 65535. Set a conservative upper bound for relaxation. |
| 283 | unsigned PCBytes; |
| 284 | if (Value > 60000) { |
| 285 | unsigned PtrSize = C.getAsmInfo()->getCodePointerSize(); |
| 286 | assert((PtrSize == 4 || PtrSize == 8) && "Unexpected pointer size" ); |
| 287 | PCBytes = PtrSize; |
| 288 | OS << uint8_t(dwarf::DW_LNS_extended_op) << uint8_t(PtrSize + 1) |
| 289 | << uint8_t(dwarf::DW_LNE_set_address); |
| 290 | OS.write_zeros(NumZeros: PtrSize); |
| 291 | } else { |
| 292 | PCBytes = 2; |
| 293 | OS << uint8_t(dwarf::DW_LNS_fixed_advance_pc); |
| 294 | support::endian::write<uint16_t>(os&: OS, value: 0, endian: llvm::endianness::little); |
| 295 | } |
| 296 | auto Offset = OS.tell() - PCBytes; |
| 297 | |
| 298 | if (LineDelta == INT64_MAX) { |
| 299 | OS << uint8_t(dwarf::DW_LNS_extended_op); |
| 300 | OS << uint8_t(1); |
| 301 | OS << uint8_t(dwarf::DW_LNE_end_sequence); |
| 302 | } else { |
| 303 | OS << uint8_t(dwarf::DW_LNS_copy); |
| 304 | } |
| 305 | |
| 306 | F.setVarContents(Data); |
| 307 | F.setVarFixups({MCFixup::create(Offset, Value: &AddrDelta, |
| 308 | Kind: MCFixup::getDataKindForSize(Size: PCBytes))}); |
| 309 | return true; |
| 310 | } |
| 311 | |
| 312 | bool LoongArchAsmBackend::relaxDwarfCFA(MCFragment &F) const { |
| 313 | const MCExpr &AddrDelta = F.getDwarfAddrDelta(); |
| 314 | SmallVector<MCFixup, 2> Fixups; |
| 315 | int64_t Value; |
| 316 | if (AddrDelta.evaluateAsAbsolute(Res&: Value, Asm: *Asm)) |
| 317 | return false; |
| 318 | bool IsAbsolute = AddrDelta.evaluateKnownAbsolute(Res&: Value, Asm: *Asm); |
| 319 | assert(IsAbsolute && "CFA with invalid expression" ); |
| 320 | (void)IsAbsolute; |
| 321 | |
| 322 | assert(getContext().getAsmInfo()->getMinInstAlignment() == 1 && |
| 323 | "expected 1-byte alignment" ); |
| 324 | if (Value == 0) { |
| 325 | F.clearVarContents(); |
| 326 | F.clearVarFixups(); |
| 327 | return true; |
| 328 | } |
| 329 | |
| 330 | auto AddFixups = [&Fixups, |
| 331 | &AddrDelta](unsigned Offset, |
| 332 | std::pair<MCFixupKind, MCFixupKind> FK) { |
| 333 | const MCBinaryExpr &MBE = cast<MCBinaryExpr>(Val: AddrDelta); |
| 334 | Fixups.push_back(Elt: MCFixup::create(Offset, Value: MBE.getLHS(), Kind: std::get<0>(in&: FK))); |
| 335 | Fixups.push_back(Elt: MCFixup::create(Offset, Value: MBE.getRHS(), Kind: std::get<1>(in&: FK))); |
| 336 | }; |
| 337 | |
| 338 | SmallVector<char, 8> Data; |
| 339 | raw_svector_ostream OS(Data); |
| 340 | if (isUIntN(N: 6, x: Value)) { |
| 341 | OS << uint8_t(dwarf::DW_CFA_advance_loc); |
| 342 | AddFixups(0, getRelocPairForSize(Size: 6)); |
| 343 | } else if (isUInt<8>(x: Value)) { |
| 344 | OS << uint8_t(dwarf::DW_CFA_advance_loc1); |
| 345 | support::endian::write<uint8_t>(os&: OS, value: 0, endian: llvm::endianness::little); |
| 346 | AddFixups(1, getRelocPairForSize(Size: 8)); |
| 347 | } else if (isUInt<16>(x: Value)) { |
| 348 | OS << uint8_t(dwarf::DW_CFA_advance_loc2); |
| 349 | support::endian::write<uint16_t>(os&: OS, value: 0, endian: llvm::endianness::little); |
| 350 | AddFixups(1, getRelocPairForSize(Size: 16)); |
| 351 | } else if (isUInt<32>(x: Value)) { |
| 352 | OS << uint8_t(dwarf::DW_CFA_advance_loc4); |
| 353 | support::endian::write<uint32_t>(os&: OS, value: 0, endian: llvm::endianness::little); |
| 354 | AddFixups(1, getRelocPairForSize(Size: 32)); |
| 355 | } else { |
| 356 | llvm_unreachable("unsupported CFA encoding" ); |
| 357 | } |
| 358 | F.setVarContents(Data); |
| 359 | F.setVarFixups(Fixups); |
| 360 | return true; |
| 361 | } |
| 362 | |
| 363 | bool LoongArchAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count, |
| 364 | const MCSubtargetInfo *STI) const { |
| 365 | // We mostly follow binutils' convention here: align to 4-byte boundary with a |
| 366 | // 0-fill padding. |
| 367 | OS.write_zeros(NumZeros: Count % 4); |
| 368 | |
| 369 | // The remainder is now padded with 4-byte nops. |
| 370 | // nop: andi r0, r0, 0 |
| 371 | for (; Count >= 4; Count -= 4) |
| 372 | OS.write(Ptr: "\0\0\x40\x03" , Size: 4); |
| 373 | |
| 374 | return true; |
| 375 | } |
| 376 | |
| 377 | bool LoongArchAsmBackend::isPCRelFixupResolved(const MCSymbol *SymA, |
| 378 | const MCFragment &F) { |
| 379 | // If the section does not contain linker-relaxable fragments, PC-relative |
| 380 | // fixups can be resolved. |
| 381 | if (!F.getParent()->isLinkerRelaxable()) |
| 382 | return true; |
| 383 | |
| 384 | // Otherwise, check if the offset between the symbol and fragment is fully |
| 385 | // resolved, unaffected by linker-relaxable fragments (e.g. instructions or |
| 386 | // offset-affected FT_Align fragments). Complements the generic |
| 387 | // isSymbolRefDifferenceFullyResolvedImpl. |
| 388 | if (!PCRelTemp) |
| 389 | PCRelTemp = getContext().createTempSymbol(); |
| 390 | PCRelTemp->setFragment(const_cast<MCFragment *>(&F)); |
| 391 | MCValue Res; |
| 392 | MCExpr::evaluateSymbolicAdd(Asm, false, MCValue::get(SymA), |
| 393 | MCValue::get(SymA: nullptr, SymB: PCRelTemp), Res); |
| 394 | return !Res.getSubSym(); |
| 395 | } |
| 396 | |
| 397 | bool LoongArchAsmBackend::addReloc(const MCFragment &F, const MCFixup &Fixup, |
| 398 | const MCValue &Target, uint64_t &FixedValue, |
| 399 | bool IsResolved) { |
| 400 | auto Fallback = [&]() { |
| 401 | MCAsmBackend::maybeAddReloc(F, Fixup, Target, Value&: FixedValue, IsResolved); |
| 402 | return true; |
| 403 | }; |
| 404 | uint64_t FixedValueA, FixedValueB; |
| 405 | if (Target.getSubSym()) { |
| 406 | assert(Target.getSpecifier() == 0 && |
| 407 | "relocatable SymA-SymB cannot have relocation specifier" ); |
| 408 | std::pair<MCFixupKind, MCFixupKind> FK; |
| 409 | const MCSymbol &SA = *Target.getAddSym(); |
| 410 | const MCSymbol &SB = *Target.getSubSym(); |
| 411 | |
| 412 | bool force = !SA.isInSection() || !SB.isInSection(); |
| 413 | if (!force) { |
| 414 | const MCSection &SecA = SA.getSection(); |
| 415 | const MCSection &SecB = SB.getSection(); |
| 416 | const MCSection &SecCur = *F.getParent(); |
| 417 | |
| 418 | // To handle the case of A - B which B is same section with the current, |
| 419 | // generate PCRel relocations is better than ADD/SUB relocation pair. |
| 420 | // We can resolve it as A - PC + PC - B. The A - PC will be resolved |
| 421 | // as a PCRel relocation, while PC - B will serve as the addend. |
| 422 | // If the linker relaxation is disabled, it can be done directly since |
| 423 | // PC - B is constant. Otherwise, we should evaluate whether PC - B |
| 424 | // is constant. If it can be resolved as PCRel, use Fallback which |
| 425 | // generates R_LARCH_{32,64}_PCREL relocation later. |
| 426 | if (&SecA != &SecB && &SecB == &SecCur && |
| 427 | isPCRelFixupResolved(SymA: Target.getSubSym(), F)) |
| 428 | return Fallback(); |
| 429 | |
| 430 | // In SecA == SecB case. If the section is not linker-relaxable, the |
| 431 | // FixedValue has already been calculated out in evaluateFixup, |
| 432 | // return true and avoid record relocations. |
| 433 | if (&SecA == &SecB && !SecA.isLinkerRelaxable()) |
| 434 | return true; |
| 435 | } |
| 436 | |
| 437 | switch (Fixup.getKind()) { |
| 438 | case llvm::FK_Data_1: |
| 439 | FK = getRelocPairForSize(Size: 8); |
| 440 | break; |
| 441 | case llvm::FK_Data_2: |
| 442 | FK = getRelocPairForSize(Size: 16); |
| 443 | break; |
| 444 | case llvm::FK_Data_4: |
| 445 | FK = getRelocPairForSize(Size: 32); |
| 446 | break; |
| 447 | case llvm::FK_Data_8: |
| 448 | FK = getRelocPairForSize(Size: 64); |
| 449 | break; |
| 450 | case llvm::FK_Data_leb128: |
| 451 | FK = getRelocPairForSize(Size: 128); |
| 452 | break; |
| 453 | default: |
| 454 | llvm_unreachable("unsupported fixup size" ); |
| 455 | } |
| 456 | MCValue A = MCValue::get(SymA: Target.getAddSym(), SymB: nullptr, Val: Target.getConstant()); |
| 457 | MCValue B = MCValue::get(SymA: Target.getSubSym()); |
| 458 | auto FA = MCFixup::create(Offset: Fixup.getOffset(), Value: nullptr, Kind: std::get<0>(in&: FK)); |
| 459 | auto FB = MCFixup::create(Offset: Fixup.getOffset(), Value: nullptr, Kind: std::get<1>(in&: FK)); |
| 460 | Asm->getWriter().recordRelocation(F, Fixup: FA, Target: A, FixedValue&: FixedValueA); |
| 461 | Asm->getWriter().recordRelocation(F, Fixup: FB, Target: B, FixedValue&: FixedValueB); |
| 462 | FixedValue = FixedValueA - FixedValueB; |
| 463 | return false; |
| 464 | } |
| 465 | |
| 466 | // If linker relaxation is enabled and supported by the current relocation, |
| 467 | // generate a relocation and then append a RELAX. |
| 468 | if (Fixup.isLinkerRelaxable()) |
| 469 | IsResolved = false; |
| 470 | if (IsResolved && Fixup.isPCRel()) |
| 471 | IsResolved = isPCRelFixupResolved(SymA: Target.getAddSym(), F); |
| 472 | |
| 473 | if (!IsResolved) |
| 474 | Asm->getWriter().recordRelocation(F, Fixup, Target, FixedValue); |
| 475 | |
| 476 | if (Fixup.isLinkerRelaxable()) { |
| 477 | auto FA = MCFixup::create(Offset: Fixup.getOffset(), Value: nullptr, Kind: ELF::R_LARCH_RELAX); |
| 478 | Asm->getWriter().recordRelocation(F, Fixup: FA, Target: MCValue::get(SymA: nullptr), |
| 479 | FixedValue&: FixedValueA); |
| 480 | } |
| 481 | |
| 482 | return true; |
| 483 | } |
| 484 | |
| 485 | std::unique_ptr<MCObjectTargetWriter> |
| 486 | LoongArchAsmBackend::createObjectTargetWriter() const { |
| 487 | return createLoongArchELFObjectWriter(OSABI, Is64Bit); |
| 488 | } |
| 489 | |
| 490 | MCAsmBackend *llvm::createLoongArchAsmBackend(const Target &T, |
| 491 | const MCSubtargetInfo &STI, |
| 492 | const MCRegisterInfo &MRI, |
| 493 | const MCTargetOptions &Options) { |
| 494 | const Triple &TT = STI.getTargetTriple(); |
| 495 | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(OSType: TT.getOS()); |
| 496 | return new LoongArchAsmBackend(STI, OSABI, TT.isArch64Bit(), Options); |
| 497 | } |
| 498 | |