//===-- LoongArchAsmBackend.cpp - LoongArch Assembler Backend -*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LoongArchAsmBackend class.
//
//===----------------------------------------------------------------------===//

#include "LoongArchAsmBackend.h"
#include "LoongArchFixupKinds.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"

#define DEBUG_TYPE "loongarch-asmbackend"

using namespace llvm;

LoongArchAsmBackend::LoongArchAsmBackend(const MCSubtargetInfo &STI,
                                         uint8_t OSABI, bool Is64Bit,
                                         const MCTargetOptions &Options)
    : MCAsmBackend(llvm::endianness::little), STI(STI), OSABI(OSABI),
      Is64Bit(Is64Bit), TargetOptions(Options) {}

std::optional<MCFixupKind>
LoongArchAsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    auto Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/LoongArch.def"
#undef ELF_RELOC
                    .Case("BFD_RELOC_NONE", ELF::R_LARCH_NONE)
                    .Case("BFD_RELOC_32", ELF::R_LARCH_32)
                    .Case("BFD_RELOC_64", ELF::R_LARCH_64)
                    .Default(-1u);
    if (Type != -1u)
      return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
  }
  return std::nullopt;
}

MCFixupKindInfo LoongArchAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in LoongArchFixupKinds.h.
      //
      // {name, offset, bits, flags}
      {"fixup_loongarch_b16", 10, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b21", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b26", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_abs_hi20", 5, 20, 0},
      {"fixup_loongarch_abs_lo12", 10, 12, 0},
      {"fixup_loongarch_abs64_lo20", 5, 20, 0},
      {"fixup_loongarch_abs64_hi12", 10, 12, 0},
  };

  static_assert((std::size(Infos)) == LoongArch::NumTargetFixupKinds,
                "Not all fixup kinds added to Infos array");

  // Fixup kinds from .reloc directive are like R_LARCH_NONE. They
  // do not require any extra processing.
  if (mc::isRelocation(Kind))
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) <
             LoongArch::NumTargetFixupKinds &&
         "Invalid kind!");
  return Infos[Kind - FirstTargetFixupKind];
}

static void reportOutOfRangeError(MCContext &Ctx, SMLoc Loc, unsigned N) {
  Ctx.reportError(Loc, "fixup value out of range [" + Twine(llvm::minIntN(N)) +
                           ", " + Twine(llvm::maxIntN(N)) + "]");
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx) {
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    return Value;
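  // Branch fixups encode a sign-extended byte offset that must be 4-byte
  // aligned. After the range and alignment checks, the low two bits are
  // dropped and the resulting word offset is placed into the instruction's
  // immediate field(s).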
  case LoongArch::fixup_loongarch_b16: {
    if (!isInt<18>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 18);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return (Value >> 2) & 0xffff;
  }
  case LoongArch::fixup_loongarch_b21: {
    if (!isInt<23>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 23);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x1f);
  }
  case LoongArch::fixup_loongarch_b26: {
    if (!isInt<28>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 28);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x3ff);
  }
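  // Absolute-address fixups select the 12- or 20-bit slice of the value
  // consumed by the corresponding instruction of the absolute materialization
  // sequence (lu12i.w / ori / lu32i.d / lu52i.d).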
  case LoongArch::fixup_loongarch_abs_hi20:
    return (Value >> 12) & 0xfffff;
  case LoongArch::fixup_loongarch_abs_lo12:
    return Value & 0xfff;
  case LoongArch::fixup_loongarch_abs64_lo20:
    return (Value >> 32) & 0xfffff;
  case LoongArch::fixup_loongarch_abs64_hi12:
    return (Value >> 52) & 0xfff;
  }
}

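// OR the resolved value, seven bits at a time, into the ULEB128 bytes already
// emitted for the fragment; report an error if the value needs more bytes
// than were reserved.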
static void fixupLeb128(MCContext &Ctx, const MCFixup &Fixup,
                        MutableArrayRef<char> Data, uint64_t Value) {
  unsigned I;
  for (I = 0; I != Data.size() && Value; ++I, Value >>= 7)
    Data[I] |= uint8_t(Value & 0x7f);
  if (Value)
    Ctx.reportError(Fixup.getLoc(), "Invalid uleb128 value!");
}

void LoongArchAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
                                     const MCValue &Target,
                                     MutableArrayRef<char> Data, uint64_t Value,
                                     bool IsResolved) {
  if (IsResolved && shouldForceRelocation(Fixup, Target))
    IsResolved = false;
  IsResolved = addReloc(F, Fixup, Target, Value, IsResolved);
  if (!Value)
    return; // Doesn't change encoding.

  auto Kind = Fixup.getKind();
  if (mc::isRelocation(Kind))
    return;
  MCFixupKindInfo Info = getFixupKindInfo(Kind);
  MCContext &Ctx = getContext();

  // Fixup leb128 separately.
  if (Fixup.getTargetKind() == FK_Data_leb128)
    return fixupLeb128(Ctx, Fixup, Data, Value);

  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;

  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  for (unsigned I = 0; I != NumBytes; ++I) {
    Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff);
  }
}

// Linker relaxation may change code size. We have to insert Nops for the
// .align directive when linker relaxation is enabled, so that the linker can
// later satisfy the alignment by removing Nops.
// On success, Size is set to the total number of Nop bytes we need to insert.
bool LoongArchAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
    const MCAlignFragment &AF, unsigned &Size) {
  // Calculate the Nops size only when linker relaxation is enabled.
  if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
    return false;

  // Ignore alignment if MaxBytesToEmit is less than the minimum Nop size.
  const unsigned MinNopLen = 4;
  if (AF.getMaxBytesToEmit() < MinNopLen)
    return false;
  Size = AF.getAlignment().value() - MinNopLen;
  return AF.getAlignment() > MinNopLen;
}

// When linker relaxation is enabled, we need to emit an R_LARCH_ALIGN
// relocation to record the position of the inserted Nops and the total number
// of Nop bytes, so that the linker can satisfy the alignment by removing Nops.
// This function records a fixup of kind R_LARCH_ALIGN, which is then emitted
// as an R_LARCH_ALIGN relocation.
// The extended form of R_LARCH_ALIGN requires a symbol index: the lowest 8
// bits of the addend encode the alignment, and the remaining bits encode the
// maximum number of bytes to emit; a maximum of zero means the emit limit is
// ignored.
bool LoongArchAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
                                                        MCAlignFragment &AF) {
  // Insert the fixup only when linker relaxation is enabled.
  if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
    return false;

  // Calculate total Nops we need to insert. If there are none to insert
  // then simply return.
  unsigned InsertedNopBytes;
  if (!shouldInsertExtraNopBytesForCodeAlign(AF, InsertedNopBytes))
    return false;

  MCSection *Sec = AF.getParent();
  MCContext &Ctx = getContext();
  const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
  MCFixup Fixup = MCFixup::create(0, Dummy, ELF::R_LARCH_ALIGN);
  unsigned MaxBytesToEmit = AF.getMaxBytesToEmit();

  auto createExtendedValue = [&]() {
    const MCSymbolRefExpr *MCSym = getSecToAlignSym()[Sec];
    if (MCSym == nullptr) {
      // Define a marker symbol at the section with an offset of 0.
      MCSymbol *Sym = Ctx.createNamedTempSymbol("la-relax-align");
      Sym->setFragment(&*Sec->getBeginSymbol()->getFragment());
      Asm.registerSymbol(*Sym);
      MCSym = MCSymbolRefExpr::create(Sym, Ctx);
      getSecToAlignSym()[Sec] = MCSym;
    }
    return MCValue::get(&MCSym->getSymbol(), nullptr,
                        MaxBytesToEmit << 8 | Log2(AF.getAlignment()));
  };

  uint64_t FixedValue = 0;
  MCValue Value = MaxBytesToEmit >= InsertedNopBytes
                      ? MCValue::get(InsertedNopBytes)
                      : createExtendedValue();
  Asm.getWriter().recordRelocation(AF, Fixup, Value, FixedValue);

  return true;
}

bool LoongArchAsmBackend::shouldForceRelocation(const MCFixup &Fixup,
                                                const MCValue &Target) {
  switch (Fixup.getTargetKind()) {
  default:
    return STI.hasFeature(LoongArch::FeatureRelax);
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    return !Target.isAbsolute();
  }
}

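// Return the paired ADD/SUB relocation types used to emit a label difference
// of the given bit width; a width of 128 selects the ULEB128 pair.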
static inline std::pair<MCFixupKind, MCFixupKind>
getRelocPairForSize(unsigned Size) {
  switch (Size) {
  default:
    llvm_unreachable("unsupported fixup size");
  case 6:
    return std::make_pair(MCFixupKind(ELF::R_LARCH_ADD6),
                          MCFixupKind(ELF::R_LARCH_SUB6));
  case 8:
    return std::make_pair(MCFixupKind(ELF::R_LARCH_ADD8),
                          MCFixupKind(ELF::R_LARCH_SUB8));
  case 16:
    return std::make_pair(MCFixupKind(ELF::R_LARCH_ADD16),
                          MCFixupKind(ELF::R_LARCH_SUB16));
  case 32:
    return std::make_pair(MCFixupKind(ELF::R_LARCH_ADD32),
                          MCFixupKind(ELF::R_LARCH_SUB32));
  case 64:
    return std::make_pair(MCFixupKind(ELF::R_LARCH_ADD64),
                          MCFixupKind(ELF::R_LARCH_SUB64));
  case 128:
    return std::make_pair(MCFixupKind(ELF::R_LARCH_ADD_ULEB128),
                          MCFixupKind(ELF::R_LARCH_SUB_ULEB128));
  }
}

std::pair<bool, bool> LoongArchAsmBackend::relaxLEB128(MCLEBFragment &LF,
                                                       int64_t &Value) const {
  const MCExpr &Expr = LF.getValue();
  if (LF.isSigned() || !Expr.evaluateKnownAbsolute(Value, *Asm))
    return std::make_pair(false, false);
  LF.addFixup(MCFixup::create(0, &Expr, FK_Data_leb128, Expr.getLoc()));
  return std::make_pair(true, true);
}

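// Rewrite a DWARF line-table address advance so that the delta is carried by
// a relocation pair (DW_LNS_fixed_advance_pc with ADD16/SUB16, or
// DW_LNE_set_address with the pointer-sized ADD/SUB pair) instead of being
// folded in, since linker relaxation may later change the distance between
// the two labels.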
bool LoongArchAsmBackend::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF,
                                             bool &WasRelaxed) const {
  MCContext &C = getContext();

  int64_t LineDelta = DF.getLineDelta();
  const MCExpr &AddrDelta = DF.getAddrDelta();
  SmallVector<MCFixup, 1> Fixups;
  size_t OldSize = DF.getContents().size();

  int64_t Value;
  if (AddrDelta.evaluateAsAbsolute(Value, *Asm))
    return false;
  bool IsAbsolute = AddrDelta.evaluateKnownAbsolute(Value, *Asm);
  assert(IsAbsolute && "CFA with invalid expression");
  (void)IsAbsolute;

  SmallVector<char> Data;
  raw_svector_ostream OS(Data);

  // INT64_MAX is a signal that this is actually a DW_LNE_end_sequence.
  if (LineDelta != INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_advance_line);
    encodeSLEB128(LineDelta, OS);
  }

  unsigned Offset;
  std::pair<MCFixupKind, MCFixupKind> FK;

  // According to the DWARF specification, the `DW_LNS_fixed_advance_pc` opcode
  // takes a single unsigned half (unencoded) operand. The maximum encodable
  // value is therefore 65535. Set a conservative upper bound for relaxation.
  if (Value > 60000) {
    unsigned PtrSize = C.getAsmInfo()->getCodePointerSize();

    OS << uint8_t(dwarf::DW_LNS_extended_op);
    encodeULEB128(PtrSize + 1, OS);

    OS << uint8_t(dwarf::DW_LNE_set_address);
    Offset = OS.tell();
    assert((PtrSize == 4 || PtrSize == 8) && "Unexpected pointer size");
    FK = getRelocPairForSize(PtrSize == 4 ? 32 : 64);
    OS.write_zeros(PtrSize);
  } else {
    OS << uint8_t(dwarf::DW_LNS_fixed_advance_pc);
    Offset = OS.tell();
    FK = getRelocPairForSize(16);
    support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
  }

  const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
  Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(FK)));
  Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(FK)));

  if (LineDelta == INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_extended_op);
    OS << uint8_t(1);
    OS << uint8_t(dwarf::DW_LNE_end_sequence);
  } else {
    OS << uint8_t(dwarf::DW_LNS_copy);
  }

  DF.setContents(Data);
  DF.setFixups(Fixups);
  WasRelaxed = OldSize != Data.size();
  return true;
}

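// Likewise for call-frame fragments: pick the smallest DW_CFA_advance_loc*
// form that can hold the address delta and attach the matching ADD/SUB
// relocation pair so the linker can update it after relaxation.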
bool LoongArchAsmBackend::relaxDwarfCFA(MCDwarfCallFrameFragment &DF,
                                        bool &WasRelaxed) const {
  const MCExpr &AddrDelta = DF.getAddrDelta();
  SmallVector<MCFixup, 2> Fixups;
  size_t OldSize = DF.getContents().size();

  int64_t Value;
  if (AddrDelta.evaluateAsAbsolute(Value, *Asm))
    return false;
  bool IsAbsolute = AddrDelta.evaluateKnownAbsolute(Value, *Asm);
  assert(IsAbsolute && "CFA with invalid expression");
  (void)IsAbsolute;

  assert(getContext().getAsmInfo()->getMinInstAlignment() == 1 &&
         "expected 1-byte alignment");
  if (Value == 0) {
    DF.clearContents();
    DF.clearFixups();
    WasRelaxed = OldSize != DF.getContents().size();
    return true;
  }

  auto AddFixups = [&Fixups,
                    &AddrDelta](unsigned Offset,
                                std::pair<MCFixupKind, MCFixupKind> FK) {
    const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
    Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(FK)));
    Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(FK)));
  };

  SmallVector<char, 8> Data;
  raw_svector_ostream OS(Data);
  if (isUIntN(6, Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc);
    AddFixups(0, getRelocPairForSize(6));
  } else if (isUInt<8>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc1);
    support::endian::write<uint8_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(8));
  } else if (isUInt<16>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc2);
    support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(16));
  } else if (isUInt<32>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc4);
    support::endian::write<uint32_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(32));
  } else {
    llvm_unreachable("unsupported CFA encoding");
  }
  DF.setContents(Data);
  DF.setFixups(Fixups);

  WasRelaxed = OldSize != Data.size();
  return true;
}

bool LoongArchAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                       const MCSubtargetInfo *STI) const {
  // We mostly follow binutils' convention here: align to 4-byte boundary with
  // a 0-fill padding.
  OS.write_zeros(Count % 4);

  // The remainder is now padded with 4-byte nops.
  // nop: andi r0, r0, 0
  for (; Count >= 4; Count -= 4)
    OS.write("\0\0\x40\x03", 4);

  return true;
}

bool LoongArchAsmBackend::isPCRelFixupResolved(const MCSymbol *SymA,
                                               const MCFragment &F) {
  // If the section does not contain linker-relaxable fragments, PC-relative
  // fixups can be resolved.
  if (!F.getParent()->isLinkerRelaxable())
    return true;

  // Otherwise, check if the offset between the symbol and fragment is fully
  // resolved, unaffected by linker-relaxable fragments (e.g. instructions or
  // offset-affected MCAlignFragment). Complements the generic
  // isSymbolRefDifferenceFullyResolvedImpl.
  if (!PCRelTemp)
    PCRelTemp = getContext().createTempSymbol();
  PCRelTemp->setFragment(const_cast<MCFragment *>(&F));
  MCValue Res;
  MCExpr::evaluateSymbolicAdd(Asm, false, MCValue::get(SymA),
                              MCValue::get(nullptr, PCRelTemp), Res);
  return !Res.getSubSym();
}

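// Record relocations for a fixup. For an A - B difference that cannot be
// folded away, emit the matching ADD/SUB relocation pair; otherwise defer to
// the generic path and, if the fixup is linker-relaxable, append an
// R_LARCH_RELAX relocation.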
bool LoongArchAsmBackend::addReloc(const MCFragment &F, const MCFixup &Fixup,
                                   const MCValue &Target, uint64_t &FixedValue,
                                   bool IsResolved) {
  auto Fallback = [&]() {
    MCAsmBackend::maybeAddReloc(F, Fixup, Target, FixedValue, IsResolved);
    return true;
  };
  uint64_t FixedValueA, FixedValueB;
  if (Target.getSubSym()) {
    assert(Target.getSpecifier() == 0 &&
           "relocatable SymA-SymB cannot have relocation specifier");
    std::pair<MCFixupKind, MCFixupKind> FK;
    const MCSymbol &SA = *Target.getAddSym();
    const MCSymbol &SB = *Target.getSubSym();

    bool force = !SA.isInSection() || !SB.isInSection();
    if (!force) {
      const MCSection &SecA = SA.getSection();
      const MCSection &SecB = SB.getSection();
      const MCSection &SecCur = *F.getParent();

      // For A - B where B is in the same section as the current fragment,
      // generating a PCRel relocation is better than an ADD/SUB relocation
      // pair: rewrite it as (A - PC) + (PC - B). A - PC is resolved as a
      // PCRel relocation, while PC - B serves as the addend.
      // If linker relaxation is disabled this can be done directly, since
      // PC - B is constant. Otherwise we must first evaluate whether PC - B
      // is constant; if it can be resolved as PCRel, use Fallback, which
      // later produces an R_LARCH_{32,64}_PCREL relocation.
      if (&SecA != &SecB && &SecB == &SecCur &&
          isPCRelFixupResolved(Target.getSubSym(), F))
        return Fallback();

      // If SecA == SecB and linker relaxation is disabled, the FixedValue has
      // already been computed in evaluateFixup, so return true and avoid
      // recording relocations.
      if (&SecA == &SecB && !STI.hasFeature(LoongArch::FeatureRelax))
        return true;
    }

    switch (Fixup.getKind()) {
    case llvm::FK_Data_1:
      FK = getRelocPairForSize(8);
      break;
    case llvm::FK_Data_2:
      FK = getRelocPairForSize(16);
      break;
    case llvm::FK_Data_4:
      FK = getRelocPairForSize(32);
      break;
    case llvm::FK_Data_8:
      FK = getRelocPairForSize(64);
      break;
    case llvm::FK_Data_leb128:
      FK = getRelocPairForSize(128);
      break;
    default:
      llvm_unreachable("unsupported fixup size");
    }
    MCValue A = MCValue::get(Target.getAddSym(), nullptr, Target.getConstant());
    MCValue B = MCValue::get(Target.getSubSym());
    auto FA = MCFixup::create(Fixup.getOffset(), nullptr, std::get<0>(FK));
    auto FB = MCFixup::create(Fixup.getOffset(), nullptr, std::get<1>(FK));
    Asm->getWriter().recordRelocation(F, FA, A, FixedValueA);
    Asm->getWriter().recordRelocation(F, FB, B, FixedValueB);
    FixedValue = FixedValueA - FixedValueB;
    return false;
  }

  IsResolved = Fallback();
  // If linker relaxation is enabled and supported by the current relocation,
  // append a RELAX relocation.
  if (Fixup.isLinkerRelaxable()) {
    auto FA = MCFixup::create(Fixup.getOffset(), nullptr, ELF::R_LARCH_RELAX);
    Asm->getWriter().recordRelocation(F, FA, MCValue::get(nullptr),
                                      FixedValueA);
  }

  return true;
}

std::unique_ptr<MCObjectTargetWriter>
LoongArchAsmBackend::createObjectTargetWriter() const {
  return createLoongArchELFObjectWriter(
      OSABI, Is64Bit, STI.hasFeature(LoongArch::FeatureRelax));
}

MCAsmBackend *llvm::createLoongArchAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TT = STI.getTargetTriple();
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
  return new LoongArchAsmBackend(STI, OSABI, TT.isArch64Bit(), Options);
}