1//===-- RISCVAsmBackend.cpp - RISC-V Assembler Backend --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "RISCVAsmBackend.h"
10#include "RISCVFixupKinds.h"
11#include "llvm/ADT/APInt.h"
12#include "llvm/MC/MCAsmInfo.h"
13#include "llvm/MC/MCAssembler.h"
14#include "llvm/MC/MCContext.h"
15#include "llvm/MC/MCELFObjectWriter.h"
16#include "llvm/MC/MCExpr.h"
17#include "llvm/MC/MCMachObjectWriter.h"
18#include "llvm/MC/MCObjectWriter.h"
19#include "llvm/MC/MCSymbol.h"
20#include "llvm/MC/MCValue.h"
21#include "llvm/Support/CommandLine.h"
22#include "llvm/Support/EndianStream.h"
23#include "llvm/Support/ErrorHandling.h"
24#include "llvm/Support/LEB128.h"
25#include "llvm/Support/raw_ostream.h"
26
27using namespace llvm;
28
29// Temporary workaround for old linkers that do not support ULEB128 relocations,
30// which are abused by DWARF v5 DW_LLE_offset_pair/DW_RLE_offset_pair
31// implemented in Clang/LLVM.
32static cl::opt<bool> ULEB128Reloc(
33 "riscv-uleb128-reloc", cl::init(Val: true), cl::Hidden,
34 cl::desc("Emit R_RISCV_SET_ULEB128/E_RISCV_SUB_ULEB128 if appropriate"));
35
// Controls padding size for R_RISCV_ALIGN: when enabled (the default), assume
// the linker may fill alignment with 2-byte (compressed) NOPs, so request
// $alignment-2 bytes of padding even when the fragment's subtarget lacks
// Zca (norvc). See relaxAlign, which derives the minimum NOP length from this.
static cl::opt<bool>
    AlignRvc("riscv-align-rvc", cl::init(Val: true), cl::Hidden,
             cl::desc("When generating R_RISCV_ALIGN, insert $alignment-2 "
                      "bytes of NOPs even in norvc code"));
40
// Construct the RISC-V assembler backend.
//
// \param STI            Subtarget info; retained for feature/triple queries.
// \param OSABI          ELF OSABI byte, stored for the object writer.
// \param Is64Bit        True for RV64, false for RV32.
// \param IsLittleEndian Selects the byte order handed to MCAsmBackend.
// \param Options        Target options retained in TargetOptions.
//
// Validates the feature bits against the triple up front so that an
// inconsistent configuration is diagnosed before any code is emitted.
RISCVAsmBackend::RISCVAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI,
                                 bool Is64Bit, bool IsLittleEndian,
                                 const MCTargetOptions &Options)
    : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
                                  : llvm::endianness::big),
      STI(STI), OSABI(OSABI), Is64Bit(Is64Bit), TargetOptions(Options) {
  RISCVFeatures::validate(TT: STI.getTargetTriple(), FeatureBits: STI.getFeatureBits());
}
49
// Map a relocation name used by a .reloc directive (any R_RISCV_* name from
// the standard or vendor .def lists, or a BFD_RELOC_* alias) to a
// literal-relocation fixup kind (FirstLiteralRelocationKind + reloc type).
// Returns std::nullopt for unknown names or non-ELF object formats, letting
// the generic handling reject the directive.
std::optional<MCFixupKind> RISCVAsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    unsigned Type;
    Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(NAME, ID) .Case(#NAME, ID)
#include "llvm/BinaryFormat/ELFRelocs/RISCV.def"
#undef ELF_RELOC
#define ELF_RISCV_NONSTANDARD_RELOC(_VENDOR, NAME, ID) .Case(#NAME, ID)
#include "llvm/BinaryFormat/ELFRelocs/RISCV_nonstandard.def"
#undef ELF_RISCV_NONSTANDARD_RELOC
               .Case(S: "BFD_RELOC_NONE", Value: ELF::R_RISCV_NONE)
               .Case(S: "BFD_RELOC_32", Value: ELF::R_RISCV_32)
               .Case(S: "BFD_RELOC_64", Value: ELF::R_RISCV_64)
               .Default(Value: -1u)
    // -1u marks "not found"; any real relocation type fits in the table.
    if (Type != -1u)
      return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
  }
  return std::nullopt;
}
69
// Return the bit layout (name, bit offset within the instruction word, bit
// width, flags) for a fixup kind. Raw relocation kinds (from .reloc or
// literal relocations) always become relocations and never patch bits, so
// they get an empty descriptor; generic kinds defer to the base class.
MCFixupKindInfo RISCVAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // RISCVFixupKinds.h.
      //
      // name                      offset bits  flags
      {.Name: "fixup_riscv_hi20", .TargetOffset: 12, .TargetSize: 20, .Flags: 0},
      {.Name: "fixup_riscv_lo12_i", .TargetOffset: 20, .TargetSize: 12, .Flags: 0},
      {.Name: "fixup_riscv_12_i", .TargetOffset: 20, .TargetSize: 12, .Flags: 0},
      {.Name: "fixup_riscv_lo12_s", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_riscv_pcrel_hi20", .TargetOffset: 12, .TargetSize: 20, .Flags: 0},
      {.Name: "fixup_riscv_pcrel_lo12_i", .TargetOffset: 20, .TargetSize: 12, .Flags: 0},
      {.Name: "fixup_riscv_pcrel_lo12_s", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_riscv_jal", .TargetOffset: 12, .TargetSize: 20, .Flags: 0},
      {.Name: "fixup_riscv_branch", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_riscv_rvc_jump", .TargetOffset: 2, .TargetSize: 11, .Flags: 0},
      {.Name: "fixup_riscv_rvc_branch", .TargetOffset: 0, .TargetSize: 16, .Flags: 0},
      {.Name: "fixup_riscv_rvc_imm", .TargetOffset: 0, .TargetSize: 16, .Flags: 0},
      {.Name: "fixup_riscv_call", .TargetOffset: 0, .TargetSize: 64, .Flags: 0},
      {.Name: "fixup_riscv_call_plt", .TargetOffset: 0, .TargetSize: 64, .Flags: 0},

      // Qualcomm fixups (see maybeAddVendorReloc for the vendor relocation).
      {.Name: "fixup_riscv_qc_e_branch", .TargetOffset: 0, .TargetSize: 48, .Flags: 0},
      {.Name: "fixup_riscv_qc_e_32", .TargetOffset: 16, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_riscv_qc_abs20_u", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
      {.Name: "fixup_riscv_qc_e_call_plt", .TargetOffset: 0, .TargetSize: 48, .Flags: 0},

      // Andes fixups
      {.Name: "fixup_riscv_nds_branch_10", .TargetOffset: 0, .TargetSize: 32, .Flags: 0},
  };
  // Keep the table in lockstep with the enum in RISCVFixupKinds.h.
  static_assert((std::size(Infos)) == RISCV::NumTargetFixupKinds,
                "Not all fixup kinds added to Infos array");

  // Fixup kinds from raw relocation types and .reloc directives force
  // relocations and do not use these fields.
  if (mc::isRelocation(FixupKind: Kind))
    return {};

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < RISCV::NumTargetFixupKinds &&
         "Invalid kind!");
  return Infos[Kind - FirstTargetFixupKind];
}
114
115bool RISCVAsmBackend::fixupNeedsRelaxationAdvanced(const MCFragment &,
116 const MCFixup &Fixup,
117 const MCValue &,
118 uint64_t Value,
119 bool Resolved) const {
120 int64_t Offset = int64_t(Value);
121 auto Kind = Fixup.getKind();
122
123 // Return true if the symbol is unresolved.
124 if (!Resolved)
125 return true;
126
127 switch (Kind) {
128 default:
129 return false;
130 case RISCV::fixup_riscv_rvc_branch:
131 // For compressed branch instructions the immediate must be
132 // in the range [-256, 254].
133 return Offset > 254 || Offset < -256;
134 case RISCV::fixup_riscv_rvc_jump:
135 // For compressed jump instructions the immediate must be
136 // in the range [-2048, 2046].
137 return Offset > 2046 || Offset < -2048;
138 case RISCV::fixup_riscv_branch:
139 case RISCV::fixup_riscv_qc_e_branch:
140 // For conditional branch instructions the immediate must be
141 // in the range [-4096, 4094].
142 return Offset > 4094 || Offset < -4096;
143 case RISCV::fixup_riscv_jal:
144 // For jump instructions the immediate must be in the range
145 // [-1048576, 1048574]
146 return Offset > 1048574 || Offset < -1048576;
147 case RISCV::fixup_riscv_rvc_imm:
148 // This fixup can never be emitted as a relocation, so always needs to be
149 // relaxed.
150 return true;
151 }
152}
153
// Given an instruction that participates in assembler relaxation, return the
// opcode of its one-step relaxed (longer-range or relocatable) form, or the
// original opcode if no relaxation is available. This covers compressed
// control flow (C.BEQZ/C.BNEZ/C.J/C.JAL), JAL -> QC.E.J/QC.E.JAL (Xqcilb),
// C.LI -> QC.E.LI (Xqcili), and conditional branches -> PseudoLong* forms.
static unsigned getRelaxedOpcode(unsigned Opcode, ArrayRef<MCOperand> Operands,
                                 const MCSubtargetInfo &STI) {
  switch (Opcode) {
  case RISCV::C_BEQZ:
    return RISCV::BEQ;
  case RISCV::C_BNEZ:
    return RISCV::BNE;
  case RISCV::C_J:
  case RISCV::C_JAL: // fall through.
    // This only relaxes one "step" - i.e. from C.J to JAL, not from C.J to
    // QC.E.J, because we can always relax again if needed.
    return RISCV::JAL;
  case RISCV::C_LI:
    if (!STI.hasFeature(Feature: RISCV::FeatureVendorXqcili))
      break;
    // We only need this because `QC.E.LI` can be compressed into a `C.LI`. This
    // happens because the `simm6` MCOperandPredicate accepts bare symbols, and
    // `QC.E.LI` is the only instruction that accepts bare symbols at parse-time
    // and compresses to `C.LI`. `C.LI` does not itself accept bare symbols at
    // parse time.
    //
    // If we have a bare symbol, we need to turn this back to a `QC.E.LI`, as we
    // have no way to emit a relocation on a `C.LI` instruction.
    return RISCV::QC_E_LI;
  case RISCV::JAL: {
    // We can only relax JAL if we have Xqcilb
    if (!STI.hasFeature(Feature: RISCV::FeatureVendorXqcilb))
      break;

    // And only if it is using X0 or X1 for rd.
    MCRegister Reg = Operands[0].getReg();
    if (Reg == RISCV::X0)
      return RISCV::QC_E_J;
    if (Reg == RISCV::X1)
      return RISCV::QC_E_JAL;

    break;
  }
  // Uncompressed conditional branches relax to long-branch pseudos
  // (branch with inverted condition over an unconditional jump).
  case RISCV::BEQ:
    return RISCV::PseudoLongBEQ;
  case RISCV::BNE:
    return RISCV::PseudoLongBNE;
  case RISCV::BEQI:
    return RISCV::PseudoLongBEQI;
  case RISCV::BNEI:
    return RISCV::PseudoLongBNEI;
  case RISCV::BLT:
    return RISCV::PseudoLongBLT;
  case RISCV::BGE:
    return RISCV::PseudoLongBGE;
  case RISCV::BLTU:
    return RISCV::PseudoLongBLTU;
  case RISCV::BGEU:
    return RISCV::PseudoLongBGEU;
  case RISCV::QC_BEQI:
    return RISCV::PseudoLongQC_BEQI;
  case RISCV::QC_BNEI:
    return RISCV::PseudoLongQC_BNEI;
  case RISCV::QC_BLTI:
    return RISCV::PseudoLongQC_BLTI;
  case RISCV::QC_BGEI:
    return RISCV::PseudoLongQC_BGEI;
  case RISCV::QC_BLTUI:
    return RISCV::PseudoLongQC_BLTUI;
  case RISCV::QC_BGEUI:
    return RISCV::PseudoLongQC_BGEUI;
  case RISCV::QC_E_BEQI:
    return RISCV::PseudoLongQC_E_BEQI;
  case RISCV::QC_E_BNEI:
    return RISCV::PseudoLongQC_E_BNEI;
  case RISCV::QC_E_BLTI:
    return RISCV::PseudoLongQC_E_BLTI;
  case RISCV::QC_E_BGEI:
    return RISCV::PseudoLongQC_E_BGEI;
  case RISCV::QC_E_BLTUI:
    return RISCV::PseudoLongQC_E_BLTUI;
  case RISCV::QC_E_BGEUI:
    return RISCV::PseudoLongQC_E_BGEUI;
  }

  // Returning the original opcode means we cannot relax the instruction.
  return Opcode;
}
240
// Replace Inst in place with its relaxed form, as selected by
// getRelaxedOpcode. Compressed branches/jumps are uncompressed via
// RISCVRVC::uncompress; JAL and C.LI are rebuilt by hand (their relaxations
// are compression-only patterns, see comments below); uncompressed branches
// simply get the PseudoLong* opcode with the same three operands.
// With FeatureExactAssembly, relaxation is disabled and Inst is left alone.
void RISCVAsmBackend::relaxInstruction(MCInst &Inst,
                                       const MCSubtargetInfo &STI) const {
  if (STI.hasFeature(Feature: RISCV::FeatureExactAssembly))
    return;

  MCInst Res;
  switch (Inst.getOpcode()) {
  default:
    llvm_unreachable("Opcode not expected!");
  case RISCV::C_BEQZ:
  case RISCV::C_BNEZ:
  case RISCV::C_J:
  case RISCV::C_JAL: {
    [[maybe_unused]] bool Success = RISCVRVC::uncompress(OutInst&: Res, MI: Inst, STI);
    assert(Success && "Can't uncompress instruction");
    // Uncompression must agree with the opcode mapping used by
    // mayNeedRelaxation, or relaxation would not converge.
    assert(Res.getOpcode() ==
               getRelaxedOpcode(Inst.getOpcode(), Inst.getOperands(), STI) &&
           "Branch Relaxation Error");
    break;
  }
  case RISCV::JAL: {
    // This has to be written manually because the QC.E.J -> JAL is
    // compression-only, so that it is not used when printing disassembly.
    assert(STI.hasFeature(RISCV::FeatureVendorXqcilb) &&
           "JAL is only relaxable with Xqcilb");
    assert((Inst.getOperand(0).getReg() == RISCV::X0 ||
            Inst.getOperand(0).getReg() == RISCV::X1) &&
           "JAL only relaxable with rd=x0 or rd=x1");
    // QC.E.J/QC.E.JAL encode rd implicitly, so only the target operand
    // (operand 1) is carried over.
    Res.setOpcode(getRelaxedOpcode(Opcode: Inst.getOpcode(), Operands: Inst.getOperands(), STI));
    Res.addOperand(Op: Inst.getOperand(i: 1));
    break;
  }
  case RISCV::C_LI: {
    // This should only be hit when trying to relax a `C.LI` into a `QC.E.LI`
    // because the `C.LI` has a bare symbol. We cannot use
    // `RISCVRVC::uncompress` because it will use decompression patterns. The
    // `QC.E.LI` compression pattern to `C.LI` is compression-only (because we
    // don't want `c.li` ever printed as `qc.e.li`, which might be done if the
    // pattern applied to decompression), but that doesn't help much becuase
    // `C.LI` with a bare symbol will decompress to an `ADDI` anyway (because
    // `simm12`'s MCOperandPredicate accepts a bare symbol and that pattern
    // comes first), and we still cannot emit an `ADDI` with a bare symbol.
    assert(STI.hasFeature(RISCV::FeatureVendorXqcili) &&
           "C.LI is only relaxable with Xqcili");
    Res.setOpcode(getRelaxedOpcode(Opcode: Inst.getOpcode(), Operands: Inst.getOperands(), STI));
    Res.addOperand(Op: Inst.getOperand(i: 0));
    Res.addOperand(Op: Inst.getOperand(i: 1));
    break;
  }
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BEQI:
  case RISCV::BNEI:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
  case RISCV::QC_BEQI:
  case RISCV::QC_BNEI:
  case RISCV::QC_BLTI:
  case RISCV::QC_BGEI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BEQI:
  case RISCV::QC_E_BNEI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_E_BGEUI:
    // Long-branch pseudos take the same (rs1, rs2/imm, target) operands.
    Res.setOpcode(getRelaxedOpcode(Opcode: Inst.getOpcode(), Operands: Inst.getOperands(), STI));
    Res.addOperand(Op: Inst.getOperand(i: 0));
    Res.addOperand(Op: Inst.getOperand(i: 1));
    Res.addOperand(Op: Inst.getOperand(i: 2));
    break;
  }
  Inst = std::move(Res);
}
318
// Check if an R_RISCV_ALIGN relocation is needed for an alignment directive.
// If conditions are met, compute the padding size and create a fixup encoding
// the padding size in the addend.
//
// \param F    The FT_Align fragment under consideration.
// \param Size [out] Bytes of NOP padding to emit (alignment minus the
//             minimum NOP length); only set when returning true.
// \return true if the fragment was converted to a linker-relaxable alignment
//         with an R_RISCV_ALIGN fixup attached.
bool RISCVAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) {
  // Alignments before the first linker-relaxable instruction have fixed sizes
  // and do not require relocations. Alignments after a linker-relaxable
  // instruction require a relocation, even if the STI specifies norelax.
  //
  // firstLinkerRelaxable is the layout order within the subsection, which may
  // be smaller than the section's order. Therefore, alignments in a
  // lower-numbered subsection may be unnecessarily treated as linker-relaxable.
  auto *Sec = F.getParent();
  if (F.getLayoutOrder() <= Sec->firstLinkerRelaxable())
    return false;

  // Use default handling unless the alignment is larger than the nop size.
  // The minimum NOP is 2 bytes when AlignRvc is set (default) or the
  // subtarget has Zca; otherwise 4 bytes.
  const MCSubtargetInfo *STI = F.getSubtargetInfo();
  unsigned MinNopLen =
      AlignRvc || STI->hasFeature(Feature: RISCV::FeatureStdExtZca) ? 2 : 4;
  if (F.getAlignment() <= MinNopLen)
    return false;

  // The addend of R_RISCV_ALIGN carries the number of padding bytes emitted.
  Size = F.getAlignment().value() - MinNopLen;
  auto *Expr = MCConstantExpr::create(Value: Size, Ctx&: getContext());
  MCFixup Fixup =
      MCFixup::create(Offset: 0, Value: Expr, Kind: FirstLiteralRelocationKind + ELF::R_RISCV_ALIGN);
  F.setVarFixups({Fixup});
  F.setLinkerRelaxable();
  return true;
}
349
// Rewrite a DWARF line-table delta fragment whose address delta cannot be
// resolved at assembly time (because of linker relaxation). Emits an explicit
// line advance plus either DW_LNS_fixed_advance_pc (2-byte operand) or, for
// large deltas, DW_LNE_set_address (pointer-sized operand), with a data fixup
// over the operand so the final value is filled in via relocations.
// Returns false to keep default handling when the delta is already absolute.
bool RISCVAsmBackend::relaxDwarfLineAddr(MCFragment &F) const {
  int64_t LineDelta = F.getDwarfLineDelta();
  const MCExpr &AddrDelta = F.getDwarfAddrDelta();
  int64_t Value;
  // If the label difference can be resolved, use the default handling, which
  // utilizes a shorter special opcode.
  if (AddrDelta.evaluateAsAbsolute(Res&: Value, Asm: *Asm))
    return false;
  [[maybe_unused]] bool IsAbsolute =
      AddrDelta.evaluateKnownAbsolute(Res&: Value, Asm: *Asm);
  assert(IsAbsolute && "CFA with invalid expression");

  SmallVector<char> Data;
  raw_svector_ostream OS(Data);

  // INT64_MAX is a signal that this is actually a DW_LNE_end_sequence.
  if (LineDelta != INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_advance_line);
    encodeSLEB128(Value: LineDelta, OS);
  }

  // According to the DWARF specification, the `DW_LNS_fixed_advance_pc` opcode
  // takes a single unsigned half (unencoded) operand. The maximum encodable
  // value is therefore 65535. Set a conservative upper bound for relaxation.
  unsigned PCBytes;
  if (Value > 60000) {
    // Large delta: fall back to an absolute DW_LNE_set_address whose
    // pointer-sized operand is zero-filled here and patched by the fixup.
    PCBytes = getContext().getAsmInfo()->getCodePointerSize();
    OS << uint8_t(dwarf::DW_LNS_extended_op) << uint8_t(PCBytes + 1)
       << uint8_t(dwarf::DW_LNE_set_address);
    OS.write_zeros(NumZeros: PCBytes);
  } else {
    PCBytes = 2;
    OS << uint8_t(dwarf::DW_LNS_fixed_advance_pc);
    support::endian::write<uint16_t>(os&: OS, value: 0, endian: Endian);
  }
  // The fixup covers the PCBytes operand just written, which ends at tell().
  auto Offset = OS.tell() - PCBytes;

  if (LineDelta == INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_extended_op);
    OS << uint8_t(1);
    OS << uint8_t(dwarf::DW_LNE_end_sequence);
  } else {
    OS << uint8_t(dwarf::DW_LNS_copy);
  }

  F.setVarContents(Data);
  F.setVarFixups({MCFixup::create(Offset, Value: &AddrDelta,
                                  Kind: MCFixup::getDataKindForSize(Size: PCBytes))});
  return true;
}
400
// Rewrite a DWARF CFA advance fragment whose address delta cannot be resolved
// because of linker relaxation. Picks the smallest DW_CFA_advance_loc* form
// that fits the current estimate and attaches a SET*/SUB* relocation pair so
// the linker computes the final delta. Returns false to keep default handling
// when the delta is already absolute.
bool RISCVAsmBackend::relaxDwarfCFA(MCFragment &F) const {
  const MCExpr &AddrDelta = F.getDwarfAddrDelta();
  SmallVector<MCFixup, 2> Fixups;
  int64_t Value;
  if (AddrDelta.evaluateAsAbsolute(Res&: Value, Asm: *Asm))
    return false;
  [[maybe_unused]] bool IsAbsolute =
      AddrDelta.evaluateKnownAbsolute(Res&: Value, Asm: *Asm);
  assert(IsAbsolute && "CFA with invalid expression");

  assert(getContext().getAsmInfo()->getMinInstAlignment() == 1 &&
         "expected 1-byte alignment");
  // A zero delta needs no advance opcode at all.
  if (Value == 0) {
    F.clearVarContents();
    F.clearVarFixups();
    return true;
  }

  // Emit a paired SET/SUB relocation at Offset so the linker recomputes the
  // delta (LHS - RHS of the label difference) after relaxation.
  auto AddFixups = [&Fixups, &AddrDelta](unsigned Offset,
                                         std::pair<unsigned, unsigned> Fixup) {
    const MCBinaryExpr &MBE = cast<MCBinaryExpr>(Val: AddrDelta);
    Fixups.push_back(Elt: MCFixup::create(Offset, Value: MBE.getLHS(), Kind: std::get<0>(in&: Fixup)));
    Fixups.push_back(Elt: MCFixup::create(Offset, Value: MBE.getRHS(), Kind: std::get<1>(in&: Fixup)));
  };

  SmallVector<char, 8> Data;
  raw_svector_ostream OS(Data);
  if (isUIntN(N: 6, x: Value)) {
    // Delta fits in the low 6 bits of the DW_CFA_advance_loc opcode itself.
    OS << uint8_t(dwarf::DW_CFA_advance_loc);
    AddFixups(0, {ELF::R_RISCV_SET6, ELF::R_RISCV_SUB6});
  } else if (isUInt<8>(x: Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc1);
    support::endian::write<uint8_t>(os&: OS, value: 0, endian: Endian);
    AddFixups(1, {ELF::R_RISCV_SET8, ELF::R_RISCV_SUB8});
  } else if (isUInt<16>(x: Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc2);
    support::endian::write<uint16_t>(os&: OS, value: 0, endian: Endian);
    AddFixups(1, {ELF::R_RISCV_SET16, ELF::R_RISCV_SUB16});
  } else if (isUInt<32>(x: Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc4);
    support::endian::write<uint32_t>(os&: OS, value: 0, endian: Endian);
    AddFixups(1, {ELF::R_RISCV_SET32, ELF::R_RISCV_SUB32});
  } else {
    llvm_unreachable("unsupported CFA encoding");
  }
  F.setVarContents(Data);
  F.setVarFixups(Fixups);
  return true;
}
450
451std::pair<bool, bool> RISCVAsmBackend::relaxLEB128(MCFragment &LF,
452 int64_t &Value) const {
453 if (LF.isLEBSigned())
454 return std::make_pair(x: false, y: false);
455 const MCExpr &Expr = LF.getLEBValue();
456 if (ULEB128Reloc) {
457 LF.setVarFixups({MCFixup::create(Offset: 0, Value: &Expr, Kind: FK_Data_leb128)});
458 }
459 return std::make_pair(x: Expr.evaluateKnownAbsolute(Res&: Value, Asm: *Asm), y: false);
460}
461
462bool RISCVAsmBackend::mayNeedRelaxation(unsigned Opcode,
463 ArrayRef<MCOperand> Operands,
464 const MCSubtargetInfo &STI) const {
465 // This function has access to two STIs, the member of the AsmBackend, and the
466 // one passed as an argument. The latter is more specific, so we query it for
467 // specific features.
468 if (STI.hasFeature(Feature: RISCV::FeatureExactAssembly))
469 return false;
470
471 return getRelaxedOpcode(Opcode, Operands, STI) != Opcode;
472}
473
474bool RISCVAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
475 const MCSubtargetInfo *STI) const {
476 // We mostly follow binutils' convention here: align to even boundary with a
477 // 0-fill padding. We emit up to 1 2-byte nop, though we use c.nop if RVC is
478 // enabled or 0-fill otherwise. The remainder is now padded with 4-byte nops.
479
480 // Instructions always are at even addresses. We must be in a data area or
481 // be unaligned due to some other reason.
482 if (Count % 2) {
483 OS.write(Ptr: "\0", Size: 1);
484 Count -= 1;
485 }
486
487 // TODO: emit a mapping symbol right here
488
489 if (Count % 4 == 2) {
490 // The canonical nop with Zca is c.nop. For .balign 4, we generate a 2-byte
491 // c.nop even in a norvc region.
492 OS.write(Ptr: "\x01\0", Size: 2);
493 Count -= 2;
494 }
495
496 // The canonical nop on RISC-V is addi x0, x0, 0.
497 for (; Count >= 4; Count -= 4)
498 OS.write(Ptr: "\x13\0\0\0", Size: 4);
499
500 return true;
501}
502
// Transform a resolved fixup value into the raw bit pattern that is ORed into
// the instruction (or data) at the fixup's target offset. Performs range and
// alignment diagnostics via Ctx, then scatters immediate bits into each
// encoding's field positions (per the comments on each case).
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx) {
  switch (Fixup.getKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    // Plain data fixups are stored as-is.
    return Value;
  case RISCV::fixup_riscv_lo12_i:
  case RISCV::fixup_riscv_pcrel_lo12_i:
    // Low 12 bits go directly into the I-type immediate field.
    return Value & 0xfff;
  case RISCV::fixup_riscv_12_i:
    if (!isInt<12>(x: Value)) {
      Ctx.reportError(L: Fixup.getLoc(),
                      Msg: "operand must be a constant 12-bit integer");
    }
    return Value & 0xfff;
  case RISCV::fixup_riscv_lo12_s:
  case RISCV::fixup_riscv_pcrel_lo12_s:
    // S-type splits the 12-bit immediate: imm[11:5] -> bits 31:25,
    // imm[4:0] -> bits 11:7.
    return (((Value >> 5) & 0x7f) << 25) | ((Value & 0x1f) << 7);
  case RISCV::fixup_riscv_hi20:
  case RISCV::fixup_riscv_pcrel_hi20:
    // Add 1 if bit 11 is 1, to compensate for low 12 bits being negative.
    return ((Value + 0x800) >> 12) & 0xfffff;
  case RISCV::fixup_riscv_jal: {
    if (!isInt<21>(x: Value))
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value must be 2-byte aligned");
    // Need to produce imm[19|10:1|11|19:12] from the 21-bit Value.
    unsigned Sbit = (Value >> 20) & 0x1;
    unsigned Hi8 = (Value >> 12) & 0xff;
    unsigned Mid1 = (Value >> 11) & 0x1;
    unsigned Lo10 = (Value >> 1) & 0x3ff;
    // Inst{31} = Sbit;
    // Inst{30-21} = Lo10;
    // Inst{20} = Mid1;
    // Inst{19-12} = Hi8;
    Value = (Sbit << 19) | (Lo10 << 9) | (Mid1 << 8) | Hi8;
    return Value;
  }
  case RISCV::fixup_riscv_qc_e_branch:
  case RISCV::fixup_riscv_branch: {
    if (!isInt<13>(x: Value))
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value must be 2-byte aligned");
    // Need to extract imm[12], imm[10:5], imm[4:1], imm[11] from the 13-bit
    // Value.
    unsigned Sbit = (Value >> 12) & 0x1;
    unsigned Hi1 = (Value >> 11) & 0x1;
    unsigned Mid6 = (Value >> 5) & 0x3f;
    unsigned Lo4 = (Value >> 1) & 0xf;
    // Inst{31} = Sbit;
    // Inst{30-25} = Mid6;
    // Inst{11-8} = Lo4;
    // Inst{7} = Hi1;
    Value = (Sbit << 31) | (Mid6 << 25) | (Lo4 << 8) | (Hi1 << 7);
    return Value;
  }
  case RISCV::fixup_riscv_call:
  case RISCV::fixup_riscv_call_plt: {
    // Jalr will add UpperImm with the sign-extended 12-bit LowerImm,
    // we need to add 0x800ULL before extract upper bits to reflect the
    // effect of the sign extension.
    uint64_t UpperImm = (Value + 0x800ULL) & 0xfffff000ULL;
    uint64_t LowerImm = Value & 0xfffULL;
    // The 64-bit fixup covers the auipc+jalr pair; the jalr immediate lands
    // in the upper word.
    return UpperImm | ((LowerImm << 20) << 32);
  }
  case RISCV::fixup_riscv_rvc_jump: {
    if (!isInt<12>(x: Value))
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value out of range");
    // Need to produce offset[11|4|9:8|10|6|7|3:1|5] from the 11-bit Value.
    unsigned Bit11 = (Value >> 11) & 0x1;
    unsigned Bit4 = (Value >> 4) & 0x1;
    unsigned Bit9_8 = (Value >> 8) & 0x3;
    unsigned Bit10 = (Value >> 10) & 0x1;
    unsigned Bit6 = (Value >> 6) & 0x1;
    unsigned Bit7 = (Value >> 7) & 0x1;
    unsigned Bit3_1 = (Value >> 1) & 0x7;
    unsigned Bit5 = (Value >> 5) & 0x1;
    Value = (Bit11 << 10) | (Bit4 << 9) | (Bit9_8 << 7) | (Bit10 << 6) |
            (Bit6 << 5) | (Bit7 << 4) | (Bit3_1 << 1) | Bit5;
    return Value;
  }
  case RISCV::fixup_riscv_rvc_branch: {
    if (!isInt<9>(x: Value))
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value out of range");
    // Need to produce offset[8|4:3], [reg 3 bit], offset[7:6|2:1|5]
    unsigned Bit8 = (Value >> 8) & 0x1;
    unsigned Bit7_6 = (Value >> 6) & 0x3;
    unsigned Bit5 = (Value >> 5) & 0x1;
    unsigned Bit4_3 = (Value >> 3) & 0x3;
    unsigned Bit2_1 = (Value >> 1) & 0x3;
    Value = (Bit8 << 12) | (Bit4_3 << 10) | (Bit7_6 << 5) | (Bit2_1 << 3) |
            (Bit5 << 2);
    return Value;
  }
  case RISCV::fixup_riscv_rvc_imm: {
    if (!isInt<6>(x: Value))
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value out of range");
    // CI-format immediate: imm[5] -> bit 12, imm[4:0] -> bits 6:2.
    unsigned Bit5 = (Value >> 5) & 0x1;
    unsigned Bit4_0 = Value & 0x1f;
    Value = (Bit5 << 12) | (Bit4_0 << 2);
    return Value;
  }
  case RISCV::fixup_riscv_qc_e_32: {
    if (!isInt<32>(x: Value))
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value out of range");
    return Value & 0xffffffffu;
  }
  case RISCV::fixup_riscv_qc_abs20_u: {
    if (!isInt<20>(x: Value))
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value out of range");
    unsigned Bit19 = (Value >> 19) & 0x1;
    unsigned Bit14_0 = Value & 0x7fff;
    unsigned Bit18_15 = (Value >> 15) & 0xf;
    Value = (Bit19 << 31) | (Bit14_0 << 16) | (Bit18_15 << 12);
    return Value;
  }
  case RISCV::fixup_riscv_qc_e_call_plt: {
    if (!isInt<32>(x: Value))
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value must be 2-byte aligned");
    uint64_t Bit31_16 = (Value >> 16) & 0xffff;
    uint64_t Bit12 = (Value >> 12) & 0x1;
    uint64_t Bit10_5 = (Value >> 5) & 0x3f;
    uint64_t Bit15_13 = (Value >> 13) & 0x7;
    uint64_t Bit4_1 = (Value >> 1) & 0xf;
    uint64_t Bit11 = (Value >> 11) & 0x1;
    Value = (Bit31_16 << 32ull) | (Bit12 << 31) | (Bit10_5 << 25) |
            (Bit15_13 << 17) | (Bit4_1 << 8) | (Bit11 << 7);
    return Value;
  }
  case RISCV::fixup_riscv_nds_branch_10: {
    if (!isInt<11>(x: Value))
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(L: Fixup.getLoc(), Msg: "fixup value must be 2-byte aligned");
    // Need to extract imm[10], imm[9:5], imm[4:1] from the 11-bit Value.
    unsigned Sbit = (Value >> 10) & 0x1;
    unsigned Hi5 = (Value >> 5) & 0x1f;
    unsigned Lo4 = (Value >> 1) & 0xf;
    // Inst{31} = Sbit;
    // Inst{29-25} = Hi5;
    // Inst{11-8} = Lo4;
    Value = (Sbit << 31) | (Hi5 << 25) | (Lo4 << 8);
    return Value;
  }
  }
}
658
// Decide whether a PC-relative fixup against SymA, located in fragment F, can
// be resolved at assembly time despite potential linker relaxation in the
// containing section.
bool RISCVAsmBackend::isPCRelFixupResolved(const MCSymbol *SymA,
                                           const MCFragment &F) {
  // If the section does not contain linker-relaxable fragments, PC-relative
  // fixups can be resolved.
  if (!F.getParent()->isLinkerRelaxable())
    return true;

  // Otherwise, check if the offset between the symbol and fragment is fully
  // resolved, unaffected by linker-relaxable fragments (e.g. instructions or
  // offset-affected FT_Align fragments). Complements the generic
  // isSymbolRefDifferenceFullyResolvedImpl.
  // PCRelTemp is a lazily-created scratch symbol reused across calls; it is
  // re-pinned to F each time to stand in for the fixup location.
  if (!PCRelTemp)
    PCRelTemp = getContext().createTempSymbol();
  PCRelTemp->setFragment(const_cast<MCFragment *>(&F));
  MCValue Res;
  MCExpr::evaluateSymbolicAdd(Asm, false, MCValue::get(SymA),
                              MCValue::get(SymA: nullptr, SymB: PCRelTemp), Res);
  // A surviving subtrahend means SymA - here did not fold to a constant.
  return !Res.getSubSym();
}
678
// Get the corresponding PC-relative HI fixup that a S_PCREL_LO points to, and
// optionally the fragment containing it.
//
// \param Expr  The %pcrel_lo specifier expression; its sub-expression names
//              the label placed on the AUIPC carrying the HI fixup.
// \param DFOut [out] Set to the fragment holding the HI fixup on success.
// \returns nullptr if this isn't a S_PCREL_LO pointing to a known PC-relative
// HI fixup.
const MCFixup *getPCRelHiFixup(const MCSpecifierExpr &Expr,
                               const MCFragment **DFOut) {
  MCValue AUIPCLoc;
  if (!Expr.getSubExpr()->evaluateAsRelocatable(Res&: AUIPCLoc, Asm: nullptr))
    return nullptr;

  const MCSymbol *AUIPCSymbol = AUIPCLoc.getAddSym();
  if (!AUIPCSymbol)
    return nullptr;
  const auto *DF = AUIPCSymbol->getFragment();
  if (!DF)
    return nullptr;

  // A label at the very end of a fragment actually refers to the start of
  // the next one; normalize to that fragment with offset 0.
  uint64_t Offset = AUIPCSymbol->getOffset();
  if (DF->getContents().size() == Offset) {
    DF = DF->getNext();
    if (!DF)
      return nullptr;
    Offset = 0;
  }

  // Scan the fragment's fixups for one at the label's offset that is a
  // PC-relative HI form: either the target fixup kind, or one of the raw
  // GOT/TLS HI relocation kinds.
  for (const MCFixup &F : DF->getFixups()) {
    if (F.getOffset() != Offset)
      continue;
    auto Kind = F.getKind();
    if (!mc::isRelocation(FixupKind: F.getKind())) {
      if (Kind == RISCV::fixup_riscv_pcrel_hi20) {
        *DFOut = DF;
        return &F;
      }
      break;
    }
    switch (Kind) {
    case ELF::R_RISCV_GOT_HI20:
    case ELF::R_RISCV_TLS_GOT_HI20:
    case ELF::R_RISCV_TLS_GD_HI20:
    case ELF::R_RISCV_TLSDESC_HI20:
      *DFOut = DF;
      return &F;
    }
  }

  return nullptr;
}
728
// Target hook for fixup evaluation. Only %pcrel_lo fixups get special
// handling: their value is derived from the paired %pcrel_hi (AUIPC) fixup
// rather than from their own expression. Returns std::nullopt to request
// default handling, otherwise the IsResolved result (with Value filled in
// when resolved).
std::optional<bool> RISCVAsmBackend::evaluateFixup(const MCFragment &,
                                                   MCFixup &Fixup,
                                                   MCValue &Target,
                                                   uint64_t &Value) {
  const MCFixup *AUIPCFixup;
  const MCFragment *AUIPCDF;
  MCValue AUIPCTarget;
  switch (Fixup.getKind()) {
  default:
    // Use default handling for `Value` and `IsResolved`.
    return {};
  case RISCV::fixup_riscv_pcrel_lo12_i:
  case RISCV::fixup_riscv_pcrel_lo12_s: {
    // Locate the %pcrel_hi fixup this %pcrel_lo refers to.
    AUIPCFixup =
        getPCRelHiFixup(Expr: cast<MCSpecifierExpr>(Val: *Fixup.getValue()), DFOut: &AUIPCDF);
    if (!AUIPCFixup) {
      getContext().reportError(L: Fixup.getLoc(),
                               Msg: "could not find corresponding %pcrel_hi");
      return true;
    }

    // MCAssembler::evaluateFixup will emit an error for this case when it sees
    // the %pcrel_hi, so don't duplicate it when also seeing the %pcrel_lo.
    const MCExpr *AUIPCExpr = AUIPCFixup->getValue();
    if (!AUIPCExpr->evaluateAsRelocatable(Res&: AUIPCTarget, Asm))
      return true;
    break;
  }
  }

  if (!AUIPCTarget.getAddSym())
    return false;

  auto &SA = static_cast<const MCSymbolELF &>(*AUIPCTarget.getAddSym());
  if (SA.isUndefined())
    return false;

  // Only resolvable when the HI target is a local, non-ifunc symbol defined
  // in the same section as the AUIPC itself.
  bool IsResolved = &SA.getSection() == AUIPCDF->getParent() &&
                    SA.getBinding() == ELF::STB_LOCAL &&
                    SA.getType() != ELF::STT_GNU_IFUNC;
  if (!IsResolved)
    return false;

  // Distance from the AUIPC (HI fixup location) to the HI target.
  Value = Asm->getSymbolOffset(S: SA) + AUIPCTarget.getConstant();
  Value -= Asm->getFragmentOffset(F: *AUIPCDF) + AUIPCFixup->getOffset();

  return AUIPCFixup->getKind() == RISCV::fixup_riscv_pcrel_hi20 &&
         isPCRelFixupResolved(SymA: AUIPCTarget.getAddSym(), F: *AUIPCDF);
}
778
// For vendor-specific fixups (Qualcomm, Andes), record an R_RISCV_VENDOR
// relocation at the same offset, referencing a local symbol whose name is the
// vendor identifier, immediately before the main relocation is emitted.
// Non-vendor fixups are left untouched.
void RISCVAsmBackend::maybeAddVendorReloc(const MCFragment &F,
                                          const MCFixup &Fixup) {
  StringRef VendorIdentifier;
  switch (Fixup.getKind()) {
  default:
    // No Vendor Relocation Required.
    return;
  case RISCV::fixup_riscv_qc_e_branch:
  case RISCV::fixup_riscv_qc_abs20_u:
  case RISCV::fixup_riscv_qc_e_32:
  case RISCV::fixup_riscv_qc_e_call_plt:
    VendorIdentifier = "QUALCOMM";
    break;
  case RISCV::fixup_riscv_nds_branch_10:
    VendorIdentifier = "ANDES";
    break;
  }

  // Create a local symbol for the vendor relocation to reference. It's fine if
  // the symbol has the same name as an existing symbol.
  MCContext &Ctx = Asm->getContext();
  MCSymbol *VendorSymbol = Ctx.createLocalSymbol(Name: VendorIdentifier);
  // One symbol per vendor for the whole object; the map caches the first one.
  auto [It, Inserted] =
      VendorSymbols.try_emplace(Key: VendorIdentifier, Args&: VendorSymbol);

  if (Inserted) {
    // Setup the just-created symbol
    VendorSymbol->setVariableValue(MCConstantExpr::create(Value: 0, Ctx));
    Asm->registerSymbol(Symbol: *VendorSymbol);
  } else {
    // Fetch the existing symbol
    VendorSymbol = It->getValue();
  }

  MCFixup VendorFixup =
      MCFixup::create(Offset: Fixup.getOffset(), Value: nullptr, Kind: ELF::R_RISCV_VENDOR);
  // Explicitly create MCValue rather than using an MCExpr and evaluating it so
  // that the absolute vendor symbol is not evaluated to constant 0.
  MCValue VendorTarget = MCValue::get(SymA: VendorSymbol);
  uint64_t VendorValue;
  Asm->getWriter().recordRelocation(F, Fixup: VendorFixup, Target: VendorTarget, FixedValue&: VendorValue);
}
821
822static bool relaxableFixupNeedsRelocation(const MCFixupKind Kind) {
823 // Some Fixups are marked as LinkerRelaxable by
824 // `RISCVMCCodeEmitter::getImmOpValue` only because they may be
825 // (assembly-)relaxed into a linker-relaxable instruction. This function
826 // should return `false` for those fixups so they do not get a `R_RISCV_RELAX`
827 // relocation emitted in addition to the relocation.
828 switch (Kind) {
829 default:
830 break;
831 case RISCV::fixup_riscv_rvc_jump:
832 case RISCV::fixup_riscv_branch:
833 case RISCV::fixup_riscv_rvc_branch:
834 case RISCV::fixup_riscv_qc_e_branch:
835 case RISCV::fixup_riscv_rvc_imm:
836 return false;
837 }
838 return true;
839}
840
// Record the relocation(s) required for `Fixup`. Handles three cases:
// SymA-SymB differences (paired ADD/SUB relocations), linker-relaxable
// fixups (main relocation plus R_RISCV_RELAX), and plain unresolved fixups.
// Always returns false, i.e. the fixup value is not considered applied here.
bool RISCVAsmBackend::addReloc(const MCFragment &F, const MCFixup &Fixup,
                               const MCValue &Target, uint64_t &FixedValue,
                               bool IsResolved) {
  uint64_t FixedValueA, FixedValueB;
  if (Target.getSubSym()) {
    // A difference of two symbols (SymA - SymB) cannot be expressed by a
    // single RISC-V relocation; lower it to a paired ADD/SUB (or, for
    // LEB128 data, SET_ULEB128/SUB_ULEB128) relocation, one per symbol.
    assert(Target.getSpecifier() == 0 &&
           "relocatable SymA-SymB cannot have relocation specifier");
    unsigned TA = 0, TB = 0;
    switch (Fixup.getKind()) {
    case llvm::FK_Data_1:
      TA = ELF::R_RISCV_ADD8;
      TB = ELF::R_RISCV_SUB8;
      break;
    case llvm::FK_Data_2:
      TA = ELF::R_RISCV_ADD16;
      TB = ELF::R_RISCV_SUB16;
      break;
    case llvm::FK_Data_4:
      TA = ELF::R_RISCV_ADD32;
      TB = ELF::R_RISCV_SUB32;
      break;
    case llvm::FK_Data_8:
      TA = ELF::R_RISCV_ADD64;
      TB = ELF::R_RISCV_SUB64;
      break;
    case llvm::FK_Data_leb128:
      TA = ELF::R_RISCV_SET_ULEB128;
      TB = ELF::R_RISCV_SUB_ULEB128;
      break;
    default:
      llvm_unreachable("unsupported fixup size");
    }
    // The constant addend rides on the ADD/SET half of the pair.
    MCValue A = MCValue::get(SymA: Target.getAddSym(), SymB: nullptr, Val: Target.getConstant());
    MCValue B = MCValue::get(SymA: Target.getSubSym());
    auto FA = MCFixup::create(Offset: Fixup.getOffset(), Value: nullptr, Kind: TA);
    auto FB = MCFixup::create(Offset: Fixup.getOffset(), Value: nullptr, Kind: TB);
    Asm->getWriter().recordRelocation(F, Fixup: FA, Target: A, FixedValue&: FixedValueA);
    Asm->getWriter().recordRelocation(F, Fixup: FB, Target: B, FixedValue&: FixedValueB);
    FixedValue = FixedValueA - FixedValueB;
    return false;
  }

  // If linker relaxation is enabled and supported by the current fixup, then we
  // always want to generate a relocation.
  bool NeedsRelax = Fixup.isLinkerRelaxable() &&
                    relaxableFixupNeedsRelocation(Kind: Fixup.getKind());
  if (NeedsRelax)
    IsResolved = false;

  // A PC-relative fixup may still need a relocation if the target symbol
  // cannot be proven to stay at a fixed distance from the fixup location.
  if (IsResolved && Fixup.isPCRel())
    IsResolved = isPCRelFixupResolved(SymA: Target.getAddSym(), F);

  if (!IsResolved) {
    // Some Fixups require a VENDOR relocation, record it (directly) before we
    // add the relocation.
    maybeAddVendorReloc(F, Fixup);

    Asm->getWriter().recordRelocation(F, Fixup, Target, FixedValue);

    if (NeedsRelax) {
      // Some Fixups get a RELAX relocation, record it (directly) after we add
      // the relocation.
      MCFixup RelaxFixup =
          MCFixup::create(Offset: Fixup.getOffset(), Value: nullptr, Kind: ELF::R_RISCV_RELAX);
      MCValue RelaxTarget = MCValue::get(SymA: nullptr);
      uint64_t RelaxValue;
      Asm->getWriter().recordRelocation(F, Fixup: RelaxFixup, Target: RelaxTarget, FixedValue&: RelaxValue);
    }
  }

  return false;
}
913
914// Data fixups should be swapped for big endian cores.
915// Instruction fixups should not be swapped as RISC-V instructions
916// are always little-endian.
917static bool isDataFixup(unsigned Kind) {
918 switch (Kind) {
919 default:
920 return false;
921
922 case FK_Data_1:
923 case FK_Data_2:
924 case FK_Data_4:
925 case FK_Data_8:
926 return true;
927 }
928}
929
// Patch the fixup's value into the fragment bytes at `Data`, after first
// recording any relocations via addReloc. Raw relocation kinds and
// zero-valued fixups leave the encoding untouched.
void RISCVAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
                                 const MCValue &Target, uint8_t *Data,
                                 uint64_t Value, bool IsResolved) {
  IsResolved = addReloc(F, Fixup, Target, FixedValue&: Value, IsResolved);
  MCFixupKind Kind = Fixup.getKind();
  // Pure relocation kinds carry no bits to patch into the fragment.
  if (mc::isRelocation(FixupKind: Kind))
    return;
  MCContext &Ctx = getContext();
  MCFixupKindInfo Info = getFixupKindInfo(Kind);
  if (!Value)
    return; // Doesn't change encoding.
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  // Number of whole bytes the (shifted) fixup field spans.
  unsigned NumBytes = alignTo(Value: Info.TargetSize + Info.TargetOffset, Align: 8) / 8;
  assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
         "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  // For big endian cores, data fixup should be swapped.
  bool SwapValue = Endian == llvm::endianness::big && isDataFixup(Kind);
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = SwapValue ? (NumBytes - 1 - i) : i;
    Data[Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}
960
// ELF is the object format for the base RISC-V backend; a Darwin subclass
// elsewhere in this file overrides this to produce Mach-O instead.
std::unique_ptr<MCObjectTargetWriter>
RISCVAsmBackend::createObjectTargetWriter() const {
  return createRISCVELFObjectWriter(OSABI, Is64Bit);
}
965
// Asm backend variant for Mach-O output: reuses the generic RISC-V fixup
// machinery but creates a Mach-O object writer and records Mach-O
// relocations.
class DarwinRISCVAsmBackend : public RISCVAsmBackend {
public:
  DarwinRISCVAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI, bool Is64Bit,
                        bool IsLittleEndian, const MCTargetOptions &Options)
      : RISCVAsmBackend(STI, OSABI, Is64Bit, IsLittleEndian, Options) {}

  // Build a Mach-O writer keyed by the CPU type/subtype derived from the
  // target triple.
  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    const Triple &TT = STI.getTargetTriple();
    uint32_t CPUType = cantFail(ValOrErr: MachO::getCPUType(T: TT));
    uint32_t CPUSubType = cantFail(ValOrErr: MachO::getCPUSubType(T: TT));
    return createRISCVMachObjectWriter(CPUType, CPUSubtype: CPUSubType);
  }

  // Defined out-of-line below: records a relocation for unresolved fixups.
  bool addReloc(const MCFragment &, const MCFixup &, const MCValue &,
                uint64_t &FixedValue, bool IsResolved) override;

  // Diagnose %pcrel_lo12 fixups with no matching %pcrel_hi; everything else
  // falls through to the default evaluation (empty optional).
  std::optional<bool> evaluateFixup(const MCFragment &F, MCFixup &Fixup,
                                    MCValue &Target, uint64_t &Value) override {
    const MCFixup *AUIPCFixup;
    const MCFragment *AUIPCDF;
    const MCFixupKind FKind = Fixup.getKind();
    if ((FKind == RISCV::fixup_riscv_pcrel_lo12_i) ||
        (FKind == RISCV::fixup_riscv_pcrel_lo12_s)) {
      // Look up the AUIPC fixup this %pcrel_lo refers to; error out if the
      // pairing cannot be established.
      AUIPCFixup =
          getPCRelHiFixup(Expr: cast<MCSpecifierExpr>(Val: *Fixup.getValue()), DFOut: &AUIPCDF);
      if (!AUIPCFixup) {
        getContext().reportError(L: Fixup.getLoc(),
                                 Msg: "could not find corresponding %pcrel_hi");
        return true;
      }

      return false;
    }

    // Use default handling for all other cases.
    return {};
  }
};
1005
1006MCAsmBackend *llvm::createRISCVAsmBackend(const Target &T,
1007 const MCSubtargetInfo &STI,
1008 const MCRegisterInfo &MRI,
1009 const MCTargetOptions &Options) {
1010 const Triple &TT = STI.getTargetTriple();
1011 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(OSType: TT.getOS());
1012 if (TT.isOSBinFormatMachO())
1013 return new DarwinRISCVAsmBackend(STI, OSABI, TT.isArch64Bit(),
1014 TT.isLittleEndian(), Options);
1015
1016 return new RISCVAsmBackend(STI, OSABI, TT.isArch64Bit(), TT.isLittleEndian(),
1017 Options);
1018}
1019
1020bool DarwinRISCVAsmBackend::addReloc(const MCFragment &F, const MCFixup &Fixup,
1021 const MCValue &Target,
1022 uint64_t &FixedValue, bool IsResolved) {
1023 if (!IsResolved)
1024 Asm->getWriter().recordRelocation(F, Fixup, Target, FixedValue);
1025 return IsResolved;
1026}
1027