//===-- RISCVAsmBackend.cpp - RISC-V Assembler Backend --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVAsmBackend.h"
#include "RISCVFixupKinds.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Temporary workaround for old linkers that do not support ULEB128 relocations,
// which are abused by DWARF v5 DW_LLE_offset_pair/DW_RLE_offset_pair
// implemented in Clang/LLVM.
static cl::opt<bool> ULEB128Reloc(
    "riscv-uleb128-reloc", cl::init(true), cl::Hidden,
    cl::desc("Emit R_RISCV_SET_ULEB128/R_RISCV_SUB_ULEB128 if appropriate"));

RISCVAsmBackend::RISCVAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI,
                                 bool Is64Bit, const MCTargetOptions &Options)
    : MCAsmBackend(llvm::endianness::little), STI(STI), OSABI(OSABI),
      Is64Bit(Is64Bit), TargetOptions(Options) {
  RISCVFeatures::validate(STI.getTargetTriple(), STI.getFeatureBits());
}

std::optional<MCFixupKind> RISCVAsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    unsigned Type;
    Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(NAME, ID) .Case(#NAME, ID)
#include "llvm/BinaryFormat/ELFRelocs/RISCV.def"
#undef ELF_RELOC
#define ELF_RISCV_NONSTANDARD_RELOC(_VENDOR, NAME, ID) .Case(#NAME, ID)
#include "llvm/BinaryFormat/ELFRelocs/RISCV_nonstandard.def"
#undef ELF_RISCV_NONSTANDARD_RELOC
               .Case("BFD_RELOC_NONE", ELF::R_RISCV_NONE)
               .Case("BFD_RELOC_32", ELF::R_RISCV_32)
               .Case("BFD_RELOC_64", ELF::R_RISCV_64)
               .Default(-1u);
    if (Type != -1u)
      return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
  }
  return std::nullopt;
}
MCFixupKindInfo RISCVAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // RISCVFixupKinds.h.
      //
      // name                      offset bits  flags
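      // For example, fixup_riscv_jal starts at bit 12 and spans 20 bits,
      // matching the J-type immediate field Inst[31:12]; split immediates
      // such as lo12_s instead cover the full 32-bit word and are scattered
      // into place by adjustFixupValue.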
      {"fixup_riscv_hi20", 12, 20, 0},
      {"fixup_riscv_lo12_i", 20, 12, 0},
      {"fixup_riscv_12_i", 20, 12, 0},
      {"fixup_riscv_lo12_s", 0, 32, 0},
      {"fixup_riscv_pcrel_hi20", 12, 20, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_riscv_pcrel_lo12_i", 20, 12,
       MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_IsTarget},
      {"fixup_riscv_pcrel_lo12_s", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_IsTarget},
      {"fixup_riscv_jal", 12, 20, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_riscv_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_riscv_rvc_jump", 2, 11, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_riscv_rvc_branch", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_riscv_call", 0, 64, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_riscv_call_plt", 0, 64, MCFixupKindInfo::FKF_IsPCRel},

      {"fixup_riscv_qc_e_branch", 0, 48, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_riscv_qc_e_32", 16, 32, 0},
      {"fixup_riscv_qc_abs20_u", 12, 20, 0},
      {"fixup_riscv_qc_e_call_plt", 0, 48, MCFixupKindInfo::FKF_IsPCRel},

      // Andes fixups
      {"fixup_riscv_nds_branch_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
  };
  static_assert((std::size(Infos)) == RISCV::NumTargetFixupKinds,
                "Not all fixup kinds added to Infos array");

  // Fixup kinds from raw relocation types and .reloc directives force
  // relocations and do not use these fields.
  if (mc::isRelocation(Kind))
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < RISCV::NumTargetFixupKinds &&
         "Invalid kind!");
  return Infos[Kind - FirstTargetFixupKind];
}

bool RISCVAsmBackend::fixupNeedsRelaxationAdvanced(const MCFixup &Fixup,
                                                   const MCValue &,
                                                   uint64_t Value,
                                                   bool Resolved) const {
  int64_t Offset = int64_t(Value);
  unsigned Kind = Fixup.getTargetKind();

  // Return true if the symbol is unresolved.
  if (!Resolved)
    return true;

  switch (Kind) {
  default:
    return false;
  case RISCV::fixup_riscv_rvc_branch:
    // For compressed branch instructions the immediate must be
    // in the range [-256, 254].
    return Offset > 254 || Offset < -256;
  case RISCV::fixup_riscv_rvc_jump:
    // For compressed jump instructions the immediate must be
    // in the range [-2048, 2046].
    return Offset > 2046 || Offset < -2048;
  case RISCV::fixup_riscv_branch:
  case RISCV::fixup_riscv_qc_e_branch:
    // For conditional branch instructions the immediate must be
    // in the range [-4096, 4094].
    return Offset > 4094 || Offset < -4096;
  case RISCV::fixup_riscv_jal:
    // For jump instructions the immediate must be in the range
    // [-1048576, 1048574].
    return Offset > 1048574 || Offset < -1048576;
  }
}

// Given a control-flow instruction, this function returns the opcode of the
// next larger (relaxed) instruction, or the original opcode if no further
// relaxation is available.
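// For example, C.BEQZ relaxes to BEQ, which may in turn relax to
// PseudoLongBEQ on a later pass; each call advances at most one step along
// that chain.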
static unsigned getRelaxedOpcode(const MCInst &Inst,
                                 const MCSubtargetInfo &STI) {
  switch (Inst.getOpcode()) {
  case RISCV::C_BEQZ:
    return RISCV::BEQ;
  case RISCV::C_BNEZ:
    return RISCV::BNE;
  case RISCV::C_J:
  case RISCV::C_JAL: // fall through.
    // This only relaxes one "step" - i.e. from C.J to JAL, not from C.J to
    // QC.E.J, because we can always relax again if needed.
    return RISCV::JAL;
  case RISCV::JAL: {
    // We can only relax JAL if we have Xqcilb.
    if (!STI.hasFeature(RISCV::FeatureVendorXqcilb))
      break;

    // And only if it is using X0 or X1 for rd.
    MCRegister Reg = Inst.getOperand(0).getReg();
    if (Reg == RISCV::X0)
      return RISCV::QC_E_J;
    if (Reg == RISCV::X1)
      return RISCV::QC_E_JAL;

    break;
  }
  case RISCV::BEQ:
    return RISCV::PseudoLongBEQ;
  case RISCV::BNE:
    return RISCV::PseudoLongBNE;
  case RISCV::BLT:
    return RISCV::PseudoLongBLT;
  case RISCV::BGE:
    return RISCV::PseudoLongBGE;
  case RISCV::BLTU:
    return RISCV::PseudoLongBLTU;
  case RISCV::BGEU:
    return RISCV::PseudoLongBGEU;
  case RISCV::QC_BEQI:
    return RISCV::PseudoLongQC_BEQI;
  case RISCV::QC_BNEI:
    return RISCV::PseudoLongQC_BNEI;
  case RISCV::QC_BLTI:
    return RISCV::PseudoLongQC_BLTI;
  case RISCV::QC_BGEI:
    return RISCV::PseudoLongQC_BGEI;
  case RISCV::QC_BLTUI:
    return RISCV::PseudoLongQC_BLTUI;
  case RISCV::QC_BGEUI:
    return RISCV::PseudoLongQC_BGEUI;
  case RISCV::QC_E_BEQI:
    return RISCV::PseudoLongQC_E_BEQI;
  case RISCV::QC_E_BNEI:
    return RISCV::PseudoLongQC_E_BNEI;
  case RISCV::QC_E_BLTI:
    return RISCV::PseudoLongQC_E_BLTI;
  case RISCV::QC_E_BGEI:
    return RISCV::PseudoLongQC_E_BGEI;
  case RISCV::QC_E_BLTUI:
    return RISCV::PseudoLongQC_E_BLTUI;
  case RISCV::QC_E_BGEUI:
    return RISCV::PseudoLongQC_E_BGEUI;
  }

  // Returning the original opcode means we cannot relax the instruction.
  return Inst.getOpcode();
}

void RISCVAsmBackend::relaxInstruction(MCInst &Inst,
                                       const MCSubtargetInfo &STI) const {
  if (STI.hasFeature(RISCV::FeatureExactAssembly))
    return;

  MCInst Res;
  switch (Inst.getOpcode()) {
  default:
    llvm_unreachable("Opcode not expected!");
  case RISCV::C_BEQZ:
  case RISCV::C_BNEZ:
  case RISCV::C_J:
  case RISCV::C_JAL: {
    [[maybe_unused]] bool Success = RISCVRVC::uncompress(Res, Inst, STI);
    assert(Success && "Can't uncompress instruction");
    assert(Res.getOpcode() == getRelaxedOpcode(Inst, STI) &&
           "Branch Relaxation Error");
    break;
  }
  case RISCV::JAL: {
    // This has to be written manually because the QC.E.J -> JAL mapping is
    // compression-only, so it is not used when printing disassembly.
    assert(STI.hasFeature(RISCV::FeatureVendorXqcilb) &&
           "JAL is only relaxable with Xqcilb");
    assert((Inst.getOperand(0).getReg() == RISCV::X0 ||
            Inst.getOperand(0).getReg() == RISCV::X1) &&
           "JAL only relaxable with rd=x0 or rd=x1");
    Res.setOpcode(getRelaxedOpcode(Inst, STI));
    Res.addOperand(Inst.getOperand(1));
    break;
  }
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
  case RISCV::QC_BEQI:
  case RISCV::QC_BNEI:
  case RISCV::QC_BLTI:
  case RISCV::QC_BGEI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BEQI:
  case RISCV::QC_E_BNEI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_E_BGEUI:
    Res.setOpcode(getRelaxedOpcode(Inst, STI));
    Res.addOperand(Inst.getOperand(0));
    Res.addOperand(Inst.getOperand(1));
    Res.addOperand(Inst.getOperand(2));
    break;
  }
  Inst = std::move(Res);
}

bool RISCVAsmBackend::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF,
                                         bool &WasRelaxed) const {
  MCContext &C = getContext();

  int64_t LineDelta = DF.getLineDelta();
  const MCExpr &AddrDelta = DF.getAddrDelta();
  SmallVector<MCFixup, 1> Fixups;
  size_t OldSize = DF.getContents().size();

  int64_t Value;
  [[maybe_unused]] bool IsAbsolute =
      AddrDelta.evaluateKnownAbsolute(Value, *Asm);
  assert(IsAbsolute && "invalid line-addr delta expression");

  Fixups.clear();
  SmallVector<char> Data;
  raw_svector_ostream OS(Data);

  // INT64_MAX is a signal that this is actually a DW_LNE_end_sequence.
  if (LineDelta != INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_advance_line);
    encodeSLEB128(LineDelta, OS);
  }

  unsigned Offset;
  std::pair<MCFixupKind, MCFixupKind> Fixup;

  // According to the DWARF specification, the `DW_LNS_fixed_advance_pc` opcode
  // takes a single unsigned half (unencoded) operand. The maximum encodable
  // value is therefore 65535. Set a conservative upper bound for relaxation.
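  // Schematically, the two encodings emitted below are:
  //   DW_LNS_fixed_advance_pc, <uhalf delta>                (small delta)
  //   DW_LNS_extended_op, <len>, DW_LNE_set_address, <addr> (large delta)
  // with a pair of relocations patching the zero-filled operand.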
  if (Value > 60000) {
    unsigned PtrSize = C.getAsmInfo()->getCodePointerSize();

    OS << uint8_t(dwarf::DW_LNS_extended_op);
    encodeULEB128(PtrSize + 1, OS);

    OS << uint8_t(dwarf::DW_LNE_set_address);
    Offset = OS.tell();
    assert((PtrSize == 4 || PtrSize == 8) && "Unexpected pointer size");
    Fixup = RISCV::getRelocPairForSize(PtrSize);
    OS.write_zeros(PtrSize);
  } else {
    OS << uint8_t(dwarf::DW_LNS_fixed_advance_pc);
    Offset = OS.tell();
    Fixup = RISCV::getRelocPairForSize(2);
    support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
  }

  const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
  Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(Fixup)));
  Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(Fixup)));

  if (LineDelta == INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_extended_op);
    OS << uint8_t(1);
    OS << uint8_t(dwarf::DW_LNE_end_sequence);
  } else {
    OS << uint8_t(dwarf::DW_LNS_copy);
  }

  DF.setContents(Data);
  DF.setFixups(Fixups);
  WasRelaxed = OldSize != Data.size();
  return true;
}

bool RISCVAsmBackend::relaxDwarfCFA(MCDwarfCallFrameFragment &DF,
                                    bool &WasRelaxed) const {
  const MCExpr &AddrDelta = DF.getAddrDelta();
  SmallVector<MCFixup, 2> Fixups;
  size_t OldSize = DF.getContents().size();

  int64_t Value;
  if (AddrDelta.evaluateAsAbsolute(Value, *Asm))
    return false;
  [[maybe_unused]] bool IsAbsolute =
      AddrDelta.evaluateKnownAbsolute(Value, *Asm);
  assert(IsAbsolute && "CFA with invalid expression");

  assert(getContext().getAsmInfo()->getMinInstAlignment() == 1 &&
         "expected 1-byte alignment");
  if (Value == 0) {
    DF.clearContents();
    DF.clearFixups();
    WasRelaxed = OldSize != DF.getContents().size();
    return true;
  }

  auto AddFixups = [&Fixups, &AddrDelta](unsigned Offset,
                                         std::pair<unsigned, unsigned> Fixup) {
    const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
    Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(Fixup)));
    Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(Fixup)));
  };

  SmallVector<char, 8> Data;
  raw_svector_ostream OS(Data);
  if (isUIntN(6, Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc);
    AddFixups(0, {ELF::R_RISCV_SET6, ELF::R_RISCV_SUB6});
  } else if (isUInt<8>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc1);
    support::endian::write<uint8_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, {ELF::R_RISCV_SET8, ELF::R_RISCV_SUB8});
  } else if (isUInt<16>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc2);
    support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, {ELF::R_RISCV_SET16, ELF::R_RISCV_SUB16});
  } else if (isUInt<32>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc4);
    support::endian::write<uint32_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, {ELF::R_RISCV_SET32, ELF::R_RISCV_SUB32});
  } else {
    llvm_unreachable("unsupported CFA encoding");
  }
  DF.setContents(Data);
  DF.setFixups(Fixups);

  WasRelaxed = OldSize != Data.size();
  return true;
}

std::pair<bool, bool> RISCVAsmBackend::relaxLEB128(MCLEBFragment &LF,
                                                   int64_t &Value) const {
  if (LF.isSigned())
    return std::make_pair(false, false);
  const MCExpr &Expr = LF.getValue();
  if (ULEB128Reloc) {
    LF.addFixup(MCFixup::create(0, &Expr, FK_Data_leb128, Expr.getLoc()));
  }
  return std::make_pair(Expr.evaluateKnownAbsolute(Value, *Asm), false);
}

bool RISCVAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                        const MCSubtargetInfo &STI) const {
  // This function has access to two STIs, the member of the AsmBackend, and
  // the one passed as an argument. The latter is more specific, so we query it
  // for specific features.
  if (STI.hasFeature(RISCV::FeatureExactAssembly))
    return false;

  return getRelaxedOpcode(Inst, STI) != Inst.getOpcode();
}

bool RISCVAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                   const MCSubtargetInfo *STI) const {
  // We mostly follow binutils' convention here: align to an even boundary
  // with 0-fill padding, then emit at most one 2-byte nop (c.nop if Zca is
  // enabled, 0-fill otherwise), and pad the remainder with 4-byte nops.
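  // For example, Count == 7 with Zca yields one 0x00 byte, one c.nop
  // (0x0001), and one 4-byte nop (addi x0, x0, 0 == 0x00000013).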

  // Instructions are always at even addresses, so an odd count means we are
  // in a data area or unaligned for some other reason.
  if (Count % 2) {
    OS.write("\0", 1);
    Count -= 1;
  }

  if (Count % 4 == 2) {
    // The canonical nop with Zca is c.nop.
    OS.write(STI->hasFeature(RISCV::FeatureStdExtZca) ? "\x01\0" : "\0\0", 2);
    Count -= 2;
  }

  // The canonical nop on RISC-V is addi x0, x0, 0.
  for (; Count >= 4; Count -= 4)
    OS.write("\x13\0\0\0", 4);

  return true;
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx) {
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    return Value;
  case RISCV::fixup_riscv_lo12_i:
  case RISCV::fixup_riscv_pcrel_lo12_i:
    return Value & 0xfff;
  case RISCV::fixup_riscv_12_i:
    if (!isInt<12>(Value)) {
      Ctx.reportError(Fixup.getLoc(),
                      "operand must be a constant 12-bit integer");
    }
    return Value & 0xfff;
  case RISCV::fixup_riscv_lo12_s:
  case RISCV::fixup_riscv_pcrel_lo12_s:
    return (((Value >> 5) & 0x7f) << 25) | ((Value & 0x1f) << 7);
  case RISCV::fixup_riscv_hi20:
  case RISCV::fixup_riscv_pcrel_hi20:
    // Add 1 if bit 11 is 1, to compensate for low 12 bits being negative.
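    // For example, Value = 0xffc has lo12 = -4 when sign-extended, so hi20
    // must round up to ((0xffc + 0x800) >> 12) = 1: lui/auipc contributes
    // 0x1000 and the low-part instruction adds -4.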
    return ((Value + 0x800) >> 12) & 0xfffff;
  case RISCV::fixup_riscv_jal: {
    if (!isInt<21>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
    // Need to produce imm[19|10:1|11|19:12] from the 21-bit Value.
    unsigned Sbit = (Value >> 20) & 0x1;
    unsigned Hi8 = (Value >> 12) & 0xff;
    unsigned Mid1 = (Value >> 11) & 0x1;
    unsigned Lo10 = (Value >> 1) & 0x3ff;
    // Inst{31} = Sbit;
    // Inst{30-21} = Lo10;
    // Inst{20} = Mid1;
    // Inst{19-12} = Hi8;
    Value = (Sbit << 19) | (Lo10 << 9) | (Mid1 << 8) | Hi8;
    return Value;
  }
  case RISCV::fixup_riscv_qc_e_branch:
  case RISCV::fixup_riscv_branch: {
    if (!isInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
    // Need to extract imm[12], imm[10:5], imm[4:1], imm[11] from the 13-bit
    // Value.
    unsigned Sbit = (Value >> 12) & 0x1;
    unsigned Hi1 = (Value >> 11) & 0x1;
    unsigned Mid6 = (Value >> 5) & 0x3f;
    unsigned Lo4 = (Value >> 1) & 0xf;
    // Inst{31} = Sbit;
    // Inst{30-25} = Mid6;
    // Inst{11-8} = Lo4;
    // Inst{7} = Hi1;
    Value = (Sbit << 31) | (Mid6 << 25) | (Lo4 << 8) | (Hi1 << 7);
    return Value;
  }
  case RISCV::fixup_riscv_call:
  case RISCV::fixup_riscv_call_plt: {
    // The jalr adds the sign-extended 12-bit LowerImm to UpperImm, so we must
    // add 0x800 before extracting the upper bits to compensate for that sign
    // extension.
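    // For example, Value = 0x12345ffc gives LowerImm = 0xffc (-4 signed) and
    // UpperImm = 0x12346000; auipc materializes 0x12346000 and jalr adds -4.
    // The auipc immediate occupies the low 32 bits of the returned value and
    // the jalr immediate the high 32 bits, since the 64-bit fixup covers both
    // instructions.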
    uint64_t UpperImm = (Value + 0x800ULL) & 0xfffff000ULL;
    uint64_t LowerImm = Value & 0xfffULL;
    return UpperImm | ((LowerImm << 20) << 32);
  }
  case RISCV::fixup_riscv_rvc_jump: {
    if (!isInt<12>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Need to produce offset[11|4|9:8|10|6|7|3:1|5] from the 11-bit Value.
    unsigned Bit11 = (Value >> 11) & 0x1;
    unsigned Bit4 = (Value >> 4) & 0x1;
    unsigned Bit9_8 = (Value >> 8) & 0x3;
    unsigned Bit10 = (Value >> 10) & 0x1;
    unsigned Bit6 = (Value >> 6) & 0x1;
    unsigned Bit7 = (Value >> 7) & 0x1;
    unsigned Bit3_1 = (Value >> 1) & 0x7;
    unsigned Bit5 = (Value >> 5) & 0x1;
    Value = (Bit11 << 10) | (Bit4 << 9) | (Bit9_8 << 7) | (Bit10 << 6) |
            (Bit6 << 5) | (Bit7 << 4) | (Bit3_1 << 1) | Bit5;
    return Value;
  }
  case RISCV::fixup_riscv_rvc_branch: {
    if (!isInt<9>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Need to produce offset[8|4:3], [reg 3 bit], offset[7:6|2:1|5]
    unsigned Bit8 = (Value >> 8) & 0x1;
    unsigned Bit7_6 = (Value >> 6) & 0x3;
    unsigned Bit5 = (Value >> 5) & 0x1;
    unsigned Bit4_3 = (Value >> 3) & 0x3;
    unsigned Bit2_1 = (Value >> 1) & 0x3;
    Value = (Bit8 << 12) | (Bit4_3 << 10) | (Bit7_6 << 5) | (Bit2_1 << 3) |
            (Bit5 << 2);
    return Value;
  }
  case RISCV::fixup_riscv_qc_e_32: {
    if (!isInt<32>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return ((Value & 0xffffffff) << 16);
  }
  case RISCV::fixup_riscv_qc_abs20_u: {
    if (!isInt<20>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    unsigned Bit19 = (Value >> 19) & 0x1;
    unsigned Bit14_0 = Value & 0x7fff;
    unsigned Bit18_15 = (Value >> 15) & 0xf;
    Value = (Bit19 << 31) | (Bit14_0 << 16) | (Bit18_15 << 12);
    return Value;
  }
  case RISCV::fixup_riscv_qc_e_call_plt: {
    if (!isInt<32>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
    uint64_t Bit31_16 = (Value >> 16) & 0xffff;
    uint64_t Bit12 = (Value >> 12) & 0x1;
    uint64_t Bit10_5 = (Value >> 5) & 0x3f;
    uint64_t Bit15_13 = (Value >> 13) & 0x7;
    uint64_t Bit4_1 = (Value >> 1) & 0xf;
    uint64_t Bit11 = (Value >> 11) & 0x1;
    Value = (Bit31_16 << 32ull) | (Bit12 << 31) | (Bit10_5 << 25) |
            (Bit15_13 << 17) | (Bit4_1 << 8) | (Bit11 << 7);
    return Value;
  }
  case RISCV::fixup_riscv_nds_branch_10: {
    if (!isInt<11>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
    // Need to extract imm[10], imm[9:5], imm[4:1] from the 11-bit Value.
    unsigned Sbit = (Value >> 10) & 0x1;
    unsigned Hi5 = (Value >> 5) & 0x1f;
    unsigned Lo4 = (Value >> 1) & 0xf;
    // Inst{31} = Sbit;
    // Inst{29-25} = Hi5;
    // Inst{11-8} = Lo4;
    Value = (Sbit << 31) | (Hi5 << 25) | (Lo4 << 8);
    return Value;
  }
  }
}

bool RISCVAsmBackend::isPCRelFixupResolved(const MCSymbol *SymA,
                                           const MCFragment &F) {
  // If the section does not contain linker-relaxable fragments, PC-relative
  // fixups can be resolved.
  if (!F.getParent()->isLinkerRelaxable())
    return true;

  // Otherwise, check if the offset between the symbol and fragment is fully
  // resolved, unaffected by linker-relaxable fragments (e.g. instructions or
  // offset-affected MCAlignFragment). Complements the generic
  // isSymbolRefDifferenceFullyResolvedImpl.
  if (!PCRelTemp)
    PCRelTemp = getContext().createTempSymbol();
  PCRelTemp->setFragment(const_cast<MCFragment *>(&F));
  MCValue Res;
  MCExpr::evaluateSymbolicAdd(Asm, false, MCValue::get(SymA),
                              MCValue::get(nullptr, PCRelTemp), Res);
  return !Res.getSubSym();
}

// Get the corresponding PC-relative HI fixup that an S_PCREL_LO points to, and
// optionally the fragment containing it.
//
// \returns nullptr if this isn't an S_PCREL_LO pointing to a known PC-relative
// HI fixup.
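//
// For example, in:
//   .Lpcrel_hi0:
//     auipc a0, %pcrel_hi(sym)
//     addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)
// the %pcrel_lo subexpression evaluates to .Lpcrel_hi0, whose fragment and
// offset locate the fixup_riscv_pcrel_hi20 on the auipc.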
static const MCFixup *getPCRelHiFixup(const MCSpecifierExpr &Expr,
                                      const MCFragment **DFOut) {
  MCValue AUIPCLoc;
  if (!Expr.getSubExpr()->evaluateAsRelocatable(AUIPCLoc, nullptr))
    return nullptr;

  const MCSymbol *AUIPCSymbol = AUIPCLoc.getAddSym();
  if (!AUIPCSymbol)
    return nullptr;
  const auto *DF = dyn_cast_or_null<MCDataFragment>(AUIPCSymbol->getFragment());

  if (!DF)
    return nullptr;

  uint64_t Offset = AUIPCSymbol->getOffset();
  if (DF->getContents().size() == Offset) {
    DF = dyn_cast_or_null<MCDataFragment>(DF->getNext());
    if (!DF)
      return nullptr;
    Offset = 0;
  }

  for (const MCFixup &F : DF->getFixups()) {
    if (F.getOffset() != Offset)
      continue;
    auto Kind = F.getTargetKind();
    if (!mc::isRelocation(F.getKind())) {
      if (Kind == RISCV::fixup_riscv_pcrel_hi20) {
        *DFOut = DF;
        return &F;
      }
      break;
    }
    switch (Kind) {
    case ELF::R_RISCV_GOT_HI20:
    case ELF::R_RISCV_TLS_GOT_HI20:
    case ELF::R_RISCV_TLS_GD_HI20:
    case ELF::R_RISCV_TLSDESC_HI20:
      *DFOut = DF;
      return &F;
    }
  }

  return nullptr;
}

bool RISCVAsmBackend::evaluateTargetFixup(const MCFixup &Fixup,
                                          const MCValue &Target,
                                          uint64_t &Value) {
  const MCFixup *AUIPCFixup;
  const MCFragment *AUIPCDF;
  MCValue AUIPCTarget;
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unexpected fixup kind!");
  case RISCV::fixup_riscv_pcrel_lo12_i:
  case RISCV::fixup_riscv_pcrel_lo12_s: {
    AUIPCFixup =
        getPCRelHiFixup(cast<MCSpecifierExpr>(*Fixup.getValue()), &AUIPCDF);
    if (!AUIPCFixup) {
      getContext().reportError(Fixup.getLoc(),
                               "could not find corresponding %pcrel_hi");
      return true;
    }

    // MCAssembler::evaluateFixup will emit an error for this case when it sees
    // the %pcrel_hi, so don't duplicate it when also seeing the %pcrel_lo.
    const MCExpr *AUIPCExpr = AUIPCFixup->getValue();
    if (!AUIPCExpr->evaluateAsRelocatable(AUIPCTarget, Asm))
      return true;
    break;
  }
  }

  if (!AUIPCTarget.getAddSym())
    return false;

  const MCSymbolELF &SA = cast<MCSymbolELF>(*AUIPCTarget.getAddSym());
  if (SA.isUndefined())
    return false;

  bool IsResolved = &SA.getSection() == AUIPCDF->getParent() &&
                    SA.getBinding() == ELF::STB_LOCAL &&
                    SA.getType() != ELF::STT_GNU_IFUNC;
  if (!IsResolved)
    return false;

  Value = Asm->getSymbolOffset(SA) + AUIPCTarget.getConstant();
  Value -= Asm->getFragmentOffset(*AUIPCDF) + AUIPCFixup->getOffset();

  return AUIPCFixup->getTargetKind() == RISCV::fixup_riscv_pcrel_hi20 &&
         isPCRelFixupResolved(AUIPCTarget.getAddSym(), *AUIPCDF);
}

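// If the fixup requires a vendor-specific relocation, emit a paired
// R_RISCV_VENDOR relocation at the same offset whose symbol names the vendor
// ("QUALCOMM", "ANDES"); the psABI uses this marker to qualify the
// nonstandard relocation type that follows it.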
void RISCVAsmBackend::maybeAddVendorReloc(const MCFragment &F,
                                          const MCFixup &Fixup) {
  StringRef VendorIdentifier;
  switch (Fixup.getTargetKind()) {
  default:
    // No vendor relocation required.
    return;
  case RISCV::fixup_riscv_qc_e_branch:
  case RISCV::fixup_riscv_qc_abs20_u:
  case RISCV::fixup_riscv_qc_e_32:
  case RISCV::fixup_riscv_qc_e_call_plt:
    VendorIdentifier = "QUALCOMM";
    break;
  case RISCV::fixup_riscv_nds_branch_10:
    VendorIdentifier = "ANDES";
    break;
  }

  // Create a local symbol for the vendor relocation to reference. It's fine if
  // the symbol has the same name as an existing symbol.
  MCContext &Ctx = Asm->getContext();
  MCSymbol *VendorSymbol = Ctx.createLocalSymbol(VendorIdentifier);
  auto [It, Inserted] =
      VendorSymbols.try_emplace(VendorIdentifier, VendorSymbol);

  if (Inserted) {
    // Set up the just-created symbol.
    VendorSymbol->setVariableValue(MCConstantExpr::create(0, Ctx));
    Asm->registerSymbol(*VendorSymbol);
  } else {
    // Fetch the existing symbol.
    VendorSymbol = It->getValue();
  }

  MCFixup VendorFixup =
      MCFixup::create(Fixup.getOffset(), nullptr, ELF::R_RISCV_VENDOR);
  // Explicitly create MCValue rather than using an MCExpr and evaluating it so
  // that the absolute vendor symbol is not evaluated to constant 0.
  MCValue VendorTarget = MCValue::get(VendorSymbol);
  uint64_t VendorValue;
  Asm->getWriter().recordRelocation(F, VendorFixup, VendorTarget, VendorValue);
}

bool RISCVAsmBackend::addReloc(const MCFragment &F, const MCFixup &Fixup,
                               const MCValue &Target, uint64_t &FixedValue,
                               bool IsResolved) {
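  // A fixed-size label difference such as `.word a - b`, where the operands
  // may be separated by linker-relaxable code, is lowered to a pair of
  // relocations at the same offset (e.g. R_RISCV_ADD32 + R_RISCV_SUB32) so the
  // linker can recompute the difference after relaxation.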
  uint64_t FixedValueA, FixedValueB;
  if (Target.getSubSym()) {
    assert(Target.getSpecifier() == 0 &&
           "relocatable SymA-SymB cannot have relocation specifier");
    unsigned TA = 0, TB = 0;
    switch (Fixup.getKind()) {
    case llvm::FK_Data_1:
      TA = ELF::R_RISCV_ADD8;
      TB = ELF::R_RISCV_SUB8;
      break;
    case llvm::FK_Data_2:
      TA = ELF::R_RISCV_ADD16;
      TB = ELF::R_RISCV_SUB16;
      break;
    case llvm::FK_Data_4:
      TA = ELF::R_RISCV_ADD32;
      TB = ELF::R_RISCV_SUB32;
      break;
    case llvm::FK_Data_8:
      TA = ELF::R_RISCV_ADD64;
      TB = ELF::R_RISCV_SUB64;
      break;
    case llvm::FK_Data_leb128:
      TA = ELF::R_RISCV_SET_ULEB128;
      TB = ELF::R_RISCV_SUB_ULEB128;
      break;
    default:
      llvm_unreachable("unsupported fixup size");
    }
    MCValue A = MCValue::get(Target.getAddSym(), nullptr, Target.getConstant());
    MCValue B = MCValue::get(Target.getSubSym());
    auto FA = MCFixup::create(Fixup.getOffset(), nullptr, TA);
    auto FB = MCFixup::create(Fixup.getOffset(), nullptr, TB);
    Asm->getWriter().recordRelocation(F, FA, A, FixedValueA);
    Asm->getWriter().recordRelocation(F, FB, B, FixedValueB);
    FixedValue = FixedValueA - FixedValueB;
    return false;
  }

  // If linker relaxation is enabled and supported by the current relocation,
  // generate a relocation and then append a RELAX.
  if (Fixup.isLinkerRelaxable())
    IsResolved = false;
  if (IsResolved && Fixup.isPCRel())
    IsResolved = isPCRelFixupResolved(Target.getAddSym(), F);

  if (!IsResolved) {
    // Some fixups require a vendor relocation; record it (directly) before we
    // add the relocation itself.
    maybeAddVendorReloc(F, Fixup);

    Asm->getWriter().recordRelocation(F, Fixup, Target, FixedValue);
  }

  if (Fixup.isLinkerRelaxable()) {
    auto FA = MCFixup::create(Fixup.getOffset(), nullptr, ELF::R_RISCV_RELAX);
    Asm->getWriter().recordRelocation(F, FA, MCValue::get(nullptr),
                                      FixedValueA);
  }

  return false;
}

void RISCVAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
                                 const MCValue &Target,
                                 MutableArrayRef<char> Data, uint64_t Value,
                                 bool IsResolved) {
  IsResolved = addReloc(F, Fixup, Target, Value, IsResolved);
  MCFixupKind Kind = Fixup.getKind();
  if (mc::isRelocation(Kind))
    return;
  MCContext &Ctx = getContext();
  MCFixupKindInfo Info = getFixupKindInfo(Kind);
  if (!Value)
    return; // Doesn't change encoding.
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;

  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i) {
    Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

// Linker relaxation may change code size, so when it is enabled we must insert
// nops for the .align directive; the linker can then satisfy the alignment by
// deleting nops. This function returns the total size of the nops we need to
// insert.
bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
    const MCAlignFragment &AF, unsigned &Size) {
  // Calculate the nop size only when linker relaxation is enabled.
  const MCSubtargetInfo *STI = AF.getSubtargetInfo();
  if (!STI->hasFeature(RISCV::FeatureRelax))
    return false;

  unsigned MinNopLen = STI->hasFeature(RISCV::FeatureStdExtZca) ? 2 : 4;
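  // In the worst case all but one minimal nop may be deleted by the linker,
  // e.g. a 16-byte alignment request with Zca reserves 16 - 2 = 14 bytes of
  // nops.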

  if (AF.getAlignment() <= MinNopLen) {
    return false;
  } else {
    Size = AF.getAlignment().value() - MinNopLen;
    return true;
  }
}

// When linker relaxation is enabled, we need to emit an R_RISCV_ALIGN
// relocation to indicate the position of the nops and the total number of nop
// bytes that were inserted. This function records a fixup that carries the
// R_RISCV_ALIGN relocation type directly.
bool RISCVAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
                                                    MCAlignFragment &AF) {
  // Insert the fixup only when linker relaxation is enabled.
  const MCSubtargetInfo *STI = AF.getSubtargetInfo();
  if (!STI->hasFeature(RISCV::FeatureRelax))
    return false;

  // Calculate the total number of nops we need to insert. If there are none
  // to insert, simply return.
  unsigned Count;
  if (!shouldInsertExtraNopBytesForCodeAlign(AF, Count) || (Count == 0))
    return false;

  MCContext &Ctx = getContext();
  const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
  MCFixup Fixup = MCFixup::create(0, Dummy, ELF::R_RISCV_ALIGN, SMLoc());

  uint64_t FixedValue = 0;
  MCValue NopBytes = MCValue::get(Count);
  Asm.getWriter().recordRelocation(AF, Fixup, NopBytes, FixedValue);
  return true;
}

std::unique_ptr<MCObjectTargetWriter>
RISCVAsmBackend::createObjectTargetWriter() const {
  return createRISCVELFObjectWriter(OSABI, Is64Bit);
}

MCAsmBackend *llvm::createRISCVAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  const Triple &TT = STI.getTargetTriple();
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
  return new RISCVAsmBackend(STI, OSABI, TT.isArch64Bit(), Options);
}