//===-- RISCVMCCodeEmitter.cpp - Convert RISC-V code to machine code ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the RISCVMCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVBaseInfo.h"
#include "MCTargetDesc/RISCVFixupKinds.h"
#include "MCTargetDesc/RISCVMCAsmInfo.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
STATISTIC(MCNumFixups, "Number of MC fixups created");

namespace {
class RISCVMCCodeEmitter : public MCCodeEmitter {
  RISCVMCCodeEmitter(const RISCVMCCodeEmitter &) = delete;
  void operator=(const RISCVMCCodeEmitter &) = delete;
  MCContext &Ctx;
  MCInstrInfo const &MCII;

public:
  RISCVMCCodeEmitter(MCContext &ctx, MCInstrInfo const &MCII)
      : Ctx(ctx), MCII(MCII) {}

  ~RISCVMCCodeEmitter() override = default;

  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void expandFunctionCall(const MCInst &MI, SmallVectorImpl<char> &CB,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const;

  void expandTLSDESCCall(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void expandAddTPRel(const MCInst &MI, SmallVectorImpl<char> &CB,
                      SmallVectorImpl<MCFixup> &Fixups,
                      const MCSubtargetInfo &STI) const;

  void expandLongCondBr(const MCInst &MI, SmallVectorImpl<char> &CB,
                        SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI) const;

  void expandQCLongCondBrImm(const MCInst &MI, SmallVectorImpl<char> &CB,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI, unsigned Size) const;

  /// TableGen'erated function for getting the binary encoding for an
  /// instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// Return binary encoding of operand. If the machine operand requires
  /// relocation, record the relocation and return zero.
  uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  uint64_t getImmOpValueMinus1(const MCInst &MI, unsigned OpNo,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  uint64_t getImmOpValueSlist(const MCInst &MI, unsigned OpNo,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  template <unsigned N>
  unsigned getImmOpValueAsrN(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  uint64_t getImmOpValueZibi(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  uint64_t getImmOpValue(const MCInst &MI, unsigned OpNo,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  unsigned getVMaskReg(const MCInst &MI, unsigned OpNo,
                       SmallVectorImpl<MCFixup> &Fixups,
                       const MCSubtargetInfo &STI) const;

  unsigned getRlistOpValue(const MCInst &MI, unsigned OpNo,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned getRlistS0OpValue(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;
};
} // end anonymous namespace

MCCodeEmitter *llvm::createRISCVMCCodeEmitter(const MCInstrInfo &MCII,
                                              MCContext &Ctx) {
  return new RISCVMCCodeEmitter(Ctx, MCII);
}

static void addFixup(SmallVectorImpl<MCFixup> &Fixups, uint32_t Offset,
                     const MCExpr *Value, uint16_t Kind) {
  bool PCRel = false;
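  // The fixup kinds listed below produce PC-relative values; flag them so the
  // generic MC layer evaluates them relative to the fixup's own address.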
  switch (Kind) {
  case ELF::R_RISCV_CALL_PLT:
  case RISCV::fixup_riscv_pcrel_hi20:
  case RISCV::fixup_riscv_pcrel_lo12_i:
  case RISCV::fixup_riscv_pcrel_lo12_s:
  case RISCV::fixup_riscv_jal:
  case RISCV::fixup_riscv_branch:
  case RISCV::fixup_riscv_rvc_jump:
  case RISCV::fixup_riscv_rvc_branch:
  case RISCV::fixup_riscv_call:
  case RISCV::fixup_riscv_call_plt:
  case RISCV::fixup_riscv_qc_e_branch:
  case RISCV::fixup_riscv_qc_e_call_plt:
  case RISCV::fixup_riscv_nds_branch_10:
    PCRel = true;
  }
  Fixups.push_back(MCFixup::create(Offset, Value, Kind, PCRel));
}

// Expand PseudoCALL(Reg), PseudoTAIL and PseudoJump to AUIPC and JALR with
// the appropriate relocation types. We expand these pseudo-instructions while
// encoding them, meaning AUIPC and JALR won't go through the RISC-V MC to MC
// compressed instruction transformation. This is acceptable because AUIPC has
// no 16-bit form and C_JALR has no immediate operand field. We let linker
// relaxation deal with it: when linker relaxation is enabled, AUIPC and JALR
// have a chance to relax to JAL. If the C extension is also enabled, JAL has
// a chance to relax to C_JAL.
void RISCVMCCodeEmitter::expandFunctionCall(const MCInst &MI,
                                            SmallVectorImpl<char> &CB,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  MCInst TmpInst;
  MCOperand Func;
  MCRegister Ra;
  if (MI.getOpcode() == RISCV::PseudoTAIL) {
    Func = MI.getOperand(0);
    Ra = RISCVII::getTailExpandUseRegNo(STI.getFeatureBits());
  } else if (MI.getOpcode() == RISCV::PseudoCALLReg) {
    Func = MI.getOperand(1);
    Ra = MI.getOperand(0).getReg();
  } else if (MI.getOpcode() == RISCV::PseudoCALL) {
    Func = MI.getOperand(0);
    Ra = RISCV::X1;
  } else if (MI.getOpcode() == RISCV::PseudoJump) {
    Func = MI.getOperand(1);
    Ra = MI.getOperand(0).getReg();
  }
  uint32_t Binary;

  assert(Func.isExpr() && "Expected expression");

  const MCExpr *CallExpr = Func.getExpr();

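  // Mach-O object files use a single direct JAL for calls; getImmOpValue maps
  // the call specifier to fixup_riscv_jal when targeting Mach-O.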
  if (STI.getTargetTriple().isOSBinFormatMachO()) {
    MCOperand FuncOp = MCOperand::createExpr(CallExpr);
    if (MI.getOpcode() == RISCV::PseudoTAIL ||
        MI.getOpcode() == RISCV::PseudoJump)
      // Emit JAL X0, Func
      TmpInst = MCInstBuilder(RISCV::JAL).addReg(RISCV::X0).addOperand(FuncOp);
    else
      // Emit JAL Ra, Func
      TmpInst = MCInstBuilder(RISCV::JAL).addReg(Ra).addOperand(FuncOp);
    Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
    support::endian::write(CB, Binary, llvm::endianness::little);
    return;
  }
  // Emit AUIPC Ra, Func with the R_RISCV_CALL_PLT relocation type.
  TmpInst = MCInstBuilder(RISCV::AUIPC).addReg(Ra).addExpr(CallExpr);
  Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);

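  // The call fixup recorded on the AUIPC covers the whole AUIPC+JALR pair, so
  // the JALR below keeps a zero immediate and carries no fixup of its own.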
  if (MI.getOpcode() == RISCV::PseudoTAIL ||
      MI.getOpcode() == RISCV::PseudoJump)
    // Emit JALR X0, Ra, 0
    TmpInst = MCInstBuilder(RISCV::JALR).addReg(RISCV::X0).addReg(Ra).addImm(0);
  else
    // Emit JALR Ra, Ra, 0
    TmpInst = MCInstBuilder(RISCV::JALR).addReg(Ra).addReg(Ra).addImm(0);
  Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);
}

void RISCVMCCodeEmitter::expandTLSDESCCall(const MCInst &MI,
                                           SmallVectorImpl<char> &CB,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  MCOperand SrcSymbol = MI.getOperand(3);
  assert(SrcSymbol.isExpr() &&
         "Expected expression as first input to TLSDESCCALL");
  const auto *Expr = dyn_cast<MCSpecifierExpr>(SrcSymbol.getExpr());
  MCRegister Link = MI.getOperand(0).getReg();
  MCRegister Dest = MI.getOperand(1).getReg();
  int64_t Imm = MI.getOperand(2).getImm();
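  // R_RISCV_TLSDESC_CALL annotates the indirect call so the linker can
  // recognize (and, where possible, relax) the TLSDESC sequence.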
  addFixup(Fixups, 0, Expr, ELF::R_RISCV_TLSDESC_CALL);
  MCInst Call =
      MCInstBuilder(RISCV::JALR).addReg(Link).addReg(Dest).addImm(Imm);

  uint32_t Binary = getBinaryCodeForInstr(Call, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);
}

// Expand PseudoAddTPRel to a simple ADD with the correct relocation.
void RISCVMCCodeEmitter::expandAddTPRel(const MCInst &MI,
                                        SmallVectorImpl<char> &CB,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  MCOperand DestReg = MI.getOperand(0);
  MCOperand SrcReg = MI.getOperand(1);
  MCOperand TPReg = MI.getOperand(2);
  assert(TPReg.isReg() && TPReg.getReg() == RISCV::X4 &&
         "Expected thread pointer as second input to TP-relative add");

  MCOperand SrcSymbol = MI.getOperand(3);
  assert(SrcSymbol.isExpr() &&
         "Expected expression as third input to TP-relative add");

  const auto *Expr = dyn_cast<MCSpecifierExpr>(SrcSymbol.getExpr());
  assert(Expr && Expr->getSpecifier() == ELF::R_RISCV_TPREL_ADD &&
         "Expected tprel_add relocation on TP-relative symbol");

  addFixup(Fixups, 0, Expr, ELF::R_RISCV_TPREL_ADD);
  if (STI.hasFeature(RISCV::FeatureRelax))
    Fixups.back().setLinkerRelaxable();

  // Emit a normal ADD instruction with the given operands.
  MCInst TmpInst = MCInstBuilder(RISCV::ADD)
                       .addOperand(DestReg)
                       .addOperand(SrcReg)
                       .addOperand(TPReg);
  uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);
}
static unsigned getInvertedBranchOp(unsigned BrOp) {
  switch (BrOp) {
  default:
    llvm_unreachable("Unexpected branch opcode!");
  case RISCV::PseudoLongBEQ:
    return RISCV::BNE;
  case RISCV::PseudoLongBNE:
    return RISCV::BEQ;
  case RISCV::PseudoLongBLT:
    return RISCV::BGE;
  case RISCV::PseudoLongBGE:
    return RISCV::BLT;
  case RISCV::PseudoLongBLTU:
    return RISCV::BGEU;
  case RISCV::PseudoLongBGEU:
    return RISCV::BLTU;
  case RISCV::PseudoLongQC_BEQI:
    return RISCV::QC_BNEI;
  case RISCV::PseudoLongQC_BNEI:
    return RISCV::QC_BEQI;
  case RISCV::PseudoLongQC_BLTI:
    return RISCV::QC_BGEI;
  case RISCV::PseudoLongQC_BGEI:
    return RISCV::QC_BLTI;
  case RISCV::PseudoLongQC_BLTUI:
    return RISCV::QC_BGEUI;
  case RISCV::PseudoLongQC_BGEUI:
    return RISCV::QC_BLTUI;
  case RISCV::PseudoLongQC_E_BEQI:
    return RISCV::QC_E_BNEI;
  case RISCV::PseudoLongQC_E_BNEI:
    return RISCV::QC_E_BEQI;
  case RISCV::PseudoLongQC_E_BLTI:
    return RISCV::QC_E_BGEI;
  case RISCV::PseudoLongQC_E_BGEI:
    return RISCV::QC_E_BLTI;
  case RISCV::PseudoLongQC_E_BLTUI:
    return RISCV::QC_E_BGEUI;
  case RISCV::PseudoLongQC_E_BGEUI:
    return RISCV::QC_E_BLTUI;
  }
}

// Expand PseudoLongBxx to an inverted conditional branch and an unconditional
// jump.
void RISCVMCCodeEmitter::expandLongCondBr(const MCInst &MI,
                                          SmallVectorImpl<char> &CB,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  MCRegister SrcReg1 = MI.getOperand(0).getReg();
  MCRegister SrcReg2 = MI.getOperand(1).getReg();
  MCOperand SrcSymbol = MI.getOperand(2);
  unsigned Opcode = MI.getOpcode();
  bool IsEqTest =
      Opcode == RISCV::PseudoLongBNE || Opcode == RISCV::PseudoLongBEQ;

  bool UseCompressedBr = false;
  if (IsEqTest && STI.hasFeature(RISCV::FeatureStdExtZca)) {
    if (RISCV::X8 <= SrcReg1.id() && SrcReg1.id() <= RISCV::X15 &&
        SrcReg2.id() == RISCV::X0) {
      UseCompressedBr = true;
    } else if (RISCV::X8 <= SrcReg2.id() && SrcReg2.id() <= RISCV::X15 &&
               SrcReg1.id() == RISCV::X0) {
      std::swap(SrcReg1, SrcReg2);
      UseCompressedBr = true;
    }
  }

  uint32_t Offset;
  if (UseCompressedBr) {
    unsigned InvOpc =
        Opcode == RISCV::PseudoLongBNE ? RISCV::C_BEQZ : RISCV::C_BNEZ;
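    // The inverted 2-byte compressed branch skips itself plus the 4-byte JAL,
    // hence the branch offset of 6.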
    MCInst TmpInst = MCInstBuilder(InvOpc).addReg(SrcReg1).addImm(6);
    uint16_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
    support::endian::write<uint16_t>(CB, Binary, llvm::endianness::little);
    Offset = 2;
  } else {
    unsigned InvOpc = getInvertedBranchOp(Opcode);
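    // The inverted 4-byte branch skips itself plus the 4-byte JAL, hence the
    // branch offset of 8.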
    MCInst TmpInst =
        MCInstBuilder(InvOpc).addReg(SrcReg1).addReg(SrcReg2).addImm(8);
    uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
    support::endian::write(CB, Binary, llvm::endianness::little);
    Offset = 4;
  }

  // Save the number of fixups.
  size_t FixupStartIndex = Fixups.size();

  // Emit an unconditional jump to the destination.
  MCInst TmpInst =
      MCInstBuilder(RISCV::JAL).addReg(RISCV::X0).addOperand(SrcSymbol);
  uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);

  // Drop any fixups added above so we can add the correct one for the jump.
  Fixups.resize(FixupStartIndex);

  if (SrcSymbol.isExpr()) {
    addFixup(Fixups, Offset, SrcSymbol.getExpr(), RISCV::fixup_riscv_jal);
    if (STI.hasFeature(RISCV::FeatureRelax))
      Fixups.back().setLinkerRelaxable();
  }
}

// Expand PseudoLongQC_(E_)Bxxx to an inverted conditional branch and an
// unconditional jump.
void RISCVMCCodeEmitter::expandQCLongCondBrImm(const MCInst &MI,
                                               SmallVectorImpl<char> &CB,
                                               SmallVectorImpl<MCFixup> &Fixups,
                                               const MCSubtargetInfo &STI,
                                               unsigned Size) const {
  MCRegister SrcReg1 = MI.getOperand(0).getReg();
  auto BrImm = MI.getOperand(1).getImm();
  MCOperand SrcSymbol = MI.getOperand(2);
  unsigned Opcode = MI.getOpcode();
  uint32_t Offset;
  unsigned InvOpc = getInvertedBranchOp(Opcode);
  // Emit inverted conditional branch with offset:
  //   8 (QC.BXXX(4) + JAL(4))
  // or
  //   10 (QC.E.BXXX(6) + JAL(4)).
  if (Size == 4) {
    MCInst TmpBr =
        MCInstBuilder(InvOpc).addReg(SrcReg1).addImm(BrImm).addImm(8);
    uint32_t BrBinary = getBinaryCodeForInstr(TmpBr, Fixups, STI);
    support::endian::write(CB, BrBinary, llvm::endianness::little);
  } else {
    MCInst TmpBr =
        MCInstBuilder(InvOpc).addReg(SrcReg1).addImm(BrImm).addImm(10);
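    // QC.E.BXXX is a 48-bit instruction; keep only the low six bytes of the
    // returned encoding.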
    uint64_t BrBinary =
        getBinaryCodeForInstr(TmpBr, Fixups, STI) & 0xffff'ffff'ffffu;
    SmallVector<char, 8> Encoding;
    support::endian::write(Encoding, BrBinary, llvm::endianness::little);
    assert(Encoding[6] == 0 && Encoding[7] == 0 &&
           "Unexpected encoding for 48-bit instruction");
    Encoding.truncate(6);
    CB.append(Encoding);
  }
  Offset = Size;
  // Save the number of fixups.
  size_t FixupStartIndex = Fixups.size();
  // Emit an unconditional jump to the destination.
  MCInst TmpJ =
      MCInstBuilder(RISCV::JAL).addReg(RISCV::X0).addOperand(SrcSymbol);
  uint32_t JBinary = getBinaryCodeForInstr(TmpJ, Fixups, STI);
  support::endian::write(CB, JBinary, llvm::endianness::little);
  // Drop any fixups added above so we can add the correct one for the jump.
  Fixups.resize(FixupStartIndex);
  if (SrcSymbol.isExpr()) {
    addFixup(Fixups, Offset, SrcSymbol.getExpr(), RISCV::fixup_riscv_jal);
    if (STI.hasFeature(RISCV::FeatureRelax))
      Fixups.back().setLinkerRelaxable();
  }
}

void RISCVMCCodeEmitter::encodeInstruction(const MCInst &MI,
                                           SmallVectorImpl<char> &CB,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  // Get the byte count of the instruction.
  unsigned Size = Desc.getSize();

  // RISCVInstrInfo::getInstSizeInBytes expects the Size field in each pseudo's
  // tablegen definition to match the total size of the instructions the pseudo
  // expands to.
  switch (MI.getOpcode()) {
  default:
    break;
  case RISCV::PseudoCALLReg:
  case RISCV::PseudoCALL:
  case RISCV::PseudoTAIL:
  case RISCV::PseudoJump:
    expandFunctionCall(MI, CB, Fixups, STI);
    MCNumEmitted += 2;
    return;
  case RISCV::PseudoAddTPRel:
    expandAddTPRel(MI, CB, Fixups, STI);
    MCNumEmitted += 1;
    return;
  case RISCV::PseudoLongBEQ:
  case RISCV::PseudoLongBNE:
  case RISCV::PseudoLongBLT:
  case RISCV::PseudoLongBGE:
  case RISCV::PseudoLongBLTU:
  case RISCV::PseudoLongBGEU:
    expandLongCondBr(MI, CB, Fixups, STI);
    MCNumEmitted += 2;
    return;
  case RISCV::PseudoLongQC_BEQI:
  case RISCV::PseudoLongQC_BNEI:
  case RISCV::PseudoLongQC_BLTI:
  case RISCV::PseudoLongQC_BGEI:
  case RISCV::PseudoLongQC_BLTUI:
  case RISCV::PseudoLongQC_BGEUI:
    expandQCLongCondBrImm(MI, CB, Fixups, STI, 4);
    MCNumEmitted += 2;
    return;
  case RISCV::PseudoLongQC_E_BEQI:
  case RISCV::PseudoLongQC_E_BNEI:
  case RISCV::PseudoLongQC_E_BLTI:
  case RISCV::PseudoLongQC_E_BGEI:
  case RISCV::PseudoLongQC_E_BLTUI:
  case RISCV::PseudoLongQC_E_BGEUI:
    expandQCLongCondBrImm(MI, CB, Fixups, STI, 6);
    MCNumEmitted += 2;
    return;
  case RISCV::PseudoTLSDESCCall:
    expandTLSDESCCall(MI, CB, Fixups, STI);
    MCNumEmitted += 1;
    return;
  }

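  // Everything else is encoded by the TableGen'erated encoder and written out
  // little-endian; 48-bit encodings only occupy the low six bytes.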
  switch (Size) {
  default:
    llvm_unreachable("Unhandled encodeInstruction length!");
  case 2: {
    uint16_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
    support::endian::write<uint16_t>(CB, Bits, llvm::endianness::little);
    break;
  }
  case 4: {
    uint32_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
    support::endian::write(CB, Bits, llvm::endianness::little);
    break;
  }
  case 6: {
    uint64_t Bits = getBinaryCodeForInstr(MI, Fixups, STI) & 0xffff'ffff'ffffu;
    SmallVector<char, 8> Encoding;
    support::endian::write(Encoding, Bits, llvm::endianness::little);
    assert(Encoding[6] == 0 && Encoding[7] == 0 &&
           "Unexpected encoding for 48-bit instruction");
    Encoding.truncate(6);
    CB.append(Encoding);
    break;
  }
  case 8: {
    uint64_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
    support::endian::write(CB, Bits, llvm::endianness::little);
    break;
  }
  }

  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

uint64_t
RISCVMCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {

  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  if (MO.isImm())
    return MO.getImm();

  llvm_unreachable("Unhandled expression!");
  return 0;
}

uint64_t
RISCVMCCodeEmitter::getImmOpValueMinus1(const MCInst &MI, unsigned OpNo,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isImm()) {
    uint64_t Res = MO.getImm();
    return (Res - 1);
  }

  llvm_unreachable("Unhandled expression!");
  return 0;
}

uint64_t
RISCVMCCodeEmitter::getImmOpValueSlist(const MCInst &MI, unsigned OpNo,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  assert(MO.isImm() && "Slist operand must be immediate");

  uint64_t Res = MO.getImm();
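  // Map the legal slist immediates {0, 1, 2, 4, 8, 16, 15, 31} onto their
  // 3-bit encodings.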
  switch (Res) {
  case 0:
    return 0;
  case 1:
    return 1;
  case 2:
    return 2;
  case 4:
    return 3;
  case 8:
    return 4;
  case 16:
    return 5;
  case 15:
    return 6;
  case 31:
    return 7;
  default:
    llvm_unreachable("Unhandled Slist value!");
  }
}

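// Encode an immediate that is required to be a multiple of 2^N by dropping
// its N known-zero low bits.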
template <unsigned N>
unsigned
RISCVMCCodeEmitter::getImmOpValueAsrN(const MCInst &MI, unsigned OpNo,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isImm()) {
    uint64_t Res = MO.getImm();
    assert((Res & ((1 << N) - 1)) == 0 && "LSB is non-zero");
    return Res >> N;
  }

  return getImmOpValue(MI, OpNo, Fixups, STI);
}

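// The Zibi branch-immediate encoding reserves 0 for the value -1; all other
// legal values are encoded as-is.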
uint64_t
RISCVMCCodeEmitter::getImmOpValueZibi(const MCInst &MI, unsigned OpNo,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  assert(MO.isImm() && "Zibi operand must be an immediate");
  int64_t Res = MO.getImm();
  if (Res == -1)
    return 0;

  return Res;
}

uint64_t RISCVMCCodeEmitter::getImmOpValue(const MCInst &MI, unsigned OpNo,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  bool EnableRelax = STI.hasFeature(RISCV::FeatureRelax);
  const MCOperand &MO = MI.getOperand(OpNo);

  MCInstrDesc const &Desc = MCII.get(MI.getOpcode());
  unsigned MIFrm = RISCVII::getFormat(Desc.TSFlags);

  // If the operand is already an immediate, there is nothing to do.
  if (MO.isImm())
    return MO.getImm();

  assert(MO.isExpr() &&
         "getImmOpValue expects only expressions or immediates");
  const MCExpr *Expr = MO.getExpr();
  MCExpr::ExprKind Kind = Expr->getKind();

  // `RelaxCandidate` must be set to `true` in two cases:
  // - The fixup's relocation gets an R_RISCV_RELAX relocation.
  // - The underlying instruction may be relaxed to an instruction that gets an
  //   `R_RISCV_RELAX` relocation.
  //
  // The actual emission of `R_RISCV_RELAX` is handled in
  // `RISCVAsmBackend::applyFixup`.
  bool RelaxCandidate = false;
  auto AsmRelaxToLinkerRelaxable = [&]() -> void {
    if (!STI.hasFeature(RISCV::FeatureExactAssembly))
      RelaxCandidate = true;
  };

  unsigned FixupKind = RISCV::fixup_riscv_invalid;
  if (Kind == MCExpr::Specifier) {
    const auto *RVExpr = cast<MCSpecifierExpr>(Expr);
    FixupKind = RVExpr->getSpecifier();
    switch (RVExpr->getSpecifier()) {
    default:
      assert(FixupKind && FixupKind < FirstTargetFixupKind &&
             "invalid specifier");
      break;
    case ELF::R_RISCV_TPREL_ADD:
      // tprel_add is only used to indicate that a relocation should be emitted
      // for an add instruction used in TP-relative addressing. It should not
      // be expanded as if representing an actual instruction operand, so
      // encountering it here is an error.
      llvm_unreachable(
          "ELF::R_RISCV_TPREL_ADD should not represent an instruction operand");
    case RISCV::S_LO:
      if (MIFrm == RISCVII::InstFormatI)
        FixupKind = RISCV::fixup_riscv_lo12_i;
      else if (MIFrm == RISCVII::InstFormatS)
        FixupKind = RISCV::fixup_riscv_lo12_s;
      else
        llvm_unreachable("VK_LO used with unexpected instruction format");
      RelaxCandidate = true;
      break;
    case ELF::R_RISCV_HI20:
      FixupKind = RISCV::fixup_riscv_hi20;
      RelaxCandidate = true;
      break;
    case RISCV::S_PCREL_LO:
      if (MIFrm == RISCVII::InstFormatI)
        FixupKind = RISCV::fixup_riscv_pcrel_lo12_i;
      else if (MIFrm == RISCVII::InstFormatS)
        FixupKind = RISCV::fixup_riscv_pcrel_lo12_s;
      else
        llvm_unreachable("VK_PCREL_LO used with unexpected instruction format");
      RelaxCandidate = true;
      break;
    case RISCV::S_PCREL_HI:
      FixupKind = RISCV::fixup_riscv_pcrel_hi20;
      RelaxCandidate = true;
      break;
    case RISCV::S_GOT_HI:
      FixupKind = ELF::R_RISCV_GOT_HI20;
      RelaxCandidate = true;
      break;
    case RISCV::S_TPREL_LO:
      if (MIFrm == RISCVII::InstFormatI)
        FixupKind = ELF::R_RISCV_TPREL_LO12_I;
      else if (MIFrm == RISCVII::InstFormatS)
        FixupKind = ELF::R_RISCV_TPREL_LO12_S;
      else
        llvm_unreachable("VK_TPREL_LO used with unexpected instruction format");
      RelaxCandidate = true;
      break;
    case RISCV::S_CALL_PLT:
      if (Ctx.getTargetTriple().isOSBinFormatMachO()) {
        FixupKind = RISCV::fixup_riscv_jal;
        break;
      }
      FixupKind = RISCV::fixup_riscv_call_plt;
      RelaxCandidate = true;
      break;
    case RISCV::S_QC_ABS20:
      FixupKind = RISCV::fixup_riscv_qc_abs20_u;
      RelaxCandidate = true;
      break;
    case ELF::R_RISCV_GOT_HI20:
    case ELF::R_RISCV_TPREL_HI20:
    case ELF::R_RISCV_TLSDESC_HI20:
      RelaxCandidate = true;
      break;
    }
  } else if (Kind == MCExpr::SymbolRef || Kind == MCExpr::Binary) {
    // FIXME: Binary expressions with a subtraction risk underflow.
    if (MIFrm == RISCVII::InstFormatJ) {
      FixupKind = RISCV::fixup_riscv_jal;
      RelaxCandidate = true;
    } else if (MIFrm == RISCVII::InstFormatB) {
      FixupKind = RISCV::fixup_riscv_branch;
      // Relaxes to B<cc>; JAL, with fixup_riscv_jal.
      AsmRelaxToLinkerRelaxable();
    } else if (MIFrm == RISCVII::InstFormatCJ) {
      FixupKind = RISCV::fixup_riscv_rvc_jump;
      // Relaxes to JAL with fixup_riscv_jal.
      AsmRelaxToLinkerRelaxable();
    } else if (MIFrm == RISCVII::InstFormatCB) {
      FixupKind = RISCV::fixup_riscv_rvc_branch;
      // Relaxes to B<cc>; JAL, with fixup_riscv_jal.
      AsmRelaxToLinkerRelaxable();
    } else if (MIFrm == RISCVII::InstFormatCI) {
      FixupKind = RISCV::fixup_riscv_rvc_imm;
      // Relaxes to `QC.E.LI` with fixup_riscv_qc_e_32.
      if (STI.hasFeature(RISCV::FeatureVendorXqcili))
        AsmRelaxToLinkerRelaxable();
    } else if (MIFrm == RISCVII::InstFormatI) {
      FixupKind = RISCV::fixup_riscv_12_i;
    } else if (MIFrm == RISCVII::InstFormatQC_EB) {
      FixupKind = RISCV::fixup_riscv_qc_e_branch;
      // Relaxes to QC.E.B<cc>I; JAL, with fixup_riscv_jal.
      AsmRelaxToLinkerRelaxable();
    } else if (MIFrm == RISCVII::InstFormatQC_EAI) {
      FixupKind = RISCV::fixup_riscv_qc_e_32;
      RelaxCandidate = true;
    } else if (MIFrm == RISCVII::InstFormatQC_EJ) {
      FixupKind = RISCV::fixup_riscv_qc_e_call_plt;
      RelaxCandidate = true;
    } else if (MIFrm == RISCVII::InstFormatNDS_BRANCH_10) {
      FixupKind = RISCV::fixup_riscv_nds_branch_10;
    }
  }

  assert(FixupKind != RISCV::fixup_riscv_invalid && "Unhandled expression!");

  addFixup(Fixups, 0, Expr, FixupKind);
  // If linker relaxation is enabled and supported by this relocation, set a
  // bit so that the assembler knows the size of the instruction is not fixed,
  // and the relocation will need an R_RISCV_RELAX relocation.
  if (EnableRelax && RelaxCandidate)
    Fixups.back().setLinkerRelaxable();
  ++MCNumFixups;

  return 0;
}

unsigned RISCVMCCodeEmitter::getVMaskReg(const MCInst &MI, unsigned OpNo,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  MCOperand MO = MI.getOperand(OpNo);
  assert(MO.isReg() && "Expected a register.");

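  // V0 is the only architectural mask register: encode masked execution
  // (vm=0) as 0 and an absent mask operand (unmasked, vm=1) as 1.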
  switch (MO.getReg().id()) {
  default:
    llvm_unreachable("Invalid mask register.");
  case RISCV::V0:
    return 0;
  case RISCV::NoRegister:
    return 1;
  }
}

unsigned RISCVMCCodeEmitter::getRlistOpValue(const MCInst &MI, unsigned OpNo,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  assert(MO.isImm() && "Rlist operand must be immediate");
  auto Imm = MO.getImm();
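  // Rlist encodings below 4 are reserved (intended for the EABI variants of
  // the push/pop instructions), so they are rejected here.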
  assert(Imm >= 4 && "EABI is currently not implemented");
  return Imm;
}

unsigned
RISCVMCCodeEmitter::getRlistS0OpValue(const MCInst &MI, unsigned OpNo,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  assert(MO.isImm() && "Rlist operand must be immediate");
  auto Imm = MO.getImm();
  assert(Imm >= 4 && "EABI is currently not implemented");
  assert(Imm != RISCVZC::RA && "Rlist operand must include s0");
  return Imm;
}

#include "RISCVGenMCCodeEmitter.inc"