//=- LoongArchMCCodeEmitter.cpp - Convert LoongArch code to machine code --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LoongArchMCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "LoongArchFixupKinds.h"
#include "MCTargetDesc/LoongArchMCAsmInfo.h"
#include "MCTargetDesc/LoongArchMCTargetDesc.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

namespace {
class LoongArchMCCodeEmitter : public MCCodeEmitter {
  LoongArchMCCodeEmitter(const LoongArchMCCodeEmitter &) = delete;
  void operator=(const LoongArchMCCodeEmitter &) = delete;
  MCContext &Ctx;
  MCInstrInfo const &MCII;

public:
  LoongArchMCCodeEmitter(MCContext &ctx, MCInstrInfo const &MCII)
      : Ctx(ctx), MCII(MCII) {}

  ~LoongArchMCCodeEmitter() override = default;

  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  template <unsigned Opc>
  void expandToVectorLDI(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void expandAddTPRel(const MCInst &MI, SmallVectorImpl<char> &CB,
                      SmallVectorImpl<MCFixup> &Fixups,
                      const MCSubtargetInfo &STI) const;

  /// TableGen'erated function for getting the binary encoding for an
  /// instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// Return binary encoding of operand. If the machine operand requires
  /// relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  /// Return binary encoding of an immediate operand specified by OpNo.
  /// The value returned is the value of the immediate minus 1.
  /// Note that this function is dedicated to specific immediate types,
  /// e.g. uimm2_plus1.
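  /// For example, a uimm2_plus1 operand with value 4 is encoded as 3.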
  unsigned getImmOpValueSub1(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  /// Return binary encoding of an immediate operand specified by OpNo.
  /// The value returned is the value of the immediate shifted right
  /// arithmetically by N.
  /// Note that this function is dedicated to specific immediate types,
  /// e.g. simm14_lsl2, simm16_lsl2, simm21_lsl2 and simm26_lsl2.
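  /// For example, with N = 2 a simm16_lsl2 branch offset of 12 is encoded
  /// as 3.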
  template <unsigned N>
  unsigned getImmOpValueAsr(const MCInst &MI, unsigned OpNo,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const {
    const MCOperand &MO = MI.getOperand(OpNo);
    if (MO.isImm()) {
      unsigned Res = MI.getOperand(OpNo).getImm();
      assert((Res & ((1U << N) - 1U)) == 0 && "lowest N bits are non-zero");
      return Res >> N;
    }
    return getExprOpValue(MI, MO, Fixups, STI);
  }

  unsigned getExprOpValue(const MCInst &MI, const MCOperand &MO,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const;
};
} // end namespace

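// Record a fixup at Offset for Value. The branch fixups (b16/b21/b26) are
// PC-relative; every other kind is recorded as an absolute fixup here.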
static void addFixup(SmallVectorImpl<MCFixup> &Fixups, uint32_t Offset,
                     const MCExpr *Value, uint16_t Kind) {
  bool PCRel = false;
  switch (Kind) {
  case LoongArch::fixup_loongarch_b16:
  case LoongArch::fixup_loongarch_b21:
  case LoongArch::fixup_loongarch_b26:
    PCRel = true;
  }
  Fixups.push_back(MCFixup::create(Offset, Value, Kind, PCRel));
}

unsigned
LoongArchMCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {

  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  // MO must be an Expr.
  assert(MO.isExpr());
  return getExprOpValue(MI, MO, Fixups, STI);
}

unsigned
LoongArchMCCodeEmitter::getImmOpValueSub1(const MCInst &MI, unsigned OpNo,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  return MI.getOperand(OpNo).getImm() - 1;
}

unsigned
LoongArchMCCodeEmitter::getExprOpValue(const MCInst &MI, const MCOperand &MO,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  assert(MO.isExpr() && "getExprOpValue expects only expressions");
  bool RelaxCandidate = false;
  bool EnableRelax = STI.hasFeature(LoongArch::FeatureRelax);
  const MCExpr *Expr = MO.getExpr();
  MCExpr::ExprKind Kind = Expr->getKind();
  unsigned FixupKind = LoongArch::fixup_loongarch_invalid;
  if (Kind == MCExpr::Specifier) {
    const LoongArchMCExpr *LAExpr = cast<LoongArchMCExpr>(Expr);
    FixupKind = LAExpr->getSpecifier();
    RelaxCandidate = LAExpr->getRelaxHint();
    switch (uint16_t(LAExpr->getSpecifier())) {
    case LoongArchMCExpr::VK_None:
      llvm_unreachable("Unhandled fixup kind!");
    case ELF::R_LARCH_TLS_LE_ADD_R:
      llvm_unreachable("ELF::R_LARCH_TLS_LE_ADD_R should not represent an "
                       "instruction operand");
    case ELF::R_LARCH_B16:
      FixupKind = LoongArch::fixup_loongarch_b16;
      break;
    case ELF::R_LARCH_B21:
      FixupKind = LoongArch::fixup_loongarch_b21;
      break;
    case ELF::R_LARCH_B26:
      FixupKind = LoongArch::fixup_loongarch_b26;
      break;
    case ELF::R_LARCH_MARK_LA:
      // Match gas behavior: generate `R_LARCH_MARK_LA` relocation when using
      // `la.abs`.
      Fixups.push_back(
          MCFixup::create(0, MCConstantExpr::create(0, Ctx),
                          FirstLiteralRelocationKind + ELF::R_LARCH_MARK_LA));
      [[fallthrough]];
    case ELF::R_LARCH_ABS_HI20:
      FixupKind = LoongArch::fixup_loongarch_abs_hi20;
      break;
    case ELF::R_LARCH_ABS_LO12:
      FixupKind = LoongArch::fixup_loongarch_abs_lo12;
      break;
    case ELF::R_LARCH_ABS64_LO20:
      FixupKind = LoongArch::fixup_loongarch_abs64_lo20;
      break;
    case ELF::R_LARCH_ABS64_HI12:
      FixupKind = LoongArch::fixup_loongarch_abs64_hi12;
      break;
    case ELF::R_LARCH_CALL30:
    case ELF::R_LARCH_CALL36:
    case ELF::R_LARCH_TLS_LE_HI20_R:
    case ELF::R_LARCH_TLS_LE_LO12_R:
      RelaxCandidate = true;
      break;
    }
  } else if (Kind == MCExpr::SymbolRef) {
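    // A bare symbol reference can only be the target of an unadorned branch;
    // pick the fixup kind from the branch opcode's offset width.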
    switch (MI.getOpcode()) {
    default:
      break;
    case LoongArch::BEQ:
    case LoongArch::BNE:
    case LoongArch::BLT:
    case LoongArch::BGE:
    case LoongArch::BLTU:
    case LoongArch::BGEU:
      FixupKind = LoongArch::fixup_loongarch_b16;
      break;
    case LoongArch::BEQZ:
    case LoongArch::BNEZ:
    case LoongArch::BCEQZ:
    case LoongArch::BCNEZ:
      FixupKind = LoongArch::fixup_loongarch_b21;
      break;
    case LoongArch::B:
    case LoongArch::BL:
      FixupKind = LoongArch::fixup_loongarch_b26;
      break;
    }
  }

  assert(FixupKind != LoongArch::fixup_loongarch_invalid &&
         "Unhandled expression!");

  addFixup(Fixups, 0, Expr, FixupKind);
  // If linker relaxation is enabled and supported by this relocation, set
  // a bit so that if the fixup is unresolved, an R_LARCH_RELAX relocation
  // will be appended.
  if (EnableRelax && RelaxCandidate)
    Fixups.back().setLinkerRelaxable();

  return 0;
}

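// Expand the [x]vrepli.{b/h/w/d} pseudos into a single [x]vldi: keep the low
// 10 bits of the immediate and select the element width through bits 11:10 of
// the [x]vldi immediate (00 = B, 01 = H, 10 = W, 11 = D). For example,
// `vrepli.w $vr0, 42` is emitted as a vldi with immediate 0x82a (0x800 | 42).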
template <unsigned Opc>
void LoongArchMCCodeEmitter::expandToVectorLDI(
    const MCInst &MI, SmallVectorImpl<char> &CB,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  int64_t Imm = MI.getOperand(1).getImm() & 0x3FF;
  switch (MI.getOpcode()) {
  case LoongArch::PseudoVREPLI_B:
  case LoongArch::PseudoXVREPLI_B:
    break;
  case LoongArch::PseudoVREPLI_H:
  case LoongArch::PseudoXVREPLI_H:
    Imm |= 0x400;
    break;
  case LoongArch::PseudoVREPLI_W:
  case LoongArch::PseudoXVREPLI_W:
    Imm |= 0x800;
    break;
  case LoongArch::PseudoVREPLI_D:
  case LoongArch::PseudoXVREPLI_D:
    Imm |= 0xC00;
    break;
  }
  MCInst TmpInst = MCInstBuilder(Opc).addOperand(MI.getOperand(0)).addImm(Imm);
  uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);
}

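// Expand PseudoAddTPRel_{W,D} into a plain add.{w,d} on the same operands and
// attach the R_LARCH_TLS_LE_ADD_R relocation (marked linker-relaxable when
// FeatureRelax is enabled) that the TLS LE code sequence requires.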
void LoongArchMCCodeEmitter::expandAddTPRel(const MCInst &MI,
                                            SmallVectorImpl<char> &CB,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  MCOperand Rd = MI.getOperand(0);
  MCOperand Rj = MI.getOperand(1);
  MCOperand Rk = MI.getOperand(2);
  MCOperand Symbol = MI.getOperand(3);
  assert(Symbol.isExpr() &&
         "Expected expression as third input to TP-relative add");

  const LoongArchMCExpr *Expr = dyn_cast<LoongArchMCExpr>(Symbol.getExpr());
  assert(Expr && Expr->getSpecifier() == ELF::R_LARCH_TLS_LE_ADD_R &&
         "Expected %le_add_r relocation on TP-relative symbol");

  // Emit the correct %le_add_r relocation for the symbol.
  addFixup(Fixups, 0, Expr, ELF::R_LARCH_TLS_LE_ADD_R);
  if (STI.hasFeature(LoongArch::FeatureRelax))
    Fixups.back().setLinkerRelaxable();

  // Emit a normal ADD instruction with the given operands.
  unsigned ADD = MI.getOpcode() == LoongArch::PseudoAddTPRel_D
                     ? LoongArch::ADD_D
                     : LoongArch::ADD_W;
  MCInst TmpInst =
      MCInstBuilder(ADD).addOperand(Rd).addOperand(Rj).addOperand(Rk);
  uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI);
  support::endian::write(CB, Binary, llvm::endianness::little);
}

void LoongArchMCCodeEmitter::encodeInstruction(
    const MCInst &MI, SmallVectorImpl<char> &CB,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  // Get byte count of instruction.
  unsigned Size = Desc.getSize();

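  // Expand pseudos that cannot be encoded directly; each expansion writes its
  // own bytes and returns.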
  switch (MI.getOpcode()) {
  default:
    break;
  case LoongArch::PseudoVREPLI_B:
  case LoongArch::PseudoVREPLI_H:
  case LoongArch::PseudoVREPLI_W:
  case LoongArch::PseudoVREPLI_D:
    return expandToVectorLDI<LoongArch::VLDI>(MI, CB, Fixups, STI);
  case LoongArch::PseudoXVREPLI_B:
  case LoongArch::PseudoXVREPLI_H:
  case LoongArch::PseudoXVREPLI_W:
  case LoongArch::PseudoXVREPLI_D:
    return expandToVectorLDI<LoongArch::XVLDI>(MI, CB, Fixups, STI);
  case LoongArch::PseudoAddTPRel_W:
  case LoongArch::PseudoAddTPRel_D:
    return expandAddTPRel(MI, CB, Fixups, STI);
  }

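  // Everything that reaches this point is a fixed-width 4-byte instruction.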
  switch (Size) {
  default:
    llvm_unreachable("Unhandled encodeInstruction length!");
  case 4: {
    uint32_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
    support::endian::write(CB, Bits, llvm::endianness::little);
    break;
  }
  }
}

MCCodeEmitter *llvm::createLoongArchMCCodeEmitter(const MCInstrInfo &MCII,
                                                  MCContext &Ctx) {
  return new LoongArchMCCodeEmitter(Ctx, MCII);
}

#include "LoongArchGenMCCodeEmitter.inc"