//===-- AMDGPUMCCodeEmitter.cpp - AMDGPU Code Emitter ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCExpr.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"
#include <optional>

using namespace llvm;

namespace {

class AMDGPUMCCodeEmitter : public MCCodeEmitter {
  const MCRegisterInfo &MRI;
  const MCInstrInfo &MCII;

public:
  AMDGPUMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI)
      : MRI(MRI), MCII(MCII) {}

  /// Encode the instruction and write it to CB.
  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16Lo128(const MCInst &MI, unsigned OpNo, APInt &Op,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const;

  void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

private:
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
  void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
                               unsigned OpNo, APInt &Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// Encode an fp or int literal.
  std::optional<uint64_t>
  getLitEncoding(const MCInstrDesc &Desc, const MCOperand &MO, unsigned OpNo,
                 const MCSubtargetInfo &STI,
                 bool HasMandatoryLiteral = false) const;

  void getBinaryCodeForInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
                             APInt &Inst, APInt &Scratch,
                             const MCSubtargetInfo &STI) const;

  template <bool HasSrc0, bool HasSrc1, bool HasSrc2>
  APInt postEncodeVOP3(const MCInst &MI, APInt EncodedValue,
                       const MCSubtargetInfo &STI) const;

  APInt postEncodeVOPCX(const MCInst &MI, APInt EncodedValue,
                        const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createAMDGPUMCCodeEmitter(const MCInstrInfo &MCII,
                                               MCContext &Ctx) {
  return new AMDGPUMCCodeEmitter(MCII, *Ctx.getRegisterInfo());
}

static void addFixup(SmallVectorImpl<MCFixup> &Fixups, uint32_t Offset,
                     const MCExpr *Value, uint16_t Kind, bool PCRel = false) {
  Fixups.push_back(MCFixup::create(Offset, Value, Kind, PCRel));
}

// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

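// Returns the hardware operand encoding for a 16-bit floating-point value: an
// inline integer constant (128..208), an inline FP constant (240..248), or 255
// when a 32-bit literal must follow the instruction.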
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}

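// Same as getLit16Encoding, but matches the bfloat16 bit patterns of the
// inline FP constants.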
static uint32_t getLitBF16Encoding(uint16_t Val) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  // clang-format off
  switch (Val) {
  case 0x3F00: return 240; // 0.5
  case 0xBF00: return 241; // -0.5
  case 0x3F80: return 242; // 1.0
  case 0xBF80: return 243; // -1.0
  case 0x4000: return 244; // 2.0
  case 0xC000: return 245; // -2.0
  case 0x4080: return 246; // 4.0
  case 0xC080: return 247; // -4.0
  case 0x3E22: return 248; // 1.0 / (2.0 * pi)
  default: return 255;
  }
  // clang-format on
}

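// Returns the hardware operand encoding for a 32-bit value: an inline integer
// or FP constant when one matches, otherwise 255 to request a trailing 32-bit
// literal.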
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint32_t>(0.5f))
    return 240;

  if (Val == llvm::bit_cast<uint32_t>(-0.5f))
    return 241;

  if (Val == llvm::bit_cast<uint32_t>(1.0f))
    return 242;

  if (Val == llvm::bit_cast<uint32_t>(-1.0f))
    return 243;

  if (Val == llvm::bit_cast<uint32_t>(2.0f))
    return 244;

  if (Val == llvm::bit_cast<uint32_t>(-2.0f))
    return 245;

  if (Val == llvm::bit_cast<uint32_t>(4.0f))
    return 246;

  if (Val == llvm::bit_cast<uint32_t>(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}

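// 16-bit integer operands reuse the 32-bit inline constant and literal rules.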
static uint32_t getLit16IntEncoding(uint32_t Val, const MCSubtargetInfo &STI) {
  return getLit32Encoding(Val, STI);
}

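// Returns the hardware operand encoding for a 64-bit value: an inline constant
// when one matches, 254 to request a trailing 64-bit literal (only if the
// subtarget has 64-bit literals and the encoding is not VOP3/VOP3P), or 255 to
// request a trailing 32-bit literal.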
static uint32_t getLit64Encoding(const MCInstrDesc &Desc, uint64_t Val,
                                 const MCSubtargetInfo &STI, bool IsFP) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint64_t>(0.5))
    return 240;

  if (Val == llvm::bit_cast<uint64_t>(-0.5))
    return 241;

  if (Val == llvm::bit_cast<uint64_t>(1.0))
    return 242;

  if (Val == llvm::bit_cast<uint64_t>(-1.0))
    return 243;

  if (Val == llvm::bit_cast<uint64_t>(2.0))
    return 244;

  if (Val == llvm::bit_cast<uint64_t>(-2.0))
    return 245;

  if (Val == llvm::bit_cast<uint64_t>(4.0))
    return 246;

  if (Val == llvm::bit_cast<uint64_t>(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  // The remainder of this function must stay in sync with
  // AMDGPUInstPrinter::printLiteral64.

  bool CanUse64BitLiterals =
      STI.hasFeature(AMDGPU::Feature64BitLiterals) &&
      !(Desc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P));
  if (IsFP) {
    return CanUse64BitLiterals && Lo_32(Val) ? 254 : 255;
  }

  return CanUse64BitLiterals && (!isInt<32>(Val) || !isUInt<32>(Val)) ? 254
                                                                      : 255;
}

std::optional<uint64_t> AMDGPUMCCodeEmitter::getLitEncoding(
    const MCInstrDesc &Desc, const MCOperand &MO, unsigned OpNo,
    const MCSubtargetInfo &STI, bool HasMandatoryLiteral) const {
  const MCOperandInfo &OpInfo = Desc.operands()[OpNo];
  int64_t Imm = 0;
  if (MO.isExpr()) {
    if (!MO.getExpr()->evaluateAsAbsolute(Imm) ||
        AMDGPU::isLitExpr(MO.getExpr())) {
      if (OpInfo.OperandType == AMDGPU::OPERAND_KIMM16 ||
          OpInfo.OperandType == AMDGPU::OPERAND_KIMM32 ||
          OpInfo.OperandType == AMDGPU::OPERAND_KIMM64)
        return Imm;
      if (STI.hasFeature(AMDGPU::Feature64BitLiterals) &&
          AMDGPU::getOperandSize(OpInfo) == 8)
        return 254;
      return 255;
    }
  } else {
    assert(!MO.isDFPImm());

    if (!MO.isImm())
      return {};

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_INLINE_SPLIT_BARRIER_INT32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    return getLit64Encoding(Desc, static_cast<uint64_t>(Imm), STI,
                            /*IsFP=*/false);

  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return getLit64Encoding(Desc, static_cast<uint64_t>(Imm), STI,
                            /*IsFP=*/true);

  case AMDGPU::OPERAND_REG_IMM_FP64: {
    auto Enc = getLit64Encoding(Desc, static_cast<uint64_t>(Imm), STI,
                                /*IsFP=*/true);
    return (HasMandatoryLiteral && Enc == 255) ? 254 : Enc;
  }

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    return getLit16IntEncoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    // FIXME Is this correct? What do inline immediates do on SI for f16 src
    // which does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_BF16:
  case AMDGPU::OPERAND_REG_INLINE_C_BF16:
    // We don't actually need to check Inv2Pi here because BF16 instructions
    // can only be emitted for targets that already support the feature.
    return getLitBF16Encoding(static_cast<uint16_t>(Imm));

  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    return AMDGPU::getInlineEncodingV2I16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    return AMDGPU::getInlineEncodingV2F16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_REG_IMM_V2FP16_SPLAT:
    // V_PK_FMAC_F16 has different inline constant behavior on pre-GFX11 vs
    // GFX11+: pre-GFX11 produces (f16, 0), GFX11+ duplicates f16 to both
    // halves.
    return AMDGPU::getPKFMACF16InlineEncoding(static_cast<uint32_t>(Imm),
                                              AMDGPU::isGFX11Plus(STI))
        .value_or(255);

  case AMDGPU::OPERAND_REG_IMM_V2BF16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
    return AMDGPU::getInlineEncodingV2BF16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
    return 255;

  case AMDGPU::OPERAND_KIMM32:
  case AMDGPU::OPERAND_KIMM16:
  case AMDGPU::OPERAND_KIMM64:
    return Imm;
  default:
    llvm_unreachable("invalid operand size");
  }
}

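// Returns the op_sel_hi bits corresponding to the source operands this VOP3P
// opcode does not have; encodeInstruction() ORs them in so that unused
// op_sel_hi bits default to 1.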
uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;

  if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::op_sel_hi)) {
    if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2))
      return 0;
    if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1))
      return OP_SEL_HI_2;
    if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src0))
      return OP_SEL_HI_1 | OP_SEL_HI_2;
  }
  return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}

void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
                                            SmallVectorImpl<char> &CB,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
  if (((Desc.TSFlags & SIInstrFlags::VOP3P) ||
       Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
       Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) &&
      // Matrix B format operand reuses op_sel_hi.
      !AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::matrix_b_fmt) &&
      // Matrix B scale operand reuses op_sel_hi.
      !AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::matrix_b_scale) &&
      // Matrix B reuse operand reuses op_sel_hi.
      !AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::matrix_b_reuse)) {
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  }

  for (unsigned i = 0; i < bytes; i++) {
    CB.push_back((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
  }

  // NSA encoding.
  if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      CB.push_back((uint8_t)Encoding.getLimitedValue());
    }
    CB.append(NumPadding, 0);
  }

  if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
      (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
    return;

  // Do not emit literals from SISrc operands for instructions with mandatory
  // literals.
  if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm))
    return;

  // Check for additional literals
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    auto Enc = getLitEncoding(Desc, Op, i, STI);
    if (!Enc || (*Enc != 255 && *Enc != 254))
      continue;

    // Yes! Encode it
    int64_t Imm = 0;

    bool IsLit = false;
    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr())) {
        Imm = C->getValue();
      } else if (AMDGPU::isLitExpr(Op.getExpr())) {
        IsLit = true;
        Imm = AMDGPU::getLitValue(Op.getExpr());
      }
    } else // Exprs will be replaced with a fixup value.
      llvm_unreachable("Must be immediate or expr");

    if (*Enc == 254) {
      assert(STI.hasFeature(AMDGPU::Feature64BitLiterals));
      support::endian::write<uint64_t>(CB, Imm, llvm::endianness::little);
    } else {
      auto OpType =
          static_cast<AMDGPU::OperandType>(Desc.operands()[i].OperandType);
      Imm = AMDGPU::encode32BitLiteral(Imm, OpType, IsLit);
      support::endian::write<uint32_t>(CB, Imm, llvm::endianness::little);
    }

    // Only one literal value allowed
    break;
  }
}

void AMDGPUMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    addFixup(Fixups, 0, Expr, AMDGPU::fixup_si_sopp_br, /*PCRel=*/true);
    Op = APInt::getZero(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}

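// Encodes the immediate offset field of an SMEM instruction.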
void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}

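// Encodes an SDWA source operand. VGPR sources use their register index; SGPR
// sources and inline constants additionally set SDWA9EncValues::SRC_SGPR_MASK.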
void AMDGPUMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    MCRegister Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    Op = RegEnc;
    return;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    auto Enc = getLitEncoding(Desc, MO, OpNo, STI);
    if (Enc && *Enc != 255) {
      Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
      return;
    }
  }

  llvm_unreachable("Unsupported operand kind");
}

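// Encodes the destination of an SDWA VOPC comparison: VCC is encoded as zero,
// while any other SGPR destination sets VOPC_DST_VCC_MASK together with its
// register index.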
void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  MCRegister Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}

void AMDGPUMCCodeEmitter::getAVOperandEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  MCRegister Reg = MI.getOperand(OpNo).getReg();
  unsigned Enc = MRI.getEncodingValue(Reg);
  unsigned Idx = Enc & AMDGPU::HWEncoding::LO256_REG_IDX_MASK;
  bool IsVGPROrAGPR =
      Enc & (AMDGPU::HWEncoding::IS_VGPR | AMDGPU::HWEncoding::IS_AGPR);

  // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
  // instructions use acc[0:1] modifier bits to distinguish them. These bits
  // are encoded as a virtual 9th bit of the register for these operands.
  bool IsAGPR = Enc & AMDGPU::HWEncoding::IS_AGPR;

  Op = Idx | (IsVGPROrAGPR << 8) | (IsAGPR << 9);
}

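// Returns true if a fixup for this expression should be PC-relative. Symbol
// references with absolute specifiers (S_ABS32_LO/HI, S_ABS64) and difference
// expressions are treated as absolute.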
static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    auto Spec = AMDGPU::getSpecifier(SE);
    return Spec != AMDGPUMCExpr::S_ABS32_LO &&
           Spec != AMDGPUMCExpr::S_ABS32_HI && Spec != AMDGPUMCExpr::S_ABS64;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Specifier:
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}

void AMDGPUMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO, APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::LO256_REG_IDX_MASK;
    bool IsVGPROrAGPR =
        Enc & (AMDGPU::HWEncoding::IS_VGPR | AMDGPU::HWEncoding::IS_AGPR);
    Op = Idx | (IsVGPROrAGPR << 8);
    return;
  }
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR;
    Op = Idx | (IsVGPR << 8);
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
  // VGPRs include the suffix/op_sel bit in the register encoding, but
  // immediates and SGPRs include it in src_modifiers. Therefore, copy the
  // op_sel bit from the src operands into src_modifier operands if Op is
  // src_modifiers and the corresponding src is a VGPR.
  int SrcMOIdx = -1;
  assert(OpNo < INT_MAX);
  if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                       MI.getOpcode(), AMDGPU::OpName::src0_modifiers)) {
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
    int VDstMOIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst);
    if (VDstMOIdx != -1) {
      auto DstReg = MI.getOperand(VDstMOIdx).getReg();
      if (AMDGPU::isHi16Reg(DstReg, MRI))
        Op |= SISrcMods::DST_OP_SEL;
    }
  } else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                              MI.getOpcode(), AMDGPU::OpName::src1_modifiers))
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
  else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                            MI.getOpcode(), AMDGPU::OpName::src2_modifiers))
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src2);
  if (SrcMOIdx == -1)
    return;

  const MCOperand &SrcMO = MI.getOperand(SrcMOIdx);
  if (!SrcMO.isReg())
    return;
  auto SrcReg = SrcMO.getReg();
  if (AMDGPU::isSGPR(SrcReg, &MRI))
    return;
  if (AMDGPU::isHi16Reg(SrcReg, MRI))
    Op |= SISrcMods::OP_SEL_0;
}

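// Encodes a true16 operand restricted to VGPR0-VGPR127: bits [6:0] hold the
// register index, bit 7 selects the high 16-bit half, and bit 8 is set for
// VGPRs.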
void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    uint16_t Encoding = MRI.getEncodingValue(MO.getReg());
    unsigned RegIdx = Encoding & AMDGPU::HWEncoding::LO256_REG_IDX_MASK;
    bool IsHi = Encoding & AMDGPU::HWEncoding::IS_HI16;
    bool IsVGPR = Encoding & AMDGPU::HWEncoding::IS_VGPR;
    assert((!IsVGPR || isUInt<7>(RegIdx)) && "VGPR0-VGPR127 expected!");
    Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  bool isLikeImm = false;
  int64_t Val;

  if (MO.isImm()) {
    Val = MO.getImm();
    isLikeImm = true;
  } else if (MO.isExpr() && MO.getExpr()->evaluateAsAbsolute(Val)) {
    isLikeImm = true;
  } else if (MO.isExpr()) {
    // FIXME: Whether this expression is PCRel should not depend on what the
    // expression looks like. Given that this is just a general expression,
    // it should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)
    //
    // and expecting a PCRel should instead produce
    //
    // .Ltmp1:
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    bool PCRel = needsPCRel(MO.getExpr());
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);
    unsigned Size = AMDGPU::getOperandSize(Desc, OpNo);
    MCFixupKind Kind = MCFixup::getDataKindForSize(Size);
    addFixup(Fixups, Offset, MO.getExpr(), Kind, PCRel);
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    bool HasMandatoryLiteral =
        AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm);
    if (auto Enc = getLitEncoding(Desc, MO, OpNo, STI, HasMandatoryLiteral)) {
      Op = *Enc;
      return;
    }

    llvm_unreachable("Operand not supported for SISrc");
  }

  if (isLikeImm) {
    Op = Val;
    return;
  }

  llvm_unreachable("Encoding of this operand type is not supported yet.");
}

template <bool HasSrc0, bool HasSrc1, bool HasSrc2>
APInt AMDGPUMCCodeEmitter::postEncodeVOP3(const MCInst &MI, APInt EncodedValue,
                                          const MCSubtargetInfo &STI) const {
  if (!AMDGPU::isGFX10Plus(STI))
    return EncodedValue;
  // Set unused source fields in VOP3 encodings to inline immediate 0 to avoid
  // hardware conservatively assuming the instruction reads SGPRs.
  constexpr uint64_t InlineImmediate0 = 0x80;
  if (!HasSrc0)
    EncodedValue |= InlineImmediate0 << 32;
  if (!HasSrc1)
    EncodedValue |= InlineImmediate0 << 41;
  if (!HasSrc2)
    EncodedValue |= InlineImmediate0 << 50;
  return EncodedValue;
}

APInt AMDGPUMCCodeEmitter::postEncodeVOPCX(const MCInst &MI, APInt EncodedValue,
                                           const MCSubtargetInfo &STI) const {
  // GFX10+ v_cmpx opcodes promoted to VOP3 have an implied dst=EXEC.
  // Documentation requires dst to be encoded as EXEC (0x7E), but it looks like
  // the actual value encoded for the dst operand is ignored by HW. It was
  // decided to define dst as "do not care" in the td files so that the
  // disassembler accepts any dst value. However, dst is encoded as EXEC here
  // for compatibility with SP3.
  [[maybe_unused]] const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  assert((Desc.TSFlags & SIInstrFlags::VOP3) &&
         Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC));
  EncodedValue |= MRI.getEncodingValue(AMDGPU::EXEC_LO) &
                  AMDGPU::HWEncoding::LO256_REG_IDX_MASK;
  return postEncodeVOP3<true, true, false>(MI, EncodedValue, STI);
}

#include "AMDGPUGenMCCodeEmitter.inc"