1//===- AMDGPUBaseInfo.h - Top level definitions for AMDGPU ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPUBASEINFO_H
10#define LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPUBASEINFO_H
11
12#include "AMDGPUSubtarget.h"
13#include "SIDefines.h"
14#include "llvm/IR/CallingConv.h"
15#include "llvm/IR/InstrTypes.h"
16#include "llvm/IR/Module.h"
17#include "llvm/Support/Alignment.h"
18#include <array>
19#include <functional>
20#include <utility>
21
22// Pull in OpName enum definition and getNamedOperandIdx() declaration.
23#define GET_INSTRINFO_OPERAND_ENUM
24#include "AMDGPUGenInstrInfo.inc"
25
26struct amd_kernel_code_t;
27
28namespace llvm {
29
30struct Align;
31class Argument;
32class Function;
33class GlobalValue;
34class MCInstrInfo;
35class MCRegisterClass;
36class MCRegisterInfo;
37class MCSubtargetInfo;
38class MDNode;
39class StringRef;
40class Triple;
41class raw_ostream;
42
43namespace AMDGPU {
44
45struct AMDGPUMCKernelCodeT;
46struct IsaVersion;
47
48/// Generic target versions emitted by this version of LLVM.
49///
50/// These numbers are incremented every time a codegen breaking change occurs
51/// within a generic family.
namespace GenericVersion {
// Note: every generic family is currently at its initial version; each value
// is bumped independently when its family has a codegen-breaking change.
static constexpr unsigned GFX9 = 1;
static constexpr unsigned GFX9_4 = 1;
static constexpr unsigned GFX10_1 = 1;
static constexpr unsigned GFX10_3 = 1;
static constexpr unsigned GFX11 = 1;
static constexpr unsigned GFX12 = 1;
} // namespace GenericVersion
60
// Recognized AMDHSA code object ABI versions (see getAMDHSACodeObjectVersion).
enum { AMDHSA_COV4 = 4, AMDHSA_COV5 = 5, AMDHSA_COV6 = 6 };

// Narrow floating-point type kinds; returned by getFPDstSelType below.
enum class FPType { None, FP4, FP8 };
64
65/// \returns True if \p STI is AMDHSA.
66bool isHsaAbi(const MCSubtargetInfo &STI);
67
68/// \returns Code object version from the IR module flag.
69unsigned getAMDHSACodeObjectVersion(const Module &M);
70
71/// \returns Code object version from ELF's e_ident[EI_ABIVERSION].
72unsigned getAMDHSACodeObjectVersion(unsigned ABIVersion);
73
74/// \returns The default HSA code object version. This should only be used when
75/// we lack a more accurate CodeObjectVersion value (e.g. from the IR module
76/// flag or a .amdhsa_code_object_version directive)
77unsigned getDefaultAMDHSACodeObjectVersion();
78
79/// \returns ABIVersion suitable for use in ELF's e_ident[EI_ABIVERSION]. \param
80/// CodeObjectVersion is a value returned by getAMDHSACodeObjectVersion().
81uint8_t getELFABIVersion(const Triple &OS, unsigned CodeObjectVersion);
82
/// \returns The offset of the multigrid_sync_arg argument from implicitarg_ptr
unsigned getMultigridSyncArgImplicitArgPosition(unsigned COV);

/// \returns The offset of the hostcall pointer argument from implicitarg_ptr
unsigned getHostcallImplicitArgPosition(unsigned COV);

/// \returns The offset of the default queue argument from implicitarg_ptr for
/// code object version \p COV.
unsigned getDefaultQueueImplicitArgPosition(unsigned COV);
/// \returns The offset of the completion action argument from implicitarg_ptr
/// for code object version \p COV.
unsigned getCompletionActionImplicitArgPosition(unsigned COV);
91
/// Searchable-table entry describing a GCN buffer format (looked up via the
/// getGcnBufferFormatInfo overloads below).
struct GcnBufferFormatInfo {
  unsigned Format;        // Unified buffer-format value used as a lookup key.
  unsigned BitsPerComp;   // Bits per component.
  unsigned NumComponents; // Number of components.
  unsigned NumFormat;     // Numeric-format field value.
  unsigned DataFormat;    // Data-format field value.
};
99
/// Searchable-table entry with per-opcode MAI (matrix) instruction flags
/// (queried via getMAIIsDGEMM / getMAIIsGFX940XDL).
struct MAIInstInfo {
  uint16_t Opcode;    // Instruction opcode this entry describes.
  bool is_dgemm;      // True for double precision GEMM operations.
  bool is_gfx940_xdl; // True for gfx940 XDL instructions.
};
105
/// Searchable-table entry for MFMA F8F6F4 instructions, relating an opcode to
/// its F8F8 counterpart and recording the source register counts.
struct MFMA_F8F6F4_Info {
  unsigned Opcode;
  unsigned F8F8Opcode; // Opcode of the corresponding F8F8 instruction.
  uint8_t NumRegsSrcA; // Number of registers used by source A.
  uint8_t NumRegsSrcB; // Number of registers used by source B.
};
112
/// Searchable-table entry for scaled F32/F16 -> F8/F4 conversion instructions
/// (see isCvt_F32_Fp8_Bf8_e64 and related queries).
struct CvtScaleF32_F32F16ToF8F4_Info {
  unsigned Opcode;
};
116
/// Searchable-table entry relating a true16 opcode to its D16 variants.
struct True16D16Info {
  unsigned T16Op; // True16 opcode.
  unsigned HiOp;  // Corresponding D16 "hi" opcode.
  unsigned LoOp;  // Corresponding D16 "lo" opcode.
};
122
/// Searchable-table entry with per-opcode WMMA instruction flags
/// (queried via getWMMAIsXDL).
struct WMMAInstInfo {
  uint16_t Opcode;
  bool is_wmma_xdl; // True for WMMA XDL instructions.
};
127
128#define GET_MIMGBaseOpcode_DECL
129#define GET_MIMGDim_DECL
130#define GET_MIMGEncoding_DECL
131#define GET_MIMGLZMapping_DECL
132#define GET_MIMGMIPMapping_DECL
133#define GET_MIMGBiASMapping_DECL
134#define GET_MAIInstInfoTable_DECL
135#define GET_isMFMA_F8F6F4Table_DECL
136#define GET_isCvtScaleF32_F32F16ToF8F4Table_DECL
137#define GET_True16D16Table_DECL
138#define GET_WMMAInstInfoTable_DECL
139#include "AMDGPUGenSearchableTables.inc"
140
141namespace IsaInfo {
142
enum {
  // The closed Vulkan driver sets 96, which limits the wave count to 8 but
  // doesn't spill SGPRs as much as when 80 is set.
  FIXED_NUM_SGPRS_FOR_INIT_BUG = 96,
  // SGPRs set aside for the trap handler (per name; confirm at use sites).
  TRAP_NUM_SGPRS = 16
};

// State of a feature (xnack/sramecc) as encoded in a target ID string.
enum class TargetIDSetting { Unsupported, Any, Off, On };
151
152class AMDGPUTargetID {
153private:
154 const MCSubtargetInfo &STI;
155 TargetIDSetting XnackSetting;
156 TargetIDSetting SramEccSetting;
157
158public:
159 explicit AMDGPUTargetID(const MCSubtargetInfo &STI);
160 ~AMDGPUTargetID() = default;
161
162 /// \return True if the current xnack setting is not "Unsupported".
163 bool isXnackSupported() const {
164 return XnackSetting != TargetIDSetting::Unsupported;
165 }
166
167 /// \returns True if the current xnack setting is "On" or "Any".
168 bool isXnackOnOrAny() const {
169 return XnackSetting == TargetIDSetting::On ||
170 XnackSetting == TargetIDSetting::Any;
171 }
172
173 /// \returns True if current xnack setting is "On" or "Off",
174 /// false otherwise.
175 bool isXnackOnOrOff() const {
176 return getXnackSetting() == TargetIDSetting::On ||
177 getXnackSetting() == TargetIDSetting::Off;
178 }
179
180 /// \returns The current xnack TargetIDSetting, possible options are
181 /// "Unsupported", "Any", "Off", and "On".
182 TargetIDSetting getXnackSetting() const { return XnackSetting; }
183
184 /// Sets xnack setting to \p NewXnackSetting.
185 void setXnackSetting(TargetIDSetting NewXnackSetting) {
186 XnackSetting = NewXnackSetting;
187 }
188
189 /// \return True if the current sramecc setting is not "Unsupported".
190 bool isSramEccSupported() const {
191 return SramEccSetting != TargetIDSetting::Unsupported;
192 }
193
194 /// \returns True if the current sramecc setting is "On" or "Any".
195 bool isSramEccOnOrAny() const {
196 return SramEccSetting == TargetIDSetting::On ||
197 SramEccSetting == TargetIDSetting::Any;
198 }
199
200 /// \returns True if current sramecc setting is "On" or "Off",
201 /// false otherwise.
202 bool isSramEccOnOrOff() const {
203 return getSramEccSetting() == TargetIDSetting::On ||
204 getSramEccSetting() == TargetIDSetting::Off;
205 }
206
207 /// \returns The current sramecc TargetIDSetting, possible options are
208 /// "Unsupported", "Any", "Off", and "On".
209 TargetIDSetting getSramEccSetting() const { return SramEccSetting; }
210
211 /// Sets sramecc setting to \p NewSramEccSetting.
212 void setSramEccSetting(TargetIDSetting NewSramEccSetting) {
213 SramEccSetting = NewSramEccSetting;
214 }
215
216 void setTargetIDFromFeaturesString(StringRef FS);
217 void setTargetIDFromTargetIDStream(StringRef TargetID);
218
219 /// \returns String representation of an object.
220 std::string toString() const;
221};
222
223/// \returns Wavefront size for given subtarget \p STI.
224unsigned getWavefrontSize(const MCSubtargetInfo *STI);
225
226/// \returns Local memory size in bytes for given subtarget \p STI.
227unsigned getLocalMemorySize(const MCSubtargetInfo *STI);
228
229/// \returns Maximum addressable local memory size in bytes for given subtarget
230/// \p STI.
231unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI);
232
233/// \returns Number of execution units per compute unit for given subtarget \p
234/// STI.
235unsigned getEUsPerCU(const MCSubtargetInfo *STI);
236
237/// \returns Maximum number of work groups per compute unit for given subtarget
238/// \p STI and limited by given \p FlatWorkGroupSize.
239unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
240 unsigned FlatWorkGroupSize);
241
242/// \returns Minimum number of waves per execution unit for given subtarget \p
243/// STI.
244unsigned getMinWavesPerEU(const MCSubtargetInfo *STI);
245
246/// \returns Maximum number of waves per execution unit for given subtarget \p
247/// STI without any kind of limitation.
248unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI);
249
250/// \returns Number of waves per execution unit required to support the given \p
251/// FlatWorkGroupSize.
252unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
253 unsigned FlatWorkGroupSize);
254
255/// \returns Minimum flat work group size for given subtarget \p STI.
256unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI);
257
258/// \returns Maximum flat work group size for given subtarget \p STI.
259unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI);
260
261/// \returns Number of waves per work group for given subtarget \p STI and
262/// \p FlatWorkGroupSize.
263unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
264 unsigned FlatWorkGroupSize);
265
266/// \returns SGPR allocation granularity for given subtarget \p STI.
267unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI);
268
269/// \returns SGPR encoding granularity for given subtarget \p STI.
270unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI);
271
272/// \returns Total number of SGPRs for given subtarget \p STI.
273unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI);
274
275/// \returns Addressable number of SGPRs for given subtarget \p STI.
276unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI);
277
278/// \returns Minimum number of SGPRs that meets the given number of waves per
279/// execution unit requirement for given subtarget \p STI.
280unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU);
281
282/// \returns Maximum number of SGPRs that meets the given number of waves per
283/// execution unit requirement for given subtarget \p STI.
284unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
285 bool Addressable);
286
287/// \returns Number of extra SGPRs implicitly required by given subtarget \p
288/// STI when the given special registers are used.
289unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
290 bool FlatScrUsed, bool XNACKUsed);
291
292/// \returns Number of extra SGPRs implicitly required by given subtarget \p
293/// STI when the given special registers are used. XNACK is inferred from
294/// \p STI.
295unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
296 bool FlatScrUsed);
297
298/// \returns Number of SGPR blocks needed for given subtarget \p STI when
299/// \p NumSGPRs are used. \p NumSGPRs should already include any special
300/// register counts.
301unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs);
302
303/// \returns VGPR allocation granularity for given subtarget \p STI.
304///
305/// For subtargets which support it, \p EnableWavefrontSize32 should match
306/// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
307unsigned
308getVGPRAllocGranule(const MCSubtargetInfo *STI, unsigned DynamicVGPRBlockSize,
309 std::optional<bool> EnableWavefrontSize32 = std::nullopt);
310
311/// \returns VGPR encoding granularity for given subtarget \p STI.
312///
313/// For subtargets which support it, \p EnableWavefrontSize32 should match
314/// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
315unsigned getVGPREncodingGranule(
316 const MCSubtargetInfo *STI,
317 std::optional<bool> EnableWavefrontSize32 = std::nullopt);
318
319/// For subtargets with a unified VGPR file and mixed ArchVGPR/AGPR usage,
320/// returns the allocation granule for ArchVGPRs.
321unsigned getArchVGPRAllocGranule();
322
323/// \returns Total number of VGPRs for given subtarget \p STI.
324unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI);
325
326/// \returns Addressable number of architectural VGPRs for a given subtarget \p
327/// STI.
328unsigned getAddressableNumArchVGPRs(const MCSubtargetInfo *STI);
329
330/// \returns Addressable number of VGPRs for given subtarget \p STI.
331unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI,
332 unsigned DynamicVGPRBlockSize);
333
334/// \returns Minimum number of VGPRs that meets given number of waves per
335/// execution unit requirement for given subtarget \p STI.
336unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
337 unsigned DynamicVGPRBlockSize);
338
339/// \returns Maximum number of VGPRs that meets given number of waves per
340/// execution unit requirement for given subtarget \p STI.
341unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
342 unsigned DynamicVGPRBlockSize);
343
344/// \returns Number of waves reachable for a given \p NumVGPRs usage for given
345/// subtarget \p STI.
346unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
347 unsigned NumVGPRs,
348 unsigned DynamicVGPRBlockSize);
349
350/// \returns Number of waves reachable for a given \p NumVGPRs usage, \p Granule
351/// size, \p MaxWaves possible, and \p TotalNumVGPRs available.
352unsigned getNumWavesPerEUWithNumVGPRs(unsigned NumVGPRs, unsigned Granule,
353 unsigned MaxWaves,
354 unsigned TotalNumVGPRs);
355
356/// \returns Occupancy for a given \p SGPRs usage, \p MaxWaves possible, and \p
357/// Gen.
358unsigned getOccupancyWithNumSGPRs(unsigned SGPRs, unsigned MaxWaves,
359 AMDGPUSubtarget::Generation Gen);
360
361/// \returns Number of VGPR blocks needed for given subtarget \p STI when
362/// \p NumVGPRs are used. We actually return the number of blocks -1, since
363/// that's what we encode.
364///
365/// For subtargets which support it, \p EnableWavefrontSize32 should match the
366/// ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
367unsigned getEncodedNumVGPRBlocks(
368 const MCSubtargetInfo *STI, unsigned NumVGPRs,
369 std::optional<bool> EnableWavefrontSize32 = std::nullopt);
370
371/// \returns Number of VGPR blocks that need to be allocated for the given
372/// subtarget \p STI when \p NumVGPRs are used.
373unsigned getAllocatedNumVGPRBlocks(
374 const MCSubtargetInfo *STI, unsigned NumVGPRs,
375 unsigned DynamicVGPRBlockSize,
376 std::optional<bool> EnableWavefrontSize32 = std::nullopt);
377
378} // end namespace IsaInfo
379
// Represents a field in an encoded value.
template <unsigned HighBit, unsigned LowBit, unsigned D = 0>
struct EncodingField {
  static_assert(HighBit >= LowBit, "Invalid bit range!");
  // Bit position of the field's least significant bit.
  static constexpr unsigned Offset = LowBit;
  // Number of bits the field occupies.
  static constexpr unsigned Width = 1 + HighBit - LowBit;

  using ValueType = unsigned;
  static constexpr ValueType Default = D;

  ValueType Value;
  constexpr EncodingField(ValueType Value) : Value(Value) {}

  // The raw (unshifted) encoding of this field is just its value;
  // shifting into place is done by EncodingFields.
  constexpr uint64_t encode() const { return Value; }
  // Recover the value from already shifted-and-masked raw bits.
  static ValueType decode(uint64_t Encoded) { return Encoded; }
};
396
// Represents a single bit in an encoded value (a one-bit-wide EncodingField).
template <unsigned Bit, unsigned D = 0>
using EncodingBit = EncodingField<Bit, Bit, D>;
400
// A helper for encoding and decoding multiple fields.
template <typename... Fields> struct EncodingFields {
  // OR together each field's value shifted into its bit position. Fields are
  // expected to occupy disjoint bit ranges.
  static constexpr uint64_t encode(Fields... Values) {
    return ((Values.encode() << Values.Offset) | ...);
  }

  // Extract each field: shift its bits down to position 0, mask to the
  // field's width, and let the field type post-process the raw bits.
  static std::tuple<typename Fields::ValueType...> decode(uint64_t Encoded) {
    return {Fields::decode((Encoded >> Fields::Offset) &
                           maxUIntN(Fields::Width))...};
  }
};
412
413LLVM_READONLY
414inline bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx) {
415 return getNamedOperandIdx(Opcode, Name: NamedIdx) != -1;
416}
417
418LLVM_READONLY
419int getSOPPWithRelaxation(uint16_t Opcode);
420
/// Searchable-table entry describing the properties of one MIMG base opcode
/// (looked up via getMIMGBaseOpcode / getMIMGBaseOpcodeInfo).
struct MIMGBaseOpcodeInfo {
  MIMGBaseOpcode BaseOpcode;
  bool Store;   // Image store (writes memory).
  bool Atomic;  // Image atomic operation.
  bool AtomicX2; // Atomic with a two-dword data operand.
  bool Sampler; // Uses a sampler descriptor.
  bool Gather4;

  uint8_t NumExtraArgs;
  bool Gradients; // Takes explicit derivative operands.
  bool G16;
  bool Coordinates;
  bool LodOrClampOrMip;
  bool HasD16;
  bool MSAA;
  bool BVH;
  bool A16;
  bool NoReturn;
  bool PointSampleAccel;
};
441
442LLVM_READONLY
443const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc);
444
445LLVM_READONLY
446const MIMGBaseOpcodeInfo *getMIMGBaseOpcodeInfo(unsigned BaseOpcode);
447
/// Searchable-table entry describing a MIMG dimension.
struct MIMGDimInfo {
  MIMGDim Dim;
  uint8_t NumCoords;    // Number of coordinate operands.
  uint8_t NumGradients; // Number of gradient operands.
  bool MSAA;
  bool DA;
  uint8_t Encoding;      // Key for getMIMGDimInfoByEncoding.
  const char *AsmSuffix; // Key for getMIMGDimInfoByAsmSuffix.
};
457
458LLVM_READONLY
459const MIMGDimInfo *getMIMGDimInfo(unsigned DimEnum);
460
461LLVM_READONLY
462const MIMGDimInfo *getMIMGDimInfoByEncoding(uint8_t DimEnc);
463
464LLVM_READONLY
465const MIMGDimInfo *getMIMGDimInfoByAsmSuffix(StringRef AsmSuffix);
466
/// Pairs an L-form MIMG base opcode with its LZ form
/// (see getMIMGLZMappingInfo).
struct MIMGLZMappingInfo {
  MIMGBaseOpcode L;
  MIMGBaseOpcode LZ;
};
471
/// Pairs a MIP-form MIMG base opcode with its non-MIP form
/// (see getMIMGMIPMappingInfo).
struct MIMGMIPMappingInfo {
  MIMGBaseOpcode MIP;
  MIMGBaseOpcode NONMIP;
};
476
/// Pairs a bias-form MIMG base opcode with its no-bias form
/// (see getMIMGBiasMappingInfo).
struct MIMGBiasMappingInfo {
  MIMGBaseOpcode Bias;
  MIMGBaseOpcode NoBias;
};
481
/// Pairs an offset-form MIMG base opcode with its no-offset form
/// (see getMIMGOffsetMappingInfo).
struct MIMGOffsetMappingInfo {
  MIMGBaseOpcode Offset;
  MIMGBaseOpcode NoOffset;
};
486
/// Pairs a G-form MIMG base opcode with its G16 form
/// (see getMIMGG16MappingInfo).
struct MIMGG16MappingInfo {
  MIMGBaseOpcode G;
  MIMGBaseOpcode G16;
};
491
492LLVM_READONLY
493const MIMGLZMappingInfo *getMIMGLZMappingInfo(unsigned L);
494
/// Pairs the 2-address and 3-address forms of a WMMA opcode
/// (see mapWMMA2AddrTo3AddrOpcode).
struct WMMAOpcodeMappingInfo {
  unsigned Opcode2Addr;
  unsigned Opcode3Addr;
};
499
500LLVM_READONLY
501const MIMGMIPMappingInfo *getMIMGMIPMappingInfo(unsigned MIP);
502
503LLVM_READONLY
504const MIMGBiasMappingInfo *getMIMGBiasMappingInfo(unsigned Bias);
505
506LLVM_READONLY
507const MIMGOffsetMappingInfo *getMIMGOffsetMappingInfo(unsigned Offset);
508
509LLVM_READONLY
510const MIMGG16MappingInfo *getMIMGG16MappingInfo(unsigned G);
511
512LLVM_READONLY
513int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
514 unsigned VDataDwords, unsigned VAddrDwords);
515
516LLVM_READONLY
517int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels);
518
519LLVM_READONLY
520unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
521 const MIMGDimInfo *Dim, bool IsA16,
522 bool IsG16Supported);
523
/// Searchable-table entry with the encoding-level properties of one MIMG
/// instruction (looked up via getMIMGInfo).
struct MIMGInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;  // Corresponding MIMGBaseOpcode value.
  uint8_t MIMGEncoding; // MIMGEncoding value.
  uint8_t VDataDwords;  // Dwords of vdata.
  uint8_t VAddrDwords;  // Dwords of vaddr.
  uint8_t VAddrOperands; // Number of vaddr operands.
};
532
533LLVM_READONLY
534const MIMGInfo *getMIMGInfo(unsigned Opc);
535
536LLVM_READONLY
537int getMTBUFBaseOpcode(unsigned Opc);
538
539LLVM_READONLY
540int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements);
541
542LLVM_READONLY
543int getMTBUFElements(unsigned Opc);
544
545LLVM_READONLY
546bool getMTBUFHasVAddr(unsigned Opc);
547
548LLVM_READONLY
549bool getMTBUFHasSrsrc(unsigned Opc);
550
551LLVM_READONLY
552bool getMTBUFHasSoffset(unsigned Opc);
553
554LLVM_READONLY
555int getMUBUFBaseOpcode(unsigned Opc);
556
557LLVM_READONLY
558int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements);
559
560LLVM_READONLY
561int getMUBUFElements(unsigned Opc);
562
563LLVM_READONLY
564bool getMUBUFHasVAddr(unsigned Opc);
565
566LLVM_READONLY
567bool getMUBUFHasSrsrc(unsigned Opc);
568
569LLVM_READONLY
570bool getMUBUFHasSoffset(unsigned Opc);
571
572LLVM_READONLY
573bool getMUBUFIsBufferInv(unsigned Opc);
574
575LLVM_READONLY
576bool getMUBUFTfe(unsigned Opc);
577
578LLVM_READONLY
579bool getSMEMIsBuffer(unsigned Opc);
580
581LLVM_READONLY
582bool getVOP1IsSingle(unsigned Opc);
583
584LLVM_READONLY
585bool getVOP2IsSingle(unsigned Opc);
586
587LLVM_READONLY
588bool getVOP3IsSingle(unsigned Opc);
589
590LLVM_READONLY
591bool isVOPC64DPP(unsigned Opc);
592
593LLVM_READONLY
594bool isVOPCAsmOnly(unsigned Opc);
595
596/// Returns true if MAI operation is a double precision GEMM.
597LLVM_READONLY
598bool getMAIIsDGEMM(unsigned Opc);
599
600LLVM_READONLY
601bool getMAIIsGFX940XDL(unsigned Opc);
602
603LLVM_READONLY
604bool getWMMAIsXDL(unsigned Opc);
605
606// Get an equivalent BitOp3 for a binary logical \p Opc.
607// \returns BitOp3 modifier for the logical operation or zero.
608// Used in VOPD3 conversion.
609unsigned getBitOp2(unsigned Opc);
610
/// Whether an opcode may be used as the X and/or Y component of a VOPD
/// instruction (see getCanBeVOPD).
struct CanBeVOPD {
  bool X;
  bool Y;
};
615
616/// \returns SIEncodingFamily used for VOPD encoding on a \p ST.
617LLVM_READONLY
618unsigned getVOPDEncodingFamily(const MCSubtargetInfo &ST);
619
620LLVM_READONLY
621CanBeVOPD getCanBeVOPD(unsigned Opc, unsigned EncodingFamily, bool VOPD3);
622
623LLVM_READNONE
624uint8_t mfmaScaleF8F6F4FormatToNumRegs(unsigned EncodingVal);
625
626LLVM_READONLY
627const MFMA_F8F6F4_Info *getMFMA_F8F6F4_WithFormatArgs(unsigned CBSZ,
628 unsigned BLGP,
629 unsigned F8F8Opcode);
630
631LLVM_READNONE
632uint8_t wmmaScaleF8F6F4FormatToNumRegs(unsigned Fmt);
633
634LLVM_READONLY
635const MFMA_F8F6F4_Info *getWMMA_F8F6F4_WithFormatArgs(unsigned FmtA,
636 unsigned FmtB,
637 unsigned F8F8Opcode);
638
639LLVM_READONLY
640const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
641 uint8_t NumComponents,
642 uint8_t NumFormat,
643 const MCSubtargetInfo &STI);
644LLVM_READONLY
645const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
646 const MCSubtargetInfo &STI);
647
648LLVM_READONLY
649int getMCOpcode(uint16_t Opcode, unsigned Gen);
650
651LLVM_READONLY
652unsigned getVOPDOpcode(unsigned Opc, bool VOPD3);
653
654LLVM_READONLY
655int getVOPDFull(unsigned OpX, unsigned OpY, unsigned EncodingFamily,
656 bool VOPD3);
657
658LLVM_READONLY
659bool isVOPD(unsigned Opc);
660
661LLVM_READNONE
662bool isMAC(unsigned Opc);
663
664LLVM_READNONE
665bool isPermlane16(unsigned Opc);
666
667LLVM_READNONE
668bool isGenericAtomic(unsigned Opc);
669
670LLVM_READNONE
671bool isCvt_F32_Fp8_Bf8_e64(unsigned Opc);
672
673namespace VOPD {
674
// Operand slots of a VOPD component.
enum Component : unsigned {
  DST = 0,
  SRC0,
  SRC1,
  SRC2,

  DST_NUM = 1,     // Number of destination operands per component.
  MAX_SRC_NUM = 3, // Maximum number of source operands per component.
  MAX_OPR_NUM = DST_NUM + MAX_SRC_NUM
};

// LSB mask for VGPR banks per VOPD component operand.
// 4 banks result in a mask 3, setting 2 lower bits.
// Indexed by Component (DST, SRC0, SRC1, SRC2).
constexpr unsigned VOPD_VGPR_BANK_MASKS[] = {1, 3, 3, 1};
// VOPD3 additionally banks SRC2 across 4 banks (mask 3 at index SRC2).
constexpr unsigned VOPD3_VGPR_BANK_MASKS[] = {1, 3, 3, 3};

// The two components (halves) of a VOPD instruction.
enum ComponentIndex : unsigned { X = 0, Y = 1 };
constexpr unsigned COMPONENTS[] = {ComponentIndex::X, ComponentIndex::Y};
constexpr unsigned COMPONENTS_NUM = 2;
694
695// Properties of VOPD components.
696class ComponentProps {
697private:
698 unsigned SrcOperandsNum = 0;
699 unsigned MandatoryLiteralIdx = ~0u;
700 bool HasSrc2Acc = false;
701 unsigned NumVOPD3Mods = 0;
702 unsigned Opcode = 0;
703 bool IsVOP3 = false;
704
705public:
706 ComponentProps() = default;
707 ComponentProps(const MCInstrDesc &OpDesc, bool VOP3Layout = false);
708
709 // Return the total number of src operands this component has.
710 unsigned getCompSrcOperandsNum() const { return SrcOperandsNum; }
711
712 // Return the number of src operands of this component visible to the parser.
713 unsigned getCompParsedSrcOperandsNum() const {
714 return SrcOperandsNum - HasSrc2Acc;
715 }
716
717 // Return true iif this component has a mandatory literal.
718 bool hasMandatoryLiteral() const { return MandatoryLiteralIdx != ~0u; }
719
720 // If this component has a mandatory literal, return component operand
721 // index of this literal (i.e. either Component::SRC1 or Component::SRC2).
722 unsigned getMandatoryLiteralCompOperandIndex() const {
723 assert(hasMandatoryLiteral());
724 return MandatoryLiteralIdx;
725 }
726
727 // Return true iif this component has operand
728 // with component index CompSrcIdx and this operand may be a register.
729 bool hasRegSrcOperand(unsigned CompSrcIdx) const {
730 assert(CompSrcIdx < Component::MAX_SRC_NUM);
731 return SrcOperandsNum > CompSrcIdx && !hasMandatoryLiteralAt(CompSrcIdx);
732 }
733
734 // Return true iif this component has tied src2.
735 bool hasSrc2Acc() const { return HasSrc2Acc; }
736
737 // Return a number of source modifiers if instruction is used in VOPD3.
738 unsigned getCompVOPD3ModsNum() const { return NumVOPD3Mods; }
739
740 // Return opcode of the component.
741 unsigned getOpcode() const { return Opcode; }
742
743 // Returns if component opcode is in VOP3 encoding.
744 unsigned isVOP3() const { return IsVOP3; }
745
746 // Return index of BitOp3 operand or -1.
747 int getBitOp3OperandIdx() const;
748
749private:
750 bool hasMandatoryLiteralAt(unsigned CompSrcIdx) const {
751 assert(CompSrcIdx < Component::MAX_SRC_NUM);
752 return MandatoryLiteralIdx == Component::DST_NUM + CompSrcIdx;
753 }
754};
755
// Kind of a VOPD component instruction; used to index the layout tables in
// ComponentLayout below.
enum ComponentKind : unsigned {
  SINGLE = 0, // A single VOP1 or VOP2 instruction which may be used in VOPD.
  COMPONENT_X, // A VOPD instruction, X component.
  COMPONENT_Y, // A VOPD instruction, Y component.
  MAX = COMPONENT_Y
};
762
763// Interface functions of this class map VOPD component operand indices
764// to indices of operands in MachineInstr/MCInst or parsed operands array.
765//
766// Note that this class operates with 3 kinds of indices:
767// - VOPD component operand indices (Component::DST, Component::SRC0, etc.);
768// - MC operand indices (they refer operands in a MachineInstr/MCInst);
769// - parsed operand indices (they refer operands in parsed operands array).
770//
771// For SINGLE components mapping between these indices is trivial.
772// But things get more complicated for COMPONENT_X and
773// COMPONENT_Y because these components share the same
774// MachineInstr/MCInst and the same parsed operands array.
775// Below is an example of component operand to parsed operand
776// mapping for the following instruction:
777//
778// v_dual_add_f32 v255, v4, v5 :: v_dual_mov_b32 v6, v1
779//
780// PARSED COMPONENT PARSED
781// COMPONENT OPERANDS OPERAND INDEX OPERAND INDEX
782// -------------------------------------------------------------------
783// "v_dual_add_f32" 0
784// v_dual_add_f32 v255 0 (DST) --> 1
785// v4 1 (SRC0) --> 2
786// v5 2 (SRC1) --> 3
787// "::" 4
788// "v_dual_mov_b32" 5
789// v_dual_mov_b32 v6 0 (DST) --> 6
790// v1 1 (SRC0) --> 7
791// -------------------------------------------------------------------
792//
class ComponentLayout {
private:
  // Regular MachineInstr/MCInst operands are ordered as follows:
  // dst, src0 [, other src operands]
  // VOPD MachineInstr/MCInst operands are ordered as follows:
  // dstX, dstY, src0X [, other OpX operands], src0Y [, other OpY operands]
  // Each ComponentKind has operand indices defined below.
  // Indexed by ComponentKind: SINGLE, COMPONENT_X, COMPONENT_Y.
  static constexpr unsigned MC_DST_IDX[] = {0, 0, 1};

  // VOPD3 instructions may have 2 or 3 source modifiers, src2 modifier is not
  // used if there is tied accumulator. Indexing of this array:
  // MC_SRC_IDX[VOPD3ModsNum][SrcNo]. This returns an index for a SINGLE
  // instruction layout, add 1 for COMPONENT_X or COMPONENT_Y. For the second
  // component add OpX.MCSrcNum + OpX.VOPD3ModsNum.
  // For VOPD1/VOPD2 use column with zero modifiers.
  static constexpr unsigned SINGLE_MC_SRC_IDX[4][3] = {
      {1, 2, 3}, {2, 3, 4}, {2, 4, 5}, {2, 4, 6}};

  // Parsed operands of regular instructions are ordered as follows:
  // Mnemo dst src0 [vsrc1 ...]
  // Parsed VOPD operands are ordered as follows:
  // OpXMnemo dstX src0X [vsrc1X|imm vsrc1X|vsrc1X imm] '::'
  // OpYMnemo dstY src0Y [vsrc1Y|imm vsrc1Y|vsrc1Y imm]
  // Each ComponentKind has operand indices defined below.
  static constexpr unsigned PARSED_DST_IDX[] = {1, 1,
                                                4 /* + OpX.ParsedSrcNum */};
  static constexpr unsigned FIRST_PARSED_SRC_IDX[] = {
      2, 2, 5 /* + OpX.ParsedSrcNum */};

private:
  const ComponentKind Kind;
  // Properties of the preceding (X) component. Default-constructed (all-zero
  // counts) for SINGLE/COMPONENT_X layouts, where no predecessor exists.
  const ComponentProps PrevComp;
  const unsigned VOPD3ModsNum;
  const int BitOp3Idx; // Index of bitop3 operand or -1

public:
  // Create layout for COMPONENT_X or SINGLE component.
  ComponentLayout(ComponentKind Kind, unsigned VOPD3ModsNum, int BitOp3Idx)
      : Kind(Kind), VOPD3ModsNum(VOPD3ModsNum), BitOp3Idx(BitOp3Idx) {
    assert(Kind == ComponentKind::SINGLE || Kind == ComponentKind::COMPONENT_X);
  }

  // Create layout for COMPONENT_Y which depends on COMPONENT_X layout.
  ComponentLayout(const ComponentProps &OpXProps, unsigned VOPD3ModsNum,
                  int BitOp3Idx)
      : Kind(ComponentKind::COMPONENT_Y), PrevComp(OpXProps),
        VOPD3ModsNum(VOPD3ModsNum), BitOp3Idx(BitOp3Idx) {}

public:
  // Return the index of dst operand in MCInst operands.
  unsigned getIndexOfDstInMCOperands() const { return MC_DST_IDX[Kind]; }

  // Return the index of the specified src operand in MCInst operands.
  unsigned getIndexOfSrcInMCOperands(unsigned CompSrcIdx, bool VOPD3) const {
    assert(CompSrcIdx < Component::MAX_SRC_NUM);

    // For a SINGLE instruction with a real BitOp3 operand, src2 lives at the
    // bitop3 operand index.
    if (Kind == SINGLE && CompSrcIdx == 2 && BitOp3Idx != -1)
      return BitOp3Idx;

    if (VOPD3) {
      return SINGLE_MC_SRC_IDX[VOPD3ModsNum][CompSrcIdx] + getPrevCompSrcNum() +
             getPrevCompVOPD3ModsNum() + (Kind != SINGLE ? 1 : 0);
    }

    // VOPD1/VOPD2: use the zero-modifiers row of SINGLE_MC_SRC_IDX.
    return SINGLE_MC_SRC_IDX[0][CompSrcIdx] + getPrevCompSrcNum() +
           (Kind != SINGLE ? 1 : 0);
  }

  // Return the index of dst operand in the parsed operands array.
  unsigned getIndexOfDstInParsedOperands() const {
    return PARSED_DST_IDX[Kind] + getPrevCompParsedSrcNum();
  }

  // Return the index of the specified src operand in the parsed operands array.
  unsigned getIndexOfSrcInParsedOperands(unsigned CompSrcIdx) const {
    assert(CompSrcIdx < Component::MAX_SRC_NUM);
    return FIRST_PARSED_SRC_IDX[Kind] + getPrevCompParsedSrcNum() + CompSrcIdx;
  }

private:
  // Operand counts of the preceding (X) component; all zero for SINGLE and
  // COMPONENT_X layouts because PrevComp is default-constructed there.
  unsigned getPrevCompSrcNum() const {
    return PrevComp.getCompSrcOperandsNum();
  }
  unsigned getPrevCompParsedSrcNum() const {
    return PrevComp.getCompParsedSrcOperandsNum();
  }
  unsigned getPrevCompVOPD3ModsNum() const {
    return PrevComp.getCompVOPD3ModsNum();
  }
};
883
// Layout and properties of VOPD components.
class ComponentInfo : public ComponentProps, public ComponentLayout {
public:
  // Create ComponentInfo for COMPONENT_X or SINGLE component.
  // Note: ComponentProps is constructed first (base declaration order), so
  // getCompVOPD3ModsNum()/getBitOp3OperandIdx() are safe to call when
  // initializing ComponentLayout.
  ComponentInfo(const MCInstrDesc &OpDesc,
                ComponentKind Kind = ComponentKind::SINGLE,
                bool VOP3Layout = false)
      : ComponentProps(OpDesc, VOP3Layout),
        ComponentLayout(Kind, getCompVOPD3ModsNum(), getBitOp3OperandIdx()) {}

  // Create ComponentInfo for COMPONENT_Y which depends on COMPONENT_X layout.
  ComponentInfo(const MCInstrDesc &OpDesc, const ComponentProps &OpXProps,
                bool VOP3Layout = false)
      : ComponentProps(OpDesc, VOP3Layout),
        ComponentLayout(OpXProps, getCompVOPD3ModsNum(),
                        getBitOp3OperandIdx()) {}

  // Map component operand index to parsed operand index.
  // Return 0 if the specified operand does not exist.
  unsigned getIndexInParsedOperands(unsigned CompOprIdx) const;
};
905
// Properties of VOPD instructions.
class InstInfo {
private:
  // Component info for the X and Y components, in that order.
  const ComponentInfo CompInfo[COMPONENTS_NUM];

public:
  // Register indices for all component operands (DST, SRC0, SRC1, SRC2).
  using RegIndices = std::array<MCRegister, Component::MAX_OPR_NUM>;

  InstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY)
      : CompInfo{OpX, OpY} {}

  InstInfo(const ComponentInfo &OprInfoX, const ComponentInfo &OprInfoY)
      : CompInfo{OprInfoX, OprInfoY} {}

  // Return the info of the component selected by \p ComponentIdx
  // (ComponentIndex::X or ComponentIndex::Y).
  const ComponentInfo &operator[](size_t ComponentIdx) const {
    assert(ComponentIdx < COMPONENTS_NUM);
    return CompInfo[ComponentIdx];
  }

  // Check VOPD operands constraints.
  // GetRegIdx(Component, MCOperandIdx) must return a VGPR register index
  // for the specified component and MC operand. The callback must return 0
  // if the operand is not a register or not a VGPR.
  // If \p SkipSrc is set to true then constraints for source operands are not
  // checked.
  // If \p AllowSameVGPR is set then same VGPRs are allowed for X and Y sources
  // even though it violates requirement to be from different banks.
  // If \p VOPD3 is set to true both dst registers allowed to be either odd
  // or even and instruction may have real src2 as opposed to tied accumulator.
  bool
  hasInvalidOperand(std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
                    const MCRegisterInfo &MRI, bool SkipSrc = false,
                    bool AllowSameVGPR = false, bool VOPD3 = false) const {
    return getInvalidCompOperandIndex(GetRegIdx, MRI, SkipSrc, AllowSameVGPR,
                                      VOPD3)
        .has_value();
  }

  // Check VOPD operands constraints.
  // Return the index of an invalid component operand, if any.
  // If \p SkipSrc is set to true then constraints for source operands are not
  // checked except for being from the same halves of VGPR file on gfx1250.
  // If \p AllowSameVGPR is set then same VGPRs are allowed for X and Y sources
  // even though it violates requirement to be from different banks.
  // If \p VOPD3 is set to true both dst registers allowed to be either odd
  // or even and instruction may have real src2 as opposed to tied accumulator.
  std::optional<unsigned> getInvalidCompOperandIndex(
      std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
      const MCRegisterInfo &MRI, bool SkipSrc = false,
      bool AllowSameVGPR = false, bool VOPD3 = false) const;

private:
  // Collect the VGPR indices (via \p GetRegIdx) for all operands of the
  // component selected by \p ComponentIdx.
  RegIndices
  getRegIndices(unsigned ComponentIdx,
                std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
                bool VOPD3) const;
};
963
964} // namespace VOPD
965
LLVM_READONLY
// Map VOPD opcode \p VOPDOpcode to the opcodes of its two components.
std::pair<unsigned, unsigned> getVOPDComponents(unsigned VOPDOpcode);

LLVM_READONLY
// Get properties of 2 single VOP1/VOP2 instructions
// used as components to create a VOPD instruction.
VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY);

LLVM_READONLY
// Get properties of VOPD X and Y components.
VOPD::InstInfo getVOPDInstInfo(unsigned VOPDOpcode,
                               const MCInstrInfo *InstrInfo);

LLVM_READONLY
// \returns true if \p Opc is an asynchronous store instruction.
bool isAsyncStore(unsigned Opc);
LLVM_READONLY
// \returns true if \p Opc is a tensor store instruction.
bool isTensorStore(unsigned Opc);
LLVM_READONLY
// \returns the temporal hint type of instruction \p TID.
// NOTE(review): \p TID is taken by const value; a const reference would
// avoid copying the descriptor -- confirm against the definition.
unsigned getTemporalHintType(const MCInstrDesc TID);

LLVM_READONLY
// \returns true if \p Opc is a true16 instruction.
bool isTrue16Inst(unsigned Opc);

LLVM_READONLY
// \returns the FP destination-select type for \p Opc -- presumably
// FPType::None when not applicable; verify against the definition.
FPType getFPDstSelType(unsigned Opc);

LLVM_READONLY
bool isInvalidSingleUseConsumerInst(unsigned Opc);

LLVM_READONLY
bool isInvalidSingleUseProducerInst(unsigned Opc);

bool isDPMACCInstruction(unsigned Opc);

LLVM_READONLY
// Map a WMMA 2-address opcode to its 3-address form.
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc);

LLVM_READONLY
// Map a WMMA 3-address opcode back to its 2-address form.
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc);

// Initialize \p Header with default values for the given subtarget.
void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &Header,
                               const MCSubtargetInfo *STI);

// Segment classification of global values.
bool isGroupSegment(const GlobalValue *GV);
bool isGlobalSegment(const GlobalValue *GV);
bool isReadOnlySegment(const GlobalValue *GV);
1012
1013/// \returns True if constants should be emitted to .text section for given
1014/// target triple \p TT, false otherwise.
1015bool shouldEmitConstantsToTextSection(const Triple &TT);
1016
1017/// Returns a valid charcode or 0 in the first entry if this is a valid physical
1018/// register name. Followed by the start register number, and the register
1019/// width. Does not validate the number of registers exists in the class. Unlike
1020/// parseAsmConstraintPhysReg, this does not expect the name to be wrapped in
1021/// "{}".
1022std::tuple<char, unsigned, unsigned> parseAsmPhysRegName(StringRef TupleString);
1023
1024/// Returns a valid charcode or 0 in the first entry if this is a valid physical
1025/// register constraint. Followed by the start register number, and the register
1026/// width. Does not validate the number of registers exists in the class.
1027std::tuple<char, unsigned, unsigned>
1028parseAsmConstraintPhysReg(StringRef Constraint);
1029
1030/// \returns Integer value requested using \p F's \p Name attribute.
1031///
1032/// \returns \p Default if attribute is not present.
1033///
1034/// \returns \p Default and emits error if requested value cannot be converted
1035/// to integer.
1036int getIntegerAttribute(const Function &F, StringRef Name, int Default);
1037
1038/// \returns A pair of integer values requested using \p F's \p Name attribute
1039/// in "first[,second]" format ("second" is optional unless \p OnlyFirstRequired
1040/// is false).
1041///
1042/// \returns \p Default if attribute is not present.
1043///
1044/// \returns \p Default and emits error if one of the requested values cannot be
1045/// converted to integer, or \p OnlyFirstRequired is false and "second" value is
1046/// not present.
1047std::pair<unsigned, unsigned>
1048getIntegerPairAttribute(const Function &F, StringRef Name,
1049 std::pair<unsigned, unsigned> Default,
1050 bool OnlyFirstRequired = false);
1051
1052/// \returns A pair of integer values requested using \p F's \p Name attribute
1053/// in "first[,second]" format ("second" is optional unless \p OnlyFirstRequired
1054/// is false).
1055///
1056/// \returns \p std::nullopt if attribute is not present.
1057///
1058/// \returns \p std::nullopt and emits error if one of the requested values
1059/// cannot be converted to integer, or \p OnlyFirstRequired is false and
1060/// "second" value is not present.
1061std::optional<std::pair<unsigned, std::optional<unsigned>>>
1062getIntegerPairAttribute(const Function &F, StringRef Name,
1063 bool OnlyFirstRequired = false);
1064
1065/// \returns Generate a vector of integer values requested using \p F's \p Name
1066/// attribute.
1067/// \returns A vector of size \p Size, with all elements set to \p DefaultVal,
1068/// if any error occurs. The corresponding error will also be emitted.
1069SmallVector<unsigned> getIntegerVecAttribute(const Function &F, StringRef Name,
1070 unsigned Size,
1071 unsigned DefaultVal);
1072/// Similar to the function above, but returns std::nullopt if any error occurs.
1073std::optional<SmallVector<unsigned>>
1074getIntegerVecAttribute(const Function &F, StringRef Name, unsigned Size);
1075
1076/// Checks if \p Val is inside \p MD, a !range-like metadata.
1077bool hasValueInRangeLikeMetadata(const MDNode &MD, int64_t Val);
1078
1079/// Represents the counter values to wait for in an s_waitcnt instruction.
1080///
1081/// Large values (including the maximum possible integer) can be used to
1082/// represent "don't care" waits.
1083struct Waitcnt {
1084 unsigned LoadCnt = ~0u; // Corresponds to Vmcnt prior to gfx12.
1085 unsigned ExpCnt = ~0u;
1086 unsigned DsCnt = ~0u; // Corresponds to LGKMcnt prior to gfx12.
1087 unsigned StoreCnt = ~0u; // Corresponds to VScnt on gfx10/gfx11.
1088 unsigned SampleCnt = ~0u; // gfx12+ only.
1089 unsigned BvhCnt = ~0u; // gfx12+ only.
1090 unsigned KmCnt = ~0u; // gfx12+ only.
1091 unsigned XCnt = ~0u; // gfx1250.
1092 unsigned VaVdst = ~0u; // gfx12+ expert scheduling mode only.
1093 unsigned VmVsrc = ~0u; // gfx12+ expert scheduling mode only.
1094
1095 Waitcnt() = default;
1096 // Pre-gfx12 constructor.
1097 Waitcnt(unsigned VmCnt, unsigned ExpCnt, unsigned LgkmCnt, unsigned VsCnt)
1098 : LoadCnt(VmCnt), ExpCnt(ExpCnt), DsCnt(LgkmCnt), StoreCnt(VsCnt) {}
1099
1100 // gfx12+ constructor.
1101 Waitcnt(unsigned LoadCnt, unsigned ExpCnt, unsigned DsCnt, unsigned StoreCnt,
1102 unsigned SampleCnt, unsigned BvhCnt, unsigned KmCnt, unsigned XCnt,
1103 unsigned VaVdst, unsigned VmVsrc)
1104 : LoadCnt(LoadCnt), ExpCnt(ExpCnt), DsCnt(DsCnt), StoreCnt(StoreCnt),
1105 SampleCnt(SampleCnt), BvhCnt(BvhCnt), KmCnt(KmCnt), XCnt(XCnt),
1106 VaVdst(VaVdst), VmVsrc(VmVsrc) {}
1107
1108 bool hasWait() const { return StoreCnt != ~0u || hasWaitExceptStoreCnt(); }
1109
1110 bool hasWaitExceptStoreCnt() const {
1111 return LoadCnt != ~0u || ExpCnt != ~0u || DsCnt != ~0u ||
1112 SampleCnt != ~0u || BvhCnt != ~0u || KmCnt != ~0u || XCnt != ~0u ||
1113 VaVdst != ~0u || VmVsrc != ~0u;
1114 }
1115
1116 bool hasWaitStoreCnt() const { return StoreCnt != ~0u; }
1117
1118 bool hasWaitDepctr() const { return VaVdst != ~0u || VmVsrc != ~0u; }
1119
1120 Waitcnt combined(const Waitcnt &Other) const {
1121 // Does the right thing provided self and Other are either both pre-gfx12
1122 // or both gfx12+.
1123 return Waitcnt(
1124 std::min(a: LoadCnt, b: Other.LoadCnt), std::min(a: ExpCnt, b: Other.ExpCnt),
1125 std::min(a: DsCnt, b: Other.DsCnt), std::min(a: StoreCnt, b: Other.StoreCnt),
1126 std::min(a: SampleCnt, b: Other.SampleCnt), std::min(a: BvhCnt, b: Other.BvhCnt),
1127 std::min(a: KmCnt, b: Other.KmCnt), std::min(a: XCnt, b: Other.XCnt),
1128 std::min(a: VaVdst, b: Other.VaVdst), std::min(a: VmVsrc, b: Other.VmVsrc));
1129 }
1130
1131 friend raw_ostream &operator<<(raw_ostream &OS, const AMDGPU::Waitcnt &Wait);
1132};
1133
/// Represents the hardware counter limits for different wait count types.
struct HardwareLimits {
  unsigned LoadcntMax; // Corresponds to Vmcnt prior to gfx12.
  unsigned ExpcntMax;
  unsigned DscntMax; // Corresponds to LGKMcnt prior to gfx12.
  unsigned StorecntMax; // Corresponds to VScnt in gfx10/gfx11.
  unsigned SamplecntMax; // gfx12+ only.
  unsigned BvhcntMax; // gfx12+ only.
  unsigned KmcntMax; // gfx12+ only.
  unsigned XcntMax; // gfx1250.
  unsigned VaVdstMax; // gfx12+ expert mode only.
  unsigned VmVsrcMax; // gfx12+ expert mode only.

  // NOTE: the defaulted constructor leaves all limits uninitialized;
  // use the IsaVersion constructor to get meaningful values.
  HardwareLimits() = default;

  /// Initializes hardware limits from ISA version.
  HardwareLimits(const IsaVersion &IV);
};
1152
1153// The following methods are only meaningful on targets that support
1154// S_WAITCNT.
1155
1156/// \returns Vmcnt bit mask for given isa \p Version.
1157unsigned getVmcntBitMask(const IsaVersion &Version);
1158
1159/// \returns Expcnt bit mask for given isa \p Version.
1160unsigned getExpcntBitMask(const IsaVersion &Version);
1161
1162/// \returns Lgkmcnt bit mask for given isa \p Version.
1163unsigned getLgkmcntBitMask(const IsaVersion &Version);
1164
1165/// \returns Waitcnt bit mask for given isa \p Version.
1166unsigned getWaitcntBitMask(const IsaVersion &Version);
1167
1168/// \returns Decoded Vmcnt from given \p Waitcnt for given isa \p Version.
1169unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt);
1170
1171/// \returns Decoded Expcnt from given \p Waitcnt for given isa \p Version.
1172unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt);
1173
1174/// \returns Decoded Lgkmcnt from given \p Waitcnt for given isa \p Version.
1175unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt);
1176
1177/// Decodes Vmcnt, Expcnt and Lgkmcnt from given \p Waitcnt for given isa
1178/// \p Version, and writes decoded values into \p Vmcnt, \p Expcnt and
1179/// \p Lgkmcnt respectively. Should not be used on gfx12+, the instruction
1180/// which needs it is deprecated
1181///
1182/// \details \p Vmcnt, \p Expcnt and \p Lgkmcnt are decoded as follows:
1183/// \p Vmcnt = \p Waitcnt[3:0] (pre-gfx9)
1184/// \p Vmcnt = \p Waitcnt[15:14,3:0] (gfx9,10)
1185/// \p Vmcnt = \p Waitcnt[15:10] (gfx11)
1186/// \p Expcnt = \p Waitcnt[6:4] (pre-gfx11)
1187/// \p Expcnt = \p Waitcnt[2:0] (gfx11)
1188/// \p Lgkmcnt = \p Waitcnt[11:8] (pre-gfx10)
1189/// \p Lgkmcnt = \p Waitcnt[13:8] (gfx10)
1190/// \p Lgkmcnt = \p Waitcnt[9:4] (gfx11)
1191///
1192void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt,
1193 unsigned &Expcnt, unsigned &Lgkmcnt);
1194
1195Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded);
1196
1197/// \returns \p Waitcnt with encoded \p Vmcnt for given isa \p Version.
1198unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
1199 unsigned Vmcnt);
1200
1201/// \returns \p Waitcnt with encoded \p Expcnt for given isa \p Version.
1202unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
1203 unsigned Expcnt);
1204
1205/// \returns \p Waitcnt with encoded \p Lgkmcnt for given isa \p Version.
1206unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
1207 unsigned Lgkmcnt);
1208
1209/// Encodes \p Vmcnt, \p Expcnt and \p Lgkmcnt into Waitcnt for given isa
1210/// \p Version. Should not be used on gfx12+, the instruction which needs
1211/// it is deprecated
1212///
1213/// \details \p Vmcnt, \p Expcnt and \p Lgkmcnt are encoded as follows:
1214/// Waitcnt[2:0] = \p Expcnt (gfx11+)
1215/// Waitcnt[3:0] = \p Vmcnt (pre-gfx9)
1216/// Waitcnt[3:0] = \p Vmcnt[3:0] (gfx9,10)
1217/// Waitcnt[6:4] = \p Expcnt (pre-gfx11)
1218/// Waitcnt[9:4] = \p Lgkmcnt (gfx11)
1219/// Waitcnt[11:8] = \p Lgkmcnt (pre-gfx10)
1220/// Waitcnt[13:8] = \p Lgkmcnt (gfx10)
1221/// Waitcnt[15:10] = \p Vmcnt (gfx11)
1222/// Waitcnt[15:14] = \p Vmcnt[5:4] (gfx9,10)
1223///
1224/// \returns Waitcnt with encoded \p Vmcnt, \p Expcnt and \p Lgkmcnt for given
1225/// isa \p Version.
1226///
1227unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt,
1228 unsigned Expcnt, unsigned Lgkmcnt);
1229
1230unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded);
1231
1232// The following methods are only meaningful on targets that support
1233// S_WAIT_*CNT, introduced with gfx12.
1234
1235/// \returns Loadcnt bit mask for given isa \p Version.
1236/// Returns 0 for versions that do not support LOADcnt
1237unsigned getLoadcntBitMask(const IsaVersion &Version);
1238
1239/// \returns Samplecnt bit mask for given isa \p Version.
1240/// Returns 0 for versions that do not support SAMPLEcnt
1241unsigned getSamplecntBitMask(const IsaVersion &Version);
1242
1243/// \returns Bvhcnt bit mask for given isa \p Version.
1244/// Returns 0 for versions that do not support BVHcnt
1245unsigned getBvhcntBitMask(const IsaVersion &Version);
1246
1247/// \returns Dscnt bit mask for given isa \p Version.
1248/// Returns 0 for versions that do not support DScnt
1249unsigned getDscntBitMask(const IsaVersion &Version);
1250
/// \returns Kmcnt bit mask for given isa \p Version.
/// Returns 0 for versions that do not support KMcnt
1253unsigned getKmcntBitMask(const IsaVersion &Version);
1254
1255/// \returns Xcnt bit mask for given isa \p Version.
1256/// Returns 0 for versions that do not support Xcnt.
1257unsigned getXcntBitMask(const IsaVersion &Version);
1258
1259/// \return STOREcnt or VScnt bit mask for given isa \p Version.
1260/// returns 0 for versions that do not support STOREcnt or VScnt.
1261/// STOREcnt and VScnt are the same counter, the name used
1262/// depends on the ISA version.
1263unsigned getStorecntBitMask(const IsaVersion &Version);
1264
1265// The following are only meaningful on targets that support
1266// S_WAIT_LOADCNT_DSCNT and S_WAIT_STORECNT_DSCNT.
1267
1268/// \returns Decoded Waitcnt structure from given \p LoadcntDscnt for given
1269/// isa \p Version.
1270Waitcnt decodeLoadcntDscnt(const IsaVersion &Version, unsigned LoadcntDscnt);
1271
1272/// \returns Decoded Waitcnt structure from given \p StorecntDscnt for given
1273/// isa \p Version.
1274Waitcnt decodeStorecntDscnt(const IsaVersion &Version, unsigned StorecntDscnt);
1275
1276/// \returns \p Loadcnt and \p Dscnt components of \p Decoded encoded as an
1277/// immediate that can be used with S_WAIT_LOADCNT_DSCNT for given isa
1278/// \p Version.
1279unsigned encodeLoadcntDscnt(const IsaVersion &Version, const Waitcnt &Decoded);
1280
1281/// \returns \p Storecnt and \p Dscnt components of \p Decoded encoded as an
1282/// immediate that can be used with S_WAIT_STORECNT_DSCNT for given isa
1283/// \p Version.
1284unsigned encodeStorecntDscnt(const IsaVersion &Version, const Waitcnt &Decoded);
1285
1286namespace Hwreg {
1287
1288using HwregId = EncodingField<5, 0>;
1289using HwregOffset = EncodingField<10, 6>;
1290
1291struct HwregSize : EncodingField<15, 11, 32> {
1292 using EncodingField::EncodingField;
1293 constexpr uint64_t encode() const { return Value - 1; }
1294 static ValueType decode(uint64_t Encoded) { return Encoded + 1; }
1295};
1296
1297using HwregEncoding = EncodingFields<HwregId, HwregOffset, HwregSize>;
1298
1299} // namespace Hwreg
1300
1301namespace DepCtr {
1302
1303int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI);
1304int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
1305 const MCSubtargetInfo &STI);
1306bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
1307 const MCSubtargetInfo &STI);
1308bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
1309 bool &IsDefault, const MCSubtargetInfo &STI);
1310
1311/// \returns Maximum VaVdst value that can be encoded.
1312unsigned getVaVdstBitMask();
1313
1314/// \returns Maximum VaSdst value that can be encoded.
1315unsigned getVaSdstBitMask();
1316
1317/// \returns Maximum VaSsrc value that can be encoded.
1318unsigned getVaSsrcBitMask();
1319
1320/// \returns Maximum HoldCnt value that can be encoded.
1321unsigned getHoldCntBitMask(const IsaVersion &Version);
1322
1323/// \returns Maximum VmVsrc value that can be encoded.
1324unsigned getVmVsrcBitMask();
1325
1326/// \returns Maximum VaVcc value that can be encoded.
1327unsigned getVaVccBitMask();
1328
1329/// \returns Maximum SaSdst value that can be encoded.
1330unsigned getSaSdstBitMask();
1331
1332/// \returns Decoded VaVdst from given immediate \p Encoded.
1333unsigned decodeFieldVaVdst(unsigned Encoded);
1334
1335/// \returns Decoded VmVsrc from given immediate \p Encoded.
1336unsigned decodeFieldVmVsrc(unsigned Encoded);
1337
1338/// \returns Decoded SaSdst from given immediate \p Encoded.
1339unsigned decodeFieldSaSdst(unsigned Encoded);
1340
1341/// \returns Decoded VaSdst from given immediate \p Encoded.
1342unsigned decodeFieldVaSdst(unsigned Encoded);
1343
1344/// \returns Decoded VaVcc from given immediate \p Encoded.
1345unsigned decodeFieldVaVcc(unsigned Encoded);
1346
/// \returns Decoded VaSsrc from given immediate \p Encoded.
1348unsigned decodeFieldVaSsrc(unsigned Encoded);
1349
1350/// \returns Decoded HoldCnt from given immediate \p Encoded.
1351unsigned decodeFieldHoldCnt(unsigned Encoded, const IsaVersion &Version);
1352
1353/// \returns \p VmVsrc as an encoded Depctr immediate.
1354unsigned encodeFieldVmVsrc(unsigned VmVsrc, const MCSubtargetInfo &STI);
1355
1356/// \returns \p Encoded combined with encoded \p VmVsrc.
1357unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc);
1358
1359/// \returns \p VaVdst as an encoded Depctr immediate.
1360unsigned encodeFieldVaVdst(unsigned VaVdst, const MCSubtargetInfo &STI);
1361
1362/// \returns \p Encoded combined with encoded \p VaVdst.
1363unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst);
1364
1365/// \returns \p SaSdst as an encoded Depctr immediate.
1366unsigned encodeFieldSaSdst(unsigned SaSdst, const MCSubtargetInfo &STI);
1367
1368/// \returns \p Encoded combined with encoded \p SaSdst.
1369unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst);
1370
1371/// \returns \p VaSdst as an encoded Depctr immediate.
1372unsigned encodeFieldVaSdst(unsigned VaSdst, const MCSubtargetInfo &STI);
1373
1374/// \returns \p Encoded combined with encoded \p VaSdst.
1375unsigned encodeFieldVaSdst(unsigned Encoded, unsigned VaSdst);
1376
1377/// \returns \p VaVcc as an encoded Depctr immediate.
1378unsigned encodeFieldVaVcc(unsigned VaVcc, const MCSubtargetInfo &STI);
1379
1380/// \returns \p Encoded combined with encoded \p VaVcc.
1381unsigned encodeFieldVaVcc(unsigned Encoded, unsigned VaVcc);
1382
1383/// \returns \p HoldCnt as an encoded Depctr immediate.
1384unsigned encodeFieldHoldCnt(unsigned HoldCnt, const MCSubtargetInfo &STI);
1385
1386/// \returns \p Encoded combined with encoded \p HoldCnt.
1387unsigned encodeFieldHoldCnt(unsigned Encoded, unsigned HoldCnt,
1388 const IsaVersion &Version);
1389
1390/// \returns \p VaSsrc as an encoded Depctr immediate.
1391unsigned encodeFieldVaSsrc(unsigned VaSsrc, const MCSubtargetInfo &STI);
1392
1393/// \returns \p Encoded combined with encoded \p VaSsrc.
1394unsigned encodeFieldVaSsrc(unsigned Encoded, unsigned VaSsrc);
1395
1396} // namespace DepCtr
1397
1398namespace Exp {
1399
1400bool getTgtName(unsigned Id, StringRef &Name, int &Index);
1401
1402LLVM_READONLY
1403unsigned getTgtId(const StringRef Name);
1404
1405LLVM_READNONE
1406bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI);
1407
1408} // namespace Exp
1409
1410namespace MTBUFFormat {
1411
1412LLVM_READNONE
1413int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt);
1414
1415void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt);
1416
1417int64_t getDfmt(const StringRef Name);
1418
1419StringRef getDfmtName(unsigned Id);
1420
1421int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI);
1422
1423StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI);
1424
1425bool isValidDfmtNfmt(unsigned Val, const MCSubtargetInfo &STI);
1426
1427bool isValidNfmt(unsigned Val, const MCSubtargetInfo &STI);
1428
1429int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI);
1430
1431StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI);
1432
1433bool isValidUnifiedFormat(unsigned Val, const MCSubtargetInfo &STI);
1434
1435int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
1436 const MCSubtargetInfo &STI);
1437
1438bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI);
1439
1440unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI);
1441
1442} // namespace MTBUFFormat
1443
1444namespace SendMsg {
1445
1446LLVM_READNONE
1447bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI);
1448
1449LLVM_READNONE
1450bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
1451 bool Strict = true);
1452
1453LLVM_READNONE
1454bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
1455 const MCSubtargetInfo &STI, bool Strict = true);
1456
1457LLVM_READNONE
1458bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI);
1459
1460LLVM_READNONE
1461bool msgSupportsStream(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI);
1462
1463void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
1464 uint16_t &StreamId, const MCSubtargetInfo &STI);
1465
1466LLVM_READNONE
1467uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId);
1468
1469} // namespace SendMsg
1470
1471unsigned getInitialPSInputAddr(const Function &F);
1472
1473bool getHasColorExport(const Function &F);
1474
1475bool getHasDepthExport(const Function &F);
1476
1477bool hasDynamicVGPR(const Function &F);
1478
1479// Returns the value of the "amdgpu-dynamic-vgpr-block-size" attribute, or 0 if
1480// the attribute is missing or its value is invalid.
1481unsigned getDynamicVGPRBlockSize(const Function &F);
1482
1483LLVM_READNONE
1484constexpr bool isShader(CallingConv::ID CC) {
1485 switch (CC) {
1486 case CallingConv::AMDGPU_VS:
1487 case CallingConv::AMDGPU_LS:
1488 case CallingConv::AMDGPU_HS:
1489 case CallingConv::AMDGPU_ES:
1490 case CallingConv::AMDGPU_GS:
1491 case CallingConv::AMDGPU_PS:
1492 case CallingConv::AMDGPU_CS_Chain:
1493 case CallingConv::AMDGPU_CS_ChainPreserve:
1494 case CallingConv::AMDGPU_CS:
1495 return true;
1496 default:
1497 return false;
1498 }
1499}
1500
1501LLVM_READNONE
1502constexpr bool isGraphics(CallingConv::ID CC) {
1503 return isShader(CC) || CC == CallingConv::AMDGPU_Gfx ||
1504 CC == CallingConv::AMDGPU_Gfx_WholeWave;
1505}
1506
1507LLVM_READNONE
1508constexpr bool isCompute(CallingConv::ID CC) {
1509 return !isGraphics(CC) || CC == CallingConv::AMDGPU_CS;
1510}
1511
1512LLVM_READNONE
1513constexpr bool isEntryFunctionCC(CallingConv::ID CC) {
1514 switch (CC) {
1515 case CallingConv::AMDGPU_KERNEL:
1516 case CallingConv::SPIR_KERNEL:
1517 case CallingConv::AMDGPU_VS:
1518 case CallingConv::AMDGPU_GS:
1519 case CallingConv::AMDGPU_PS:
1520 case CallingConv::AMDGPU_CS:
1521 case CallingConv::AMDGPU_ES:
1522 case CallingConv::AMDGPU_HS:
1523 case CallingConv::AMDGPU_LS:
1524 return true;
1525 default:
1526 return false;
1527 }
1528}
1529
1530LLVM_READNONE
1531constexpr bool isChainCC(CallingConv::ID CC) {
1532 switch (CC) {
1533 case CallingConv::AMDGPU_CS_Chain:
1534 case CallingConv::AMDGPU_CS_ChainPreserve:
1535 return true;
1536 default:
1537 return false;
1538 }
1539}
1540
1541// These functions are considered entrypoints into the current module, i.e. they
1542// are allowed to be called from outside the current module. This is different
1543// from isEntryFunctionCC, which is only true for functions that are entered by
1544// the hardware. Module entry points include all entry functions but also
1545// include functions that can be called from other functions inside or outside
1546// the current module. Module entry functions are allowed to allocate LDS.
1547LLVM_READNONE
1548constexpr bool isModuleEntryFunctionCC(CallingConv::ID CC) {
1549 switch (CC) {
1550 case CallingConv::AMDGPU_Gfx:
1551 return true;
1552 default:
1553 return isEntryFunctionCC(CC) || isChainCC(CC);
1554 }
1555}
1556
1557LLVM_READNONE
1558constexpr inline bool isKernel(CallingConv::ID CC) {
1559 switch (CC) {
1560 case CallingConv::AMDGPU_KERNEL:
1561 case CallingConv::SPIR_KERNEL:
1562 return true;
1563 default:
1564 return false;
1565 }
1566}
1567
1568inline bool isKernel(const Function &F) { return isKernel(CC: F.getCallingConv()); }
1569
1570LLVM_READNONE
1571constexpr bool canGuaranteeTCO(CallingConv::ID CC) {
1572 return CC == CallingConv::Fast;
1573}
1574
1575/// Return true if we might ever do TCO for calls with this calling convention.
1576LLVM_READNONE
1577constexpr bool mayTailCallThisCC(CallingConv::ID CC) {
1578 switch (CC) {
1579 case CallingConv::C:
1580 case CallingConv::AMDGPU_Gfx:
1581 case CallingConv::AMDGPU_Gfx_WholeWave:
1582 return true;
1583 default:
1584 return canGuaranteeTCO(CC);
1585 }
1586}
1587
1588bool hasXNACK(const MCSubtargetInfo &STI);
1589bool hasSRAMECC(const MCSubtargetInfo &STI);
1590bool hasMIMG_R128(const MCSubtargetInfo &STI);
1591bool hasA16(const MCSubtargetInfo &STI);
1592bool hasG16(const MCSubtargetInfo &STI);
1593bool hasPackedD16(const MCSubtargetInfo &STI);
1594bool hasGDS(const MCSubtargetInfo &STI);
1595unsigned getNSAMaxSize(const MCSubtargetInfo &STI, bool HasSampler = false);
1596unsigned getMaxNumUserSGPRs(const MCSubtargetInfo &STI);
1597
1598bool isSI(const MCSubtargetInfo &STI);
1599bool isCI(const MCSubtargetInfo &STI);
1600bool isVI(const MCSubtargetInfo &STI);
1601bool isGFX9(const MCSubtargetInfo &STI);
1602bool isGFX9_GFX10(const MCSubtargetInfo &STI);
1603bool isGFX9_GFX10_GFX11(const MCSubtargetInfo &STI);
1604bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI);
1605bool isGFX8Plus(const MCSubtargetInfo &STI);
1606bool isGFX9Plus(const MCSubtargetInfo &STI);
1607bool isNotGFX9Plus(const MCSubtargetInfo &STI);
1608bool isGFX10(const MCSubtargetInfo &STI);
1609bool isGFX10_GFX11(const MCSubtargetInfo &STI);
1610bool isGFX10Plus(const MCSubtargetInfo &STI);
1611bool isNotGFX10Plus(const MCSubtargetInfo &STI);
1612bool isGFX10Before1030(const MCSubtargetInfo &STI);
1613bool isGFX11(const MCSubtargetInfo &STI);
1614bool isGFX11Plus(const MCSubtargetInfo &STI);
1615bool isGFX12(const MCSubtargetInfo &STI);
1616bool isGFX12Plus(const MCSubtargetInfo &STI);
1617bool isGFX1250(const MCSubtargetInfo &STI);
1618bool isGFX1250Plus(const MCSubtargetInfo &STI);
1619bool isGFX13(const MCSubtargetInfo &STI);
1620bool isGFX13Plus(const MCSubtargetInfo &STI);
1621bool supportsWGP(const MCSubtargetInfo &STI);
1622bool isNotGFX12Plus(const MCSubtargetInfo &STI);
1623bool isNotGFX11Plus(const MCSubtargetInfo &STI);
1624bool isGCN3Encoding(const MCSubtargetInfo &STI);
1625bool isGFX10_AEncoding(const MCSubtargetInfo &STI);
1626bool isGFX10_BEncoding(const MCSubtargetInfo &STI);
1627bool hasGFX10_3Insts(const MCSubtargetInfo &STI);
1628bool isGFX10_3_GFX11(const MCSubtargetInfo &STI);
1629bool isGFX90A(const MCSubtargetInfo &STI);
1630bool isGFX940(const MCSubtargetInfo &STI);
1631bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI);
1632bool hasMAIInsts(const MCSubtargetInfo &STI);
1633bool hasVOPD(const MCSubtargetInfo &STI);
1634bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI);
1635
1636inline bool supportsWave32(const MCSubtargetInfo &STI) {
1637 return AMDGPU::isGFX10Plus(STI) && !AMDGPU::isGFX1250(STI);
1638}
1639
1640int getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR);
1641unsigned hasKernargPreload(const MCSubtargetInfo &STI);
1642bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST);
1643
1644/// Is Reg - scalar register
1645bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI);
1646
1647/// \returns if \p Reg occupies the high 16-bits of a 32-bit register.
1648bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI);
1649
1650/// If \p Reg is a pseudo reg, return the correct hardware register given
1651/// \p STI otherwise return \p Reg.
1652MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI);
1653
1654/// Convert hardware register \p Reg to a pseudo register
1655LLVM_READNONE
1656MCRegister mc2PseudoReg(MCRegister Reg);
1657
1658LLVM_READNONE
1659bool isInlineValue(MCRegister Reg);
1660
1661/// Is this an AMDGPU specific source operand? These include registers,
1662/// inline constants, literals and mandatory literals (KImm).
1663constexpr bool isSISrcOperand(const MCOperandInfo &OpInfo) {
1664 return OpInfo.OperandType >= AMDGPU::OPERAND_SRC_FIRST &&
1665 OpInfo.OperandType <= AMDGPU::OPERAND_SRC_LAST;
1666}
1667
1668inline bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1669 return isSISrcOperand(OpInfo: Desc.operands()[OpNo]);
1670}
1671
1672/// Is this a KImm operand?
1673bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo);
1674
1675/// Is this floating-point operand?
1676bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo);
1677
1678/// Does this operand support only inlinable literals?
1679bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo);
1680
1681/// Get the size in bits of a register from the register class \p RC.
1682unsigned getRegBitWidth(unsigned RCID);
1683
1684/// Get the size in bits of a register from the register class \p RC.
1685unsigned getRegBitWidth(const MCRegisterClass &RC);
1686
/// \returns the encoded immediate size in bytes for operand kind \p OpInfo:
/// 4 for 32-bit kinds, 8 for 64-bit kinds, 2 for 16-bit kinds.
LLVM_READNONE
inline unsigned getOperandSize(const MCOperandInfo &OpInfo) {
  switch (OpInfo.OperandType) {
  // 32-bit operand kinds.
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_KIMM32:
  case AMDGPU::OPERAND_KIMM16: // mandatory literal is always size 4
  case AMDGPU::OPERAND_INLINE_SPLIT_BARRIER_INT32:
    return 4;

  // 64-bit operand kinds.
  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
  case AMDGPU::OPERAND_KIMM64:
    return 8;

  // 16-bit operand kinds, including packed 2x16 vectors.
  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_BF16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_BF16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2BF16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16_SPLAT:
  case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
    return 2;

  default:
    llvm_unreachable("unhandled operand type");
  }
}
1731
1732LLVM_READNONE
1733inline unsigned getOperandSize(const MCInstrDesc &Desc, unsigned OpNo) {
1734 return getOperandSize(OpInfo: Desc.operands()[OpNo]);
1735}
1736
1737/// Is this literal inlinable, and not one of the values intended for floating
1738/// point values.
1739LLVM_READNONE
inline bool isInlinableIntLiteral(int64_t Literal) {
  // Integer inline constants are exactly the values in [-16, 64].
  return -16 <= Literal && Literal <= 64;
}
1743
/// Is this literal inlinable
LLVM_READNONE
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi);

/// \returns true if \p Literal is inlinable as a 32-bit operand. \p HasInv2Pi
/// presumably indicates the subtarget provides the inline 1/(2*pi) constant —
/// confirm against the implementation.
LLVM_READNONE
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi);

/// \returns true if \p Literal is inlinable as a bf16 operand.
LLVM_READNONE
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi);

/// \returns true if \p Literal is inlinable as an fp16 operand.
LLVM_READNONE
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi);

/// \returns true if \p Literal is inlinable as a 16-bit integer operand.
LLVM_READNONE
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi);

/// \returns the inline encoding of \p Literal interpreted as packed v2i16,
/// or std::nullopt if it has no inline encoding.
LLVM_READNONE
std::optional<unsigned> getInlineEncodingV2I16(uint32_t Literal);

/// \returns the inline encoding of \p Literal interpreted as packed v2bf16,
/// or std::nullopt if it has no inline encoding.
LLVM_READNONE
std::optional<unsigned> getInlineEncodingV2BF16(uint32_t Literal);

/// \returns the inline encoding of \p Literal interpreted as packed v2f16,
/// or std::nullopt if it has no inline encoding.
LLVM_READNONE
std::optional<unsigned> getInlineEncodingV2F16(uint32_t Literal);

/// \returns the inline encoding of \p Literal for PK_FMAC_F16, or std::nullopt
/// if it has none. \p IsGFX11Plus selects generation-specific encoding rules.
LLVM_READNONE
std::optional<unsigned> getPKFMACF16InlineEncoding(uint32_t Literal,
                                                   bool IsGFX11Plus);

/// \returns true if \p Literal is inlinable for the packed 16-bit operand
/// kind \p OpType.
LLVM_READNONE
bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType);

/// \returns true if \p Literal is inlinable as packed v2i16.
LLVM_READNONE
bool isInlinableLiteralV2I16(uint32_t Literal);

/// \returns true if \p Literal is inlinable as packed v2bf16.
LLVM_READNONE
bool isInlinableLiteralV2BF16(uint32_t Literal);

/// \returns true if \p Literal is inlinable as packed v2f16.
LLVM_READNONE
bool isInlinableLiteralV2F16(uint32_t Literal);

/// \returns true if \p Literal is an inline constant for PK_FMAC_F16.
LLVM_READNONE
bool isPKFMACF16InlineConstant(uint32_t Literal, bool IsGFX11Plus);

/// \returns true if \p Val can be encoded as a 32-bit literal. \p IsFP64 is
/// true when the value is used as an fp64 operand.
LLVM_READNONE
bool isValid32BitLiteral(uint64_t Val, bool IsFP64);

/// \returns the encoded 32-bit literal value of \p Imm for an operand of type
/// \p Type. NOTE(review): \p IsLit presumably marks an explicit 'lit' modifier
/// — confirm against the implementation.
LLVM_READNONE
int64_t encode32BitLiteral(int64_t Imm, OperandType Type, bool IsLit);

/// \returns true if kernel argument \p Arg is passed in an SGPR.
bool isArgPassedInSGPR(const Argument *Arg);

/// \returns true if argument \p ArgNo of call \p CB is passed in an SGPR.
bool isArgPassedInSGPR(const CallBase *CB, unsigned ArgNo);

/// \returns true if \p Opc is a packed-FP32 instruction.
LLVM_READONLY bool isPackedFP32Inst(unsigned Opc);

/// \returns true if \p EncodedOffset is legal as an unsigned SMRD immediate
/// offset on subtarget \p ST.
LLVM_READONLY
bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
                                      int64_t EncodedOffset);

/// \returns true if \p EncodedOffset is legal as a signed SMRD immediate
/// offset on subtarget \p ST. \p IsBuffer is true for S_BUFFER forms.
LLVM_READONLY
bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
                                    int64_t EncodedOffset, bool IsBuffer);

/// Convert \p ByteOffset to dwords if the subtarget uses dword SMRD immediate
/// offsets.
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset);

/// \returns The encoding that will be used for \p ByteOffset in the
/// SMRD offset field, or std::nullopt if it won't fit. On GFX9 and GFX10
/// S_LOAD instructions have a signed offset, on other subtargets it is
/// unsigned. S_BUFFER has an unsigned offset for all subtargets.
std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
                                            int64_t ByteOffset, bool IsBuffer,
                                            bool HasSOffset = false);

/// \return The encoding that can be used for a 32-bit literal offset in an SMRD
/// instruction. This is only useful on CI.
std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
                                                     int64_t ByteOffset);

/// For pre-GFX12 FLAT instructions the offset must be positive;
/// MSB is ignored and forced to zero.
///
/// \return The number of bits available for the signed offset field in flat
/// instructions. Note that some forms of the instruction disallow negative
/// offsets.
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST);

/// \returns true if this offset is small enough to fit in the SMRD
/// offset field. \p ByteOffset should be the offset in bytes and
/// not the encoded offset.
bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset);
1837
1838LLVM_READNONE
1839inline bool isLegalDPALU_DPPControl(const MCSubtargetInfo &ST, unsigned DC) {
1840 if (isGFX12(STI: ST))
1841 return DC >= DPP::ROW_SHARE_FIRST && DC <= DPP::ROW_SHARE_LAST;
1842 if (isGFX90A(STI: ST))
1843 return DC >= DPP::ROW_NEWBCAST_FIRST && DC <= DPP::ROW_NEWBCAST_LAST;
1844 return false;
1845}
1846
/// \returns true if an instruction may have a 64-bit VGPR operand.
bool hasAny64BitVGPROperands(const MCInstrDesc &OpDesc,
                             const MCSubtargetInfo &ST);

/// \returns true if an instruction is a DP ALU DPP without any 64-bit operands.
bool isDPALU_DPP32BitOpc(unsigned Opc);

/// \returns true if an instruction is a DP ALU DPP.
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII,
                 const MCSubtargetInfo &ST);

/// \returns true if the intrinsic is divergent
bool isIntrinsicSourceOfDivergence(unsigned IntrID);

/// \returns true if the intrinsic is uniform
bool isIntrinsicAlwaysUniform(unsigned IntrID);

/// \returns a register class for the physical register \p Reg if it is a VGPR
/// or nullptr otherwise.
const MCRegisterClass *getVGPRPhysRegClass(MCRegister Reg,
                                           const MCRegisterInfo &MRI);

/// \returns the MODE bits which have to be set by the S_SET_VGPR_MSB for the
/// physical register \p Reg.
unsigned getVGPREncodingMSBs(MCRegister Reg, const MCRegisterInfo &MRI);

/// If \p Reg is a low VGPR return a corresponding high VGPR with \p MSBs set.
MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs,
                           const MCRegisterInfo &MRI);

// Returns a table for the opcode with a given \p Desc to map the VGPR MSB
// set by the S_SET_VGPR_MSB to one of 4 sources. In case of VOPD returns 2
// maps, one for X and one for Y component.
std::pair<const AMDGPU::OpName *, const AMDGPU::OpName *>
getVGPRLoweringOperandTables(const MCInstrDesc &Desc);

/// \returns true if a memory instruction supports scale_offset modifier.
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode);

/// \returns lds block size in terms of dwords.
/// This is used to calculate the lds size encoded for PAL metadata 3.0+ which
/// must be defined in terms of bytes.
unsigned getLdsDwGranularity(const MCSubtargetInfo &ST);
1890
1891class ClusterDimsAttr {
1892public:
1893 enum class Kind { Unknown, NoCluster, VariableDims, FixedDims };
1894
1895 ClusterDimsAttr() = default;
1896
1897 Kind getKind() const { return AttrKind; }
1898
1899 bool isUnknown() const { return getKind() == Kind::Unknown; }
1900
1901 bool isNoCluster() const { return getKind() == Kind::NoCluster; }
1902
1903 bool isFixedDims() const { return getKind() == Kind::FixedDims; }
1904
1905 bool isVariableDims() const { return getKind() == Kind::VariableDims; }
1906
1907 void setUnknown() { *this = ClusterDimsAttr(Kind::Unknown); }
1908
1909 void setNoCluster() { *this = ClusterDimsAttr(Kind::NoCluster); }
1910
1911 void setVariableDims() { *this = ClusterDimsAttr(Kind::VariableDims); }
1912
1913 /// \returns the dims stored. Note that this function can only be called if
1914 /// the kind is \p Fixed.
1915 const std::array<unsigned, 3> &getDims() const;
1916
1917 bool operator==(const ClusterDimsAttr &RHS) const {
1918 return AttrKind == RHS.AttrKind && Dims == RHS.Dims;
1919 }
1920
1921 std::string to_string() const;
1922
1923 static ClusterDimsAttr get(const Function &F);
1924
1925private:
1926 enum Encoding { EncoNoCluster = 0, EncoVariableDims = 1024 };
1927
1928 ClusterDimsAttr(Kind AttrKind) : AttrKind(AttrKind) {}
1929
1930 std::array<unsigned, 3> Dims = {0, 0, 0};
1931
1932 Kind AttrKind = Kind::Unknown;
1933};
1934
1935} // end namespace AMDGPU
1936
1937raw_ostream &operator<<(raw_ostream &OS,
1938 const AMDGPU::IsaInfo::TargetIDSetting S);
1939
1940} // end namespace llvm
1941
1942#endif // LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPUBASEINFO_H
1943