//===-- SIProgramInfo.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// The SIProgramInfo tracks resource usage and hardware flags for kernels and
/// entry functions.
//
//===----------------------------------------------------------------------===//
//

#include "SIProgramInfo.h"
#include "GCNSubtarget.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/MC/MCExpr.h"

using namespace llvm;

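// Reset every field to its default. Expression-valued fields are reset to a
// shared constant-zero MCExpr created in MF's MCContext.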
void SIProgramInfo::reset(const MachineFunction &MF) {
  MCContext &Ctx = MF.getContext();

  const MCExpr *ZeroExpr = MCConstantExpr::create(0, Ctx);

  CodeSizeInBytes.reset();

  VGPRBlocks = ZeroExpr;
  SGPRBlocks = ZeroExpr;
  Priority = 0;
  FloatMode = 0;
  Priv = 0;
  DX10Clamp = 0;
  DebugMode = 0;
  IEEEMode = 0;
  WgpMode = 0;
  MemOrdered = 0;
  FwdProgress = 0;
  RrWgMode = 0;
  ScratchSize = ZeroExpr;

  LDSBlocks = 0;
  ScratchBlocks = ZeroExpr;

  ScratchEnable = ZeroExpr;
  UserSGPR = 0;
  TrapHandlerEnable = 0;
  TGIdXEnable = 0;
  TGIdYEnable = 0;
  TGIdZEnable = 0;
  TGSizeEnable = 0;
  TIdIGCompCount = 0;
  EXCPEnMSB = 0;
  LdsSize = 0;
  EXCPEnable = 0;

  ComputePGMRSrc3 = ZeroExpr;

  NumVGPR = ZeroExpr;
  NumArchVGPR = ZeroExpr;
  NumAccVGPR = ZeroExpr;
  AccumOffset = ZeroExpr;
  TgSplit = 0;
  NumSGPR = ZeroExpr;
  SGPRSpill = 0;
  VGPRSpill = 0;
  LDSSize = 0;
  FlatUsed = ZeroExpr;

  NumSGPRsForWavesPerEU = ZeroExpr;
  NumVGPRsForWavesPerEU = ZeroExpr;
  NamedBarCnt = ZeroExpr;
  Occupancy = ZeroExpr;
  DynamicCallStack = ZeroExpr;
  VCCUsed = ZeroExpr;
}

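// Assemble the integer-valued COMPUTE_PGM_RSRC1 fields. The expression-valued
// VGPR/SGPR block counts are OR'd in later by getComputePGMRSrc1().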
static uint64_t getComputePGMRSrc1Reg(const SIProgramInfo &ProgInfo,
                                      const GCNSubtarget &ST) {
  uint64_t Reg = S_00B848_PRIORITY(ProgInfo.Priority) |
                 S_00B848_FLOAT_MODE(ProgInfo.FloatMode) |
                 S_00B848_PRIV(ProgInfo.Priv) |
                 S_00B848_DEBUG_MODE(ProgInfo.DebugMode) |
                 S_00B848_WGP_MODE(ProgInfo.WgpMode) |
                 S_00B848_MEM_ORDERED(ProgInfo.MemOrdered) |
                 S_00B848_FWD_PROGRESS(ProgInfo.FwdProgress);

  if (ST.hasDX10ClampMode())
    Reg |= S_00B848_DX10_CLAMP(ProgInfo.DX10Clamp);

  if (ST.hasIEEEMode())
    Reg |= S_00B848_IEEE_MODE(ProgInfo.IEEEMode);

  if (ST.hasRrWGMode())
    Reg |= S_00B848_RR_WG_MODE(ProgInfo.RrWgMode);

  return Reg;
}

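// Assemble the integer-valued RSRC1 fields for a non-compute shader. The
// WGP_MODE and MEM_ORDERED bits are encoded with the macro matching each
// graphics stage's PGM_RSRC1 register.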
static uint64_t getPGMRSrc1Reg(const SIProgramInfo &ProgInfo,
                               CallingConv::ID CC, const GCNSubtarget &ST) {
  uint64_t Reg = S_00B848_PRIORITY(ProgInfo.Priority) |
                 S_00B848_FLOAT_MODE(ProgInfo.FloatMode) |
                 S_00B848_PRIV(ProgInfo.Priv) |
                 S_00B848_DEBUG_MODE(ProgInfo.DebugMode);

  if (ST.hasDX10ClampMode())
    Reg |= S_00B848_DX10_CLAMP(ProgInfo.DX10Clamp);

  if (ST.hasIEEEMode())
    Reg |= S_00B848_IEEE_MODE(ProgInfo.IEEEMode);

  if (ST.hasRrWGMode())
    Reg |= S_00B848_RR_WG_MODE(ProgInfo.RrWgMode);

  switch (CC) {
  case CallingConv::AMDGPU_PS:
    Reg |= S_00B028_MEM_ORDERED(ProgInfo.MemOrdered);
    break;
  case CallingConv::AMDGPU_VS:
    Reg |= S_00B128_MEM_ORDERED(ProgInfo.MemOrdered);
    break;
  case CallingConv::AMDGPU_GS:
    Reg |= S_00B228_WGP_MODE(ProgInfo.WgpMode) |
           S_00B228_MEM_ORDERED(ProgInfo.MemOrdered);
    break;
  case CallingConv::AMDGPU_HS:
    Reg |= S_00B428_WGP_MODE(ProgInfo.WgpMode) |
           S_00B428_MEM_ORDERED(ProgInfo.MemOrdered);
    break;
  default:
    break;
  }
  return Reg;
}

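// Assemble the integer-valued COMPUTE_PGM_RSRC2 fields. The expression-valued
// SCRATCH_EN bit is OR'd in later by getComputePGMRSrc2().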
static uint64_t getComputePGMRSrc2Reg(const SIProgramInfo &ProgInfo) {
  uint64_t Reg = S_00B84C_USER_SGPR(ProgInfo.UserSGPR) |
                 S_00B84C_TRAP_HANDLER(ProgInfo.TrapHandlerEnable) |
                 S_00B84C_TGID_X_EN(ProgInfo.TGIdXEnable) |
                 S_00B84C_TGID_Y_EN(ProgInfo.TGIdYEnable) |
                 S_00B84C_TGID_Z_EN(ProgInfo.TGIdZEnable) |
                 S_00B84C_TG_SIZE_EN(ProgInfo.TGSizeEnable) |
                 S_00B84C_TIDIG_COMP_CNT(ProgInfo.TIdIGCompCount) |
                 S_00B84C_EXCP_EN_MSB(ProgInfo.EXCPEnMSB) |
                 S_00B84C_LDS_SIZE(ProgInfo.LdsSize) |
                 S_00B84C_EXCP_EN(ProgInfo.EXCPEnable);

  return Reg;
}

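// Return Val masked with Mask and shifted left by Shift, as an MCExpr. A zero
// Mask or Shift is treated as "no masking" / "no shift" and elided.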
static const MCExpr *MaskShift(const MCExpr *Val, uint32_t Mask, uint32_t Shift,
                               MCContext &Ctx) {
  if (Mask) {
    const MCExpr *MaskExpr = MCConstantExpr::create(Mask, Ctx);
    Val = MCBinaryExpr::createAnd(Val, MaskExpr, Ctx);
  }
  if (Shift) {
    const MCExpr *ShiftExpr = MCConstantExpr::create(Shift, Ctx);
    Val = MCBinaryExpr::createShl(Val, ShiftExpr, Ctx);
  }
  return Val;
}

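// Build the complete COMPUTE_PGM_RSRC1 value: the integer fields OR'd with
// VGPR_BLOCKS (bits 5:0) and SGPR_BLOCKS (bits 9:6), both of which may still
// be unresolved expressions at this point.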
const MCExpr *SIProgramInfo::getComputePGMRSrc1(const GCNSubtarget &ST,
                                                MCContext &Ctx) const {
  uint64_t Reg = getComputePGMRSrc1Reg(*this, ST);
  const MCExpr *RegExpr = MCConstantExpr::create(Reg, Ctx);
  const MCExpr *Res = MCBinaryExpr::createOr(
      MaskShift(VGPRBlocks, /*Mask=*/0x3F, /*Shift=*/0, Ctx),
      MaskShift(SGPRBlocks, /*Mask=*/0xF, /*Shift=*/6, Ctx), Ctx);
  return MCBinaryExpr::createOr(RegExpr, Res, Ctx);
}

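// As getComputePGMRSrc1(), but for any calling convention: compute
// conventions are forwarded, graphics conventions use the stage-specific
// RSRC1 encoding.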
const MCExpr *SIProgramInfo::getPGMRSrc1(CallingConv::ID CC,
                                         const GCNSubtarget &ST,
                                         MCContext &Ctx) const {
  if (AMDGPU::isCompute(CC)) {
    return getComputePGMRSrc1(ST, Ctx);
  }

  uint64_t Reg = getPGMRSrc1Reg(*this, CC, ST);
  const MCExpr *RegExpr = MCConstantExpr::create(Reg, Ctx);
  const MCExpr *Res = MCBinaryExpr::createOr(
      MaskShift(VGPRBlocks, /*Mask=*/0x3F, /*Shift=*/0, Ctx),
      MaskShift(SGPRBlocks, /*Mask=*/0xF, /*Shift=*/6, Ctx), Ctx);
  return MCBinaryExpr::createOr(RegExpr, Res, Ctx);
}

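// Build the complete COMPUTE_PGM_RSRC2 value by OR'ing the (possibly
// unresolved) SCRATCH_EN bit into the integer fields.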
const MCExpr *SIProgramInfo::getComputePGMRSrc2(MCContext &Ctx) const {
  uint64_t Reg = getComputePGMRSrc2Reg(*this);
  const MCExpr *RegExpr = MCConstantExpr::create(Reg, Ctx);
  return MCBinaryExpr::createOr(ScratchEnable, RegExpr, Ctx);
}

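// Only compute calling conventions carry a meaningful RSRC2 value here;
// everything else gets a zero expression.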
const MCExpr *SIProgramInfo::getPGMRSrc2(CallingConv::ID CC,
                                         MCContext &Ctx) const {
  if (AMDGPU::isCompute(CC))
    return getComputePGMRSrc2(Ctx);

  return MCConstantExpr::create(0, Ctx);
}

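// Compute the size of the function's code in bytes, caching the result. With
// IsLowerBound set, alignment padding and inline asm are excluded so the
// result never overestimates; the cache is only consulted for the default
// (estimating) query.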
uint64_t SIProgramInfo::getFunctionCodeSize(const MachineFunction &MF,
                                            bool IsLowerBound) {
  if (!IsLowerBound && CodeSizeInBytes.has_value())
    return *CodeSizeInBytes;

  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = STM.getInstrInfo();

  uint64_t CodeSize = 0;

  for (const MachineBasicBlock &MBB : MF) {
    // The amount of padding needed to align code can be both underestimated
    // and overestimated. Moreover, for inline asm getInstSizeInBytes()
    // returns the maximum possible size of a single instruction, which may
    // differ from the actual size, so CodeSize may already be off by this
    // point.
    if (!IsLowerBound)
      CodeSize = alignTo(CodeSize, MBB.getAlignment());

    for (const MachineInstr &MI : MBB) {
      // TODO: CodeSize should account for multiple functions.

      if (MI.isMetaInstruction())
        continue;

      // We cannot properly estimate inline asm size: it can be as small as
      // zero bytes if the asm is just a comment, so skip it when computing a
      // lower bound.
      if (IsLowerBound && MI.isInlineAsm())
        continue;

      CodeSize += TII->getInstSizeInBytes(MI);
    }
  }

  CodeSizeInBytes = CodeSize;
  return CodeSize;
}