//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPU.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/NativeFormatting.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-argument-reg-usage-info"

INITIALIZE_PASS(AMDGPUArgumentUsageInfo, DEBUG_TYPE,
                "Argument Register Usage Information Storage", false, true)

void ArgDescriptor::print(raw_ostream &OS,
                          const TargetRegisterInfo *TRI) const {
  if (!isSet()) {
    OS << "<not set>\n";
    return;
  }

  if (isRegister())
    OS << "Reg " << printReg(getRegister(), TRI);
  else
    OS << "Stack offset " << getStackOffset();

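  // A mask on the descriptor means the value occupies only the bits of the
  // register selected by Mask (e.g. the packed work item IDs in v31 set up
  // by fixedABILayout() below).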
  if (isMasked()) {
    OS << " & ";
    llvm::write_hex(OS, Mask, llvm::HexPrintStyle::PrefixLower);
  }

  OS << '\n';
}

char AMDGPUArgumentUsageInfo::ID = 0;

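// Default (empty) argument info, used for external functions whose argument
// register usage is not tracked.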
const AMDGPUFunctionArgInfo AMDGPUArgumentUsageInfo::ExternFunctionInfo{};

// Hardcoded registers from fixed function ABI
const AMDGPUFunctionArgInfo AMDGPUArgumentUsageInfo::FixedABIFunctionInfo
    = AMDGPUFunctionArgInfo::fixedABILayout();

bool AMDGPUArgumentUsageInfo::doInitialization(Module &M) {
  return false;
}

bool AMDGPUArgumentUsageInfo::doFinalization(Module &M) {
  ArgInfoMap.clear();
  return false;
}

// TODO: Print preload kernargs?
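// Each ArgDescriptor is streamed via ArgDescriptor::print, which ends its
// output with a newline, so every field below appears on its own line.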
void AMDGPUArgumentUsageInfo::print(raw_ostream &OS, const Module *M) const {
  for (const auto &FI : ArgInfoMap) {
    OS << "Arguments for " << FI.first->getName() << '\n'
       << "  PrivateSegmentBuffer: " << FI.second.PrivateSegmentBuffer
       << "  DispatchPtr: " << FI.second.DispatchPtr
       << "  QueuePtr: " << FI.second.QueuePtr
       << "  KernargSegmentPtr: " << FI.second.KernargSegmentPtr
       << "  DispatchID: " << FI.second.DispatchID
       << "  FlatScratchInit: " << FI.second.FlatScratchInit
       << "  PrivateSegmentSize: " << FI.second.PrivateSegmentSize
       << "  WorkGroupIDX: " << FI.second.WorkGroupIDX
       << "  WorkGroupIDY: " << FI.second.WorkGroupIDY
       << "  WorkGroupIDZ: " << FI.second.WorkGroupIDZ
       << "  WorkGroupInfo: " << FI.second.WorkGroupInfo
       << "  LDSKernelId: " << FI.second.LDSKernelId
       << "  PrivateSegmentWaveByteOffset: "
       << FI.second.PrivateSegmentWaveByteOffset
       << "  ImplicitBufferPtr: " << FI.second.ImplicitBufferPtr
       << "  ImplicitArgPtr: " << FI.second.ImplicitArgPtr
       << "  WorkItemIDX " << FI.second.WorkItemIDX
       << "  WorkItemIDY " << FI.second.WorkItemIDY
       << "  WorkItemIDZ " << FI.second.WorkItemIDZ
       << '\n';
  }
}

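// Map a preloaded value kind to its argument descriptor (null if the value is
// not set for this function), the register class used to pass it, and its
// low-level type.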
std::tuple<const ArgDescriptor *, const TargetRegisterClass *, LLT>
AMDGPUFunctionArgInfo::getPreloadedValue(
    AMDGPUFunctionArgInfo::PreloadedValue Value) const {
  switch (Value) {
  case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER: {
    return std::tuple(PrivateSegmentBuffer ? &PrivateSegmentBuffer : nullptr,
                      &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32));
  }
  case AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR:
    return std::tuple(ImplicitBufferPtr ? &ImplicitBufferPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_X:
    return std::tuple(WorkGroupIDX ? &WorkGroupIDX : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_Y:
    return std::tuple(WorkGroupIDY ? &WorkGroupIDY : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_Z:
    return std::tuple(WorkGroupIDZ ? &WorkGroupIDZ : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::LDS_KERNEL_ID:
    return std::tuple(LDSKernelId ? &LDSKernelId : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    return std::tuple(
        PrivateSegmentWaveByteOffset ? &PrivateSegmentWaveByteOffset : nullptr,
        &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_SIZE:
    return std::tuple(PrivateSegmentSize ? &PrivateSegmentSize : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR:
    return std::tuple(KernargSegmentPtr ? &KernargSegmentPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR:
    return std::tuple(ImplicitArgPtr ? &ImplicitArgPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::DISPATCH_ID:
    return std::tuple(DispatchID ? &DispatchID : nullptr,
                      &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
  case AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT:
    return std::tuple(FlatScratchInit ? &FlatScratchInit : nullptr,
                      &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
  case AMDGPUFunctionArgInfo::DISPATCH_PTR:
    return std::tuple(DispatchPtr ? &DispatchPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::QUEUE_PTR:
    return std::tuple(QueuePtr ? &QueuePtr : nullptr, &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_X:
    return std::tuple(WorkItemIDX ? &WorkItemIDX : nullptr,
                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_Y:
    return std::tuple(WorkItemIDY ? &WorkItemIDY : nullptr,
                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_Z:
    return std::tuple(WorkItemIDZ ? &WorkItemIDZ : nullptr,
                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  }
  llvm_unreachable("unexpected preloaded value type");
}

AMDGPUFunctionArgInfo AMDGPUFunctionArgInfo::fixedABILayout() {
  AMDGPUFunctionArgInfo AI;
  AI.PrivateSegmentBuffer
      = ArgDescriptor::createRegister(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3);
  AI.DispatchPtr = ArgDescriptor::createRegister(AMDGPU::SGPR4_SGPR5);
  AI.QueuePtr = ArgDescriptor::createRegister(AMDGPU::SGPR6_SGPR7);

  // Do not pass the kernarg segment pointer; only pass the incremented
  // version (the implicit argument pointer) in its place.
  AI.ImplicitArgPtr = ArgDescriptor::createRegister(AMDGPU::SGPR8_SGPR9);
  AI.DispatchID = ArgDescriptor::createRegister(AMDGPU::SGPR10_SGPR11);

  // Skip FlatScratchInit/PrivateSegmentSize.
  AI.WorkGroupIDX = ArgDescriptor::createRegister(AMDGPU::SGPR12);
  AI.WorkGroupIDY = ArgDescriptor::createRegister(AMDGPU::SGPR13);
  AI.WorkGroupIDZ = ArgDescriptor::createRegister(AMDGPU::SGPR14);
  AI.LDSKernelId = ArgDescriptor::createRegister(AMDGPU::SGPR15);

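  // All three work item IDs are packed into a single VGPR (v31): X in bits
  // [9:0], Y in bits [19:10], Z in bits [29:20]. Each mask selects the 10-bit
  // field holding that ID.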
  const unsigned Mask = 0x3ff;
  AI.WorkItemIDX = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask);
  AI.WorkItemIDY = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask << 10);
  AI.WorkItemIDZ = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask << 20);
  return AI;
}

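// Functions with no recorded argument info fall back to the fixed ABI layout.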
const AMDGPUFunctionArgInfo &
AMDGPUArgumentUsageInfo::lookupFuncArgInfo(const Function &F) const {
  auto I = ArgInfoMap.find(&F);
  if (I == ArgInfoMap.end())
    return FixedABIFunctionInfo;
  return I->second;
}