//===-- AMDGPULowerKernelArguments.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass replaces accesses to kernel arguments with loads from
/// offsets from the kernarg base pointer.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {

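// Bookkeeping for kernel-argument preloading: tracks how many user SGPRs
// remain free to hold arguments loaded directly into registers.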
class PreloadKernelArgInfo {
private:
  Function &F;
  const GCNSubtarget &ST;
  unsigned NumFreeUserSGPRs;

public:
  SmallVector<llvm::Metadata *, 8> KernelArgMetadata;

  PreloadKernelArgInfo(Function &F, const GCNSubtarget &ST) : F(F), ST(ST) {
    setInitialFreeUserSGPRsCount();
  }

  // Set the initial number of user SGPRs available for preloading arguments:
  // the subtarget maximum minus the user SGPRs already in use.
  void setInitialFreeUserSGPRsCount() {
    const unsigned MaxUserSGPRs = ST.getMaxNumUserSGPRs();
    GCNUserSGPRUsageInfo UserSGPRInfo(F, ST);

    NumFreeUserSGPRs = MaxUserSGPRs - UserSGPRInfo.getNumUsedUserSGPRs();
  }

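  // Try to reserve the user SGPRs needed to preload an argument of AllocSize
  // bytes at ArgOffset, including any padding SGPRs needed to keep kernarg
  // alignment. Returns true if the argument can be preloaded, which includes
  // the case where a misaligned sub-dword argument packs into the previous
  // argument's register.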
  bool tryAllocPreloadSGPRs(unsigned AllocSize, uint64_t ArgOffset,
                            uint64_t LastExplicitArgOffset) {
    // Check if this argument may be loaded into the same register as the
    // previous argument.
    if (!isAligned(Align(4), ArgOffset) && AllocSize < 4)
      return true;

    // Pad SGPRs for kernarg alignment.
    unsigned Padding = ArgOffset - LastExplicitArgOffset;
    unsigned PaddingSGPRs = alignTo(Padding, 4) / 4;
    unsigned NumPreloadSGPRs = alignTo(AllocSize, 4) / 4;
    if (NumPreloadSGPRs + PaddingSGPRs > NumFreeUserSGPRs)
      return false;

    NumFreeUserSGPRs -= (NumPreloadSGPRs + PaddingSGPRs);
    return true;
  }
};

class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

// Skip over static allocas when choosing the insertion point for the kernarg
// loads.
static BasicBlock::iterator getInsertPt(BasicBlock &BB) {
  BasicBlock::iterator InsPt = BB.getFirstInsertionPt();
  for (BasicBlock::iterator E = BB.end(); InsPt != E; ++InsPt) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&*InsPt);

    // If this is a dynamic alloca, the value may depend on the loaded kernargs,
    // so loads will need to be inserted before it.
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  return InsPt;
}

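// Rewrite each kernel argument: materialize the kernarg segment pointer once,
// then replace argument uses with loads at the argument's offset (or skip the
// load entirely for arguments that can be preloaded into user SGPRs).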
static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getParent()->getContext();
  const DataLayout &DL = F.getDataLayout();
  BasicBlock &EntryBlock = *F.begin();
  IRBuilder<> Builder(&EntryBlock, getInsertPt(EntryBlock));

  const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();

  Align MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

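  // Materialize the kernarg segment base pointer once at the entry block's
  // insertion point; every argument load is a fixed offset from it.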
  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {},
                              nullptr, F.getName() + ".kernarg.segment");
  KernArgSegment->addRetAttr(Attribute::NonNull);
  KernArgSegment->addRetAttr(
      Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));

  uint64_t ExplicitArgOffset = 0;
  // Preloaded kernel arguments must be sequential.
  bool InPreloadSequence = true;
  PreloadKernelArgInfo PreloadInfo(F, ST);

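  // Walk the explicit arguments in ABI order, tracking each one's offset
  // within the kernarg segment.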
  for (Argument &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABITypeAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t Size = DL.getTypeSizeInBits(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);

    uint64_t EltOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + BaseOffset;
    uint64_t LastExplicitArgOffset = ExplicitArgOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;

    // Try to preload this argument into user SGPRs.
    if (Arg.hasInRegAttr() && InPreloadSequence && ST.hasKernargPreload() &&
        !Arg.getType()->isAggregateType())
      if (PreloadInfo.tryAllocPreloadSGPRs(AllocSize, EltOffset,
                                           LastExplicitArgOffset))
        continue;

    InPreloadSequence = false;

    if (Arg.use_empty())
      continue;

    // If this is byref, the loads are already explicit in the function. We
    // just need to rewrite the pointer values.
    if (IsByRef) {
      Value *ArgOffsetPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".byval.kernarg.offset");

      Value *CastOffsetPtr =
          Builder.CreateAddrSpaceCast(ArgOffsetPtr, Arg.getType());
      Arg.replaceAllUsesWith(CastOffsetPtr);
      continue;
    }

    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if ((PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
           PT->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) &&
          !ST.hasUsableDSOffset())
        continue;

      // FIXME: We can replace this with equivalent alias.scope/noalias
      // metadata, but this appears to be a lot of work.
      if (Arg.hasNoAliasAttr())
        continue;
    }

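    // Decide how to load this argument: sub-dword, non-aggregate values are
    // loaded as an aligned i32 and extracted; <3 x T> vectors of dword size
    // or larger are loaded as <4 x T> and shuffled back down.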
    auto *VT = dyn_cast<FixedVectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    Align AdjustedAlign = commonAlignment(
        KernArgBaseAlign, DoShiftOpt ? AlignDownOffset : EltOffset);

    Value *ArgPtr;
    Type *AdjustedArgTy;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      // TODO: Update this for GFX12 which does have scalar sub-dword loads.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
          Arg.getName() + ".kernarg.offset.align.down");
      AdjustedArgTy = Builder.getInt32Ty();
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".kernarg.offset");
      AdjustedArgTy = ArgTy;
    }

    if (IsV3 && Size >= 32) {
      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads.
      AdjustedArgTy = V4Ty;
    }

    LoadInst *Load =
        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

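    // Carry pointer-argument attributes over to the load as metadata, since
    // the information would otherwise be lost once the argument is dead.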
    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
            LLVMContext::MD_dereferenceable,
            MDNode::get(Ctx,
                        MDB.createConstant(
                            ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
            LLVMContext::MD_dereferenceable_or_null,
            MDNode::get(Ctx,
                        MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                            DerefOrNullBytes))));
      }

      if (MaybeAlign ParamAlign = Arg.getParamAlign()) {
        Load->setMetadata(
            LLVMContext::MD_align,
            MDNode::get(Ctx, MDB.createConstant(ConstantInt::get(
                                 Builder.getInt64Ty(), ParamAlign->value()))));
      }
    }

    // TODO: Convert noalias arg to !noalias

    if (DoShiftOpt) {
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

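  // Record the best known alignment of the kernarg segment pointer as a
  // return attribute on the intrinsic call.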
  KernArgSegment->addRetAttr(
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}

bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
  auto &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  return lowerKernelArguments(F, TM);
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}

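// New pass manager entry point. TM here is the TargetMachine member captured
// when the pass is constructed (declared with the pass in AMDGPU.h).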
PreservedAnalyses
AMDGPULowerKernelArgumentsPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = lowerKernelArguments(F, TM);
  if (Changed) {
    // TODO: Preserves a lot more.
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }

  return PreservedAnalyses::all();
}