//===-- AMDGPULowerKernelArguments.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass replaces accesses to kernel arguments with loads from
/// offsets from the kernarg base pointer.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUAsanInstrumentation.h"
#include "GCNSubtarget.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Target/TargetMachine.h"
#include <optional>
#include <string>

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {

class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

// Skip over any static allocas at the start of the entry block; kernarg loads
// are inserted after them.
static BasicBlock::iterator getInsertPt(BasicBlock &BB) {
  BasicBlock::iterator InsPt = BB.getFirstInsertionPt();
  for (BasicBlock::iterator E = BB.end(); InsPt != E; ++InsPt) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&*InsPt);

    // If this is a dynamic alloca, the value may depend on the loaded
    // kernargs, so loads will need to be inserted before it.
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  return InsPt;
}

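// Add alias.scope and noalias metadata to memory accesses based on the
// function's noalias kernel arguments, similar to what the inliner does for
// noalias call-site arguments. This keeps the aliasing information usable
// after the arguments themselves are rewritten into kernarg loads.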
static void addAliasScopeMetadata(Function &F, const DataLayout &DL,
                                  DominatorTree &DT) {
  // Collect noalias arguments.
  SmallVector<const Argument *, 4u> NoAliasArgs;

  for (Argument &Arg : F.args())
    if (Arg.hasNoAliasAttr() && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // Add alias scopes for each noalias argument.
  MDBuilder MDB(F.getContext());
  DenseMap<const Argument *, MDNode *> NewScopes;
  MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain(F.getName());

  for (unsigned I = 0u; I < NoAliasArgs.size(); ++I) {
    const Argument *Arg = NoAliasArgs[I];
    MDNode *NewScope =
        MDB.createAnonymousAliasScope(NewDomain, Arg->getName());
    NewScopes.insert({Arg, NewScope});
  }

  // Iterate over all instructions.
  for (inst_iterator Inst = inst_begin(F), InstEnd = inst_end(F);
       Inst != InstEnd; ++Inst) {
    // If instruction accesses memory, collect its pointer arguments.
    Instruction *I = &(*Inst);
    SmallVector<const Value *, 2u> PtrArgs;

    if (std::optional<MemoryLocation> MO = MemoryLocation::getOrNone(I))
      PtrArgs.push_back(MO->Ptr);
    else if (const CallBase *Call = dyn_cast<CallBase>(I)) {
      if (Call->doesNotAccessMemory())
        continue;

      for (Value *Arg : Call->args()) {
        if (!Arg->getType()->isPointerTy())
          continue;

        PtrArgs.push_back(Arg);
      }
    }

    if (PtrArgs.empty())
      continue;

    // Collect underlying objects of pointer arguments.
    SmallVector<Metadata *, 4u> Scopes;
    SmallPtrSet<const Value *, 4u> ObjSet;
    SmallVector<Metadata *, 4u> NoAliases;

    for (const Value *Val : PtrArgs) {
      SmallVector<const Value *, 4u> Objects;
      getUnderlyingObjects(Val, Objects);
      ObjSet.insert_range(Objects);
    }

    bool RequiresNoCaptureBefore = false;
    bool UsesUnknownObject = false;
    bool UsesAliasingPtr = false;

    for (const Value *Val : ObjSet) {
      if (isa<ConstantData>(Val))
        continue;

      // Anything other than a noalias argument means we cannot fully describe
      // the aliasing properties using alias.scope metadata.
      if (const Argument *Arg = dyn_cast<Argument>(Val)) {
        if (!Arg->hasAttribute(Attribute::NoAlias))
          UsesAliasingPtr = true;
      } else {
        UsesAliasingPtr = true;
      }

      if (isEscapeSource(Val)) {
        // An escape source can only alias a noalias argument if it has been
        // captured beforehand.
        RequiresNoCaptureBefore = true;
      } else if (!isa<Argument>(Val) && !isIdentifiedObject(Val)) {
        // Neither an escape source, an identified object, nor an argument:
        // conservatively treat the object as unknown.
        UsesUnknownObject = true;
      }
    }

    if (UsesUnknownObject)
      continue;

    // Collect noalias scopes for instruction.
    for (const Argument *Arg : NoAliasArgs) {
      if (ObjSet.contains(Arg))
        continue;

      if (!RequiresNoCaptureBefore ||
          !capturesAnything(PointerMayBeCapturedBefore(
              Arg, /*ReturnCaptures=*/false, I, &DT, /*IncludeI=*/false,
              CaptureComponents::Provenance)))
        NoAliases.push_back(NewScopes[Arg]);
    }

    // Add noalias metadata to instruction.
    if (!NoAliases.empty()) {
      MDNode *NewMD =
          MDNode::concatenate(Inst->getMetadata(LLVMContext::MD_noalias),
                              MDNode::get(F.getContext(), NoAliases));
      Inst->setMetadata(LLVMContext::MD_noalias, NewMD);
    }

    // Collect scopes for alias.scope metadata.
    if (!UsesAliasingPtr)
      for (const Argument *Arg : NoAliasArgs) {
        if (ObjSet.count(Arg))
          Scopes.push_back(NewScopes[Arg]);
      }

    // Add alias.scope metadata to instruction.
    if (!Scopes.empty()) {
      MDNode *NewMD =
          MDNode::concatenate(Inst->getMetadata(LLVMContext::MD_alias_scope),
                              MDNode::get(F.getContext(), Scopes));
      Inst->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    }
  }
}

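// Rewrite direct uses of AMDGPU_KERNEL arguments into loads from the kernarg
// segment pointer at each argument's ABI offset. Returns true if the function
// was changed.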
static bool lowerKernelArguments(Function &F, const TargetMachine &TM,
                                 DominatorTree &DT) {
  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getContext();
  const DataLayout &DL = F.getDataLayout();
  BasicBlock &EntryBlock = *F.begin();
  IRBuilder<> Builder(&EntryBlock, getInsertPt(EntryBlock));

  const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();

  Align MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

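  // Materialize the kernarg segment pointer once; every lowered argument
  // becomes a load at a constant offset from it.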
  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {},
                              nullptr, F.getName() + ".kernarg.segment");
  KernArgSegment->addRetAttr(Attribute::NonNull);
  KernArgSegment->addRetAttr(
      Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));

  uint64_t ExplicitArgOffset = 0;

  addAliasScopeMetadata(F, DL, DT);

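  // Process each argument: compute its offset in the kernarg segment, emit an
  // aligned load, and replace the argument's uses with the loaded value.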
  for (Argument &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABITypeAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t Size = DL.getTypeSizeInBits(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);

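    // EltOffset is this argument's byte offset within the kernarg segment
    // (including the target's implicit offset, BaseOffset); ExplicitArgOffset
    // tracks the running offset for the next argument.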
    uint64_t EltOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;

    // Skip inreg arguments which should be preloaded.
    if (Arg.use_empty() || Arg.hasInRegAttr())
      continue;

    // If this is byref, the loads are already explicit in the function. We
    // just need to rewrite the pointer values.
    if (IsByRef) {
      Value *ArgOffsetPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".byval.kernarg.offset");

      Value *CastOffsetPtr =
          Builder.CreateAddrSpaceCast(ArgOffsetPtr, Arg.getType());
      Arg.replaceAllUsesWith(CastOffsetPtr);
      continue;
    }

    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if ((PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
           PT->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) &&
          !ST.hasUsableDSOffset())
        continue;
    }

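    // Arguments narrower than 32 bits are loaded via the containing aligned
    // dword and the relevant bits extracted afterwards (DoShiftOpt); 3-element
    // vectors of at least 32 bits are loaded as 4-element vectors and narrowed
    // back with a shuffle.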
    auto *VT = dyn_cast<FixedVectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    Align AdjustedAlign = commonAlignment(
        KernArgBaseAlign, DoShiftOpt ? AlignDownOffset : EltOffset);

    Value *ArgPtr;
    Type *AdjustedArgTy;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      // TODO: Update this for GFX12 which does have scalar sub-dword loads.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
          Arg.getName() + ".kernarg.offset.align.down");
      AdjustedArgTy = Builder.getInt32Ty();
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".kernarg.offset");
      AdjustedArgTy = ArgTy;
    }

    if (IsV3 && Size >= 32) {
      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
      AdjustedArgTy = V4Ty;
    }

    LoadInst *Load =
        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

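    // Transfer argument attributes (noundef, range, nonnull, dereferenceable,
    // align) onto the load as metadata so the information survives lowering.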
    MDBuilder MDB(Ctx);

    if (Arg.hasAttribute(Attribute::NoUndef))
      Load->setMetadata(LLVMContext::MD_noundef, MDNode::get(Ctx, {}));

    if (Arg.hasAttribute(Attribute::Range)) {
      const ConstantRange &Range =
          Arg.getAttribute(Attribute::Range).getValueAsConstantRange();
      Load->setMetadata(LLVMContext::MD_range,
                        MDB.createRange(Range.getLower(), Range.getUpper()));
    }

    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
            LLVMContext::MD_dereferenceable,
            MDNode::get(Ctx,
                        MDB.createConstant(ConstantInt::get(
                            Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
            LLVMContext::MD_dereferenceable_or_null,
            MDNode::get(Ctx,
                        MDB.createConstant(ConstantInt::get(
                            Builder.getInt64Ty(), DerefOrNullBytes))));
      }

      if (MaybeAlign ParamAlign = Arg.getParamAlign()) {
        Load->setMetadata(
            LLVMContext::MD_align,
            MDNode::get(Ctx, MDB.createConstant(ConstantInt::get(
                                 Builder.getInt64Ty(), ParamAlign->value()))));
      }
    }

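    // For the dword-widened case, shift the loaded value down to the byte
    // offset within the dword, truncate to the argument's width, and bitcast
    // back to the original type.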
    if (DoShiftOpt) {
      Value *ExtractBits =
          OffsetDiff == 0 ? Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal =
          Builder.CreateBitCast(Trunc, ArgTy, Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

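  // Mark the kernarg segment pointer as aligned to at least the largest
  // alignment required by any argument.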
  KernArgSegment->addRetAttr(
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}

bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
  auto &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  return lowerKernelArguments(F, TM, DT);
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}

PreservedAnalyses
AMDGPULowerKernelArgumentsPass::run(Function &F, FunctionAnalysisManager &AM) {
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  bool Changed = lowerKernelArguments(F, TM, DT);
  if (Changed) {
    // TODO: Preserve a lot more.
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }

  return PreservedAnalyses::all();
}