//===-- NVPTXLowerArgs.cpp - Lower arguments ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
// Arguments to kernel and device functions are passed via param space,
// which imposes certain restrictions:
// http://docs.nvidia.com/cuda/parallel-thread-execution/#state-spaces
//
// Kernel parameters are read-only and accessible only via ld.param
// instruction, directly or via a pointer.
//
// Device function parameters are directly accessible via
// ld.param/st.param, but taking the address of one returns a pointer
// to a copy created in local space which *can't* be used with
// ld.param/st.param.
//
// Copying a byval struct into local memory in IR allows us to enforce
// the param space restrictions, gives the rest of IR a pointer w/o
// param space restrictions, and gives us an opportunity to eliminate
// the copy.
//
// Pointer arguments to kernel functions need more work to be lowered:
//
// 1. Convert non-byval pointer arguments of CUDA kernels to pointers in the
//    global address space. This allows later optimizations to emit
//    ld.global.*/st.global.* for accessing these pointer arguments. For
//    example,
//
//    define void @foo(float* %input) {
//      %v = load float, float* %input, align 4
//      ...
//    }
//
//    becomes
//
//    define void @foo(float* %input) {
//      %input2 = addrspacecast float* %input to float addrspace(1)*
//      %input3 = addrspacecast float addrspace(1)* %input2 to float*
//      %v = load float, float* %input3, align 4
//      ...
//    }
//
//    Later, NVPTXInferAddressSpaces will optimize it to
//
//    define void @foo(float* %input) {
//      %input2 = addrspacecast float* %input to float addrspace(1)*
//      %v = load float, float addrspace(1)* %input2, align 4
//      ...
//    }
//
// 2. Convert byval kernel parameters to pointers in the param address space
//    (so that NVPTX emits ld/st.param). Convert pointers *within* a byval
//    kernel parameter to pointers in the global address space. This allows
//    NVPTX to emit ld/st.global.
//
//    struct S {
//      int *x;
//      int *y;
//    };
//    __global__ void foo(S s) {
//      int *b = s.y;
//      // use b
//    }
//
70 | // "b" points to the global address space. In the IR level, |
71 | // |
72 | // define void @foo(ptr byval %input) { |
73 | // %b_ptr = getelementptr {ptr, ptr}, ptr %input, i64 0, i32 1 |
74 | // %b = load ptr, ptr %b_ptr |
75 | // ; use %b |
76 | // } |
77 | // |
78 | // becomes |
79 | // |
80 | // define void @foo({i32*, i32*}* byval %input) { |
81 | // %b_param = addrspacecat ptr %input to ptr addrspace(101) |
82 | // %b_ptr = getelementptr {ptr, ptr}, ptr addrspace(101) %b_param, i64 0, i32 1 |
83 | // %b = load ptr, ptr addrspace(101) %b_ptr |
84 | // %b_global = addrspacecast ptr %b to ptr addrspace(1) |
85 | // ; use %b_generic |
86 | // } |
//
// Create a local copy of kernel byval parameters used in a way that *might*
// mutate the parameter, by storing it in an alloca. Mutations to
// "grid_constant" parameters are undefined behaviour, and don't require local
// copies.
//
//    define void @foo(ptr byval(%struct.s) align 4 %input) {
//      store i32 42, ptr %input
//      ret void
//    }
//
//    becomes
//
//    define void @foo(ptr byval(%struct.s) align 4 %input) #1 {
//      %input1 = alloca %struct.s, align 4
//      %input2 = addrspacecast ptr %input to ptr addrspace(101)
//      %input3 = load %struct.s, ptr addrspace(101) %input2, align 4
//      store %struct.s %input3, ptr %input1, align 4
//      store i32 42, ptr %input1, align 4
//      ret void
//    }
//
// If %input were passed to a device function, or written to memory,
// conservatively assume that %input gets mutated, and create a local copy.
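//
//    For example (an illustrative sketch; @escape stands for any device
//    function the parameter's pointer might be passed to), a non-grid_constant
//    byval parameter that escapes into a call
//
//    define void @foo(ptr byval(%struct.s) %input) {
//      %call = call i32 @escape(ptr %input)
//      ret void
//    }
//
//    is conservatively assumed to be mutated, so %input gets a local copy as
//    shown above, and the call is rewritten to use the alloca.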
//
// Convert param pointers to grid_constant byval kernel parameters that are
// passed into calls (device functions, intrinsics, inline asm), or otherwise
// "escape" (into stores/ptrtoints) to the generic address space, using the
// `nvvm.ptr.param.to.gen` intrinsic, so that NVPTX emits cvta.param
// (available for sm70+)
//
//    define void @foo(ptr byval(%struct.s) %input) {
//      ; %input is a grid_constant
//      %call = call i32 @escape(ptr %input)
//      ret void
//    }
//
//    becomes
//
//    define void @foo(ptr byval(%struct.s) %input) {
//      %input1 = addrspacecast ptr %input to ptr addrspace(101)
//      ; the following intrinsic converts pointer to generic. We don't use an
//      ; addrspacecast to prevent generic -> param -> generic from getting
//      ; cancelled out
//      %input1.gen = call ptr @llvm.nvvm.ptr.param.to.gen.p0.p101(ptr addrspace(101) %input1)
//      %call = call i32 @escape(ptr %input1.gen)
//      ret void
//    }
//
// TODO: merge this pass with NVPTXInferAddressSpaces so that other passes
// don't cancel the addrspacecast pair this pass emits.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "NVPTX.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXUtilities.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Type.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/NVPTXAddrSpace.h"
#include <numeric>
#include <queue>

#define DEBUG_TYPE "nvptx-lower-args"

using namespace llvm;

namespace {
class NVPTXLowerArgsLegacyPass : public FunctionPass {
  bool runOnFunction(Function &F) override;

public:
  static char ID; // Pass identification, replacement for typeid
  NVPTXLowerArgsLegacyPass() : FunctionPass(ID) {}
  StringRef getPassName() const override {
    return "Lower pointer arguments of CUDA kernels";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
  }
};
} // namespace

char NVPTXLowerArgsLegacyPass::ID = 1;

INITIALIZE_PASS_BEGIN(NVPTXLowerArgsLegacyPass, "nvptx-lower-args",
                      "Lower arguments (NVPTX)", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(NVPTXLowerArgsLegacyPass, "nvptx-lower-args",
                    "Lower arguments (NVPTX)", false, false)

// =============================================================================
// If the function had a byval struct ptr arg, say foo(%struct.x* byval %d),
// and we can't guarantee that the only accesses are loads,
// then add the following instructions to the first basic block:
//
// %temp = alloca %struct.x, align 8
// %tempd = addrspacecast %struct.x* %d to %struct.x addrspace(101)*
// %tv = load %struct.x addrspace(101)* %tempd
// store %struct.x %tv, %struct.x* %temp, align 8
//
// The above code allocates some space in the stack and copies the incoming
// struct from param space to local space.
// Then replace all occurrences of %d by %temp.
//
// In case we know that all users are GEPs or Loads, replace them with the same
// ones in parameter AS, so we can access them using ld.param.
// =============================================================================
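//
// For example (an illustrative sketch), a generic-AS chain over a byval
// argument %d such as
//
//   %gep = getelementptr %struct.x, ptr %d, i64 0, i32 1
//   %v = load i32, ptr %gep
//
// is recreated over the param-AS pointer %d.param as
//
//   %gep1 = getelementptr %struct.x, ptr addrspace(101) %d.param, i64 0, i32 1
//   %v1 = load i32, ptr addrspace(101) %gep1
//
// so the load can be lowered to ld.param.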

// For Loads, replaces the \p OldUse of the pointer with a Use of the same
// pointer in parameter AS.
// For "escapes" (to memory, a function call, or a ptrtoint), cast the OldUse
// to generic using cvta.param.
static void convertToParamAS(Use *OldUse, Value *Param, bool HasCvtaParam,
                             bool IsGridConstant) {
  Instruction *I = dyn_cast<Instruction>(OldUse->getUser());
  assert(I && "OldUse must be in an instruction");
  struct IP {
    Use *OldUse;
    Instruction *OldInstruction;
    Value *NewParam;
  };
  SmallVector<IP> ItemsToConvert = {{OldUse, I, Param}};
  SmallVector<Instruction *> InstructionsToDelete;

  auto CloneInstInParamAS = [HasCvtaParam,
                             IsGridConstant](const IP &I) -> Value * {
    if (auto *LI = dyn_cast<LoadInst>(I.OldInstruction)) {
      LI->setOperand(0, I.NewParam);
      return LI;
    }
    if (auto *GEP = dyn_cast<GetElementPtrInst>(I.OldInstruction)) {
      SmallVector<Value *, 4> Indices(GEP->indices());
      auto *NewGEP = GetElementPtrInst::Create(
          GEP->getSourceElementType(), I.NewParam, Indices, GEP->getName(),
          GEP->getIterator());
      NewGEP->setIsInBounds(GEP->isInBounds());
      return NewGEP;
    }
    if (auto *BC = dyn_cast<BitCastInst>(I.OldInstruction)) {
      auto *NewBCType =
          PointerType::get(BC->getContext(), ADDRESS_SPACE_PARAM);
      return BitCastInst::Create(BC->getOpcode(), I.NewParam, NewBCType,
                                 BC->getName(), BC->getIterator());
    }
    if (auto *ASC = dyn_cast<AddrSpaceCastInst>(I.OldInstruction)) {
      assert(ASC->getDestAddressSpace() == ADDRESS_SPACE_PARAM);
      (void)ASC;
      // Just pass through the argument, the old ASC is no longer needed.
      return I.NewParam;
    }
    if (auto *MI = dyn_cast<MemTransferInst>(I.OldInstruction)) {
      if (MI->getRawSource() == I.OldUse->get()) {
        // convert to memcpy/memmove from param space.
        IRBuilder<> Builder(I.OldInstruction);
        Intrinsic::ID ID = MI->getIntrinsicID();

        CallInst *B = Builder.CreateMemTransferInst(
            ID, MI->getRawDest(), MI->getDestAlign(), I.NewParam,
            MI->getSourceAlign(), MI->getLength(), MI->isVolatile());
        for (unsigned I : {0, 1})
          if (uint64_t Bytes = MI->getParamDereferenceableBytes(I))
            B->addDereferenceableParamAttr(I, Bytes);
        return B;
      }
      // We may be able to handle other cases if the argument is
      // __grid_constant__
    }

    if (HasCvtaParam) {
      auto GetParamAddrCastToGeneric =
          [](Value *Addr, Instruction *OriginalUser) -> Value * {
        IRBuilder<> IRB(OriginalUser);
        Type *GenTy = IRB.getPtrTy(ADDRESS_SPACE_GENERIC);
        return IRB.CreateAddrSpaceCast(Addr, GenTy, Addr->getName() + ".gen");
      };
      auto *ParamInGenericAS =
          GetParamAddrCastToGeneric(I.NewParam, I.OldInstruction);

      // phi/select could use generic arg pointers w/o __grid_constant__
      if (auto *PHI = dyn_cast<PHINode>(I.OldInstruction)) {
        for (auto [Idx, V] : enumerate(PHI->incoming_values())) {
          if (V.get() == I.OldUse->get())
            PHI->setIncomingValue(Idx, ParamInGenericAS);
        }
      }
      if (auto *SI = dyn_cast<SelectInst>(I.OldInstruction)) {
        if (SI->getTrueValue() == I.OldUse->get())
          SI->setTrueValue(ParamInGenericAS);
        if (SI->getFalseValue() == I.OldUse->get())
          SI->setFalseValue(ParamInGenericAS);
      }

      // Escapes or writes can only use generic param pointers if
      // __grid_constant__ is in effect.
      if (IsGridConstant) {
        if (auto *CI = dyn_cast<CallInst>(I.OldInstruction)) {
          I.OldUse->set(ParamInGenericAS);
          return CI;
        }
        if (auto *SI = dyn_cast<StoreInst>(I.OldInstruction)) {
          // byval address is being stored, cast it to generic
          if (SI->getValueOperand() == I.OldUse->get())
            SI->setOperand(0, ParamInGenericAS);
          return SI;
        }
        if (auto *PI = dyn_cast<PtrToIntInst>(I.OldInstruction)) {
          if (PI->getPointerOperand() == I.OldUse->get())
            PI->setOperand(0, ParamInGenericAS);
          return PI;
        }
        // TODO: If we allow stores, we should allow memcpy/memset to
        // parameter, too.
      }
    }

    llvm_unreachable("Unsupported instruction");
  };

  while (!ItemsToConvert.empty()) {
    IP I = ItemsToConvert.pop_back_val();
    Value *NewInst = CloneInstInParamAS(I);

    if (NewInst && NewInst != I.OldInstruction) {
      // We've created a new instruction. Queue users of the old instruction to
      // be converted and the instruction itself to be deleted. We can't delete
      // the old instruction yet, because it's still in use by a load
      // somewhere.
      for (Use &U : I.OldInstruction->uses())
        ItemsToConvert.push_back({&U, cast<Instruction>(U.getUser()), NewInst});

      InstructionsToDelete.push_back(I.OldInstruction);
    }
  }

  // Now we know that all argument loads are using addresses in parameter space
  // and we can finally remove the old instructions in generic AS. Instructions
  // scheduled for removal should be processed in reverse order so the ones
  // closest to the load are deleted first. Otherwise they may still be in use.
  // E.g. if we have Value = Load(BitCast(GEP(arg))), InstructionsToDelete will
  // have {GEP, BitCast}. GEP can't be deleted first, because it's still used
  // by the BitCast.
  for (Instruction *I : llvm::reverse(InstructionsToDelete))
    I->eraseFromParent();
}

// Adjust the alignment of arguments passed byval in the .param address space.
// We can increase the alignment of such arguments in a way that ensures we can
// effectively vectorize their loads. We also traverse all loads from the byval
// pointer and adjust their alignment, if they use a known offset. Such
// alignment changes must be kept in sync with the parameter stores and loads
// in NVPTXTargetLowering::LowerCall.
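//
// For example (assuming getFunctionParamOptimizedAlign returns 8 for the byval
// type), an argument annotated `align 4` is re-annotated `align 8`; a load at
// byte offset 0 within it can then be promoted to align 8, while a load at
// byte offset 4 can only be guaranteed align gcd(8, 4) = 4.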
static void adjustByValArgAlignment(Argument *Arg, Value *ArgInParamAS,
                                    const NVPTXTargetLowering *TLI) {
  Function *Func = Arg->getParent();
  Type *StructType = Arg->getParamByValType();
  const DataLayout &DL = Func->getDataLayout();

  const Align NewArgAlign =
      TLI->getFunctionParamOptimizedAlign(Func, StructType, DL);
  const Align CurArgAlign = Arg->getParamAlign().valueOrOne();

  if (CurArgAlign >= NewArgAlign)
    return;

  LLVM_DEBUG(dbgs() << "Try to use alignment " << NewArgAlign.value()
                    << " instead of " << CurArgAlign.value() << " for " << *Arg
                    << '\n');

  auto NewAlignAttr =
      Attribute::getWithAlignment(Func->getContext(), NewArgAlign);
  Arg->removeAttr(Attribute::Alignment);
  Arg->addAttr(NewAlignAttr);

  struct Load {
    LoadInst *Inst;
    uint64_t Offset;
  };

  struct LoadContext {
    Value *InitialVal;
    uint64_t Offset;
  };

  SmallVector<Load> Loads;
  std::queue<LoadContext> Worklist;
  Worklist.push({ArgInParamAS, 0});

  while (!Worklist.empty()) {
    LoadContext Ctx = Worklist.front();
    Worklist.pop();

    for (User *CurUser : Ctx.InitialVal->users()) {
      if (auto *I = dyn_cast<LoadInst>(CurUser))
        Loads.push_back({I, Ctx.Offset});
      else if (isa<BitCastInst>(CurUser) || isa<AddrSpaceCastInst>(CurUser))
        Worklist.push({cast<Instruction>(CurUser), Ctx.Offset});
      else if (auto *I = dyn_cast<GetElementPtrInst>(CurUser)) {
        APInt OffsetAccumulated =
            APInt::getZero(DL.getIndexSizeInBits(ADDRESS_SPACE_PARAM));

        if (!I->accumulateConstantOffset(DL, OffsetAccumulated))
          continue;

        uint64_t OffsetLimit = -1;
        uint64_t Offset = OffsetAccumulated.getLimitedValue(OffsetLimit);
        assert(Offset != OffsetLimit && "Expect Offset less than UINT64_MAX");

        Worklist.push({I, Ctx.Offset + Offset});
      }
    }
  }

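  // A load at constant byte offset Offset from a pointer aligned to
  // NewArgAlign is guaranteed alignment gcd(NewArgAlign, Offset). E.g. with
  // NewArgAlign = 8, a load at offset 4 gets alignment 4, while one at
  // offset 0 gets 8 (std::gcd(N, 0) == N). Only ever raise the alignment of a
  // load, never lower it.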
  for (Load &CurLoad : Loads) {
    Align NewLoadAlign(std::gcd(NewArgAlign.value(), CurLoad.Offset));
    Align CurLoadAlign = CurLoad.Inst->getAlign();
    CurLoad.Inst->setAlignment(std::max(NewLoadAlign, CurLoadAlign));
  }
}

namespace {
struct ArgUseChecker : PtrUseVisitor<ArgUseChecker> {
  using Base = PtrUseVisitor<ArgUseChecker>;

  bool IsGridConstant;
  // Set of phi/select instructions using the Arg
  SmallPtrSet<Instruction *, 4> Conditionals;

  ArgUseChecker(const DataLayout &DL, bool IsGridConstant)
      : PtrUseVisitor(DL), IsGridConstant(IsGridConstant) {}

  PtrInfo visitArgPtr(Argument &A) {
    assert(A.getType()->isPointerTy());
    IntegerType *IntIdxTy = cast<IntegerType>(DL.getIndexType(A.getType()));
    IsOffsetKnown = false;
    Offset = APInt(IntIdxTy->getBitWidth(), 0);
    PI.reset();
    Conditionals.clear();

    LLVM_DEBUG(dbgs() << "Checking Argument " << A << "\n");
    // Enqueue the uses of this pointer.
    enqueueUsers(A);

    // Visit all the uses off the worklist until it is empty.
    // Note that unlike PtrUseVisitor we intentionally do not track offsets.
    // We're only interested in how we use the pointer.
    while (!(Worklist.empty() || PI.isAborted())) {
      UseToVisit ToVisit = Worklist.pop_back_val();
      U = ToVisit.UseAndIsOffsetKnown.getPointer();
      Instruction *I = cast<Instruction>(U->getUser());
      if (isa<PHINode>(I) || isa<SelectInst>(I))
        Conditionals.insert(I);
      LLVM_DEBUG(dbgs() << "Processing " << *I << "\n");
      Base::visit(I);
    }
    if (PI.isEscaped())
      LLVM_DEBUG(dbgs() << "Argument pointer escaped: " << *PI.getEscapingInst()
                        << "\n");
    else if (PI.isAborted())
      LLVM_DEBUG(dbgs() << "Pointer use needs a copy: " << *PI.getAbortingInst()
                        << "\n");
    LLVM_DEBUG(dbgs() << "Traversed " << Conditionals.size()
                      << " conditionals\n");
    return PI;
  }

  void visitStoreInst(StoreInst &SI) {
    // Storing the pointer escapes it.
    if (U->get() == SI.getValueOperand())
      return PI.setEscapedAndAborted(&SI);
    // Writes to the pointer are UB w/ __grid_constant__, but do not force a
    // copy.
    if (!IsGridConstant)
      return PI.setAborted(&SI);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    // ASC to param space are no-ops and do not need a copy
    if (ASC.getDestAddressSpace() != ADDRESS_SPACE_PARAM)
      return PI.setEscapedAndAborted(&ASC);
    Base::visitAddrSpaceCastInst(ASC);
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    if (IsGridConstant)
      return;
    Base::visitPtrToIntInst(I);
  }
  void visitPHINodeOrSelectInst(Instruction &I) {
    assert(isa<PHINode>(I) || isa<SelectInst>(I));
  }
  // PHI and select just pass through the pointers.
  void visitPHINode(PHINode &PN) { enqueueUsers(PN); }
  void visitSelectInst(SelectInst &SI) { enqueueUsers(SI); }

  void visitMemTransferInst(MemTransferInst &II) {
    if (*U == II.getRawDest() && !IsGridConstant)
      PI.setAborted(&II);
    // memcpy/memmove are OK when the pointer is source. We can convert them to
    // AS-specific memcpy.
  }

  void visitMemSetInst(MemSetInst &II) {
    if (!IsGridConstant)
      PI.setAborted(&II);
  }
}; // struct ArgUseChecker

void copyByValParam(Function &F, Argument &Arg) {
  LLVM_DEBUG(dbgs() << "Creating a local copy of " << Arg << "\n");
  // We have to create a temporary copy of the argument on the stack.
  BasicBlock::iterator FirstInst = F.getEntryBlock().begin();
  Type *StructType = Arg.getParamByValType();
  const DataLayout &DL = F.getDataLayout();
  IRBuilder<> IRB(&*FirstInst);
  AllocaInst *AllocA = IRB.CreateAlloca(StructType, nullptr, Arg.getName());
  // Set the alignment to the alignment of the byval parameter, because later
  // loads/stores assume that alignment, and we are going to replace the uses
  // of the byval parameter with this alloca instruction.
  AllocA->setAlignment(
      Arg.getParamAlign().value_or(DL.getPrefTypeAlign(StructType)));
  Arg.replaceAllUsesWith(AllocA);

  Value *ArgInParam =
      IRB.CreateIntrinsic(Intrinsic::nvvm_internal_addrspace_wrap,
                          {IRB.getPtrTy(ADDRESS_SPACE_PARAM), Arg.getType()},
                          &Arg, {}, Arg.getName());

  // Be sure to propagate alignment to this copy; LLVM doesn't know that NVPTX
  // addrspacecast preserves alignment. Since params are constant, the source
  // of this copy is never volatile.
  const auto ArgSize = *AllocA->getAllocationSize(DL);
  IRB.CreateMemCpy(AllocA, AllocA->getAlign(), ArgInParam, AllocA->getAlign(),
                   ArgSize);
}
} // namespace

static void handleByValParam(const NVPTXTargetMachine &TM, Argument *Arg) {
  Function *Func = Arg->getParent();
  assert(isKernelFunction(*Func));
  const bool HasCvtaParam = TM.getSubtargetImpl(*Func)->hasCvtaParam();
  const bool IsGridConstant = HasCvtaParam && isParamGridConstant(*Arg);
  const DataLayout &DL = Func->getDataLayout();
  BasicBlock::iterator FirstInst = Func->getEntryBlock().begin();
  [[maybe_unused]] Type *StructType = Arg->getParamByValType();
  assert(StructType && "Missing byval type");

  ArgUseChecker AUC(DL, IsGridConstant);
  ArgUseChecker::PtrInfo PI = AUC.visitArgPtr(*Arg);
  bool ArgUseIsReadOnly = !(PI.isEscaped() || PI.isAborted());
  // Easy case, accessing parameter directly is fine.
  if (ArgUseIsReadOnly && AUC.Conditionals.empty()) {
    // Convert all loads and intermediate operations to use parameter AS and
    // skip creation of a local copy of the argument.
    SmallVector<Use *, 16> UsesToUpdate(llvm::make_pointer_range(Arg->uses()));

    IRBuilder<> IRB(&*FirstInst);
    Value *ArgInParamAS = IRB.CreateIntrinsic(
        Intrinsic::nvvm_internal_addrspace_wrap,
        {IRB.getPtrTy(ADDRESS_SPACE_PARAM), Arg->getType()}, {Arg});

    for (Use *U : UsesToUpdate)
      convertToParamAS(U, ArgInParamAS, HasCvtaParam, IsGridConstant);
    LLVM_DEBUG(dbgs() << "No need to copy or cast " << *Arg << "\n");

    const auto *TLI =
        cast<NVPTXTargetLowering>(TM.getSubtargetImpl()->getTargetLowering());

    adjustByValArgAlignment(Arg, ArgInParamAS, TLI);

    return;
  }

  // We can't access the byval arg directly; we need a pointer to it. On
  // sm_70+ we can take a pointer to the argument without making a local copy.
  // However, we're still not allowed to write to it. If the user specified
  // `__grid_constant__` for the argument, we'll treat the escaped pointer as
  // read-only.
  if (IsGridConstant || (HasCvtaParam && ArgUseIsReadOnly)) {
    LLVM_DEBUG(dbgs() << "Using non-copy pointer to " << *Arg << "\n");
    // Replace all argument pointer uses (which might include a device function
    // call) with a cast to the generic address space using cvta.param
    // instruction, which avoids a local copy.
    IRBuilder<> IRB(&Func->getEntryBlock().front());

    // Cast argument to param address space. Because the backend will emit the
    // argument already in the param address space, we need to use the no-op
    // intrinsic; this has the added benefit of preventing other optimizations
    // from folding away this pair of addrspacecasts.
    auto *ParamSpaceArg = IRB.CreateIntrinsic(
        Intrinsic::nvvm_internal_addrspace_wrap,
        {IRB.getPtrTy(ADDRESS_SPACE_PARAM), Arg->getType()}, Arg, {},
        Arg->getName() + ".param");

    // Cast param address to generic address space.
    Value *GenericArg = IRB.CreateAddrSpaceCast(
        ParamSpaceArg, IRB.getPtrTy(ADDRESS_SPACE_GENERIC),
        Arg->getName() + ".gen");

    Arg->replaceAllUsesWith(GenericArg);

    // Do not replace Arg in the cast to param space
    ParamSpaceArg->setOperand(0, Arg);
  } else
    copyByValParam(*Func, *Arg);
}

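// Emit an addrspacecast round-trip (generic -> AS -> generic) right after Ptr
// and route Ptr's other uses through it. For example (illustrative IR, with
// AS = 1), for a generic pointer %p this emits
//
//   %p1 = addrspacecast ptr %p to ptr addrspace(1)
//   %p2 = addrspacecast ptr addrspace(1) %p1 to ptr
//
// and replaces all uses of %p (except the first cast's operand) with %p2, so
// later address-space inference can fold accesses into AS-specific ones.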
static void markPointerAsAS(Value *Ptr, const unsigned AS) {
  if (Ptr->getType()->getPointerAddressSpace() != ADDRESS_SPACE_GENERIC)
    return;

  // Deciding where to emit the addrspacecast pair.
  BasicBlock::iterator InsertPt;
  if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
    // Insert at the function entry if Ptr is an argument.
    InsertPt = Arg->getParent()->getEntryBlock().begin();
  } else {
    // Insert right after Ptr if Ptr is an instruction.
    InsertPt = ++cast<Instruction>(Ptr)->getIterator();
    assert(InsertPt != InsertPt->getParent()->end() &&
           "We don't call this function with Ptr being a terminator.");
  }

  Instruction *PtrInGlobal = new AddrSpaceCastInst(
      Ptr, PointerType::get(Ptr->getContext(), AS), Ptr->getName(), InsertPt);
  Value *PtrInGeneric = new AddrSpaceCastInst(PtrInGlobal, Ptr->getType(),
                                              Ptr->getName(), InsertPt);
  // Replace with PtrInGeneric all uses of Ptr except PtrInGlobal.
  Ptr->replaceAllUsesWith(PtrInGeneric);
  PtrInGlobal->setOperand(0, Ptr);
}

static void markPointerAsGlobal(Value *Ptr) {
  markPointerAsAS(Ptr, ADDRESS_SPACE_GLOBAL);
}

// =============================================================================
// Main function for this pass.
// =============================================================================
static bool runOnKernelFunction(const NVPTXTargetMachine &TM, Function &F) {
  // Copying of byval aggregates + SROA may result in pointers being loaded as
  // integers, followed by inttoptr. We may want to mark those as global, too,
  // but only if the loaded integer is used exclusively for conversion to a
  // pointer with inttoptr.
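  //
  // For example (illustrative IR):
  //
  //   %i = load i64, ptr %field        ; pointer loaded as an integer
  //   %p = inttoptr i64 %i to ptr
  //
  // If *all* users of %i are inttoptr instructions, each resulting pointer is
  // marked as global via markPointerAsGlobal below.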
  auto HandleIntToPtr = [](Value &V) {
    if (llvm::all_of(V.users(), [](User *U) { return isa<IntToPtrInst>(U); })) {
      SmallVector<User *, 16> UsersToUpdate(V.users());
      for (User *U : UsersToUpdate)
        markPointerAsGlobal(U);
    }
  };
  if (TM.getDrvInterface() == NVPTX::CUDA) {
    // Mark pointers in byval structs as global.
    for (auto &B : F) {
      for (auto &I : B) {
        if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
          if (LI->getType()->isPointerTy() || LI->getType()->isIntegerTy()) {
            Value *UO = getUnderlyingObject(LI->getPointerOperand());
            if (Argument *Arg = dyn_cast<Argument>(UO)) {
              if (Arg->hasByValAttr()) {
                // LI is a load from a pointer within a byval kernel parameter.
                if (LI->getType()->isPointerTy())
                  markPointerAsGlobal(LI);
                else
                  HandleIntToPtr(*LI);
              }
            }
          }
        }
      }
    }
  }

  LLVM_DEBUG(dbgs() << "Lowering kernel args of " << F.getName() << "\n");
  for (Argument &Arg : F.args()) {
    if (Arg.getType()->isPointerTy() && Arg.hasByValAttr()) {
      handleByValParam(TM, &Arg);
    } else if (Arg.getType()->isIntegerTy() &&
               TM.getDrvInterface() == NVPTX::CUDA) {
      HandleIntToPtr(Arg);
    }
  }
  return true;
}

// Device functions only need to copy byval args into local memory.
static bool runOnDeviceFunction(const NVPTXTargetMachine &TM, Function &F) {
  LLVM_DEBUG(dbgs() << "Lowering function args of " << F.getName() << "\n");

  const auto *TLI =
      cast<NVPTXTargetLowering>(TM.getSubtargetImpl()->getTargetLowering());

  for (Argument &Arg : F.args())
    if (Arg.getType()->isPointerTy() && Arg.hasByValAttr())
      adjustByValArgAlignment(&Arg, &Arg, TLI);

  return true;
}

static bool processFunction(Function &F, NVPTXTargetMachine &TM) {
  return isKernelFunction(F) ? runOnKernelFunction(TM, F)
                             : runOnDeviceFunction(TM, F);
}

bool NVPTXLowerArgsLegacyPass::runOnFunction(Function &F) {
  auto &TM = getAnalysis<TargetPassConfig>().getTM<NVPTXTargetMachine>();
  return processFunction(F, TM);
}
FunctionPass *llvm::createNVPTXLowerArgsPass() {
  return new NVPTXLowerArgsLegacyPass();
}

static bool copyFunctionByValArgs(Function &F) {
  LLVM_DEBUG(dbgs() << "Creating a copy of byval args of " << F.getName()
                    << "\n");
  bool Changed = false;
  if (isKernelFunction(F)) {
    for (Argument &Arg : F.args())
      if (Arg.getType()->isPointerTy() && Arg.hasByValAttr() &&
          !isParamGridConstant(Arg)) {
        copyByValParam(F, Arg);
        Changed = true;
      }
  }
  return Changed;
}

PreservedAnalyses NVPTXCopyByValArgsPass::run(Function &F,
                                              FunctionAnalysisManager &AM) {
  return copyFunctionByValArgs(F) ? PreservedAnalyses::none()
                                  : PreservedAnalyses::all();
}

PreservedAnalyses NVPTXLowerArgsPass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  auto &NTM = static_cast<NVPTXTargetMachine &>(TM);
  bool Changed = processFunction(F, NTM);
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}