//===- NVPTXUtilities.cpp - Utility Functions -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains miscellaneous utility functions
//
//===----------------------------------------------------------------------===//
12
#include "NVPTXUtilities.h"
#include "NVPTX.h"
#include "NVPTXTargetMachine.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/Mutex.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <functional>
#include <map>
#include <mutex>
#include <numeric>
#include <optional>
#include <string>
#include <vector>
36
37namespace llvm {
38
39static cl::opt<bool> ForceMinByValParamAlign(
40 "nvptx-force-min-byval-param-align", cl::Hidden,
41 cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval"
42 " params of device functions."),
43 cl::init(Val: false));
44
45namespace {
46typedef std::map<std::string, std::vector<unsigned>> key_val_pair_t;
47typedef std::map<const GlobalValue *, key_val_pair_t> global_val_annot_t;
48
49struct AnnotationCache {
50 sys::Mutex Lock;
51 std::map<const Module *, global_val_annot_t> Cache;
52};
53
54AnnotationCache &getAnnotationCache() {
55 static AnnotationCache AC;
56 return AC;
57}
58} // anonymous namespace
59
60void clearAnnotationCache(const Module *Mod) {
61 auto &AC = getAnnotationCache();
62 std::lock_guard<sys::Mutex> Guard(AC.Lock);
63 AC.Cache.erase(x: Mod);
64}
65
66static void cacheAnnotationFromMD(const MDNode *MetadataNode,
67 key_val_pair_t &retval) {
68 auto &AC = getAnnotationCache();
69 std::lock_guard<sys::Mutex> Guard(AC.Lock);
70 assert(MetadataNode && "Invalid mdnode for annotation");
71 assert((MetadataNode->getNumOperands() % 2) == 1 &&
72 "Invalid number of operands");
73 // start index = 1, to skip the global variable key
74 // increment = 2, to skip the value for each property-value pairs
75 for (unsigned i = 1, e = MetadataNode->getNumOperands(); i != e; i += 2) {
76 // property
77 const MDString *prop = dyn_cast<MDString>(Val: MetadataNode->getOperand(I: i));
78 assert(prop && "Annotation property not a string");
79 std::string Key = prop->getString().str();
80
81 // value
82 if (ConstantInt *Val = mdconst::dyn_extract<ConstantInt>(
83 MD: MetadataNode->getOperand(I: i + 1))) {
84 retval[Key].push_back(x: Val->getZExtValue());
85 } else {
86 llvm_unreachable("Value operand not a constant int");
87 }
88 }
89}
90
91static void cacheAnnotationFromMD(const Module *m, const GlobalValue *gv) {
92 auto &AC = getAnnotationCache();
93 std::lock_guard<sys::Mutex> Guard(AC.Lock);
94 NamedMDNode *NMD = m->getNamedMetadata(Name: "nvvm.annotations");
95 if (!NMD)
96 return;
97 key_val_pair_t tmp;
98 for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
99 const MDNode *elem = NMD->getOperand(i);
100
101 GlobalValue *entity =
102 mdconst::dyn_extract_or_null<GlobalValue>(MD: elem->getOperand(I: 0));
103 // entity may be null due to DCE
104 if (!entity)
105 continue;
106 if (entity != gv)
107 continue;
108
109 // accumulate annotations for entity in tmp
110 cacheAnnotationFromMD(MetadataNode: elem, retval&: tmp);
111 }
112
113 if (tmp.empty()) // no annotations for this gv
114 return;
115
116 AC.Cache[m][gv] = std::move(tmp);
117}
118
119static std::optional<unsigned> findOneNVVMAnnotation(const GlobalValue *gv,
120 const std::string &prop) {
121 auto &AC = getAnnotationCache();
122 std::lock_guard<sys::Mutex> Guard(AC.Lock);
123 const Module *m = gv->getParent();
124 auto ACIt = AC.Cache.find(x: m);
125 if (ACIt == AC.Cache.end())
126 cacheAnnotationFromMD(m, gv);
127 else if (ACIt->second.find(x: gv) == ACIt->second.end())
128 cacheAnnotationFromMD(m, gv);
129 // Look up AC.Cache[m][gv] again because cacheAnnotationFromMD may have
130 // inserted the entry.
131 auto &KVP = AC.Cache[m][gv];
132 auto It = KVP.find(x: prop);
133 if (It == KVP.end())
134 return std::nullopt;
135 return It->second[0];
136}
137
138static bool findAllNVVMAnnotation(const GlobalValue *gv,
139 const std::string &prop,
140 std::vector<unsigned> &retval) {
141 auto &AC = getAnnotationCache();
142 std::lock_guard<sys::Mutex> Guard(AC.Lock);
143 const Module *m = gv->getParent();
144 auto ACIt = AC.Cache.find(x: m);
145 if (ACIt == AC.Cache.end())
146 cacheAnnotationFromMD(m, gv);
147 else if (ACIt->second.find(x: gv) == ACIt->second.end())
148 cacheAnnotationFromMD(m, gv);
149 // Look up AC.Cache[m][gv] again because cacheAnnotationFromMD may have
150 // inserted the entry.
151 auto &KVP = AC.Cache[m][gv];
152 auto It = KVP.find(x: prop);
153 if (It == KVP.end())
154 return false;
155 retval = It->second;
156 return true;
157}
158
159static bool globalHasNVVMAnnotation(const Value &V, const std::string &Prop) {
160 if (const auto *GV = dyn_cast<GlobalValue>(Val: &V))
161 if (const auto Annot = findOneNVVMAnnotation(gv: GV, prop: Prop)) {
162 assert((*Annot == 1) && "Unexpected annotation on a symbol");
163 return true;
164 }
165
166 return false;
167}
168
169static bool argHasNVVMAnnotation(const Value &Val,
170 const std::string &Annotation) {
171 if (const Argument *Arg = dyn_cast<Argument>(Val: &Val)) {
172 const Function *Func = Arg->getParent();
173 std::vector<unsigned> Annot;
174 if (findAllNVVMAnnotation(gv: Func, prop: Annotation, retval&: Annot)) {
175 if (is_contained(Range&: Annot, Element: Arg->getArgNo()))
176 return true;
177 }
178 }
179 return false;
180}
181
182static std::optional<unsigned> getFnAttrParsedInt(const Function &F,
183 StringRef Attr) {
184 return F.hasFnAttribute(Kind: Attr)
185 ? std::optional(F.getFnAttributeAsParsedInteger(Kind: Attr))
186 : std::nullopt;
187}
188
189static SmallVector<unsigned, 3> getFnAttrParsedVector(const Function &F,
190 StringRef Attr) {
191 SmallVector<unsigned, 3> V;
192 auto &Ctx = F.getContext();
193
194 if (F.hasFnAttribute(Kind: Attr)) {
195 // We expect the attribute value to be of the form "x[,y[,z]]", where x, y,
196 // and z are unsigned values.
197 StringRef S = F.getFnAttribute(Kind: Attr).getValueAsString();
198 for (unsigned I = 0; I < 3 && !S.empty(); I++) {
199 auto [First, Rest] = S.split(Separator: ",");
200 unsigned IntVal;
201 if (First.trim().getAsInteger(Radix: 0, Result&: IntVal))
202 Ctx.emitError(ErrorStr: "can't parse integer attribute " + First + " in " + Attr);
203
204 V.push_back(Elt: IntVal);
205 S = Rest;
206 }
207 }
208 return V;
209}
210
211static std::optional<uint64_t> getVectorProduct(ArrayRef<unsigned> V) {
212 if (V.empty())
213 return std::nullopt;
214
215 return std::accumulate(first: V.begin(), last: V.end(), init: 1, binary_op: std::multiplies<uint64_t>{});
216}
217
218bool isParamGridConstant(const Argument &Arg) {
219 assert(isKernelFunction(*Arg.getParent()) &&
220 "only kernel arguments can be grid_constant");
221
222 if (!Arg.hasByValAttr())
223 return false;
224
225 // Lowering an argument as a grid_constant violates the byval semantics (and
226 // the C++ API) by reusing the same memory location for the argument across
227 // multiple threads. If an argument doesn't read memory and its address is not
228 // captured (its address is not compared with any value), then the tweak of
229 // the C++ API and byval semantics is unobservable by the program and we can
230 // lower the arg as a grid_constant.
231 if (Arg.onlyReadsMemory()) {
232 const auto CI = Arg.getAttributes().getCaptureInfo();
233 if (!capturesAddress(CC: CI) && !capturesFullProvenance(CC: CI))
234 return true;
235 }
236
237 // "grid_constant" counts argument indices starting from 1
238 if (Arg.hasAttribute(Kind: "nvvm.grid_constant"))
239 return true;
240
241 return false;
242}
243
244bool isTexture(const Value &V) { return globalHasNVVMAnnotation(V, Prop: "texture"); }
245
246bool isSurface(const Value &V) { return globalHasNVVMAnnotation(V, Prop: "surface"); }
247
248bool isSampler(const Value &V) {
249 const char *AnnotationName = "sampler";
250
251 return globalHasNVVMAnnotation(V, Prop: AnnotationName) ||
252 argHasNVVMAnnotation(Val: V, Annotation: AnnotationName);
253}
254
255bool isImageReadOnly(const Value &V) {
256 return argHasNVVMAnnotation(Val: V, Annotation: "rdoimage");
257}
258
259bool isImageWriteOnly(const Value &V) {
260 return argHasNVVMAnnotation(Val: V, Annotation: "wroimage");
261}
262
263bool isImageReadWrite(const Value &V) {
264 return argHasNVVMAnnotation(Val: V, Annotation: "rdwrimage");
265}
266
267bool isImage(const Value &V) {
268 return isImageReadOnly(V) || isImageWriteOnly(V) || isImageReadWrite(V);
269}
270
271bool isManaged(const Value &V) { return globalHasNVVMAnnotation(V, Prop: "managed"); }
272
273StringRef getTextureName(const Value &V) {
274 assert(V.hasName() && "Found texture variable with no name");
275 return V.getName();
276}
277
278StringRef getSurfaceName(const Value &V) {
279 assert(V.hasName() && "Found surface variable with no name");
280 return V.getName();
281}
282
283StringRef getSamplerName(const Value &V) {
284 assert(V.hasName() && "Found sampler variable with no name");
285 return V.getName();
286}
287
288SmallVector<unsigned, 3> getMaxNTID(const Function &F) {
289 return getFnAttrParsedVector(F, Attr: "nvvm.maxntid");
290}
291
292SmallVector<unsigned, 3> getReqNTID(const Function &F) {
293 return getFnAttrParsedVector(F, Attr: "nvvm.reqntid");
294}
295
296SmallVector<unsigned, 3> getClusterDim(const Function &F) {
297 return getFnAttrParsedVector(F, Attr: "nvvm.cluster_dim");
298}
299
300std::optional<uint64_t> getOverallMaxNTID(const Function &F) {
301 // Note: The semantics here are a bit strange. The PTX ISA states the
302 // following (11.4.2. Performance-Tuning Directives: .maxntid):
303 //
304 // Note that this directive guarantees that the total number of threads does
305 // not exceed the maximum, but does not guarantee that the limit in any
306 // particular dimension is not exceeded.
307 const auto MaxNTID = getMaxNTID(F);
308 return getVectorProduct(V: MaxNTID);
309}
310
311std::optional<uint64_t> getOverallReqNTID(const Function &F) {
312 // Note: The semantics here are a bit strange. See getMaxNTID.
313 const auto ReqNTID = getReqNTID(F);
314 return getVectorProduct(V: ReqNTID);
315}
316
317std::optional<uint64_t> getOverallClusterRank(const Function &F) {
318 // maxclusterrank and cluster_dim are mutually exclusive.
319 if (const auto ClusterRank = getMaxClusterRank(F))
320 return ClusterRank;
321
322 // Note: The semantics here are a bit strange. See getMaxNTID.
323 const auto ClusterDim = getClusterDim(F);
324 return getVectorProduct(V: ClusterDim);
325}
326
327std::optional<unsigned> getMaxClusterRank(const Function &F) {
328 return getFnAttrParsedInt(F, Attr: "nvvm.maxclusterrank");
329}
330
331std::optional<unsigned> getMinCTASm(const Function &F) {
332 return getFnAttrParsedInt(F, Attr: "nvvm.minctasm");
333}
334
335std::optional<unsigned> getMaxNReg(const Function &F) {
336 return getFnAttrParsedInt(F, Attr: "nvvm.maxnreg");
337}
338
339bool hasBlocksAreClusters(const Function &F) {
340 return F.hasFnAttribute(Kind: "nvvm.blocksareclusters");
341}
342
343MaybeAlign getAlign(const CallInst &I, unsigned Index) {
344 // First check the alignstack metadata
345 if (MaybeAlign StackAlign =
346 I.getAttributes().getAttributes(Index).getStackAlignment())
347 return StackAlign;
348
349 // If that is missing, check the legacy nvvm metadata
350 if (MDNode *alignNode = I.getMetadata(Kind: "callalign")) {
351 for (int i = 0, n = alignNode->getNumOperands(); i < n; i++) {
352 if (const ConstantInt *CI =
353 mdconst::dyn_extract<ConstantInt>(MD: alignNode->getOperand(I: i))) {
354 unsigned V = CI->getZExtValue();
355 if ((V >> 16) == Index)
356 return Align(V & 0xFFFF);
357 if ((V >> 16) > Index)
358 return std::nullopt;
359 }
360 }
361 }
362 return std::nullopt;
363}
364
365Function *getMaybeBitcastedCallee(const CallBase *CB) {
366 return dyn_cast<Function>(Val: CB->getCalledOperand()->stripPointerCasts());
367}
368
369Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy,
370 const DataLayout &DL) {
371 // Capping the alignment to 128 bytes as that is the maximum alignment
372 // supported by PTX.
373 const Align ABITypeAlign = std::min(a: Align(128), b: DL.getABITypeAlign(Ty: ArgTy));
374
375 // If a function has linkage different from internal or private, we
376 // must use default ABI alignment as external users rely on it. Same
377 // for a function that may be called from a function pointer.
378 if (!F || !F->hasLocalLinkage() ||
379 F->hasAddressTaken(/*Users=*/nullptr,
380 /*IgnoreCallbackUses=*/false,
381 /*IgnoreAssumeLikeCalls=*/true,
382 /*IgnoreLLVMUsed=*/IngoreLLVMUsed: true))
383 return ABITypeAlign;
384
385 assert(!isKernelFunction(*F) && "Expect kernels to have non-local linkage");
386 return std::max(a: Align(16), b: ABITypeAlign);
387}
388
389Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx,
390 const DataLayout &DL) {
391 return getAlign(F: *F, Index: Idx).value_or(u: getFunctionParamOptimizedAlign(F, ArgTy: Ty, DL));
392}
393
394Align getFunctionByValParamAlign(const Function *F, Type *ArgTy,
395 Align InitialAlign, const DataLayout &DL) {
396 Align ArgAlign = InitialAlign;
397 if (F)
398 ArgAlign = std::max(a: ArgAlign, b: getFunctionParamOptimizedAlign(F, ArgTy, DL));
399
400 // Old ptx versions have a bug. When PTX code takes address of
401 // byval parameter with alignment < 4, ptxas generates code to
402 // spill argument into memory. Alas on sm_50+ ptxas generates
403 // SASS code that fails with misaligned access. To work around
404 // the problem, make sure that we align byval parameters by at
405 // least 4. This bug seems to be fixed at least starting from
406 // ptxas > 9.0.
407 // TODO: remove this after verifying the bug is not reproduced
408 // on non-deprecated ptxas versions.
409 if (ForceMinByValParamAlign)
410 ArgAlign = std::max(a: ArgAlign, b: Align(4));
411
412 return ArgAlign;
413}
414
415bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM) {
416 const auto &ST =
417 *static_cast<const NVPTXTargetMachine &>(TM).getSubtargetImpl();
418 if (!ST.hasNoReturn())
419 return false;
420
421 assert((isa<Function>(V) || isa<CallInst>(V)) &&
422 "Expect either a call instruction or a function");
423
424 if (const CallInst *CallI = dyn_cast<CallInst>(Val: V))
425 return CallI->doesNotReturn() &&
426 CallI->getFunctionType()->getReturnType()->isVoidTy();
427
428 const Function *F = cast<Function>(Val: V);
429 return F->doesNotReturn() &&
430 F->getFunctionType()->getReturnType()->isVoidTy() &&
431 !isKernelFunction(F: *F);
432}
433
434} // namespace llvm
435