//===- AMDGPU.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/AMDGPUAddrSpace.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUABIInfo final : public DefaultABIInfo {
private:
  static const unsigned MaxNumRegsForArgsRet = 16;

  uint64_t numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;

  // Coerce HIP scalar pointer arguments from generic pointers to global ones.
  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const {
    // Single value types.
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
    if (PtrTy && PtrTy->getAddressSpace() == FromAS)
      return llvm::PointerType::get(Ty->getContext(), ToAS);
    return Ty;
  }

public:
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
    DefaultABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, bool Variadic,
                                  unsigned &NumRegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  llvm::FixedVectorType *
  getOptimalVectorMemoryType(llvm::FixedVectorType *T,
                             const LangOptions &Opt) const override {
    // We have legal instructions for 96-bit so 3x32 can be supported.
    // FIXME: This check should be a subtarget feature as technically SI doesn't
    // support it.
    if (T->getNumElements() == 3 && getDataLayout().getTypeSizeInBits(T) == 96)
      return T;
    return DefaultABIInfo::getOptimalVectorMemoryType(T, Opt);
  }
};

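// On AMDGPU any type may serve as the base element of a homogeneous
// aggregate; the only limit is the total register count checked below.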
bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}

bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  // Homogeneous Aggregates may occupy at most 16 registers.
  return Members * NumRegs <= MaxNumRegsForArgsRet;
}

/// Estimate number of registers the type will use when passed in registers.
uint64_t AMDGPUABIInfo::numRegsForType(QualType Ty) const {
  uint64_t NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Compute from the number of elements. The reported size is based on the
    // in-memory size, which includes the padding 4th element for 3-vectors.
    QualType EltTy = VT->getElementType();
    uint64_t EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors should be passed as packed.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    uint64_t EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

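  // For records, sum the per-field register estimates; padding between fields
  // is not counted.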
  if (const auto *RD = Ty->getAsRecordDecl()) {
    assert(!RD->hasFlexibleArrayMember());

    for (const FieldDecl *Field : RD->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }

    return NumRegs;
  }

  return (getContext().getTypeSize(Ty) + 31) / 32;
}

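/// Classify the return type and every argument. Kernel (AMDGPU_KERNEL) calls
/// use the kernel argument ABI; all other calling conventions share a budget
/// of MaxNumRegsForArgsRet registers for arguments passed directly.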
void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned ArgumentIndex = 0;
  const unsigned numFixedArguments = FI.getNumRequiredArgs();

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      bool FixedArgument = ArgumentIndex++ < numFixedArguments;
      Arg.info = classifyArgumentType(Arg.type, !FixedArgument, NumRegsLeft);
    }
  }
}

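/// va_arg values are read straight out of the variadic buffer using a 4-byte
/// slot size and alignment; larger natural alignments are not honored
/// (AllowHigherAlign is false) and nothing is passed indirectly.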
RValue AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  const bool IsIndirect = false;
  const bool AllowHigherAlign = false;
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4), AllowHigherAlign, Slot);
}

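/// Small aggregates are returned directly in registers, packed into i16, i32
/// or a pair of i32s; aggregates that do not fit in MaxNumRegsForArgsRet
/// registers fall back to the default indirect (sret) return.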
ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();

      // Lower single-element structs to just return a regular value.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      if (const auto *RD = RetTy->getAsRecordDecl();
          RD && RD->hasFlexibleArrayMember())
        return DefaultABIInfo::classifyReturnType(RetTy);

      // Pack aggregates <= 8 bytes into a single VGPR or a pair.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      if (Size <= 64) {
        llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
        return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
      }

      if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
        return ABIArgInfo::getDirect();
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}

/// For kernels all parameters are really passed in a special buffer. It doesn't
/// make sense to pass anything byval, so everything must be direct.
ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // TODO: Can we omit empty structs?

  if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(SeltTy, 0);

  llvm::Type *OrigLTy = CGT.ConvertType(Ty);
  llvm::Type *LTy = OrigLTy;
  if (getContext().getLangOpts().HIP) {
    LTy = coerceKernelArgumentType(
        OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
        /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
  }
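  // For example, on amdgcn a HIP kernel parameter declared as 'int *' (a
  // generic pointer in the AST) is passed as 'ptr addrspace(1)' in the kernel
  // signature after the coercion above.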

  // FIXME: This doesn't apply the optimization of coercing pointers in structs
  // to global address space when using byref. This would require implementing a
  // new kind of coercion of the in-memory type for indirect arguments.
  if (LTy == OrigLTy && isAggregateTypeForABI(Ty)) {
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_constant),
        false /*Realign*/, nullptr /*Padding*/);
  }

  // If we set CanBeFlattened to true, CodeGen will expand the struct to its
  // individual elements, which confuses the Clover OpenCL backend; therefore we
  // have to set it to false here. Other args of getDirect() are just defaults.
  return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
}

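/// Classify a non-kernel (device function) argument. Aggregates of up to 8
/// bytes are packed into i16, i32 or [2 x i32]; larger aggregates that still
/// fit in the remaining register budget are passed directly; everything else
/// is passed byref in the private address space. Variadic arguments are
/// always passed directly without flattening.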
ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");

  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Variadic) {
    return ABIArgInfo::getDirect(/*T=*/nullptr,
                                 /*Offset=*/0,
                                 /*Padding=*/nullptr,
                                 /*CanBeFlattened=*/false,
                                 /*Align=*/0);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                     RAA == CGCXXABI::RAA_DirectInMemory);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

    if (const auto *RD = Ty->getAsRecordDecl();
        RD && RD->hasFlexibleArrayMember())
      return DefaultABIInfo::classifyArgumentType(Ty);

    // Pack aggregates <= 8 bytes into a single VGPR or a pair.
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64) {
      unsigned NumRegs = (Size + 31) / 32;
      NumRegsLeft -= std::min(NumRegsLeft, NumRegs);

      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      // XXX: Should this be i64 instead, and should the limit increase?
      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
    }

    if (NumRegsLeft > 0) {
      uint64_t NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;
        return ABIArgInfo::getDirect();
      }
    }

    // Use pass-by-reference instead of pass-by-value for struct arguments in
    // function ABI.
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_private));
  }

  // Otherwise just do the default thing.
  ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
  if (!ArgInfo.isIndirect()) {
    uint64_t NumRegs = numRegsForType(Ty);
    NumRegsLeft -= std::min(NumRegs, uint64_t{NumRegsLeft});
  }

  return ArgInfo;
}

class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}

  bool supportsLibCall() const override { return false; }
  void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
                                 CodeGenModule &CGM) const;

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getDeviceKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T, QualType QT) const override;

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  void setTargetAtomicMetadata(CodeGenFunction &CGF,
                               llvm::Instruction &AtomicInst,
                               const AtomicExpr *Expr = nullptr) const override;
  llvm::Value *createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                         llvm::Function *BlockInvokeFunc,
                                         llvm::Type *BlockTy) const override;
  bool shouldEmitStaticExternCAliases() const override;
  bool shouldEmitDWARFBitFieldSeparators() const override;
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
};
}

static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return !D->hasAttr<OMPDeclareTargetDeclAttr>() &&
         (D->hasAttr<DeviceKernelAttr>() ||
          (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
          (isa<VarDecl>(D) &&
           (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())));
}

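/// Translate AMDGPU-related declaration attributes (flat work-group size,
/// waves per EU, SGPR/VGPR counts, max workgroup counts, cluster dimensions)
/// into the corresponding "amdgpu-*" IR function attributes.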
void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
    const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
  const auto *ReqdWGS =
      M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
  const bool IsOpenCLKernel =
      M.getLangOpts().OpenCL && FD->hasAttr<DeviceKernelAttr>();
  const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    M.handleAMDGPUFlatWorkGroupSizeAttr(F, FlatWGS, ReqdWGS);
  } else if (IsOpenCLKernel || IsHIPKernel) {
    // By default, restrict the maximum size to a value specified by
    // --gpu-max-threads-per-block=n or its default value for HIP.
    const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
    const unsigned DefaultMaxWorkGroupSize =
        IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
                       : M.getLangOpts().GPUMaxThreadsPerBlock;
    std::string AttrVal =
        std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  }

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>())
    M.handleAMDGPUWavesPerEUAttr(F, Attr);

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUMaxNumWorkGroupsAttr>()) {
    uint32_t X = Attr->getMaxNumWorkGroupsX()
                     ->EvaluateKnownConstInt(M.getContext())
                     .getExtValue();
    // Y and Z dimensions default to 1 if not specified.
    uint32_t Y = Attr->getMaxNumWorkGroupsY()
                     ? Attr->getMaxNumWorkGroupsY()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;
    uint32_t Z = Attr->getMaxNumWorkGroupsZ()
                     ? Attr->getMaxNumWorkGroupsZ()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;

    llvm::SmallString<32> AttrVal;
    llvm::raw_svector_ostream OS(AttrVal);
    OS << X << ',' << Y << ',' << Z;

    F->addFnAttr("amdgpu-max-num-workgroups", AttrVal.str());
  }

  if (auto *Attr = FD->getAttr<CUDAClusterDimsAttr>()) {
    auto GetExprVal = [&](const auto &E) {
      return E ? E->EvaluateKnownConstInt(M.getContext()).getExtValue() : 1;
    };
    unsigned X = GetExprVal(Attr->getX());
    unsigned Y = GetExprVal(Attr->getY());
    unsigned Z = GetExprVal(Attr->getZ());
    llvm::SmallString<32> AttrVal;
    llvm::raw_svector_ostream OS(AttrVal);
    OS << X << ',' << Y << ',' << Z;
    F->addFnAttr("amdgpu-cluster-dims", AttrVal.str());
  }

  // OpenCL doesn't support the cluster feature.
  const TargetInfo &TTI = M.getContext().getTargetInfo();
  if ((IsOpenCLKernel &&
       TTI.hasFeatureEnabled(TTI.getTargetOpts().FeatureMap, "clusters")) ||
      FD->hasAttr<CUDANoClusterAttr>())
    F->addFnAttr("amdgpu-cluster-dims", "0,0,0");
}

void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  if (!F)
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD)
    setFunctionDeclAttributes(FD, F, M);
  if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
    F->addFnAttr("amdgpu-ieee", "false");
  if (getABIInfo().getCodeGenOpts().AMDGPUExpandWaitcntProfiling)
    F->addFnAttr("amdgpu-expand-waitcnt-profiling");
}

unsigned AMDGPUTargetCodeGenInfo::getDeviceKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}

// Currently LLVM assumes null pointers always have value 0,
// which results in incorrectly transformed IR. Therefore, instead of
// emitting null pointers in private and local address spaces, a null
// pointer in generic address space is emitted which is cast to a
// pointer in local or private address space.
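// For example (using the amdgcn address-space numbering, where private is
// addrspace(5)), a null private pointer is emitted as
// addrspacecast (ptr null to ptr addrspace(5)) rather than as a zero-valued
// ptr addrspace(5) null.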
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
    return llvm::ConstantPointerNull::get(PT);

  auto &Ctx = CGM.getContext();
  auto NPT = llvm::PointerType::get(
      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}

LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  // Only promote to address space 4 if VarDecl has constant initialization.
  if (D->getType().isConstantStorage(CGM.getContext(), false, false) &&
      D->hasConstantInitialization()) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return *ConstAS;
  }
  return DefaultGlobalAS;
}

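/// Map Clang sync scopes onto the amdgcn sync-scope names ("singlethread",
/// "wavefront", "cluster", "workgroup", "agent", or the empty string for
/// system scope). For OpenCL scopes with a non-seq_cst ordering a "one-as"
/// suffix is appended, since OpenCL atomics by default only order a single
/// address space.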
llvm::SyncScope::ID
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                            SyncScope Scope,
                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {
  std::string Name;
  switch (Scope) {
  case SyncScope::HIPSingleThread:
  case SyncScope::SingleScope:
    Name = "singlethread";
    break;
  case SyncScope::HIPWavefront:
  case SyncScope::OpenCLSubGroup:
  case SyncScope::WavefrontScope:
    Name = "wavefront";
    break;
  case SyncScope::HIPCluster:
  case SyncScope::ClusterScope:
    Name = "cluster";
    break;
  case SyncScope::HIPWorkgroup:
  case SyncScope::OpenCLWorkGroup:
  case SyncScope::WorkgroupScope:
    Name = "workgroup";
    break;
  case SyncScope::HIPAgent:
  case SyncScope::OpenCLDevice:
  case SyncScope::DeviceScope:
    Name = "agent";
    break;
  case SyncScope::SystemScope:
  case SyncScope::HIPSystem:
  case SyncScope::OpenCLAllSVMDevices:
    Name = "";
    break;
  }

  // OpenCL assumes by default that atomic scopes are per-address space for
  // non-sequentially consistent operations.
  if (Scope >= SyncScope::OpenCLWorkGroup &&
      Scope <= SyncScope::OpenCLSubGroup &&
      Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
    if (!Name.empty())
      Name = Twine(Twine(Name) + Twine("-")).str();

    Name = Twine(Twine(Name) + Twine("one-as")).str();
  }

  return Ctx.getOrInsertSyncScopeID(Name);
}

void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(
    CodeGenFunction &CGF, llvm::Instruction &AtomicInst,
    const AtomicExpr *AE) const {
  auto *RMW = dyn_cast<llvm::AtomicRMWInst>(&AtomicInst);
  auto *CmpX = dyn_cast<llvm::AtomicCmpXchgInst>(&AtomicInst);

  // OpenCL and old style HIP atomics consider atomics targeting thread private
  // memory to be undefined.
  //
  // TODO: This is probably undefined for atomic load/store, but there's not
  // much direct codegen benefit to knowing this.
  if (((RMW && RMW->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS) ||
       (CmpX &&
        CmpX->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS)) &&
      AE && AE->threadPrivateMemoryAtomicsAreUndefined()) {
    llvm::MDBuilder MDHelper(CGF.getLLVMContext());
    llvm::MDNode *ASRange = MDHelper.createRange(
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS),
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS + 1));
    AtomicInst.setMetadata(llvm::LLVMContext::MD_noalias_addrspace, ASRange);
  }

  if (!RMW)
    return;

  AtomicOptions AO = CGF.CGM.getAtomicOpts();
  llvm::MDNode *Empty = llvm::MDNode::get(CGF.getLLVMContext(), {});
  if (!AO.getOption(clang::AtomicOptionKind::FineGrainedMemory))
    RMW->setMetadata("amdgpu.no.fine.grained.memory", Empty);
  if (!AO.getOption(clang::AtomicOptionKind::RemoteMemory))
    RMW->setMetadata("amdgpu.no.remote.memory", Empty);
  if (AO.getOption(clang::AtomicOptionKind::IgnoreDenormalMode) &&
      RMW->getOperation() == llvm::AtomicRMWInst::FAdd &&
      RMW->getType()->isFloatTy())
    RMW->setMetadata("amdgpu.ignore.denormal.mode", Empty);
}

bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}

bool AMDGPUTargetCodeGenInfo::shouldEmitDWARFBitFieldSeparators() const {
  return true;
}

void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_DeviceKernel));
}

/// Return IR struct type for rtinfo struct in rocm-device-libs used for device
/// enqueue.
///
/// ptr addrspace(1) kernel_object, i32 private_segment_size,
/// i32 group_segment_size
static llvm::StructType *
getAMDGPURuntimeHandleType(llvm::LLVMContext &C,
                           llvm::Type *KernelDescriptorPtrTy) {
  llvm::Type *Int32 = llvm::Type::getInt32Ty(C);
  return llvm::StructType::create(C, {KernelDescriptorPtrTy, Int32, Int32},
                                  "block.runtime.handle.t");
}

/// Create an OpenCL kernel for an enqueued block.
///
/// The type of the first argument (the block literal) is the struct type
/// of the block literal instead of a pointer type. The first argument
/// (block literal) is passed directly by value to the kernel. The kernel
/// allocates the same type of struct on stack and stores the block literal
/// to it and passes its pointer to the block invoke function. The kernel
/// has "enqueued-block" function attribute and kernel argument metadata.
llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }

  llvm::Module &Mod = CGF.CGM.getModule();
  const llvm::DataLayout &DL = Mod.getDataLayout();

  llvm::Twine Name = Invoke->getName() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);

  // The kernel itself can be internal; the runtime does not directly access
  // the kernel address (only the kernel descriptor).
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &Mod);
  F->setCallingConv(getDeviceKernelCallingConv());

  llvm::AttrBuilder KernelAttrs(C);
  // FIXME: The invoke isn't applying the right attributes either
  // FIXME: This is missing setTargetAttributes
  CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
  F->addFnAttrs(KernelAttrs);

  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  const auto BlockAlign = DL.getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (llvm::Argument &A : llvm::drop_begin(F->args()))
    Args.push_back(&A);
  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  llvm::StructType *HandleTy = getAMDGPURuntimeHandleType(
      C, llvm::PointerType::get(C, DL.getDefaultGlobalsAddressSpace()));
  llvm::Constant *RuntimeHandleInitializer =
      llvm::ConstantAggregateZero::get(HandleTy);

  llvm::Twine RuntimeHandleName = F->getName() + ".runtime.handle";

  // The runtime needs access to the runtime handle as an external symbol. The
  // runtime handle will need to be made external later, in
  // AMDGPUExportOpenCLEnqueuedBlocks. The kernel itself has a hidden reference
  // inside the runtime handle, and is not directly referenced.

  // TODO: We would initialize the first field by declaring F->getName() + ".kd"
  // to reference the kernel descriptor. The runtime wouldn't need to bother
  // setting it. We would need to have a final symbol name though.
  // TODO: Can we directly use an external symbol with getGlobalIdentifier?
  auto *RuntimeHandle = new llvm::GlobalVariable(
      Mod, HandleTy,
      /*isConstant=*/true, llvm::GlobalValue::InternalLinkage,
      /*Initializer=*/RuntimeHandleInitializer, RuntimeHandleName,
      /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
      DL.getDefaultGlobalsAddressSpace(),
      /*isExternallyInitialized=*/true);

  llvm::MDNode *HandleAsMD =
      llvm::MDNode::get(C, llvm::ValueAsMetadata::get(RuntimeHandle));
  F->setMetadata(llvm::LLVMContext::MD_associated, HandleAsMD);

  RuntimeHandle->setSection(".amdgpu.kernel.runtime.handle");

  CGF.CGM.addUsedGlobal(F);
  CGF.CGM.addUsedGlobal(RuntimeHandle);
  return RuntimeHandle;
}

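/// Evaluate an explicit amdgpu_flat_work_group_size attribute, or derive the
/// bounds from reqd_work_group_size (X*Y*Z used as both min and max), and
/// attach the result as the "amdgpu-flat-work-group-size" function attribute
/// in "Min,Max" form. The evaluated bounds are optionally reported back
/// through MinThreadsVal/MaxThreadsVal.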
void CodeGenModule::handleAMDGPUFlatWorkGroupSizeAttr(
    llvm::Function *F, const AMDGPUFlatWorkGroupSizeAttr *FlatWGS,
    const ReqdWorkGroupSizeAttr *ReqdWGS, int32_t *MinThreadsVal,
    int32_t *MaxThreadsVal) {
  unsigned Min = 0;
  unsigned Max = 0;
  auto Eval = [&](Expr *E) {
    return E->EvaluateKnownConstInt(getContext()).getExtValue();
  };
  if (FlatWGS) {
    Min = Eval(FlatWGS->getMin());
    Max = Eval(FlatWGS->getMax());
  }
  if (ReqdWGS && Min == 0 && Max == 0)
    Min = Max = Eval(ReqdWGS->getXDim()) * Eval(ReqdWGS->getYDim()) *
                Eval(ReqdWGS->getZDim());

  if (Min != 0) {
    assert(Min <= Max && "Min must be less than or equal Max");

    if (MinThreadsVal)
      *MinThreadsVal = Min;
    if (MaxThreadsVal)
      *MaxThreadsVal = Max;
    std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
    if (F)
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}

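/// Attach the "amdgpu-waves-per-eu" function attribute, as "Min" when only a
/// minimum is given or "Min,Max" when both bounds are present.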
void CodeGenModule::handleAMDGPUWavesPerEUAttr(
    llvm::Function *F, const AMDGPUWavesPerEUAttr *Attr) {
  unsigned Min =
      Attr->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
  unsigned Max =
      Attr->getMax()
          ? Attr->getMax()->EvaluateKnownConstInt(getContext()).getExtValue()
          : 0;

  if (Min != 0) {
    assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

    std::string AttrVal = llvm::utostr(Min);
    if (Max != 0)
      AttrVal = AttrVal + "," + llvm::utostr(Max);
    F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<AMDGPUTargetCodeGenInfo>(CGM.getTypes());
}