//===- SPIR.cpp -----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "HLSLBufferLayoutBuilder.h"
#include "TargetInfo.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/IR/DerivedTypes.h"

#include <stdint.h>
#include <utility>

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// Base ABI and target codegen info implementation common between SPIR and
// SPIR-V.
//===----------------------------------------------------------------------===//

namespace {
class CommonSPIRABIInfo : public DefaultABIInfo {
public:
  CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }

private:
  void setCCs();
};

class SPIRVABIInfo : public CommonSPIRABIInfo {
public:
  SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
  void computeInfo(CGFunctionInfo &FI) const override;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

private:
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
};

class AMDGCNSPIRVABIInfo : public SPIRVABIInfo {
  // TODO: this should be unified / shared with AMDGPU, ideally we'd like to
  // re-use AMDGPUABIInfo eventually, rather than duplicate.
  static constexpr unsigned MaxNumRegsForArgsRet = 16; // 16 32-bit registers
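  // Number of 32-bit registers still available while classifying the current
  // function's arguments; reset to MaxNumRegsForArgsRet in computeInfo().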
  mutable unsigned NumRegsLeft = 0;

  uint64_t numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return true;
  }
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override {
    uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

    // Homogeneous Aggregates may occupy at most 16 registers.
    return Members * NumRegs <= MaxNumRegsForArgsRet;
  }

  // Coerce HIP scalar pointer arguments from generic pointers to global ones.
  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

public:
  AMDGCNSPIRVABIInfo(CodeGenTypes &CGT) : SPIRVABIInfo(CGT) {}
  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::FixedVectorType *
  getOptimalVectorMemoryType(llvm::FixedVectorType *Ty,
                             const LangOptions &LangOpt) const override;
};
} // end anonymous namespace
namespace {
class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
  CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
      : TargetCodeGenInfo(std::move(ABIInfo)) {}

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }

  unsigned getDeviceKernelCallingConv() const override;
  llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override;
  llvm::Type *getHLSLType(CodeGenModule &CGM, const Type *Ty,
                          const CGHLSLOffsetInfo &OffsetInfo) const override;

  llvm::Type *getHLSLPadding(CodeGenModule &CGM,
                             CharUnits NumBytes) const override {
    unsigned Size = NumBytes.getQuantity();
    return llvm::TargetExtType::get(CGM.getLLVMContext(), "spirv.Padding", {},
                                    {Size});
  }

  bool isHLSLPadding(llvm::Type *Ty) const override {
    if (auto *TET = dyn_cast<llvm::TargetExtType>(Ty))
      return TET->getName() == "spirv.Padding";
    return false;
  }

  llvm::Type *getSPIRVImageTypeFromHLSLResource(
      const HLSLAttributedResourceType::Attributes &attributes,
      QualType SampledType, CodeGenModule &CGM) const;
  void
  setOCLKernelStubCallingConvention(const FunctionType *&FT) const override;
  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T,
                                 QualType QT) const override;
};
class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
public:
  SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : CommonSPIRTargetCodeGenInfo(
            (CGT.getTarget().getTriple().getVendor() == llvm::Triple::AMD)
                ? std::make_unique<AMDGCNSPIRVABIInfo>(CGT)
                : std::make_unique<SPIRVABIInfo>(CGT)) {}
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  StringRef getLLVMSyncScopeStr(const LangOptions &LangOpts, SyncScope Scope,
                                llvm::AtomicOrdering Ordering) const override;
  bool supportsLibCall() const override {
    return getABIInfo().getTarget().getTriple().getVendor() !=
           llvm::Triple::AMD;
  }
};
} // end anonymous namespace

void CommonSPIRABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);
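  // Runtime (helper) calls on SPIR targets use the SPIR function calling
  // convention rather than the C default.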
  RuntimeCC = llvm::CallingConv::SPIR_FUNC;
}

ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
  // Coerce pointer arguments with default address space to CrossWorkGroup
  // pointers, as default address space kernel arguments are not allowed. We
  // use the opencl_global language address space, which always maps to
  // CrossWorkGroup.
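  // For example, an `int *` kernel parameter in the default address space is
  // lowered below as an `opencl_global` (CrossWorkGroup) pointer.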
  llvm::Type *LTy = CGT.ConvertType(Ty);
  auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
  auto GlobalAS = getContext().getTargetAddressSpace(LangAS::opencl_global);
  auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
  if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
    LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
    return ABIArgInfo::getDirect(LTy, 0, nullptr, /*CanBeFlattened=*/false);
  }

  if (getContext().getLangOpts().isTargetDevice() &&
      isAggregateTypeForABI(Ty)) {
    // Force copying aggregate type in kernel arguments by value when
    // compiling CUDA targeting SPIR-V. This is required for the object
    // copied to be valid on the device.
    // This behavior follows the CUDA spec
    // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing,
    // and matches the NVPTX implementation. TODO: hardcoding to 0 should be
    // revisited if HIPSPV / byval starts making use of the AS of an indirect
    // arg.
    return getNaturalAlignIndirect(Ty, /*AddrSpace=*/0, /*ByVal=*/true);
  }
  return classifyArgumentType(Ty);
}

void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // The logic is the same as in DefaultABIInfo, except for the handling of
  // kernel arguments and of the variadic portion of an argument list.
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &&[ArgIndex, I] : llvm::enumerate(FI.arguments())) {
    if (ArgIndex >= FI.getNumRequiredArgs())
      I.info = ABIArgInfo::getDirect();
    else if (CC == llvm::CallingConv::SPIR_KERNEL)
      I.info = classifyKernelArgumentType(I.type);
    else
      I.info = classifyArgumentType(I.type);
  }
}

RValue SPIRVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                               QualType Ty, AggValueSlot Slot) const {
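  // Lower va_arg as a simple pointer bump over the va_list buffer, reading
  // each value at its natural size and alignment.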
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(1),
                          /*AllowHigherAlign=*/true, Slot);
}

uint64_t AMDGCNSPIRVABIInfo::numRegsForType(QualType Ty) const {
  // This duplicates the AMDGPUABI computation.
  uint64_t NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Compute from the number of elements. The reported size is based on the
    // in-memory size, which includes the padding 4th element for 3-vectors.
    QualType EltTy = VT->getElementType();
    uint64_t EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors should be passed as packed.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    uint64_t EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

  if (const auto *RD = Ty->getAsRecordDecl()) {
    assert(!RD->hasFlexibleArrayMember());

    for (const FieldDecl *Field : RD->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }

    return NumRegs;
  }

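  // All remaining cases: round the in-memory size up to a whole number of
  // 32-bit registers (e.g. a 48-bit type occupies two).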
  return (getContext().getTypeSize(Ty) + 31) / 32;
}

llvm::Type *AMDGCNSPIRVABIInfo::coerceKernelArgumentType(llvm::Type *Ty,
                                                         unsigned FromAS,
                                                         unsigned ToAS) const {
  // Single value types.
  auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
  if (PtrTy && PtrTy->getAddressSpace() == FromAS)
    return llvm::PointerType::get(Ty->getContext(), ToAS);
  return Ty;
}

ABIArgInfo AMDGCNSPIRVABIInfo::classifyReturnType(QualType RetTy) const {
  if (!isAggregateTypeForABI(RetTy) || getRecordArgABI(RetTy, getCXXABI()))
    return DefaultABIInfo::classifyReturnType(RetTy);

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Lower single-element structs to just return a regular value.
  if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
    return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

  if (const auto *RD = RetTy->getAsRecordDecl();
      RD && RD->hasFlexibleArrayMember())
    return DefaultABIInfo::classifyReturnType(RetTy);

  // Pack aggregates <= 8 bytes into a single VGPR or a pair.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

  if (Size <= 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

  // TODO: This is carried over from an AMDGPU oddity; we retain it to ensure
  // consistency, but it might be reasonable to return an i64 here instead.
  if (Size <= 64) {
    llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
  }

  if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
    return ABIArgInfo::getDirect();
  return DefaultABIInfo::classifyReturnType(RetTy);
}

/// For kernels all parameters are really passed in a special buffer. It
/// doesn't make sense to pass anything byval, so everything must be direct.
ABIArgInfo AMDGCNSPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // TODO: Can we omit empty structs?

  if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(SeltTy, 0);

  llvm::Type *OrigLTy = CGT.ConvertType(Ty);
  llvm::Type *LTy = OrigLTy;
  if (getContext().getLangOpts().isTargetDevice()) {
    LTy = coerceKernelArgumentType(
        OrigLTy,
        /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
        /*ToAS=*/getContext().getTargetAddressSpace(LangAS::opencl_global));
  }

  // FIXME: This doesn't apply the optimization of coercing pointers in structs
  // to global address space when using byref. This would require implementing
  // a new kind of coercion of the in-memory type for indirect arguments.
  if (LTy == OrigLTy && isAggregateTypeForABI(Ty)) {
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_constant),
        /*Realign=*/false, /*Padding=*/nullptr);
  }

  // TODO: Inhibiting flattening is an AMDGPU workaround for Clover; it might
  // be vestigial and should be revisited.
  return ABIArgInfo::getDirect(LTy, 0, nullptr, /*CanBeFlattened=*/false);
}

ABIArgInfo AMDGCNSPIRVABIInfo::classifyArgumentType(QualType Ty) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // TODO: support for variadics.

  if (!isAggregateTypeForABI(Ty)) {
    ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
    if (!ArgInfo.isIndirect()) {
      uint64_t NumRegs = numRegsForType(Ty);
      NumRegsLeft -= std::min(NumRegs, uint64_t{NumRegsLeft});
    }

    return ArgInfo;
  }

  // Records with non-trivial destructors/copy-constructors should not be
  // passed by value.
  if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
                                   RAA == CGCXXABI::RAA_DirectInMemory);

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  // Lower single-element structs to just pass a regular value. TODO: We
  // could do reasonable-size multiple-element structs too, using getExpand(),
  // though watch out for things like bitfields.
  if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
    return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

  if (const auto *RD = Ty->getAsRecordDecl();
      RD && RD->hasFlexibleArrayMember())
    return DefaultABIInfo::classifyArgumentType(Ty);

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 64) {
    // Pack aggregates <= 8 bytes into a single VGPR or a pair.
    unsigned NumRegs = (Size + 31) / 32;
    NumRegsLeft -= std::min(NumRegsLeft, NumRegs);

    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // TODO: This is an AMDGPU oddity and might be vestigial; we retain it to
    // ensure consistency, but it should be revisited.
    llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
  }

  if (NumRegsLeft > 0) {
    uint64_t NumRegs = numRegsForType(Ty);
    if (NumRegsLeft >= NumRegs) {
      NumRegsLeft -= NumRegs;
      return ABIArgInfo::getDirect();
    }
  }

  // Use pass-by-reference instead of pass-by-value for struct arguments in
  // the function ABI.
  return ABIArgInfo::getIndirectAliased(
      getContext().getTypeAlignInChars(Ty),
      getContext().getTargetAddressSpace(LangAS::opencl_private));
}

void AMDGCNSPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &I : FI.arguments()) {
    if (CC == llvm::CallingConv::SPIR_KERNEL)
      I.info = classifyKernelArgumentType(I.type);
    else
      I.info = classifyArgumentType(I.type);
  }
}

llvm::FixedVectorType *AMDGCNSPIRVABIInfo::getOptimalVectorMemoryType(
    llvm::FixedVectorType *Ty, const LangOptions &LangOpt) const {
  // AMDGPU has legal instructions for 96-bit memory accesses, so 3 x 32-bit
  // vectors can be kept as-is rather than falling back to the default.
  if (Ty->getNumElements() == 3 && getDataLayout().getTypeSizeInBits(Ty) == 96)
    return Ty;
  return DefaultABIInfo::getOptimalVectorMemoryType(Ty, LangOpt);
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  if (CGM.getTarget().getTriple().isSPIRV()) {
    if (CGM.getTarget().getTriple().getVendor() == llvm::Triple::AMD)
      AMDGCNSPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
    else
      SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
  } else {
    CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
  }
}
} // namespace CodeGen
} // namespace clang

unsigned CommonSPIRTargetCodeGenInfo::getDeviceKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}

void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  // Convert HIP kernels to SPIR-V kernels.
  if (getABIInfo().getContext().getLangOpts().HIP) {
    FT = getABIInfo().getContext().adjustFunctionType(
        FT, FT->getExtInfo().withCallingConv(CC_DeviceKernel));
    return;
  }
}

void CommonSPIRTargetCodeGenInfo::setOCLKernelStubCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_SpirFunction));
}

// LLVM currently assumes a null pointer has the bit pattern 0, but some GPU
// targets use a non-zero encoding for null in certain address spaces.
// Because SPIR(-V) is a generic target and the bit pattern of null in
// non-generic AS is unspecified, materialize null in non-generic AS via an
// addrspacecast from null in generic AS. This allows later lowering to
// substitute the target's real sentinel value.
llvm::Constant *
CommonSPIRTargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
                                            llvm::PointerType *PT,
                                            QualType QT) const {
  LangAS AS = QT->getUnqualifiedDesugaredType()->isNullPtrType()
                  ? LangAS::Default
                  : QT->getPointeeType().getAddressSpace();
  unsigned ASAsInt = static_cast<unsigned>(AS);
  unsigned FirstTargetASAsInt =
      static_cast<unsigned>(LangAS::FirstTargetAddressSpace);
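  // Target address space 9 holds the CodeSectionINTEL storage class that
  // SPV_INTEL_function_pointers uses for function pointers.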
  unsigned CodeSectionINTELAS = FirstTargetASAsInt + 9;
  // As per SPV_INTEL_function_pointers, it is illegal to addrspacecast
  // function pointers to/from the generic AS.
  bool IsFunctionPtrAS =
      CGM.getTriple().isSPIRV() && ASAsInt == CodeSectionINTELAS;
  if (AS == LangAS::Default || AS == LangAS::opencl_generic ||
      AS == LangAS::opencl_constant || IsFunctionPtrAS)
    return llvm::ConstantPointerNull::get(PT);

  auto &Ctx = CGM.getContext();
  auto NPT = llvm::PointerType::get(
      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}

LangAS
SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                 const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  // If we're here it means that we're using the SPIRDefIsGen ASMap, hence for
  // the global AS we can rely on either cuda_device or sycl_global to be
  // correct; however, since this is not a CUDA Device context, we use
  // sycl_global to prevent confusion with the assertion.
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::sycl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  return DefaultGlobalAS;
}

void SPIRVTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  assert(F && "Expected GlobalValue to be a Function");

  if (!M.getLangOpts().HIP ||
      M.getTarget().getTriple().getVendor() != llvm::Triple::AMD)
    return;

  if (!FD->hasAttr<CUDAGlobalAttr>())
    return;

  unsigned N = M.getLangOpts().GPUMaxThreadsPerBlock;
  if (auto FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>())
    N = FlatWGS->getMax()->EvaluateKnownConstInt(M.getContext()).getExtValue();

  // We encode the maximum flat WG size in the first component of the 3D
  // max_work_group_size attribute, which will get reverse-translated into the
  // original AMDGPU attribute when targeting AMDGPU.
  auto Int32Ty = llvm::IntegerType::getInt32Ty(M.getLLVMContext());
  llvm::Metadata *AttrMDArgs[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, N)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 1))};

  F->setMetadata("max_work_group_size",
                 llvm::MDNode::get(M.getLLVMContext(), AttrMDArgs));
}

StringRef SPIRVTargetCodeGenInfo::getLLVMSyncScopeStr(
    const LangOptions &, SyncScope Scope, llvm::AtomicOrdering) const {
  switch (Scope) {
  case SyncScope::HIPSingleThread:
  case SyncScope::SingleScope:
    return "singlethread";
  case SyncScope::HIPWavefront:
  case SyncScope::OpenCLSubGroup:
  case SyncScope::WavefrontScope:
    return "subgroup";
  case SyncScope::HIPCluster:
  case SyncScope::ClusterScope:
  case SyncScope::HIPWorkgroup:
  case SyncScope::OpenCLWorkGroup:
  case SyncScope::WorkgroupScope:
    return "workgroup";
  case SyncScope::HIPAgent:
  case SyncScope::OpenCLDevice:
  case SyncScope::DeviceScope:
    return "device";
  case SyncScope::SystemScope:
  case SyncScope::HIPSystem:
  case SyncScope::OpenCLAllSVMDevices:
    // The empty string maps to LLVM's default (system) sync scope.
    return "";
  }
  return "";
}

/// Construct a SPIR-V target extension type for the given OpenCL image type.
static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType,
                                     StringRef OpenCLName,
                                     unsigned AccessQualifier) {
  // These parameters correspond to the operands of OpTypeImage (see
  // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage
  // for more details). The first six integer parameters all default to 0 and
  // are overridden below only for the image types that require it. The
  // seventh integer parameter is the access qualifier, which is appended at
  // the end.
  SmallVector<unsigned, 7> IntParams = {0, 0, 0, 0, 0, 0};

  // Choose the dimension of the image--this corresponds to the Dim enum in
  // SPIR-V (first integer parameter of OpTypeImage).
  if (OpenCLName.starts_with("image2d"))
    IntParams[0] = 1;
  else if (OpenCLName.starts_with("image3d"))
    IntParams[0] = 2;
  else if (OpenCLName == "image1d_buffer")
    IntParams[0] = 5; // Buffer
  else
    assert(OpenCLName.starts_with("image1d") && "Unknown image type");

  // Set the other integer parameters of OpTypeImage if necessary. Note that
  // the OpenCL image types don't provide any information for the Sampled or
  // Image Format parameters.
  if (OpenCLName.contains("_depth"))
    IntParams[1] = 1;
  if (OpenCLName.contains("_array"))
    IntParams[2] = 1;
  if (OpenCLName.contains("_msaa"))
    IntParams[3] = 1;

  // Access qualifier
  IntParams.push_back(AccessQualifier);

  return llvm::TargetExtType::get(Ctx, BaseType, {llvm::Type::getVoidTy(Ctx)},
                                  IntParams);
}

llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(CodeGenModule &CGM,
                                                       const Type *Ty) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  if (auto *PipeTy = dyn_cast<PipeType>(Ty))
    return llvm::TargetExtType::get(Ctx, "spirv.Pipe", {},
                                    {!PipeTy->isReadOnly()});
  if (auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
    enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
    switch (BuiltinTy->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
  case BuiltinType::Id:                                                        \
    return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
#include "clang/Basic/OpenCLImageTypes.def"
    case BuiltinType::OCLSampler:
      return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
    case BuiltinType::OCLEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.Event");
    case BuiltinType::OCLClkEvent:
      return llvm::TargetExtType::get(Ctx, "spirv.DeviceEvent");
    case BuiltinType::OCLQueue:
      return llvm::TargetExtType::get(Ctx, "spirv.Queue");
    case BuiltinType::OCLReserveID:
      return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
#define INTEL_SUBGROUP_AVC_TYPE(Name, Id)                                      \
  case BuiltinType::OCLIntelSubgroupAVC##Id:                                   \
    return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
#include "clang/Basic/OpenCLExtensionTypes.def"
    default:
      return nullptr;
    }
  }

  return nullptr;
}

// Gets a spirv.IntegralConstant or spirv.Literal. If IntegralType is present,
// returns an IntegralConstant, otherwise returns a Literal.
static llvm::Type *getInlineSpirvConstant(CodeGenModule &CGM,
                                          llvm::Type *IntegralType,
                                          llvm::APInt Value) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Convert the APInt value to an array of uint32_t words
  llvm::SmallVector<uint32_t> Words;

  while (Value.ugt(0)) {
    uint32_t Word = Value.trunc(32).getZExtValue();
    Value.lshrInPlace(32);

    Words.push_back(Word);
  }
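  // A zero value never enters the loop above; still emit one zero word so the
  // constant carries at least one literal operand.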
  if (Words.empty())
    Words.push_back(0);

  if (IntegralType)
    return llvm::TargetExtType::get(Ctx, "spirv.IntegralConstant",
                                    {IntegralType}, Words);
  return llvm::TargetExtType::get(Ctx, "spirv.Literal", {}, Words);
}

static llvm::Type *getInlineSpirvType(CodeGenModule &CGM,
                                      const HLSLInlineSpirvType *SpirvType) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  llvm::SmallVector<llvm::Type *> Operands;

  for (auto &Operand : SpirvType->getOperands()) {
    using SpirvOperandKind = SpirvOperand::SpirvOperandKind;

    llvm::Type *Result = nullptr;
    switch (Operand.getKind()) {
    case SpirvOperandKind::ConstantId: {
      llvm::Type *IntegralType =
          CGM.getTypes().ConvertType(Operand.getResultType());

      Result = getInlineSpirvConstant(CGM, IntegralType, Operand.getValue());
      break;
    }
    case SpirvOperandKind::Literal: {
      Result = getInlineSpirvConstant(CGM, nullptr, Operand.getValue());
      break;
    }
    case SpirvOperandKind::TypeId: {
      QualType TypeOperand = Operand.getResultType();
      if (const auto *RD = TypeOperand->getAsRecordDecl()) {
        assert(RD->isCompleteDefinition() &&
               "Type completion should have been required in Sema");

        const FieldDecl *HandleField = RD->findFirstNamedDataMember();
        if (HandleField) {
          QualType ResourceType = HandleField->getType();
          if (ResourceType->getAs<HLSLAttributedResourceType>()) {
            TypeOperand = ResourceType;
          }
        }
      }
      Result = CGM.getTypes().ConvertType(TypeOperand);
      break;
    }
    default:
      llvm_unreachable("HLSLInlineSpirvType had invalid operand!");
      break;
    }

    assert(Result);
    Operands.push_back(Result);
  }

  return llvm::TargetExtType::get(Ctx, "spirv.Type", Operands,
                                  {SpirvType->getOpcode(), SpirvType->getSize(),
                                   SpirvType->getAlignment()});
}

llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType(
    CodeGenModule &CGM, const Type *Ty,
    const CGHLSLOffsetInfo &OffsetInfo) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  if (auto *SpirvType = dyn_cast<HLSLInlineSpirvType>(Ty))
    return getInlineSpirvType(CGM, SpirvType);

  auto *ResType = dyn_cast<HLSLAttributedResourceType>(Ty);
  if (!ResType)
    return nullptr;

  const HLSLAttributedResourceType::Attributes &ResAttrs = ResType->getAttrs();
  switch (ResAttrs.ResourceClass) {
  case llvm::dxil::ResourceClass::UAV:
  case llvm::dxil::ResourceClass::SRV: {
    // TypedBuffer and RawBuffer both need an element type.
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull())
      return nullptr;

    assert(!ResAttrs.IsROV &&
           "Rasterizer order views not implemented for SPIR-V yet");

    if (!ResAttrs.RawBuffer) {
      // Convert the element type into a SPIR-V image type.
      return getSPIRVImageTypeFromHLSLResource(ResAttrs, ContainedTy, CGM);
    }

    if (ResAttrs.IsCounter) {
      llvm::Type *ElemType = llvm::Type::getInt32Ty(Ctx);
      uint32_t StorageClass = /* StorageBuffer storage class */ 12;
      return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {ElemType},
                                      {StorageClass, true});
    }
    llvm::Type *ElemType = CGM.getTypes().ConvertTypeForMem(ContainedTy);
    llvm::ArrayType *RuntimeArrayType = llvm::ArrayType::get(ElemType, 0);
    uint32_t StorageClass = /* StorageBuffer storage class */ 12;
    bool IsWritable = ResAttrs.ResourceClass == llvm::dxil::ResourceClass::UAV;
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer",
                                    {RuntimeArrayType},
                                    {StorageClass, IsWritable});
  }
  case llvm::dxil::ResourceClass::CBuffer: {
    QualType ContainedTy = ResType->getContainedType();
    if (ContainedTy.isNull() || !ContainedTy->isStructureType())
      return nullptr;

    llvm::StructType *BufferLayoutTy =
        HLSLBufferLayoutBuilder(CGM).layOutStruct(
            ContainedTy->getAsCanonical<RecordType>(), OffsetInfo);
    uint32_t StorageClass = /* Uniform storage class */ 2;
    return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {BufferLayoutTy},
                                    {StorageClass, false});
  }
  case llvm::dxil::ResourceClass::Sampler:
    return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
  }
  return nullptr;
}

static unsigned
getImageFormat(const LangOptions &LangOpts,
               const HLSLAttributedResourceType::Attributes &attributes,
               llvm::Type *SampledType, QualType Ty, unsigned NumChannels) {
  // For images with `Sampled` operand equal to 2, there are restrictions on
  // using the Unknown image format. To avoid these restrictions in common
  // cases, we guess an image format for them based on the sampled type and the
  // number of channels. This is intended to match the behaviour of DXC.
  if (LangOpts.HLSLSpvUseUnknownImageFormat ||
      attributes.ResourceClass != llvm::dxil::ResourceClass::UAV) {
    return 0; // Unknown
  }

  if (SampledType->isIntegerTy(32)) {
    if (Ty->isSignedIntegerType()) {
      if (NumChannels == 1)
        return 24; // R32i
      if (NumChannels == 2)
        return 25; // Rg32i
      if (NumChannels == 4)
        return 21; // Rgba32i
    } else {
      if (NumChannels == 1)
        return 33; // R32ui
      if (NumChannels == 2)
        return 35; // Rg32ui
      if (NumChannels == 4)
        return 30; // Rgba32ui
    }
  } else if (SampledType->isIntegerTy(64)) {
    if (NumChannels == 1) {
      if (Ty->isSignedIntegerType()) {
        return 41; // R64i
      }
      return 40; // R64ui
    }
  } else if (SampledType->isFloatTy()) {
    if (NumChannels == 1)
      return 3; // R32f
    if (NumChannels == 2)
      return 6; // Rg32f
    if (NumChannels == 4)
      return 1; // Rgba32f
  }

  return 0; // Unknown
}

llvm::Type *CommonSPIRTargetCodeGenInfo::getSPIRVImageTypeFromHLSLResource(
    const HLSLAttributedResourceType::Attributes &attributes, QualType Ty,
    CodeGenModule &CGM) const {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  unsigned NumChannels = 1;
  Ty = Ty->getCanonicalTypeUnqualified();
  if (const VectorType *V = dyn_cast<VectorType>(Ty)) {
    NumChannels = V->getNumElements();
    Ty = V->getElementType();
  }
  assert(!Ty->isVectorType() && "We still have a vector type.");

  llvm::Type *SampledType = CGM.getTypes().ConvertTypeForMem(Ty);

  assert((SampledType->isIntegerTy() || SampledType->isFloatingPointTy()) &&
         "The element type for a SPIR-V resource must be a scalar integer or "
         "floating point type.");

  // These parameters correspond to the operands to the OpTypeImage SPIR-V
  // instruction. See
  // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage.
  SmallVector<unsigned, 6> IntParams(6, 0);

  const char *Name =
      Ty->isSignedIntegerType() ? "spirv.SignedImage" : "spirv.Image";

  // Dim
  switch (attributes.ResourceDimension) {
  case llvm::dxil::ResourceDimension::Dim1D:
    IntParams[0] = 0; // Dim1D
    break;
  case llvm::dxil::ResourceDimension::Dim2D:
    IntParams[0] = 1; // Dim2D
    break;
  case llvm::dxil::ResourceDimension::Dim3D:
    IntParams[0] = 2; // Dim3D
    break;
  case llvm::dxil::ResourceDimension::Cube:
    IntParams[0] = 3; // DimCube
    break;
  case llvm::dxil::ResourceDimension::Unknown:
    IntParams[0] = 5; // DimBuffer
    break;
  }

  // Depth
  // HLSL does not indicate if it is a depth texture or not, so we use unknown.
  IntParams[1] = 2;

  // Arrayed
  IntParams[2] = 0;

  // MS
  IntParams[3] = 0;

  // Sampled
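  // Per OpTypeImage, 1 means the image is read through a sampler (SRV), while
  // 2 means it is read/written without a sampler (UAV storage image).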
  IntParams[4] =
      attributes.ResourceClass == llvm::dxil::ResourceClass::UAV ? 2 : 1;

  // Image format.
  IntParams[5] = getImageFormat(CGM.getLangOpts(), attributes, SampledType, Ty,
                                NumChannels);

  llvm::TargetExtType *ImageType =
      llvm::TargetExtType::get(Ctx, Name, {SampledType}, IntParams);
  return ImageType;
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<CommonSPIRTargetCodeGenInfo>(CGM.getTypes());
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createSPIRVTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<SPIRVTargetCodeGenInfo>(CGM.getTypes());
}