//===--- AMDGPU.h - Declare AMDGPU target feature support -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares AMDGPU TargetInfo objects.
//
//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H
14#define LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H
15
16#include "clang/Basic/TargetID.h"
17#include "clang/Basic/TargetInfo.h"
18#include "clang/Basic/TargetOptions.h"
19#include "llvm/ADT/StringSet.h"
20#include "llvm/Support/AMDGPUAddrSpace.h"
21#include "llvm/Support/Compiler.h"
22#include "llvm/TargetParser/TargetParser.h"
23#include "llvm/TargetParser/Triple.h"
24#include <optional>
25
26namespace clang {
27namespace targets {
28
29class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {
30
31 static const char *const GCCRegNames[];
32
33 static const LangASMap AMDGPUDefIsGenMap;
34 static const LangASMap AMDGPUDefIsPrivMap;
35
36 llvm::AMDGPU::GPUKind GPUKind;
37 unsigned GPUFeatures;
38 unsigned WavefrontSize;
39
40 /// Whether to use cumode or WGP mode. True for cumode. False for WGP mode.
41 bool CUMode;
42
43 /// Whether having image instructions.
44 bool HasImage = false;
45
46 /// Target ID is device name followed by optional feature name postfixed
47 /// by plus or minus sign delimitted by colon, e.g. gfx908:xnack+:sramecc-.
48 /// If the target ID contains feature+, map it to true.
49 /// If the target ID contains feature-, map it to false.
50 /// If the target ID does not contain a feature (default), do not map it.
51 llvm::StringMap<bool> OffloadArchFeatures;
52 std::string TargetID;
53
54 bool hasFP64() const {
55 return getTriple().isAMDGCN() ||
56 !!(GPUFeatures & llvm::AMDGPU::FEATURE_FP64);
57 }
58
59 /// Has fast fma f32
60 bool hasFastFMAF() const {
61 return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_FMA_F32);
62 }
63
64 /// Has fast fma f64
65 bool hasFastFMA() const { return getTriple().isAMDGCN(); }
66
67 bool hasFMAF() const {
68 return getTriple().isAMDGCN() ||
69 !!(GPUFeatures & llvm::AMDGPU::FEATURE_FMA);
70 }
71
72 bool hasFullRateDenormalsF32() const {
73 return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_DENORMAL_F32);
74 }
75
76 bool hasLDEXPF() const {
77 return getTriple().isAMDGCN() ||
78 !!(GPUFeatures & llvm::AMDGPU::FEATURE_LDEXP);
79 }
80
81 static bool isR600(const llvm::Triple &TT) {
82 return TT.getArch() == llvm::Triple::r600;
83 }
84
85 bool hasFlatSupport() const {
86 if (GPUKind >= llvm::AMDGPU::GK_GFX700)
87 return true;
88
89 // Dummy target is assumed to be gfx700+ for amdhsa.
90 if (GPUKind == llvm::AMDGPU::GK_NONE &&
91 getTriple().getOS() == llvm::Triple::AMDHSA)
92 return true;
93
94 return false;
95 }
96
97public:
98 AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts);
99
100 void setAddressSpaceMap(bool DefaultIsPrivate);
101
102 void adjust(DiagnosticsEngine &Diags, LangOptions &Opts,
103 const TargetInfo *Aux) override;
104
105 uint64_t getPointerWidthV(LangAS AS) const override {
106 if (isR600(TT: getTriple()))
107 return 32;
108 unsigned TargetAS = getTargetAddressSpace(AS);
109
110 if (TargetAS == llvm::AMDGPUAS::PRIVATE_ADDRESS ||
111 TargetAS == llvm::AMDGPUAS::LOCAL_ADDRESS)
112 return 32;
113
114 return 64;
115 }
116
117 uint64_t getPointerAlignV(LangAS AddrSpace) const override {
118 return getPointerWidthV(AS: AddrSpace);
119 }
120
121 virtual bool isAddressSpaceSupersetOf(LangAS A, LangAS B) const override {
122 // The flat address space AS(0) is a superset of all the other address
123 // spaces used by the backend target.
124 return A == B ||
125 ((A == LangAS::Default ||
126 (isTargetAddressSpace(AS: A) &&
127 toTargetAddressSpace(AS: A) == llvm::AMDGPUAS::FLAT_ADDRESS)) &&
128 isTargetAddressSpace(AS: B) &&
129 toTargetAddressSpace(AS: B) >= llvm::AMDGPUAS::FLAT_ADDRESS &&
130 toTargetAddressSpace(AS: B) <= llvm::AMDGPUAS::PRIVATE_ADDRESS &&
131 toTargetAddressSpace(AS: B) != llvm::AMDGPUAS::REGION_ADDRESS);
132 }
133
134 uint64_t getMaxPointerWidth() const override {
135 return getTriple().isAMDGCN() ? 64 : 32;
136 }
137
138 bool hasBFloat16Type() const override { return getTriple().isAMDGCN(); }
139
140 std::string_view getClobbers() const override { return ""; }
141
142 ArrayRef<const char *> getGCCRegNames() const override;
143
144 ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
145 return {};
146 }
147
148 /// Accepted register names: (n, m is unsigned integer, n < m)
149 /// v
150 /// s
151 /// a
152 /// {vn}, {v[n]}
153 /// {sn}, {s[n]}
154 /// {an}, {a[n]}
155 /// {S} , where S is a special register name
156 ////{v[n:m]}
157 /// {s[n:m]}
158 /// {a[n:m]}
159 bool validateAsmConstraint(const char *&Name,
160 TargetInfo::ConstraintInfo &Info) const override {
161 static const ::llvm::StringSet<> SpecialRegs({
162 "exec", "vcc", "flat_scratch", "m0", "scc", "tba", "tma",
163 "flat_scratch_lo", "flat_scratch_hi", "vcc_lo", "vcc_hi", "exec_lo",
164 "exec_hi", "tma_lo", "tma_hi", "tba_lo", "tba_hi",
165 });
166
167 switch (*Name) {
168 case 'I':
169 Info.setRequiresImmediate(Min: -16, Max: 64);
170 return true;
171 case 'J':
172 Info.setRequiresImmediate(Min: -32768, Max: 32767);
173 return true;
174 case 'A':
175 case 'B':
176 case 'C':
177 Info.setRequiresImmediate();
178 return true;
179 default:
180 break;
181 }
182
183 StringRef S(Name);
184
185 if (S == "DA" || S == "DB") {
186 Name++;
187 Info.setRequiresImmediate();
188 return true;
189 }
190
191 bool HasLeftParen = S.consume_front(Prefix: "{");
192 if (S.empty())
193 return false;
194 if (S.front() != 'v' && S.front() != 's' && S.front() != 'a') {
195 if (!HasLeftParen)
196 return false;
197 auto E = S.find(C: '}');
198 if (!SpecialRegs.count(Key: S.substr(Start: 0, N: E)))
199 return false;
200 S = S.drop_front(N: E + 1);
201 if (!S.empty())
202 return false;
203 // Found {S} where S is a special register.
204 Info.setAllowsRegister();
205 Name = S.data() - 1;
206 return true;
207 }
208 S = S.drop_front();
209 if (!HasLeftParen) {
210 if (!S.empty())
211 return false;
212 // Found s, v or a.
213 Info.setAllowsRegister();
214 Name = S.data() - 1;
215 return true;
216 }
217 bool HasLeftBracket = S.consume_front(Prefix: "[");
218 unsigned long long N;
219 if (S.empty() || consumeUnsignedInteger(Str&: S, Radix: 10, Result&: N))
220 return false;
221 if (S.consume_front(Prefix: ":")) {
222 if (!HasLeftBracket)
223 return false;
224 unsigned long long M;
225 if (consumeUnsignedInteger(Str&: S, Radix: 10, Result&: M) || N >= M)
226 return false;
227 }
228 if (HasLeftBracket) {
229 if (!S.consume_front(Prefix: "]"))
230 return false;
231 }
232 if (!S.consume_front(Prefix: "}"))
233 return false;
234 if (!S.empty())
235 return false;
236 // Found {vn}, {sn}, {an}, {v[n]}, {s[n]}, {a[n]}, {v[n:m]}, {s[n:m]}
237 // or {a[n:m]}.
238 Info.setAllowsRegister();
239 Name = S.data() - 1;
240 return true;
241 }
242
243 // \p Constraint will be left pointing at the last character of
244 // the constraint. In practice, it won't be changed unless the
245 // constraint is longer than one character.
246 std::string convertConstraint(const char *&Constraint) const override {
247
248 StringRef S(Constraint);
249 if (S == "DA" || S == "DB") {
250 return std::string("^") + std::string(Constraint++, 2);
251 }
252
253 const char *Begin = Constraint;
254 TargetInfo::ConstraintInfo Info("", "");
255 if (validateAsmConstraint(Name&: Constraint, Info))
256 return std::string(Begin).substr(pos: 0, n: Constraint - Begin + 1);
257
258 Constraint = Begin;
259 return std::string(1, *Constraint);
260 }
261
262 bool
263 initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
264 StringRef CPU,
265 const std::vector<std::string> &FeatureVec) const override;
266
267 llvm::SmallVector<Builtin::InfosShard> getTargetBuiltins() const override;
268
269 bool useFP16ConversionIntrinsics() const override { return false; }
270
271 void getTargetDefines(const LangOptions &Opts,
272 MacroBuilder &Builder) const override;
273
274 BuiltinVaListKind getBuiltinVaListKind() const override {
275 return TargetInfo::CharPtrBuiltinVaList;
276 }
277
278 bool isValidCPUName(StringRef Name) const override {
279 if (getTriple().isAMDGCN())
280 return llvm::AMDGPU::parseArchAMDGCN(CPU: Name) != llvm::AMDGPU::GK_NONE;
281 return llvm::AMDGPU::parseArchR600(CPU: Name) != llvm::AMDGPU::GK_NONE;
282 }
283
284 void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
285
286 bool setCPU(const std::string &Name) override {
287 if (getTriple().isAMDGCN()) {
288 GPUKind = llvm::AMDGPU::parseArchAMDGCN(CPU: Name);
289 GPUFeatures = llvm::AMDGPU::getArchAttrAMDGCN(AK: GPUKind);
290 } else {
291 GPUKind = llvm::AMDGPU::parseArchR600(CPU: Name);
292 GPUFeatures = llvm::AMDGPU::getArchAttrR600(AK: GPUKind);
293 }
294
295 return GPUKind != llvm::AMDGPU::GK_NONE;
296 }
297
298 void setSupportedOpenCLOpts() override {
299 auto &Opts = getSupportedOpenCLOpts();
300 Opts["cl_clang_storage_class_specifiers"] = true;
301 Opts["__cl_clang_variadic_functions"] = true;
302 Opts["__cl_clang_function_pointers"] = true;
303 Opts["__cl_clang_function_scope_local_variables"] = true;
304 Opts["__cl_clang_non_portable_kernel_param_types"] = true;
305 Opts["__cl_clang_bitfields"] = true;
306
307 bool IsAMDGCN = getTriple().isAMDGCN();
308
309 Opts["cl_khr_fp64"] = hasFP64();
310 Opts["__opencl_c_fp64"] = hasFP64();
311
312 if (IsAMDGCN || GPUKind >= llvm::AMDGPU::GK_CEDAR) {
313 Opts["cl_khr_byte_addressable_store"] = true;
314 Opts["cl_khr_global_int32_base_atomics"] = true;
315 Opts["cl_khr_global_int32_extended_atomics"] = true;
316 Opts["cl_khr_local_int32_base_atomics"] = true;
317 Opts["cl_khr_local_int32_extended_atomics"] = true;
318 }
319
320 if (IsAMDGCN) {
321 Opts["cl_khr_fp16"] = true;
322 Opts["cl_khr_int64_base_atomics"] = true;
323 Opts["cl_khr_int64_extended_atomics"] = true;
324 Opts["cl_khr_mipmap_image"] = true;
325 Opts["cl_khr_mipmap_image_writes"] = true;
326 Opts["cl_khr_subgroups"] = true;
327 Opts["cl_amd_media_ops"] = true;
328 Opts["cl_amd_media_ops2"] = true;
329
330 // FIXME: Check subtarget for image support.
331 Opts["__opencl_c_images"] = true;
332 Opts["__opencl_c_3d_image_writes"] = true;
333 Opts["__opencl_c_read_write_images"] = true;
334 Opts["cl_khr_3d_image_writes"] = true;
335 Opts["__opencl_c_program_scope_global_variables"] = true;
336 Opts["__opencl_c_atomic_order_acq_rel"] = true;
337 Opts["__opencl_c_atomic_order_seq_cst"] = true;
338 Opts["__opencl_c_atomic_scope_device"] = true;
339 Opts["__opencl_c_atomic_scope_all_devices"] = true;
340 Opts["__opencl_c_work_group_collective_functions"] = true;
341
342 if (hasFlatSupport()) {
343 Opts["__opencl_c_generic_address_space"] = true;
344 Opts["__opencl_c_device_enqueue"] = true;
345 Opts["__opencl_c_pipes"] = true;
346 }
347
348 if (getTriple().getEnvironment() == llvm::Triple::LLVM) {
349 Opts["cl_khr_subgroup_extended_types"] = true;
350 }
351 }
352 }
353
354 LangAS getOpenCLTypeAddrSpace(OpenCLTypeKind TK) const override {
355 switch (TK) {
356 case OCLTK_Image:
357 return LangAS::opencl_constant;
358
359 case OCLTK_ClkEvent:
360 case OCLTK_Queue:
361 case OCLTK_ReserveID:
362 return LangAS::opencl_global;
363
364 default:
365 return TargetInfo::getOpenCLTypeAddrSpace(TK);
366 }
367 }
368
369 LangAS getOpenCLBuiltinAddressSpace(unsigned AS) const override {
370 switch (AS) {
371 case 0:
372 return LangAS::opencl_generic;
373 case 1:
374 return LangAS::opencl_global;
375 case 3:
376 return LangAS::opencl_local;
377 case 4:
378 return LangAS::opencl_constant;
379 case 5:
380 return LangAS::opencl_private;
381 default:
382 return getLangASFromTargetAS(TargetAS: AS);
383 }
384 }
385
386 LangAS getCUDABuiltinAddressSpace(unsigned AS) const override {
387 switch (AS) {
388 case 0:
389 return LangAS::Default;
390 case 1:
391 return LangAS::cuda_device;
392 case 3:
393 return LangAS::cuda_shared;
394 case 4:
395 return LangAS::cuda_constant;
396 default:
397 return getLangASFromTargetAS(TargetAS: AS);
398 }
399 }
400
401 std::optional<LangAS> getConstantAddressSpace() const override {
402 return getLangASFromTargetAS(TargetAS: llvm::AMDGPUAS::CONSTANT_ADDRESS);
403 }
404
405 const llvm::omp::GV &getGridValue() const override {
406 switch (WavefrontSize) {
407 case 32:
408 return llvm::omp::getAMDGPUGridValues<32>();
409 case 64:
410 return llvm::omp::getAMDGPUGridValues<64>();
411 default:
412 llvm_unreachable("getGridValue not implemented for this wavesize");
413 }
414 }
415
416 /// \returns Target specific vtbl ptr address space.
417 unsigned getVtblPtrAddressSpace() const override {
418 return static_cast<unsigned>(llvm::AMDGPUAS::CONSTANT_ADDRESS);
419 }
420
421 /// \returns If a target requires an address within a target specific address
422 /// space \p AddressSpace to be converted in order to be used, then return the
423 /// corresponding target specific DWARF address space.
424 ///
425 /// \returns Otherwise return std::nullopt and no conversion will be emitted
426 /// in the DWARF.
427 std::optional<unsigned>
428 getDWARFAddressSpace(unsigned AddressSpace) const override {
429 int DWARFAS = llvm::AMDGPU::mapToDWARFAddrSpace(LLVMAddrSpace: AddressSpace);
430 // If there is no corresponding address space identifier, or it would be
431 // the default, then don't emit the attribute.
432 if (DWARFAS == -1 || DWARFAS == llvm::AMDGPU::DWARFAS::DEFAULT)
433 return std::nullopt;
434 return DWARFAS;
435 }
436
437 CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
438 switch (CC) {
439 default:
440 return CCCR_Warning;
441 case CC_C:
442 case CC_DeviceKernel:
443 return CCCR_OK;
444 }
445 }
446
447 // In amdgcn target the null pointer in global, constant, and generic
448 // address space has value 0 but in private and local address space has
449 // value ~0.
450 uint64_t getNullPointerValue(LangAS AS) const override {
451 // Check language-specific address spaces
452 if (AS == LangAS::opencl_local || AS == LangAS::opencl_private ||
453 AS == LangAS::sycl_local || AS == LangAS::sycl_private)
454 return ~0;
455 if (isTargetAddressSpace(AS))
456 return llvm::AMDGPU::getNullPointerValue(AS: toTargetAddressSpace(AS));
457 return 0;
458 }
459
460 void setAuxTarget(const TargetInfo *Aux) override;
461
462 bool hasBitIntType() const override { return true; }
463
464 // Record offload arch features since they are needed for defining the
465 // pre-defined macros.
466 bool handleTargetFeatures(std::vector<std::string> &Features,
467 DiagnosticsEngine &Diags) override {
468 HasFullBFloat16 = true;
469 auto TargetIDFeatures =
470 getAllPossibleTargetIDFeatures(T: getTriple(), Processor: getArchNameAMDGCN(AK: GPUKind));
471 for (const auto &F : Features) {
472 assert(F.front() == '+' || F.front() == '-');
473 if (F == "+wavefrontsize64")
474 WavefrontSize = 64;
475 else if (F == "+cumode")
476 CUMode = true;
477 else if (F == "-cumode")
478 CUMode = false;
479 else if (F == "+image-insts")
480 HasImage = true;
481 bool IsOn = F.front() == '+';
482 StringRef Name = StringRef(F).drop_front();
483 if (!llvm::is_contained(Range&: TargetIDFeatures, Element: Name))
484 continue;
485 assert(!OffloadArchFeatures.contains(Name));
486 OffloadArchFeatures[Name] = IsOn;
487 }
488 return true;
489 }
490
491 std::optional<std::string> getTargetID() const override {
492 if (!getTriple().isAMDGCN())
493 return std::nullopt;
494 // When -target-cpu is not set, we assume generic code that it is valid
495 // for all GPU and use an empty string as target ID to represent that.
496 if (GPUKind == llvm::AMDGPU::GK_NONE)
497 return std::string("");
498 return getCanonicalTargetID(Processor: getArchNameAMDGCN(AK: GPUKind),
499 Features: OffloadArchFeatures);
500 }
501
502 bool hasHIPImageSupport() const override { return HasImage; }
503
504 std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
505 // This is imprecise as the value can vary between 64, 128 (even 256!) bytes
506 // depending on the level of cache and the target architecture. We select
507 // the size that corresponds to the largest L1 cache line for all
508 // architectures.
509 return std::make_pair(x: 128, y: 128);
510 }
511};
512
513} // namespace targets
514} // namespace clang
515
516#endif // LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H
517