//===------ SemaAMDGPU.cpp ------- AMDGPU target-specific routines --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis functions specific to AMDGPU.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/SemaAMDGPU.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Sema.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/AtomicOrdering.h"
#include <cstdint>

namespace clang {

SemaAMDGPU::SemaAMDGPU(Sema &S) : SemaBase(S) {}

bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                                CallExpr *TheCall) {
  // Position of the memory order and scope arguments in the builtin.
  unsigned OrderIndex, ScopeIndex;

  const auto *FD = SemaRef.getCurFunctionDecl(/*AllowLambda=*/true);
  assert(FD && "AMDGPU builtins should not be used outside of a function");
  llvm::StringMap<bool> CallerFeatureMap;
  getASTContext().getFunctionFeatureMap(CallerFeatureMap, FD);
  bool HasGFX950Insts =
      Builtin::evaluateRequiredTargetFeatures("gfx950-insts", CallerFeatureMap);

  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_load_lds:
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_load_async_lds:
  case AMDGPU::BI__builtin_amdgcn_struct_ptr_buffer_load_lds:
  case AMDGPU::BI__builtin_amdgcn_struct_ptr_buffer_load_async_lds:
  case AMDGPU::BI__builtin_amdgcn_load_to_lds:
  case AMDGPU::BI__builtin_amdgcn_load_async_to_lds:
  case AMDGPU::BI__builtin_amdgcn_global_load_lds:
  case AMDGPU::BI__builtin_amdgcn_global_load_async_lds: {
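    // The size operand (the third argument, in bytes) must be a constant 1,
    // 2, or 4; targets with gfx950 instructions also accept 12 and 16.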
    constexpr const int SizeIdx = 2;
    llvm::APSInt Size;
    Expr *ArgExpr = TheCall->getArg(SizeIdx);
    // Check for instantiation-dependent expressions (e.g., involving template
    // parameters). These will be checked again during template instantiation.
    if (ArgExpr->isInstantiationDependent())
      return false;
    [[maybe_unused]] ExprResult R =
        SemaRef.VerifyIntegerConstantExpression(ArgExpr, &Size);
    assert(!R.isInvalid());
    switch (Size.getSExtValue()) {
    case 1:
    case 2:
    case 4:
      return false;
    case 12:
    case 16: {
      if (HasGFX950Insts)
        return false;
      [[fallthrough]];
    }
    default:
      SemaRef.targetDiag(ArgExpr->getExprLoc(),
                         diag::err_amdgcn_load_lds_size_invalid_value)
          << ArgExpr->getSourceRange();
      SemaRef.targetDiag(ArgExpr->getExprLoc(),
                         diag::note_amdgcn_load_lds_size_valid_value)
          << HasGFX950Insts << ArgExpr->getSourceRange();
      return true;
    }
  }
  case AMDGPU::BI__builtin_amdgcn_get_fpenv:
  case AMDGPU::BI__builtin_amdgcn_set_fpenv:
    return false;
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
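  // The first operand of s_setreg selects the hardware register and must be a
  // constant that fits in 16 bits.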
  case AMDGPU::BI__builtin_amdgcn_s_setreg:
    return SemaRef.BuiltinConstantArgRange(TheCall, /*ArgNum=*/0, /*Low=*/0,
                                           /*High=*/UINT16_MAX);
  case AMDGPU::BI__builtin_amdgcn_s_wait_event: {
    llvm::APSInt Result;
    if (SemaRef.BuiltinConstantArg(TheCall, 0, Result))
      return true;

    bool IsGFX12Plus = Builtin::evaluateRequiredTargetFeatures(
        "gfx12-insts", CallerFeatureMap);

    // gfx11 -> gfx12 changed the interpretation of the bitmask: gfx12
    // inverted the interpretation of the export_ready bit and shifted it up
    // by 1. Thus waiting for the export_ready event can use a value of 2
    // universally.
    if (((IsGFX12Plus && !Result[1]) || (!IsGFX12Plus && Result[0])) ||
        Result.getZExtValue() > 2) {
      Expr *ArgExpr = TheCall->getArg(0);
      SemaRef.targetDiag(ArgExpr->getExprLoc(),
                         diag::warn_amdgpu_s_wait_event_mask_no_effect_target)
          << ArgExpr->getSourceRange();
      SemaRef.targetDiag(ArgExpr->getExprLoc(),
                         diag::note_amdgpu_s_wait_event_suggested_value)
          << ArgExpr->getSourceRange();
    }

    return false;
  }
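  // The DPP builtins take a fixed argument count with one or two leading data
  // arguments, e.g. __builtin_amdgcn_mov_dpp(src, dpp_ctrl, row_mask,
  // bank_mask, bound_ctrl); the data arguments are type-checked in
  // checkMovDPPFunctionCall.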
  case AMDGPU::BI__builtin_amdgcn_mov_dpp:
    return checkMovDPPFunctionCall(TheCall, 5, 1);
  case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
    return checkMovDPPFunctionCall(TheCall, 2, 1);
  case AMDGPU::BI__builtin_amdgcn_update_dpp:
    return checkMovDPPFunctionCall(TheCall, 6, 2);
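  // The third operand of the scaled-conversion builtins must be a constant in
  // the range [0, 15].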
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_fp8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_fp8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_bf8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_bf8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_fp4:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_fp4:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_fp8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_bf8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_fp4:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f16_fp6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_bf16_fp6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f16_bf6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_bf16_bf6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f32_fp6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f32_bf6:
    return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
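  // The cooperative atomics take a global or generic pointer, a C ABI memory
  // order, and a trailing syncscope string literal, e.g. (illustrative)
  //   __builtin_amdgcn_cooperative_atomic_load_32x4B(P, __ATOMIC_ACQUIRE,
  //                                                  "workgroup");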
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_32x4B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_16x8B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_8x16B:
    return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/false);
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_32x4B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_16x8B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_8x16B:
    return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/true);
  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128:
  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
    return checkAtomicMonitorLoad(TheCall);
  case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32: {
    StringRef FeatureList(
        getASTContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
    if (!Builtin::evaluateRequiredTargetFeatures(FeatureList,
                                                 CallerFeatureMap)) {
      Diag(TheCall->getBeginLoc(), diag::err_builtin_needs_feature)
          << FD->getDeclName() << FeatureList;
      return false;
    }

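    // Require the first operand and the trailing two operands to be integer
    // constants.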
    unsigned ArgCount = TheCall->getNumArgs() - 1;
    llvm::APSInt Result;

    return (SemaRef.BuiltinConstantArg(TheCall, 0, Result)) ||
           (SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) ||
           (SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result));
  }
  case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f16_i32: {
    StringRef FeatureList(
        getASTContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
    if (!Builtin::evaluateRequiredTargetFeatures(FeatureList,
                                                 CallerFeatureMap)) {
      Diag(TheCall->getBeginLoc(), diag::err_builtin_needs_feature)
          << FD->getDeclName() << FeatureList;
      return false;
    }

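    // Operand 0 is the data being stored; require operand 1 and the trailing
    // two operands to be integer constants.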
    unsigned ArgCount = TheCall->getNumArgs() - 1;
    llvm::APSInt Result;

    return (SemaRef.BuiltinConstantArg(TheCall, 1, Result)) ||
           (SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) ||
           (SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result));
  }
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x64_iu8:
  case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x128_iu8: {
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x64_iu8) {
      if (SemaRef.checkArgCountRange(TheCall, 7, 8))
        return true;
      if (TheCall->getNumArgs() == 7)
        return false;
    } else if (BuiltinID ==
               AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x128_iu8) {
      if (SemaRef.checkArgCountRange(TheCall, 8, 9))
        return true;
      if (TheCall->getNumArgs() == 8)
        return false;
    }
    // Check that the last argument (the clamp operand) is a constant that is
    // convertible to bool.
    Expr *ClampArg = TheCall->getArg(TheCall->getNumArgs() - 1);
    // 1) Ensure the clamp argument is a constant expression.
    llvm::APSInt ClampValue;
    if (!SemaRef.VerifyIntegerConstantExpression(ClampArg, &ClampValue)
             .isUsable())
      return true;
    // 2) Check that the argument can be converted to bool.
    if (!SemaRef.Context.hasSameType(ClampArg->getType(),
                                     SemaRef.Context.BoolTy)) {
      // Try to convert to bool.
      QualType BoolTy = SemaRef.Context.BoolTy;
      ExprResult ClampExpr(ClampArg);
      SemaRef.CheckSingleAssignmentConstraints(BoolTy, ClampExpr);
      if (ClampExpr.isInvalid())
        return true;
    }
    return false;
  }
  default:
    return false;
  }

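  // For the fence and atomic inc/dec builtins, validate the memory order and
  // syncscope operands at the positions recorded above.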
  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, getASTContext()))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check the validity of the memory ordering as per the C11 / C++11 memory
  // model. Only the fence builtin needs this check; atomic inc/dec allow all
  // memory orders.
  if (!llvm::isValidAtomicOrderingCABI(Ord))
    return Diag(ArgExpr->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << 0 << ArgExpr->getSourceRange();
  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
  case llvm::AtomicOrderingCABI::relaxed:
  case llvm::AtomicOrderingCABI::consume:
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
      return Diag(ArgExpr->getBeginLoc(),
                  diag::warn_atomic_op_has_invalid_memory_order)
             << 0 << ArgExpr->getSourceRange();
    break;
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::release:
  case llvm::AtomicOrderingCABI::acq_rel:
  case llvm::AtomicOrderingCABI::seq_cst:
    break;
  }

  Arg = TheCall->getArg(ScopeIndex);
  ArgExpr = Arg.get();
  Expr::EvalResult ArgResult1;
  // Check that the sync scope is a constant literal.
  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, getASTContext()))
    return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
           << ArgExpr->getType();

  return false;
}

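/// Validate a constant C ABI atomic-ordering argument: orderings that are
/// invalid for the operation (acq_rel unless it both loads and stores,
/// acquire for a pure store, release for a pure load) are diagnosed.
/// Returns true if a diagnostic was emitted.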
bool SemaAMDGPU::checkAtomicOrderingCABIArg(Expr *E, bool MayLoad,
                                            bool MayStore) {
  Expr::EvalResult AtomicOrdArgRes;
  if (!E->EvaluateAsInt(AtomicOrdArgRes, getASTContext()))
    llvm_unreachable("Intrinsic requires imm for atomic ordering argument!");
  auto Ord =
      llvm::AtomicOrderingCABI(AtomicOrdArgRes.Val.getInt().getZExtValue());

  // Atomic ordering cannot be acq_rel in any case, acquire for stores, or
  // release for loads.
  if (!llvm::isValidAtomicOrderingCABI((unsigned)Ord) ||
      (!(MayLoad && MayStore) && (Ord == llvm::AtomicOrderingCABI::acq_rel)) ||
      (!MayLoad && Ord == llvm::AtomicOrderingCABI::acquire) ||
      (!MayStore && Ord == llvm::AtomicOrderingCABI::release)) {
    return Diag(E->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << 0 << E->getSourceRange();
  }

  return false;
}

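/// Check a cooperative atomic load or store: the pointer operand must be in
/// the global or flat address space, the memory order must be valid for the
/// operation, and the trailing syncscope operand must be a string literal.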
bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
  bool Fail = false;

  // The first argument is a global or generic pointer.
  Expr *PtrArg = TheCall->getArg(0);
  QualType PtrTy = PtrArg->getType()->getPointeeType();
  unsigned AS = getASTContext().getTargetAddressSpace(PtrTy.getAddressSpace());
  if (AS != llvm::AMDGPUAS::FLAT_ADDRESS &&
      AS != llvm::AMDGPUAS::GLOBAL_ADDRESS) {
    Fail = true;
    Diag(TheCall->getBeginLoc(), diag::err_amdgcn_coop_atomic_invalid_as)
        << PtrArg->getSourceRange();
  }

  Expr *AO = TheCall->getArg(IsStore ? 2 : 1);
  Expr *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);

  if (AO->isValueDependent() || Scope->isValueDependent())
    return false;

  // Check the atomic ordering.
  Fail |= checkAtomicOrderingCABIArg(AO, /*MayLoad=*/!IsStore,
                                     /*MayStore=*/IsStore);

  // The last argument is the syncscope as a string literal.
  if (!isa<StringLiteral>(Scope->IgnoreParenImpCasts())) {
    Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
        << Scope->getSourceRange();
    Fail = true;
  }

  return Fail;
}

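/// Check a monitor load: the ordering operand must be valid for a load (no
/// release or acq_rel), and a constant syncscope operand must name a valid
/// generic atomic scope.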
bool SemaAMDGPU::checkAtomicMonitorLoad(CallExpr *TheCall) {
  bool Fail = false;

  Expr *AO = TheCall->getArg(1);
  Expr *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);

  if (AO->isValueDependent() || Scope->isValueDependent())
    return false;

  Fail |= checkAtomicOrderingCABIArg(AO, /*MayLoad=*/true,
                                     /*MayStore=*/false);

  auto ScopeModel = AtomicScopeModel::create(AtomicScopeModelKind::Generic);
  if (std::optional<llvm::APSInt> Result =
          Scope->getIntegerConstantExpr(SemaRef.Context)) {
    if (!ScopeModel->isValid(Result->getZExtValue())) {
      Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_sync_scope)
          << Scope->getSourceRange();
      Fail = true;
    }
  }

  return Fail;
}

bool SemaAMDGPU::checkMovDPPFunctionCall(CallExpr *TheCall, unsigned NumArgs,
                                         unsigned NumDataArgs) {
  assert(NumDataArgs <= 2);
  if (SemaRef.checkArgCountRange(TheCall, NumArgs, NumArgs))
    return true;
  Expr *Args[2];
  QualType ArgTys[2];
  for (unsigned I = 0; I != NumDataArgs; ++I) {
    Args[I] = TheCall->getArg(I);
    ArgTys[I] = Args[I]->getType();
    // TODO: Vectors can also be supported.
    if (!ArgTys[I]->isArithmeticType() || ArgTys[I]->isAnyComplexType()) {
      SemaRef.Diag(Args[I]->getBeginLoc(),
                   diag::err_typecheck_cond_expect_int_float)
          << ArgTys[I] << Args[I]->getSourceRange();
      return true;
    }
  }
  if (NumDataArgs < 2)
    return false;

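  // With two data arguments, the types must match, modulo qualifiers and a
  // signedness difference between same-width integers.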
  if (getASTContext().hasSameUnqualifiedType(ArgTys[0], ArgTys[1]))
    return false;

  if (((ArgTys[0]->isUnsignedIntegerType() &&
        ArgTys[1]->isSignedIntegerType()) ||
       (ArgTys[0]->isSignedIntegerType() &&
        ArgTys[1]->isUnsignedIntegerType())) &&
      getASTContext().getTypeSize(ArgTys[0]) ==
          getASTContext().getTypeSize(ArgTys[1]))
    return false;

  SemaRef.Diag(Args[1]->getBeginLoc(),
               diag::err_typecheck_call_different_arg_types)
      << ArgTys[0] << ArgTys[1];
  return true;
}

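// Validates the arguments of e.g. (illustrative)
//   __attribute__((amdgpu_flat_work_group_size(32, 256))) void Kernel();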
static bool
checkAMDGPUFlatWorkGroupSizeArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr,
                                      const AMDGPUFlatWorkGroupSizeAttr &Attr) {
  // Accept template arguments for now as they depend on something else.
  // We'll get to check them when they eventually get instantiated.
  if (MinExpr->isValueDependent() || MaxExpr->isValueDependent())
    return false;

  uint32_t Min = 0;
  if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
    return true;

  uint32_t Max = 0;
  if (!S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
    return true;

  if (Min == 0 && Max != 0) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 0;
    return true;
  }
  if (Min > Max) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 1;
    return true;
  }

  return false;
}

AMDGPUFlatWorkGroupSizeAttr *
SemaAMDGPU::CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI,
                                              Expr *MinExpr, Expr *MaxExpr) {
  ASTContext &Context = getASTContext();
  AMDGPUFlatWorkGroupSizeAttr TmpAttr(Context, CI, MinExpr, MaxExpr);

  if (checkAMDGPUFlatWorkGroupSizeArguments(SemaRef, MinExpr, MaxExpr,
                                            TmpAttr))
    return nullptr;
  return ::new (Context)
      AMDGPUFlatWorkGroupSizeAttr(Context, CI, MinExpr, MaxExpr);
}

void SemaAMDGPU::addAMDGPUFlatWorkGroupSizeAttr(Decl *D,
                                                const AttributeCommonInfo &CI,
                                                Expr *MinExpr, Expr *MaxExpr) {
  if (auto *Attr = CreateAMDGPUFlatWorkGroupSizeAttr(CI, MinExpr, MaxExpr))
    D->addAttr(Attr);
}

void SemaAMDGPU::handleAMDGPUFlatWorkGroupSizeAttr(Decl *D,
                                                   const ParsedAttr &AL) {
  Expr *MinExpr = AL.getArgAsExpr(0);
  Expr *MaxExpr = AL.getArgAsExpr(1);

  addAMDGPUFlatWorkGroupSizeAttr(D, AL, MinExpr, MaxExpr);
}

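// Validates the arguments of e.g. (illustrative)
//   __attribute__((amdgpu_waves_per_eu(2, 4))) void Kernel();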
static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr,
                                           Expr *MaxExpr,
                                           const AMDGPUWavesPerEUAttr &Attr) {
  if (S.DiagnoseUnexpandedParameterPack(MinExpr) ||
      (MaxExpr && S.DiagnoseUnexpandedParameterPack(MaxExpr)))
    return true;

  // Accept template arguments for now as they depend on something else.
  // We'll get to check them when they eventually get instantiated.
  if (MinExpr->isValueDependent() || (MaxExpr && MaxExpr->isValueDependent()))
    return false;

  uint32_t Min = 0;
  if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
    return true;

  uint32_t Max = 0;
  if (MaxExpr && !S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
    return true;

  if (Min == 0 && Max != 0) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 0;
    return true;
  }
  if (Max != 0 && Min > Max) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 1;
    return true;
  }

  return false;
}

AMDGPUWavesPerEUAttr *
SemaAMDGPU::CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI,
                                       Expr *MinExpr, Expr *MaxExpr) {
  ASTContext &Context = getASTContext();
  AMDGPUWavesPerEUAttr TmpAttr(Context, CI, MinExpr, MaxExpr);

  if (checkAMDGPUWavesPerEUArguments(SemaRef, MinExpr, MaxExpr, TmpAttr))
    return nullptr;

  return ::new (Context) AMDGPUWavesPerEUAttr(Context, CI, MinExpr, MaxExpr);
}

void SemaAMDGPU::addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
                                         Expr *MinExpr, Expr *MaxExpr) {
  if (auto *Attr = CreateAMDGPUWavesPerEUAttr(CI, MinExpr, MaxExpr))
    D->addAttr(Attr);
}

void SemaAMDGPU::handleAMDGPUWavesPerEUAttr(Decl *D, const ParsedAttr &AL) {
  if (!AL.checkAtLeastNumArgs(SemaRef, 1) || !AL.checkAtMostNumArgs(SemaRef, 2))
    return;

  Expr *MinExpr = AL.getArgAsExpr(0);
  Expr *MaxExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;

  addAMDGPUWavesPerEUAttr(D, AL, MinExpr, MaxExpr);
}

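// Handles e.g. (illustrative) __attribute__((amdgpu_num_sgpr(32))); the
// analogous amdgpu_num_vgpr handler follows.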
void SemaAMDGPU::handleAMDGPUNumSGPRAttr(Decl *D, const ParsedAttr &AL) {
  uint32_t NumSGPR = 0;
  Expr *NumSGPRExpr = AL.getArgAsExpr(0);
  if (!SemaRef.checkUInt32Argument(AL, NumSGPRExpr, NumSGPR))
    return;

  D->addAttr(::new (getASTContext())
                 AMDGPUNumSGPRAttr(getASTContext(), AL, NumSGPR));
}

void SemaAMDGPU::handleAMDGPUNumVGPRAttr(Decl *D, const ParsedAttr &AL) {
  uint32_t NumVGPR = 0;
  Expr *NumVGPRExpr = AL.getArgAsExpr(0);
  if (!SemaRef.checkUInt32Argument(AL, NumVGPRExpr, NumVGPR))
    return;

  D->addAttr(::new (getASTContext())
                 AMDGPUNumVGPRAttr(getASTContext(), AL, NumVGPR));
}

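// Validates the arguments of e.g. (illustrative)
//   __attribute__((amdgpu_max_num_work_groups(4, 2, 1))) void Kernel();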
static bool
checkAMDGPUMaxNumWorkGroupsArguments(Sema &S, Expr *XExpr, Expr *YExpr,
                                     Expr *ZExpr,
                                     const AMDGPUMaxNumWorkGroupsAttr &Attr) {
  if (S.DiagnoseUnexpandedParameterPack(XExpr) ||
      (YExpr && S.DiagnoseUnexpandedParameterPack(YExpr)) ||
      (ZExpr && S.DiagnoseUnexpandedParameterPack(ZExpr)))
    return true;

  // Accept template arguments for now as they depend on something else.
  // We'll get to check them when they eventually get instantiated.
  if (XExpr->isValueDependent() || (YExpr && YExpr->isValueDependent()) ||
      (ZExpr && ZExpr->isValueDependent()))
    return false;

  uint32_t NumWG = 0;
  Expr *Exprs[3] = {XExpr, YExpr, ZExpr};
  for (int i = 0; i < 3; i++) {
    if (Exprs[i]) {
      if (!S.checkUInt32Argument(Attr, Exprs[i], NumWG, i,
                                 /*StrictlyUnsigned=*/true))
        return true;
      if (NumWG == 0) {
        S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
            << &Attr << Exprs[i]->getSourceRange();
        return true;
      }
    }
  }

  return false;
}

AMDGPUMaxNumWorkGroupsAttr *SemaAMDGPU::CreateAMDGPUMaxNumWorkGroupsAttr(
    const AttributeCommonInfo &CI, Expr *XExpr, Expr *YExpr, Expr *ZExpr) {
  ASTContext &Context = getASTContext();
  AMDGPUMaxNumWorkGroupsAttr TmpAttr(Context, CI, XExpr, YExpr, ZExpr);
  assert(!SemaRef.isSFINAEContext() &&
         "Can't produce SFINAE diagnostic pointing to temporary attribute");

  if (checkAMDGPUMaxNumWorkGroupsArguments(SemaRef, XExpr, YExpr, ZExpr,
                                           TmpAttr))
    return nullptr;

  return ::new (Context)
      AMDGPUMaxNumWorkGroupsAttr(Context, CI, XExpr, YExpr, ZExpr);
}

void SemaAMDGPU::addAMDGPUMaxNumWorkGroupsAttr(Decl *D,
                                               const AttributeCommonInfo &CI,
                                               Expr *XExpr, Expr *YExpr,
                                               Expr *ZExpr) {
  if (auto *Attr = CreateAMDGPUMaxNumWorkGroupsAttr(CI, XExpr, YExpr, ZExpr))
    D->addAttr(Attr);
}

void SemaAMDGPU::handleAMDGPUMaxNumWorkGroupsAttr(Decl *D,
                                                  const ParsedAttr &AL) {
  Expr *YExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
  Expr *ZExpr = (AL.getNumArgs() > 2) ? AL.getArgAsExpr(2) : nullptr;
  addAMDGPUMaxNumWorkGroupsAttr(D, AL, AL.getArgAsExpr(0), YExpr, ZExpr);
}

} // namespace clang