//===------ SemaAMDGPU.cpp ------- AMDGPU target-specific routines --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis functions specific to AMDGPU.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/SemaAMDGPU.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DynamicRecursiveASTVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/TargetParser/TargetParser.h"
#include <cstdint>
#include <utility>

namespace clang {

SemaAMDGPU::SemaAMDGPU(Sema &S) : SemaBase(S) {}

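/// Performs AMDGCN-specific semantic checking for a call to a target builtin.
/// Returns true if an error was diagnosed.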
bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                                CallExpr *TheCall) {
  // Position of memory order and scope arguments in the builtin.
  unsigned OrderIndex, ScopeIndex;

  const auto *FD = SemaRef.getCurFunctionDecl(/*AllowLambda=*/true);
  assert(FD && "AMDGPU builtins should not be used outside of a function");
  llvm::StringMap<bool> CallerFeatureMap;
  getASTContext().getFunctionFeatureMap(CallerFeatureMap, FD);
  bool HasGFX950Insts =
      Builtin::evaluateRequiredTargetFeatures("gfx950-insts", CallerFeatureMap);

  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_load_lds:
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_load_async_lds:
  case AMDGPU::BI__builtin_amdgcn_struct_ptr_buffer_load_lds:
  case AMDGPU::BI__builtin_amdgcn_struct_ptr_buffer_load_async_lds:
  case AMDGPU::BI__builtin_amdgcn_load_to_lds:
  case AMDGPU::BI__builtin_amdgcn_load_async_to_lds:
  case AMDGPU::BI__builtin_amdgcn_global_load_lds:
  case AMDGPU::BI__builtin_amdgcn_global_load_async_lds: {
    constexpr const int SizeIdx = 2;
    llvm::APSInt Size;
    Expr *ArgExpr = TheCall->getArg(SizeIdx);
    // Check for instantiation-dependent expressions (e.g., involving template
    // parameters). These will be checked again during template instantiation.
    if (ArgExpr->isInstantiationDependent())
      return false;
    [[maybe_unused]] ExprResult R =
        SemaRef.VerifyIntegerConstantExpression(ArgExpr, &Size);
    assert(!R.isInvalid());
    switch (Size.getSExtValue()) {
    case 1:
    case 2:
    case 4:
      return false;
    case 12:
    case 16: {
      if (HasGFX950Insts)
        return false;
      [[fallthrough]];
    }
    default:
      SemaRef.targetDiag(ArgExpr->getExprLoc(),
                         diag::err_amdgcn_load_lds_size_invalid_value)
          << ArgExpr->getSourceRange();
      SemaRef.targetDiag(ArgExpr->getExprLoc(),
                         diag::note_amdgcn_load_lds_size_valid_value)
          << HasGFX950Insts << ArgExpr->getSourceRange();
      return true;
    }
  }
  case AMDGPU::BI__builtin_amdgcn_get_fpenv:
  case AMDGPU::BI__builtin_amdgcn_set_fpenv:
    return false;
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  case AMDGPU::BI__builtin_amdgcn_s_setreg:
    return SemaRef.BuiltinConstantArgRange(TheCall, /*ArgNum=*/0, /*Low=*/0,
                                           /*High=*/UINT16_MAX);
  case AMDGPU::BI__builtin_amdgcn_s_wait_event: {
    llvm::APSInt Result;
    if (SemaRef.BuiltinConstantArg(TheCall, /*ArgNum=*/0, Result))
      return true;

    bool IsGFX12Plus = Builtin::evaluateRequiredTargetFeatures(
        "gfx12-insts", CallerFeatureMap);

    // gfx11 -> gfx12 changed the interpretation of the bitmask. gfx12 inverted
    // the interpretation for export_ready, but shifted the used bit by 1. Thus
    // waiting for the export_ready event can use a value of 2 universally.
    if (((IsGFX12Plus && !Result[1]) || (!IsGFX12Plus && Result[0])) ||
        Result.getZExtValue() > 2) {
      Expr *ArgExpr = TheCall->getArg(0);
      SemaRef.targetDiag(ArgExpr->getExprLoc(),
                         diag::warn_amdgpu_s_wait_event_mask_no_effect_target)
          << ArgExpr->getSourceRange();
      SemaRef.targetDiag(ArgExpr->getExprLoc(),
                         diag::note_amdgpu_s_wait_event_suggested_value)
          << ArgExpr->getSourceRange();
    }

    return false;
  }
  case AMDGPU::BI__builtin_amdgcn_mov_dpp:
    return checkMovDPPFunctionCall(TheCall, /*NumArgs=*/5, /*NumDataArgs=*/1);
  case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
    return checkMovDPPFunctionCall(TheCall, /*NumArgs=*/2, /*NumDataArgs=*/1);
  case AMDGPU::BI__builtin_amdgcn_update_dpp:
    return checkMovDPPFunctionCall(TheCall, /*NumArgs=*/6, /*NumDataArgs=*/2);
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_fp8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_fp8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_bf8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_bf8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_fp4:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_fp4:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_fp8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_bf8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_fp4:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f16_fp6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_bf16_fp6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f16_bf6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_bf16_bf6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f32_fp6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f32_bf6:
    return SemaRef.BuiltinConstantArgRange(TheCall, /*ArgNum=*/2, /*Low=*/0,
                                           /*High=*/15);
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_32x4B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_16x8B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_8x16B:
    return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/false);
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_32x4B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_16x8B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_8x16B:
    return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/true);
  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128:
  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
    return checkAtomicMonitorLoad(TheCall);
  case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32: {
    StringRef FeatureList(
        getASTContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
    if (!Builtin::evaluateRequiredTargetFeatures(FeatureList,
                                                 CallerFeatureMap)) {
      Diag(TheCall->getBeginLoc(), diag::err_builtin_needs_feature)
          << FD->getDeclName() << FeatureList;
      return false;
    }

    unsigned ArgCount = TheCall->getNumArgs() - 1;
    llvm::APSInt Result;

    // Complain about dmask values that are too large to fit into the 4 bits
    // the dmask field actually has in the corresponding HW instructions.
    constexpr unsigned DMaskArgNo = 0;
    constexpr int Low = 0;
    constexpr int High = 15;
    if (SemaRef.BuiltinConstantArg(TheCall, DMaskArgNo, Result) ||
        SemaRef.BuiltinConstantArgRange(TheCall, DMaskArgNo, Low, High,
                                        /*RangeIsError=*/true))
      return true;

    // Dmask indicates which elements should be returned, and it is not
    // possible to return more values than there are elements in the return
    // type.
    int NumElementsInRetTy = 1;
    const Type *RetTy = TheCall->getType().getTypePtr();
    if (auto *VTy = dyn_cast<VectorType>(RetTy))
      NumElementsInRetTy = VTy->getNumElements();
    int NumActiveBitsInDMask =
        llvm::popcount(static_cast<uint8_t>(Result.getExtValue()));
    if (NumActiveBitsInDMask > NumElementsInRetTy) {
      Diag(TheCall->getBeginLoc(),
           diag::err_amdgcn_dmask_has_too_many_bits_set);
      return true;
    }

    // For gather, only one bit can be set, indicating which exact component
    // to return.
    bool ExtraGatherChecks =
        BuiltinID ==
            AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32 &&
        SemaRef.BuiltinConstantArgPower2(TheCall, /*ArgNum=*/0);

    return ExtraGatherChecks ||
           (SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) ||
           (SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result));
  }
  case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f16_i32: {
    StringRef FeatureList(
        getASTContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
    if (!Builtin::evaluateRequiredTargetFeatures(FeatureList,
                                                 CallerFeatureMap)) {
      Diag(TheCall->getBeginLoc(), diag::err_builtin_needs_feature)
          << FD->getDeclName() << FeatureList;
      return false;
    }

    unsigned ArgCount = TheCall->getNumArgs() - 1;
    llvm::APSInt Result;

    // Complain about dmask values that are too large to fit into the 4 bits
    // the dmask field actually has in the corresponding HW instructions.
    constexpr unsigned DMaskArgNo = 1;
    return SemaRef.BuiltinConstantArgRange(TheCall, DMaskArgNo, /*Low=*/0,
                                           /*High=*/15,
                                           /*RangeIsError=*/true) ||
           SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result) ||
           SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result);
  }
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x64_iu8:
  case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x128_iu8: {
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x64_iu8) {
      if (SemaRef.checkArgCountRange(TheCall, /*MinArgCount=*/7,
                                     /*MaxArgCount=*/8))
        return true;
      if (TheCall->getNumArgs() == 7)
        return false;
    } else if (BuiltinID ==
               AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x128_iu8) {
      if (SemaRef.checkArgCountRange(TheCall, /*MinArgCount=*/8,
                                     /*MaxArgCount=*/9))
        return true;
      if (TheCall->getNumArgs() == 8)
        return false;
    }
    // Check if the last argument (clamp operand) is a constant and is
    // convertible to bool.
    Expr *ClampArg = TheCall->getArg(TheCall->getNumArgs() - 1);
    // 1) Ensure clamp argument is a constant expression.
    llvm::APSInt ClampValue;
    if (!SemaRef.VerifyIntegerConstantExpression(ClampArg, &ClampValue)
             .isUsable())
      return true;
    // 2) Check if the argument can be converted to bool type.
    if (!SemaRef.Context.hasSameType(ClampArg->getType(),
                                     SemaRef.Context.BoolTy)) {
      // Try to convert to bool.
      QualType BoolTy = SemaRef.Context.BoolTy;
      ExprResult ClampExpr(ClampArg);
      SemaRef.CheckSingleAssignmentConstraints(BoolTy, ClampExpr);
      if (ClampExpr.isInvalid())
        return true;
    }
    return false;
  }
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x32_bf16:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x4_f32:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x32_f16:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x32_f16:
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x32_bf16:
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16f32_16x16x32_bf16:
    return SemaRef.BuiltinConstantArgRange(TheCall, /*ArgNum=*/0, /*Low=*/0,
                                           /*High=*/0) ||
           SemaRef.BuiltinConstantArgRange(TheCall, /*ArgNum=*/2, /*Low=*/0,
                                           /*High=*/0);
  default:
    return false;
  }

  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, getASTContext()))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check the validity of the memory ordering as per the C11 / C++11 memory
  // model. Only the fence builtin needs this check; atomic dec/inc allow all
  // memory orders.
  if (!llvm::isValidAtomicOrderingCABI(Ord))
    return Diag(ArgExpr->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << 0 << ArgExpr->getSourceRange();
  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
  case llvm::AtomicOrderingCABI::relaxed:
  case llvm::AtomicOrderingCABI::consume:
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
      return Diag(ArgExpr->getBeginLoc(),
                  diag::warn_atomic_op_has_invalid_memory_order)
             << 0 << ArgExpr->getSourceRange();
    break;
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::release:
  case llvm::AtomicOrderingCABI::acq_rel:
  case llvm::AtomicOrderingCABI::seq_cst:
    break;
  }

  Arg = TheCall->getArg(ScopeIndex);
  ArgExpr = Arg.get();
  Expr::EvalResult ArgResult1;
  // Check that the sync scope is a constant literal.
  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, getASTContext()))
    return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
           << ArgExpr->getType();

  return false;
}

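/// Checks that \p E is a valid C ABI atomic memory ordering for an operation
/// that may load (\p MayLoad) and/or may store (\p MayStore); e.g. acq_rel is
/// only valid when the operation both loads and stores. Returns true on error.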
bool SemaAMDGPU::checkAtomicOrderingCABIArg(Expr *E, bool MayLoad,
                                            bool MayStore) {
  Expr::EvalResult AtomicOrdArgRes;
  if (!E->EvaluateAsInt(AtomicOrdArgRes, getASTContext()))
    llvm_unreachable("Intrinsic requires imm for atomic ordering argument!");
  auto Ord =
      llvm::AtomicOrderingCABI(AtomicOrdArgRes.Val.getInt().getZExtValue());

  // Atomic ordering cannot be acq_rel in any case, acquire for stores or
  // release for loads.
  if (!llvm::isValidAtomicOrderingCABI((unsigned)Ord) ||
      (!(MayLoad && MayStore) && (Ord == llvm::AtomicOrderingCABI::acq_rel)) ||
      (!MayLoad && Ord == llvm::AtomicOrderingCABI::acquire) ||
      (!MayStore && Ord == llvm::AtomicOrderingCABI::release)) {
    return Diag(E->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << 0 << E->getSourceRange();
  }

  return false;
}

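/// Validates a cooperative atomic load/store builtin call: the pointer
/// operand must be in the flat or global address space, the atomic ordering
/// must be valid for the operation, and the syncscope operand must be a
/// string literal. Returns true on error.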
bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
  bool Fail = false;

  // First argument is a global or generic pointer.
  Expr *PtrArg = TheCall->getArg(0);
  QualType PtrTy = PtrArg->getType()->getPointeeType();
  unsigned AS = getASTContext().getTargetAddressSpace(PtrTy.getAddressSpace());
  if (AS != llvm::AMDGPUAS::FLAT_ADDRESS &&
      AS != llvm::AMDGPUAS::GLOBAL_ADDRESS) {
    Fail = true;
    Diag(TheCall->getBeginLoc(), diag::err_amdgcn_coop_atomic_invalid_as)
        << PtrArg->getSourceRange();
  }

  Expr *AO = TheCall->getArg(IsStore ? 2 : 1);
  Expr *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);

  if (AO->isValueDependent() || Scope->isValueDependent())
    return false;

  // Check the atomic ordering.
  Fail |= checkAtomicOrderingCABIArg(AO, /*MayLoad=*/!IsStore,
                                     /*MayStore=*/IsStore);

  // Last argument is the syncscope as a string literal.
  if (!isa<StringLiteral>(Scope->IgnoreParenImpCasts())) {
    Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
        << Scope->getSourceRange();
    Fail = true;
  }

  return Fail;
}

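/// Validates a monitor load builtin call: the atomic ordering must be valid
/// for a load, and a constant syncscope operand must name a valid scope.
/// Returns true on error.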
bool SemaAMDGPU::checkAtomicMonitorLoad(CallExpr *TheCall) {
  bool Fail = false;

  Expr *AO = TheCall->getArg(1);
  Expr *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);

  if (AO->isValueDependent() || Scope->isValueDependent())
    return false;

  Fail |= checkAtomicOrderingCABIArg(AO, /*MayLoad=*/true,
                                     /*MayStore=*/false);

  auto ScopeModel = AtomicScopeModel::create(AtomicScopeModelKind::Generic);
  if (std::optional<llvm::APSInt> Result =
          Scope->getIntegerConstantExpr(SemaRef.Context)) {
    if (!ScopeModel->isValid(Result->getZExtValue())) {
      Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_sync_scope)
          << Scope->getSourceRange();
      Fail = true;
    }
  }

  return Fail;
}

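/// Validates the DPP move/update builtins: the call must have exactly
/// \p NumArgs arguments, the first \p NumDataArgs arguments must have
/// non-complex arithmetic types, and with two data arguments the types must
/// match (modulo signedness for same-width integers). Returns true on error.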
bool SemaAMDGPU::checkMovDPPFunctionCall(CallExpr *TheCall, unsigned NumArgs,
                                         unsigned NumDataArgs) {
  assert(NumDataArgs <= 2);
  if (SemaRef.checkArgCountRange(TheCall, NumArgs, NumArgs))
    return true;
  Expr *Args[2];
  QualType ArgTys[2];
  for (unsigned I = 0; I != NumDataArgs; ++I) {
    Args[I] = TheCall->getArg(I);
    ArgTys[I] = Args[I]->getType();
    // TODO: Vectors can also be supported.
    if (!ArgTys[I]->isArithmeticType() || ArgTys[I]->isAnyComplexType()) {
      SemaRef.Diag(Args[I]->getBeginLoc(),
                   diag::err_typecheck_cond_expect_int_float)
          << ArgTys[I] << Args[I]->getSourceRange();
      return true;
    }
  }
  if (NumDataArgs < 2)
    return false;

  if (getASTContext().hasSameUnqualifiedType(ArgTys[0], ArgTys[1]))
    return false;

  if (((ArgTys[0]->isUnsignedIntegerType() &&
        ArgTys[1]->isSignedIntegerType()) ||
       (ArgTys[0]->isSignedIntegerType() &&
        ArgTys[1]->isUnsignedIntegerType())) &&
      getASTContext().getTypeSize(ArgTys[0]) ==
          getASTContext().getTypeSize(ArgTys[1]))
    return false;

  SemaRef.Diag(Args[1]->getBeginLoc(),
               diag::err_typecheck_call_different_arg_types)
      << ArgTys[0] << ArgTys[1];
  return true;
}

static bool
checkAMDGPUFlatWorkGroupSizeArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr,
                                      const AMDGPUFlatWorkGroupSizeAttr &Attr) {
  // Accept template arguments for now as they depend on something else.
  // We'll get to check them when they eventually get instantiated.
  if (MinExpr->isValueDependent() || MaxExpr->isValueDependent())
    return false;

  uint32_t Min = 0;
  if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
    return true;

  uint32_t Max = 0;
  if (!S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
    return true;

  if (Min == 0 && Max != 0) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 0;
    return true;
  }
  if (Min > Max) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 1;
    return true;
  }

  return false;
}

AMDGPUFlatWorkGroupSizeAttr *
SemaAMDGPU::CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI,
                                              Expr *MinExpr, Expr *MaxExpr) {
  ASTContext &Context = getASTContext();
  AMDGPUFlatWorkGroupSizeAttr TmpAttr(Context, CI, MinExpr, MaxExpr);

  if (checkAMDGPUFlatWorkGroupSizeArguments(SemaRef, MinExpr, MaxExpr,
                                            TmpAttr))
    return nullptr;
  return ::new (Context)
      AMDGPUFlatWorkGroupSizeAttr(Context, CI, MinExpr, MaxExpr);
}

void SemaAMDGPU::addAMDGPUFlatWorkGroupSizeAttr(Decl *D,
                                                const AttributeCommonInfo &CI,
                                                Expr *MinExpr, Expr *MaxExpr) {
  if (auto *Attr = CreateAMDGPUFlatWorkGroupSizeAttr(CI, MinExpr, MaxExpr))
    D->addAttr(Attr);
}

void SemaAMDGPU::handleAMDGPUFlatWorkGroupSizeAttr(Decl *D,
                                                   const ParsedAttr &AL) {
  Expr *MinExpr = AL.getArgAsExpr(0);
  Expr *MaxExpr = AL.getArgAsExpr(1);

  addAMDGPUFlatWorkGroupSizeAttr(D, AL, MinExpr, MaxExpr);
}

static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr,
                                           Expr *MaxExpr,
                                           const AMDGPUWavesPerEUAttr &Attr) {
  if (S.DiagnoseUnexpandedParameterPack(MinExpr) ||
      (MaxExpr && S.DiagnoseUnexpandedParameterPack(MaxExpr)))
    return true;

  // Accept template arguments for now as they depend on something else.
  // We'll get to check them when they eventually get instantiated.
  if (MinExpr->isValueDependent() || (MaxExpr && MaxExpr->isValueDependent()))
    return false;

  uint32_t Min = 0;
  if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
    return true;

  uint32_t Max = 0;
  if (MaxExpr && !S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
    return true;

  if (Min == 0 && Max != 0) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 0;
    return true;
  }
  if (Max != 0 && Min > Max) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 1;
    return true;
  }

  return false;
}

AMDGPUWavesPerEUAttr *
SemaAMDGPU::CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI,
                                       Expr *MinExpr, Expr *MaxExpr) {
  ASTContext &Context = getASTContext();
  AMDGPUWavesPerEUAttr TmpAttr(Context, CI, MinExpr, MaxExpr);

  if (checkAMDGPUWavesPerEUArguments(SemaRef, MinExpr, MaxExpr, TmpAttr))
    return nullptr;

  return ::new (Context) AMDGPUWavesPerEUAttr(Context, CI, MinExpr, MaxExpr);
}

void SemaAMDGPU::addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
                                         Expr *MinExpr, Expr *MaxExpr) {
  if (auto *Attr = CreateAMDGPUWavesPerEUAttr(CI, MinExpr, MaxExpr))
    D->addAttr(Attr);
}

void SemaAMDGPU::handleAMDGPUWavesPerEUAttr(Decl *D, const ParsedAttr &AL) {
  if (!AL.checkAtLeastNumArgs(SemaRef, 1) || !AL.checkAtMostNumArgs(SemaRef, 2))
    return;

  Expr *MinExpr = AL.getArgAsExpr(0);
  Expr *MaxExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;

  addAMDGPUWavesPerEUAttr(D, AL, MinExpr, MaxExpr);
}

void SemaAMDGPU::handleAMDGPUNumSGPRAttr(Decl *D, const ParsedAttr &AL) {
  uint32_t NumSGPR = 0;
  Expr *NumSGPRExpr = AL.getArgAsExpr(0);
  if (!SemaRef.checkUInt32Argument(AL, NumSGPRExpr, NumSGPR))
    return;

  D->addAttr(::new (getASTContext())
                 AMDGPUNumSGPRAttr(getASTContext(), AL, NumSGPR));
}

void SemaAMDGPU::handleAMDGPUNumVGPRAttr(Decl *D, const ParsedAttr &AL) {
  uint32_t NumVGPR = 0;
  Expr *NumVGPRExpr = AL.getArgAsExpr(0);
  if (!SemaRef.checkUInt32Argument(AL, NumVGPRExpr, NumVGPR))
    return;

  D->addAttr(::new (getASTContext())
                 AMDGPUNumVGPRAttr(getASTContext(), AL, NumVGPR));
}

static bool
checkAMDGPUMaxNumWorkGroupsArguments(Sema &S, Expr *XExpr, Expr *YExpr,
                                     Expr *ZExpr,
                                     const AMDGPUMaxNumWorkGroupsAttr &Attr) {
  if (S.DiagnoseUnexpandedParameterPack(XExpr) ||
      (YExpr && S.DiagnoseUnexpandedParameterPack(YExpr)) ||
      (ZExpr && S.DiagnoseUnexpandedParameterPack(ZExpr)))
    return true;

  // Accept template arguments for now as they depend on something else.
  // We'll get to check them when they eventually get instantiated.
  if (XExpr->isValueDependent() || (YExpr && YExpr->isValueDependent()) ||
      (ZExpr && ZExpr->isValueDependent()))
    return false;

  uint32_t NumWG = 0;
  Expr *Exprs[3] = {XExpr, YExpr, ZExpr};
  for (int i = 0; i < 3; i++) {
    if (Exprs[i]) {
      if (!S.checkUInt32Argument(Attr, Exprs[i], NumWG, i,
                                 /*StrictlyUnsigned=*/true))
        return true;
      if (NumWG == 0) {
        S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
            << &Attr << Exprs[i]->getSourceRange();
        return true;
      }
    }
  }

  return false;
}

AMDGPUMaxNumWorkGroupsAttr *SemaAMDGPU::CreateAMDGPUMaxNumWorkGroupsAttr(
    const AttributeCommonInfo &CI, Expr *XExpr, Expr *YExpr, Expr *ZExpr) {
  ASTContext &Context = getASTContext();
  AMDGPUMaxNumWorkGroupsAttr TmpAttr(Context, CI, XExpr, YExpr, ZExpr);
  assert(!SemaRef.isSFINAEContext() &&
         "Can't produce SFINAE diagnostic pointing to temporary attribute");

  if (checkAMDGPUMaxNumWorkGroupsArguments(SemaRef, XExpr, YExpr, ZExpr,
                                           TmpAttr))
    return nullptr;

  return ::new (Context)
      AMDGPUMaxNumWorkGroupsAttr(Context, CI, XExpr, YExpr, ZExpr);
}

void SemaAMDGPU::addAMDGPUMaxNumWorkGroupsAttr(Decl *D,
                                               const AttributeCommonInfo &CI,
                                               Expr *XExpr, Expr *YExpr,
                                               Expr *ZExpr) {
  if (auto *Attr = CreateAMDGPUMaxNumWorkGroupsAttr(CI, XExpr, YExpr, ZExpr))
    D->addAttr(Attr);
}

void SemaAMDGPU::handleAMDGPUMaxNumWorkGroupsAttr(Decl *D,
                                                  const ParsedAttr &AL) {
  Expr *YExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
  Expr *ZExpr = (AL.getNumArgs() > 2) ? AL.getArgAsExpr(2) : nullptr;
  addAMDGPUMaxNumWorkGroupsAttr(D, AL, AL.getArgAsExpr(0), YExpr, ZExpr);
}

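/// Expands __builtin_amdgcn_processor_is / __builtin_amdgcn_is_invocable
/// predicate calls into boolean literals when the target is known; for SPIR-V
/// targets the call itself is retained, retyped to bool. Returns nullptr if
/// the predicate's argument is invalid.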
Expr *SemaAMDGPU::ExpandAMDGPUPredicateBuiltIn(Expr *E) {
  CallExpr *CE = cast<CallExpr>(E->IgnoreParens());
  ASTContext &Ctx = getASTContext();
  QualType BoolTy = Ctx.getLogicalOperationType();
  llvm::APInt False = llvm::APInt::getZero(Ctx.getIntWidth(BoolTy));
  llvm::APInt True = llvm::APInt::getAllOnes(Ctx.getIntWidth(BoolTy));
  SourceLocation Loc = CE->getExprLoc();

  if (!CE->getBuiltinCallee())
    return *ExpandedPredicates
                .insert(IntegerLiteral::Create(Ctx, False, BoolTy, Loc))
                .first;

  bool P = false;
  unsigned BI = CE->getBuiltinCallee();
  if (Ctx.BuiltinInfo.isAuxBuiltinID(BI))
    BI = Ctx.BuiltinInfo.getAuxBuiltinID(BI);

  if (BI == AMDGPU::BI__builtin_amdgcn_processor_is) {
    auto *GFX = dyn_cast<StringLiteral>(CE->getArg(0)->IgnoreParenCasts());
    if (!GFX) {
      Diag(Loc, diag::err_amdgcn_processor_is_arg_not_literal);
      return nullptr;
    }

    StringRef N = GFX->getString();
    const TargetInfo &TI = Ctx.getTargetInfo();
    const TargetInfo *AuxTI = Ctx.getAuxTargetInfo();
    if (!TI.isValidCPUName(N) && (!AuxTI || !AuxTI->isValidCPUName(N))) {
      Diag(Loc, diag::err_amdgcn_processor_is_arg_invalid_value) << N;
      SmallVector<StringRef, 32> ValidList;
      if (TI.getTriple().getVendor() == llvm::Triple::VendorType::AMD)
        TI.fillValidCPUList(ValidList);
      else if (AuxTI) // Since the BI is present it must be an AMDGPU triple.
        AuxTI->fillValidCPUList(ValidList);
      if (!ValidList.empty())
        Diag(Loc, diag::note_amdgcn_processor_is_valid_options)
            << llvm::join(ValidList, ", ");
      return nullptr;
    }
    if (Ctx.getTargetInfo().getTriple().isSPIRV()) {
      CE->setType(BoolTy);
      return *ExpandedPredicates.insert(CE).first;
    }

    if (auto TID = Ctx.getTargetInfo().getTargetID())
      P = TID->find(N) == 0;
  } else {
    Expr *Arg = CE->getArg(0);
    if (!Arg || Arg->getType() != Ctx.BuiltinFnTy) {
      Diag(Loc, diag::err_amdgcn_is_invocable_arg_invalid_value) << Arg;
      return nullptr;
    }

    if (Ctx.getTargetInfo().getTriple().isSPIRV()) {
      CE->setType(BoolTy);
      return *ExpandedPredicates.insert(CE).first;
    }

    auto *FD = cast<FunctionDecl>(Arg->getReferencedDeclOfCallee());

    StringRef RF = Ctx.BuiltinInfo.getRequiredFeatures(FD->getBuiltinID());
    llvm::StringMap<bool> CF;
    Ctx.getFunctionFeatureMap(CF, FD);

    P = Builtin::evaluateRequiredTargetFeatures(RF, CF);
  }

  return *ExpandedPredicates
              .insert(
                  IntegerLiteral::Create(Ctx, P ? True : False, BoolTy, Loc))
              .first;
}

bool SemaAMDGPU::IsPredicate(Expr *E) const {
  return ExpandedPredicates.contains(E);
}

void SemaAMDGPU::AddPotentiallyUnguardedBuiltinUser(FunctionDecl *FD) {
  PotentiallyUnguardedBuiltinUsers.insert(FD);
}

bool SemaAMDGPU::HasPotentiallyUnguardedBuiltinUsage(FunctionDecl *FD) const {
  return PotentiallyUnguardedBuiltinUsers.contains(FD);
}

namespace {
/// This class implements -Wamdgpu-unguarded-builtin-usage.
///
/// This is done with a traversal of the AST of a function that includes a
/// call to a target-specific builtin. Whenever we encounter an \c if of the
/// form \c if(__builtin_amdgcn_is_invocable), we consider the then statement
/// guarded.
class DiagnoseUnguardedBuiltins : public DynamicRecursiveASTVisitor {
  // TODO: This could eventually be extended to consider what happens when
  // there are multiple target architectures specified via
  // target("arch=gfxXXX") target("arch=gfxyyy") etc., as well as feature
  // disabling via "-XXX".
  Sema &SemaRef;

  SmallVector<StringRef> TargetFeatures;
  SmallVector<std::pair<SourceLocation, StringRef>> CurrentGFXIP;
  SmallVector<unsigned> GuardedBuiltins;

  static Expr *FindPredicate(Expr *Cond) {
    if (auto *CE = dyn_cast<CallExpr>(Cond)) {
      if (CE->getBuiltinCallee() == AMDGPU::BI__builtin_amdgcn_is_invocable ||
          CE->getBuiltinCallee() == AMDGPU::BI__builtin_amdgcn_processor_is)
        return Cond;
    } else if (auto *UO = dyn_cast<UnaryOperator>(Cond)) {
      return FindPredicate(UO->getSubExpr());
    } else if (auto *BO = dyn_cast<BinaryOperator>(Cond)) {
      if ((Cond = FindPredicate(BO->getLHS())))
        return Cond;
      return FindPredicate(BO->getRHS());
    }
    return nullptr;
  }

  bool EnterPredicateGuardedContext(CallExpr *P);
  void ExitPredicateGuardedContext(bool WasProcessorCheck);
  bool TraverseGuardedStmt(Stmt *S, CallExpr *P);

public:
  DiagnoseUnguardedBuiltins(Sema &SemaRef) : SemaRef(SemaRef) {
    if (auto *TAT = SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)
                        ->getAttr<TargetAttr>()) {
      // We use the somewhat misnamed x86 accessors because they provide
      // exactly what we require.
      TAT->getX86AddedFeatures(TargetFeatures);
      if (auto GFXIP = TAT->getX86Architecture())
        CurrentGFXIP.emplace_back(TAT->getLocation(), *GFXIP);
    }
  }

  bool TraverseLambdaExpr(LambdaExpr *LE) override {
    if (SemaRef.AMDGPU().HasPotentiallyUnguardedBuiltinUsage(
            LE->getCallOperator()))
      return true; // We have already handled this.
    return DynamicRecursiveASTVisitor::TraverseLambdaExpr(LE);
  }

  bool TraverseStmt(Stmt *S) override {
    if (!S)
      return true;
    return DynamicRecursiveASTVisitor::TraverseStmt(S);
  }

  void IssueDiagnostics(Stmt *S) { TraverseStmt(S); }

  bool TraverseIfStmt(IfStmt *If) override {
    if (auto *CE = dyn_cast_or_null<CallExpr>(FindPredicate(If->getCond())))
      return TraverseGuardedStmt(If, CE);
    return DynamicRecursiveASTVisitor::TraverseIfStmt(If);
  }

  bool TraverseCaseStmt(CaseStmt *CS) override {
    return TraverseStmt(CS->getSubStmt());
  }

  bool TraverseConditionalOperator(ConditionalOperator *CO) override {
    if (auto *CE = dyn_cast_or_null<CallExpr>(FindPredicate(CO->getCond())))
      return TraverseGuardedStmt(CO, CE);
    return DynamicRecursiveASTVisitor::TraverseConditionalOperator(CO);
  }

  bool VisitAsmStmt(AsmStmt *ASM) override;
  bool VisitCallExpr(CallExpr *CE) override;
};

bool DiagnoseUnguardedBuiltins::EnterPredicateGuardedContext(CallExpr *P) {
  bool IsProcessorCheck =
      P->getBuiltinCallee() == AMDGPU::BI__builtin_amdgcn_processor_is;

  if (IsProcessorCheck) {
    StringRef G = cast<clang::StringLiteral>(P->getArg(0))->getString();
    // TODO: handle generic ISAs.
    if (!CurrentGFXIP.empty() && G != CurrentGFXIP.back().second) {
      SemaRef.Diag(P->getExprLoc(),
                   diag::err_amdgcn_conflicting_is_processor_options)
          << P;
      SemaRef.Diag(CurrentGFXIP.back().first,
                   diag::note_amdgcn_previous_is_processor_guard);
    }
    CurrentGFXIP.emplace_back(P->getExprLoc(), G);
  } else {
    auto *FD = cast<FunctionDecl>(
        cast<DeclRefExpr>(P->getArg(0))->getReferencedDeclOfCallee());
    GuardedBuiltins.push_back(FD->getBuiltinID());
  }

  return IsProcessorCheck;
}

void DiagnoseUnguardedBuiltins::ExitPredicateGuardedContext(bool WasProcCheck) {
  if (WasProcCheck)
    CurrentGFXIP.pop_back();
  else
    GuardedBuiltins.pop_back();
}

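/// Returns the {guarded, unguarded} sub-statements of an if statement or
/// conditional operator, swapping the two when the predicate condition is
/// negated with '!'.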
inline std::pair<Stmt *, Stmt *> GetTraversalOrder(Stmt *S) {
  std::pair<Stmt *, Stmt *> Ordered;
  Expr *Condition = nullptr;

  if (auto *CO = dyn_cast<ConditionalOperator>(S)) {
    Condition = CO->getCond();
    Ordered = {CO->getTrueExpr(), CO->getFalseExpr()};
  } else if (auto *If = dyn_cast<IfStmt>(S)) {
    Condition = If->getCond();
    Ordered = {If->getThen(), If->getElse()};
  }

  if (auto *UO = dyn_cast<UnaryOperator>(Condition))
    if (UO->getOpcode() == UnaryOperatorKind::UO_LNot)
      std::swap(Ordered.first, Ordered.second);

  return Ordered;
}

bool DiagnoseUnguardedBuiltins::TraverseGuardedStmt(Stmt *S, CallExpr *P) {
  assert(S && "Unexpected missing Statement!");
  assert(P && "Unexpected missing Predicate!");

  auto [Guarded, Unguarded] = GetTraversalOrder(S);

  bool WasProcessorCheck = EnterPredicateGuardedContext(P);

  bool Continue = TraverseStmt(Guarded);

  ExitPredicateGuardedContext(WasProcessorCheck);

  return Continue && TraverseStmt(Unguarded);
}

bool DiagnoseUnguardedBuiltins::VisitAsmStmt(AsmStmt *ASM) {
  // TODO: should we check if the ASM is valid for the target? Can we?
  if (!CurrentGFXIP.empty())
    return true;

  std::string S = ASM->generateAsmString(SemaRef.getASTContext());
  SemaRef.Diag(ASM->getAsmLoc(), diag::warn_amdgcn_unguarded_asm_stmt) << S;
  SemaRef.Diag(ASM->getAsmLoc(), diag::note_amdgcn_unguarded_asm_silence) << S;

  return true;
}

bool DiagnoseUnguardedBuiltins::VisitCallExpr(CallExpr *CE) {
  unsigned ID = CE->getBuiltinCallee();
  Builtin::Context &BInfo = SemaRef.getASTContext().BuiltinInfo;

  if (!ID)
    return true;
  if (!BInfo.isTSBuiltin(ID))
    return true;
  if (ID == AMDGPU::BI__builtin_amdgcn_processor_is ||
      ID == AMDGPU::BI__builtin_amdgcn_is_invocable)
    return true;
  if (llvm::find(GuardedBuiltins, ID) != GuardedBuiltins.end())
    return true;

  StringRef FL(BInfo.getRequiredFeatures(ID));
  llvm::StringMap<bool> FeatureMap;
  if (CurrentGFXIP.empty()) {
    for (auto &&F : TargetFeatures)
      FeatureMap[F] = true;
    for (auto &&GID : GuardedBuiltins)
      for (auto &&F : llvm::split(BInfo.getRequiredFeatures(GID), ','))
        FeatureMap[F] = true;
  } else {
    static const llvm::Triple AMDGCN("amdgcn-amd-amdhsa");
    llvm::AMDGPU::fillAMDGPUFeatureMap(CurrentGFXIP.back().second, AMDGCN,
                                       FeatureMap);
  }

  FunctionDecl *BI = CE->getDirectCallee();
  SourceLocation BICallLoc = CE->getExprLoc();
  if (Builtin::evaluateRequiredTargetFeatures(FL, FeatureMap)) {
    SemaRef.Diag(BICallLoc, diag::warn_amdgcn_unguarded_builtin) << BI;
    SemaRef.Diag(BICallLoc, diag::note_amdgcn_unguarded_builtin_silence) << BI;
  } else {
    StringRef GFXIP = CurrentGFXIP.empty() ? "" : CurrentGFXIP.back().second;
    SemaRef.Diag(BICallLoc, diag::err_amdgcn_incompatible_builtin)
        << BI << FL << !CurrentGFXIP.empty() << GFXIP;
    if (!CurrentGFXIP.empty())
      SemaRef.Diag(CurrentGFXIP.back().first,
                   diag::note_amdgcn_previous_is_processor_guard);
  }

  return true;
}
} // Unnamed namespace

void SemaAMDGPU::DiagnoseUnguardedBuiltinUsage(FunctionDecl *FD) {
  DiagnoseUnguardedBuiltins(SemaRef).IssueDiagnostics(FD->getBody());
}
} // namespace clang