1 | //===------ SemaARM.cpp ---------- ARM target-specific routines -----------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements semantic analysis functions specific to ARM. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "clang/Sema/SemaARM.h" |
14 | #include "clang/Basic/DiagnosticSema.h" |
15 | #include "clang/Basic/TargetBuiltins.h" |
16 | #include "clang/Basic/TargetInfo.h" |
17 | #include "clang/Sema/Initialization.h" |
18 | #include "clang/Sema/ParsedAttr.h" |
19 | #include "clang/Sema/Sema.h" |
20 | |
21 | namespace clang { |
22 | |
// Trivial constructor: SemaARM only forwards the owning Sema instance to its
// SemaBase subobject; all state lives in Sema itself.
SemaARM::SemaARM(Sema &S) : SemaBase(S) {}
24 | |
25 | /// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions |
26 | bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, |
27 | CallExpr *TheCall) { |
28 | ASTContext &Context = getASTContext(); |
29 | |
30 | if (BuiltinID == AArch64::BI__builtin_arm_irg) { |
31 | if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2)) |
32 | return true; |
33 | Expr *Arg0 = TheCall->getArg(Arg: 0); |
34 | Expr *Arg1 = TheCall->getArg(Arg: 1); |
35 | |
36 | ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0); |
37 | if (FirstArg.isInvalid()) |
38 | return true; |
39 | QualType FirstArgType = FirstArg.get()->getType(); |
40 | if (!FirstArgType->isAnyPointerType()) |
41 | return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer) |
42 | << "first" << FirstArgType << Arg0->getSourceRange(); |
43 | TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get()); |
44 | |
45 | ExprResult SecArg = SemaRef.DefaultLvalueConversion(E: Arg1); |
46 | if (SecArg.isInvalid()) |
47 | return true; |
48 | QualType SecArgType = SecArg.get()->getType(); |
49 | if (!SecArgType->isIntegerType()) |
50 | return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_integer) |
51 | << "second" << SecArgType << Arg1->getSourceRange(); |
52 | |
53 | // Derive the return type from the pointer argument. |
54 | TheCall->setType(FirstArgType); |
55 | return false; |
56 | } |
57 | |
58 | if (BuiltinID == AArch64::BI__builtin_arm_addg) { |
59 | if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2)) |
60 | return true; |
61 | |
62 | Expr *Arg0 = TheCall->getArg(Arg: 0); |
63 | ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0); |
64 | if (FirstArg.isInvalid()) |
65 | return true; |
66 | QualType FirstArgType = FirstArg.get()->getType(); |
67 | if (!FirstArgType->isAnyPointerType()) |
68 | return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer) |
69 | << "first" << FirstArgType << Arg0->getSourceRange(); |
70 | TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get()); |
71 | |
72 | // Derive the return type from the pointer argument. |
73 | TheCall->setType(FirstArgType); |
74 | |
75 | // Second arg must be an constant in range [0,15] |
76 | return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15); |
77 | } |
78 | |
79 | if (BuiltinID == AArch64::BI__builtin_arm_gmi) { |
80 | if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2)) |
81 | return true; |
82 | Expr *Arg0 = TheCall->getArg(Arg: 0); |
83 | Expr *Arg1 = TheCall->getArg(Arg: 1); |
84 | |
85 | ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0); |
86 | if (FirstArg.isInvalid()) |
87 | return true; |
88 | QualType FirstArgType = FirstArg.get()->getType(); |
89 | if (!FirstArgType->isAnyPointerType()) |
90 | return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer) |
91 | << "first" << FirstArgType << Arg0->getSourceRange(); |
92 | |
93 | QualType SecArgType = Arg1->getType(); |
94 | if (!SecArgType->isIntegerType()) |
95 | return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_integer) |
96 | << "second" << SecArgType << Arg1->getSourceRange(); |
97 | TheCall->setType(Context.IntTy); |
98 | return false; |
99 | } |
100 | |
101 | if (BuiltinID == AArch64::BI__builtin_arm_ldg || |
102 | BuiltinID == AArch64::BI__builtin_arm_stg) { |
103 | if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 1)) |
104 | return true; |
105 | Expr *Arg0 = TheCall->getArg(Arg: 0); |
106 | ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0); |
107 | if (FirstArg.isInvalid()) |
108 | return true; |
109 | |
110 | QualType FirstArgType = FirstArg.get()->getType(); |
111 | if (!FirstArgType->isAnyPointerType()) |
112 | return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer) |
113 | << "first" << FirstArgType << Arg0->getSourceRange(); |
114 | TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get()); |
115 | |
116 | // Derive the return type from the pointer argument. |
117 | if (BuiltinID == AArch64::BI__builtin_arm_ldg) |
118 | TheCall->setType(FirstArgType); |
119 | return false; |
120 | } |
121 | |
122 | if (BuiltinID == AArch64::BI__builtin_arm_subp) { |
123 | Expr *ArgA = TheCall->getArg(Arg: 0); |
124 | Expr *ArgB = TheCall->getArg(Arg: 1); |
125 | |
126 | ExprResult ArgExprA = SemaRef.DefaultFunctionArrayLvalueConversion(E: ArgA); |
127 | ExprResult ArgExprB = SemaRef.DefaultFunctionArrayLvalueConversion(E: ArgB); |
128 | |
129 | if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) |
130 | return true; |
131 | |
132 | QualType ArgTypeA = ArgExprA.get()->getType(); |
133 | QualType ArgTypeB = ArgExprB.get()->getType(); |
134 | |
135 | auto isNull = [&](Expr *E) -> bool { |
136 | return E->isNullPointerConstant(Ctx&: Context, |
137 | NPC: Expr::NPC_ValueDependentIsNotNull); |
138 | }; |
139 | |
140 | // argument should be either a pointer or null |
141 | if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) |
142 | return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_null_or_pointer) |
143 | << "first" << ArgTypeA << ArgA->getSourceRange(); |
144 | |
145 | if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) |
146 | return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_null_or_pointer) |
147 | << "second" << ArgTypeB << ArgB->getSourceRange(); |
148 | |
149 | // Ensure Pointee types are compatible |
150 | if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && |
151 | ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { |
152 | QualType pointeeA = ArgTypeA->getPointeeType(); |
153 | QualType pointeeB = ArgTypeB->getPointeeType(); |
154 | if (!Context.typesAreCompatible( |
155 | T1: Context.getCanonicalType(T: pointeeA).getUnqualifiedType(), |
156 | T2: Context.getCanonicalType(T: pointeeB).getUnqualifiedType())) { |
157 | return Diag(Loc: TheCall->getBeginLoc(), |
158 | DiagID: diag::err_typecheck_sub_ptr_compatible) |
159 | << ArgTypeA << ArgTypeB << ArgA->getSourceRange() |
160 | << ArgB->getSourceRange(); |
161 | } |
162 | } |
163 | |
164 | // at least one argument should be pointer type |
165 | if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) |
166 | return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_any2arg_pointer) |
167 | << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); |
168 | |
169 | if (isNull(ArgA)) // adopt type of the other pointer |
170 | ArgExprA = |
171 | SemaRef.ImpCastExprToType(E: ArgExprA.get(), Type: ArgTypeB, CK: CK_NullToPointer); |
172 | |
173 | if (isNull(ArgB)) |
174 | ArgExprB = |
175 | SemaRef.ImpCastExprToType(E: ArgExprB.get(), Type: ArgTypeA, CK: CK_NullToPointer); |
176 | |
177 | TheCall->setArg(Arg: 0, ArgExpr: ArgExprA.get()); |
178 | TheCall->setArg(Arg: 1, ArgExpr: ArgExprB.get()); |
179 | TheCall->setType(Context.LongLongTy); |
180 | return false; |
181 | } |
182 | assert(false && "Unhandled ARM MTE intrinsic" ); |
183 | return true; |
184 | } |
185 | |
/// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
///
/// \param BuiltinID which __builtin_arm_rsr*/wsr* variant is being checked;
///        this selects ARM vs AArch64 encoding rules.
/// \param TheCall the builtin call expression.
/// \param ArgNum index of the register-name string argument.
/// \param ExpectedFieldNum number of colon-separated fields required for an
///        encoded register specification.
/// \param AllowName whether a plain register name (a single field) is also
///        accepted.
/// \returns true if an error was diagnosed, false otherwise.
bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                   int ArgNum, unsigned ExpectedFieldNum,
                                   bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin." );

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(Arg: ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Val: Arg->IgnoreParenImpCasts()))
    return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Val: Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(A&: Fields, Separator: ":" );

  // Either the exact encoded field count, or (when allowed) a single plain
  // register name.
  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    // 32-bit ARM fields carry mandatory prefixes ("cp"/"p" on the coprocessor
    // number, "c" on the register-number fields); require and strip them
    // before the numeric checks below. Note Fields is mutated in place.
    if (IsARMBuiltin) {
      ValidString &= Fields[0].starts_with_insensitive(Prefix: "cp" ) ||
                     Fields[0].starts_with_insensitive(Prefix: "p" );
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            N: Fields[0].starts_with_insensitive(Prefix: "cp" ) ? 2 : 1);

      ValidString &= Fields[2].starts_with_insensitive(Prefix: "c" );
      if (ValidString)
        Fields[2] = Fields[2].drop_front(N: 1);

      if (FiveFields) {
        ValidString &= Fields[3].starts_with_insensitive(Prefix: "c" );
        if (ValidString)
          Fields[3] = Fields[3].drop_front(N: 1);
      }
    }

    // Bit width of each decimal field: the first field is 2 bits wide on
    // AArch64 but 4 bits on ARM.
    SmallVector<int, 5> FieldBitWidths;
    if (FiveFields)
      FieldBitWidths.append(IL: {IsAArch64Builtin ? 2 : 4, 3, 4, 4, 3});
    else
      FieldBitWidths.append(IL: {4, 3, 4});

    // Every field must parse as a non-negative decimal integer that fits in
    // its bit width.
    for (unsigned i = 0; i < Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(Radix: 10, Result&: IntField);
      ValidString &= (IntField >= 0 && IntField < (1 << FieldBitWidths[i]));
    }

    if (!ValidString)
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // This code validates writes to PSTATE registers.

    // Not a write.
    if (TheCall->getNumArgs() != 2)
      return false;

    // The 128-bit system register accesses do not touch PSTATE.
    if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == AArch64::BI__builtin_arm_wsr128)
      return false;

    // These are the named PSTATE accesses using "MSR (immediate)" instructions,
    // along with the upper limit on the immediates allowed.
    auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
                        .CaseLower(S: "spsel" , Value: 15)
                        .CaseLower(S: "daifclr" , Value: 15)
                        .CaseLower(S: "daifset" , Value: 15)
                        .CaseLower(S: "pan" , Value: 15)
                        .CaseLower(S: "uao" , Value: 15)
                        .CaseLower(S: "dit" , Value: 15)
                        .CaseLower(S: "ssbs" , Value: 15)
                        .CaseLower(S: "tco" , Value: 15)
                        .CaseLower(S: "allint" , Value: 1)
                        .CaseLower(S: "pm" , Value: 1)
                        .Default(Value: std::nullopt);

    // If this is not a named PSTATE, just continue without validating, as this
    // will be lowered to an "MSR (register)" instruction directly
    if (!MaxLimit)
      return false;

    // Here we only allow constants in the range for that pstate, as required by
    // the ACLE.
    //
    // While clang also accepts the names of system registers in its ACLE
    // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
    // as the value written via a register is different to the value used as an
    // immediate to have the same effect. e.g., for the instruction `msr tco,
    // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
    // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
    //
    // If a programmer wants to codegen the MSR (register) form of `msr tco,
    // xN`, they can still do so by specifying the register using five
    // colon-separated numbers in a string.
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: *MaxLimit);
  }

  return false;
}
317 | |
318 | /// getNeonEltType - Return the QualType corresponding to the elements of |
319 | /// the vector type specified by the NeonTypeFlags. This is used to check |
320 | /// the pointer arguments for Neon load/store intrinsics. |
321 | static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, |
322 | bool IsPolyUnsigned, bool IsInt64Long) { |
323 | switch (Flags.getEltType()) { |
324 | case NeonTypeFlags::Int8: |
325 | return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; |
326 | case NeonTypeFlags::Int16: |
327 | return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; |
328 | case NeonTypeFlags::Int32: |
329 | return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; |
330 | case NeonTypeFlags::Int64: |
331 | if (IsInt64Long) |
332 | return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; |
333 | else |
334 | return Flags.isUnsigned() ? Context.UnsignedLongLongTy |
335 | : Context.LongLongTy; |
336 | case NeonTypeFlags::Poly8: |
337 | return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; |
338 | case NeonTypeFlags::Poly16: |
339 | return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; |
340 | case NeonTypeFlags::Poly64: |
341 | if (IsInt64Long) |
342 | return Context.UnsignedLongTy; |
343 | else |
344 | return Context.UnsignedLongLongTy; |
345 | case NeonTypeFlags::Poly128: |
346 | break; |
347 | case NeonTypeFlags::Float16: |
348 | return Context.HalfTy; |
349 | case NeonTypeFlags::Float32: |
350 | return Context.FloatTy; |
351 | case NeonTypeFlags::Float64: |
352 | return Context.DoubleTy; |
353 | case NeonTypeFlags::BFloat16: |
354 | return Context.BFloat16Ty; |
355 | case NeonTypeFlags::MFloat8: |
356 | return Context.MFloat8Ty; |
357 | } |
358 | llvm_unreachable("Invalid NeonTypeFlag!" ); |
359 | } |
360 | |
// Bitmask describing which pieces of SME architectural state (the ZA array
// and the ZT0 register) a builtin reads and/or writes. ZA occupies bits 0-1
// and ZT0 bits 2-3; the *Mask values select each two-bit field.
enum ArmSMEState : unsigned {
  ArmNoState = 0,

  ArmInZA = 0b01,
  ArmOutZA = 0b10,
  ArmInOutZA = 0b11,
  ArmZAMask = 0b11,

  ArmInZT0 = 0b01 << 2,
  ArmOutZT0 = 0b10 << 2,
  ArmInOutZT0 = 0b11 << 2,
  ArmZT0Mask = 0b11 << 2
};
374 | |
375 | bool SemaARM::CheckImmediateArg(CallExpr *TheCall, unsigned CheckTy, |
376 | unsigned ArgIdx, unsigned EltBitWidth, |
377 | unsigned ContainerBitWidth) { |
378 | // Function that checks whether the operand (ArgIdx) is an immediate |
379 | // that is one of a given set of values. |
380 | auto CheckImmediateInSet = [&](std::initializer_list<int64_t> Set, |
381 | int ErrDiag) -> bool { |
382 | // We can't check the value of a dependent argument. |
383 | Expr *Arg = TheCall->getArg(Arg: ArgIdx); |
384 | if (Arg->isTypeDependent() || Arg->isValueDependent()) |
385 | return false; |
386 | |
387 | // Check constant-ness first. |
388 | llvm::APSInt Imm; |
389 | if (SemaRef.BuiltinConstantArg(TheCall, ArgNum: ArgIdx, Result&: Imm)) |
390 | return true; |
391 | |
392 | if (!llvm::is_contained(Set, Element: Imm.getSExtValue())) |
393 | return Diag(Loc: TheCall->getBeginLoc(), DiagID: ErrDiag) << Arg->getSourceRange(); |
394 | return false; |
395 | }; |
396 | |
397 | switch ((ImmCheckType)CheckTy) { |
398 | case ImmCheckType::ImmCheck0_31: |
399 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 31)) |
400 | return true; |
401 | break; |
402 | case ImmCheckType::ImmCheck0_13: |
403 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 13)) |
404 | return true; |
405 | break; |
406 | case ImmCheckType::ImmCheck0_63: |
407 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 63)) |
408 | return true; |
409 | break; |
410 | case ImmCheckType::ImmCheck1_16: |
411 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 16)) |
412 | return true; |
413 | break; |
414 | case ImmCheckType::ImmCheck0_7: |
415 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 7)) |
416 | return true; |
417 | break; |
418 | case ImmCheckType::ImmCheck1_1: |
419 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 1)) |
420 | return true; |
421 | break; |
422 | case ImmCheckType::ImmCheck1_3: |
423 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 3)) |
424 | return true; |
425 | break; |
426 | case ImmCheckType::ImmCheck1_7: |
427 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 7)) |
428 | return true; |
429 | break; |
430 | case ImmCheckType::ImmCheckExtract: |
431 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, |
432 | High: (2048 / EltBitWidth) - 1)) |
433 | return true; |
434 | break; |
435 | case ImmCheckType::ImmCheckCvt: |
436 | case ImmCheckType::ImmCheckShiftRight: |
437 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: EltBitWidth)) |
438 | return true; |
439 | break; |
440 | case ImmCheckType::ImmCheckShiftRightNarrow: |
441 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: EltBitWidth / 2)) |
442 | return true; |
443 | break; |
444 | case ImmCheckType::ImmCheckShiftLeft: |
445 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: EltBitWidth - 1)) |
446 | return true; |
447 | break; |
448 | case ImmCheckType::ImmCheckLaneIndex: |
449 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, |
450 | High: (ContainerBitWidth / EltBitWidth) - 1)) |
451 | return true; |
452 | break; |
453 | case ImmCheckType::ImmCheckLaneIndexCompRotate: |
454 | if (SemaRef.BuiltinConstantArgRange( |
455 | TheCall, ArgNum: ArgIdx, Low: 0, High: (ContainerBitWidth / (2 * EltBitWidth)) - 1)) |
456 | return true; |
457 | break; |
458 | case ImmCheckType::ImmCheckLaneIndexDot: |
459 | if (SemaRef.BuiltinConstantArgRange( |
460 | TheCall, ArgNum: ArgIdx, Low: 0, High: (ContainerBitWidth / (4 * EltBitWidth)) - 1)) |
461 | return true; |
462 | break; |
463 | case ImmCheckType::ImmCheckComplexRot90_270: |
464 | if (CheckImmediateInSet({90, 270}, diag::err_rotation_argument_to_cadd)) |
465 | return true; |
466 | break; |
467 | case ImmCheckType::ImmCheckComplexRotAll90: |
468 | if (CheckImmediateInSet({0, 90, 180, 270}, |
469 | diag::err_rotation_argument_to_cmla)) |
470 | return true; |
471 | break; |
472 | case ImmCheckType::ImmCheck0_1: |
473 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 1)) |
474 | return true; |
475 | break; |
476 | case ImmCheckType::ImmCheck0_2: |
477 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 2)) |
478 | return true; |
479 | break; |
480 | case ImmCheckType::ImmCheck0_3: |
481 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 3)) |
482 | return true; |
483 | break; |
484 | case ImmCheckType::ImmCheck0_0: |
485 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 0)) |
486 | return true; |
487 | break; |
488 | case ImmCheckType::ImmCheck0_15: |
489 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 15)) |
490 | return true; |
491 | break; |
492 | case ImmCheckType::ImmCheck0_255: |
493 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 255)) |
494 | return true; |
495 | break; |
496 | case ImmCheckType::ImmCheck1_32: |
497 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 32)) |
498 | return true; |
499 | break; |
500 | case ImmCheckType::ImmCheck1_64: |
501 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 64)) |
502 | return true; |
503 | break; |
504 | case ImmCheckType::ImmCheck2_4_Mul2: |
505 | if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 2, High: 4) || |
506 | SemaRef.BuiltinConstantArgMultiple(TheCall, ArgNum: ArgIdx, Multiple: 2)) |
507 | return true; |
508 | break; |
509 | } |
510 | return false; |
511 | } |
512 | |
513 | bool SemaARM::PerformNeonImmChecks( |
514 | CallExpr *TheCall, |
515 | SmallVectorImpl<std::tuple<int, int, int, int>> &ImmChecks, |
516 | int OverloadType) { |
517 | bool HasError = false; |
518 | |
519 | for (const auto &I : ImmChecks) { |
520 | auto [ArgIdx, CheckTy, ElementBitWidth, VecBitWidth] = I; |
521 | |
522 | if (OverloadType >= 0) |
523 | ElementBitWidth = NeonTypeFlags(OverloadType).getEltSizeInBits(); |
524 | |
525 | HasError |= CheckImmediateArg(TheCall, CheckTy, ArgIdx, EltBitWidth: ElementBitWidth, |
526 | ContainerBitWidth: VecBitWidth); |
527 | } |
528 | |
529 | return HasError; |
530 | } |
531 | |
532 | bool SemaARM::PerformSVEImmChecks( |
533 | CallExpr *TheCall, SmallVectorImpl<std::tuple<int, int, int>> &ImmChecks) { |
534 | bool HasError = false; |
535 | |
536 | for (const auto &I : ImmChecks) { |
537 | auto [ArgIdx, CheckTy, ElementBitWidth] = I; |
538 | HasError |= |
539 | CheckImmediateArg(TheCall, CheckTy, ArgIdx, EltBitWidth: ElementBitWidth, ContainerBitWidth: 128); |
540 | } |
541 | |
542 | return HasError; |
543 | } |
544 | |
545 | SemaARM::ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) { |
546 | if (FD->hasAttr<ArmLocallyStreamingAttr>()) |
547 | return SemaARM::ArmStreaming; |
548 | if (const Type *Ty = FD->getType().getTypePtrOrNull()) { |
549 | if (const auto *FPT = Ty->getAs<FunctionProtoType>()) { |
550 | if (FPT->getAArch64SMEAttributes() & |
551 | FunctionType::SME_PStateSMEnabledMask) |
552 | return SemaARM::ArmStreaming; |
553 | if (FPT->getAArch64SMEAttributes() & |
554 | FunctionType::SME_PStateSMCompatibleMask) |
555 | return SemaARM::ArmStreamingCompatible; |
556 | } |
557 | } |
558 | return SemaARM::ArmNonStreaming; |
559 | } |
560 | |
/// Check that builtin call TheCall, whose streaming-mode requirement is
/// BuiltinType, is made from a function FD whose streaming interface is
/// compatible. Emits err_attribute_arm_sm_incompat_builtin and returns true
/// on a mismatch; returns false when the call is acceptable or cannot be
/// judged here.
static bool checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
                                     const FunctionDecl *FD,
                                     SemaARM::ArmStreamingType BuiltinType,
                                     unsigned BuiltinID) {
  SemaARM::ArmStreamingType FnType = getArmStreamingFnType(FD);

  // Check if the intrinsic is available in the right mode, i.e.
  // * When compiling for SME only, the caller must be in streaming mode.
  // * When compiling for SVE only, the caller must be in non-streaming mode.
  // * When compiling for both SVE and SME, the caller can be in either mode.
  if (BuiltinType == SemaARM::VerifyRuntimeMode) {
    llvm::StringMap<bool> CallerFeatureMapWithoutSVE;
    S.Context.getFunctionFeatureMap(FeatureMap&: CallerFeatureMapWithoutSVE, FD);
    CallerFeatureMapWithoutSVE["sve" ] = false;

    // Avoid emitting diagnostics for a function that can never compile.
    if (FnType == SemaARM::ArmStreaming && !CallerFeatureMapWithoutSVE["sme" ])
      return false;

    llvm::StringMap<bool> CallerFeatureMapWithoutSME;
    S.Context.getFunctionFeatureMap(FeatureMap&: CallerFeatureMapWithoutSME, FD);
    CallerFeatureMapWithoutSME["sme" ] = false;

    // We know the builtin requires either some combination of SVE flags, or
    // some combination of SME flags, but we need to figure out which part
    // of the required features is satisfied by the target features.
    //
    // For a builtin with target guard 'sve2p1|sme2', if we compile with
    // '+sve2p1,+sme', then we know that it satisfies the 'sve2p1' part if we
    // evaluate the features for '+sve2p1,+sme,+nosme'.
    //
    // Similarly, if we compile with '+sve2,+sme2', then we know it satisfies
    // the 'sme2' part if we evaluate the features for '+sve2,+sme2,+nosve'.
    StringRef BuiltinTargetGuards(
        S.Context.BuiltinInfo.getRequiredFeatures(ID: BuiltinID));
    bool SatisfiesSVE = Builtin::evaluateRequiredTargetFeatures(
        RequiredFatures: BuiltinTargetGuards, TargetFetureMap: CallerFeatureMapWithoutSME);
    bool SatisfiesSME = Builtin::evaluateRequiredTargetFeatures(
        RequiredFatures: BuiltinTargetGuards, TargetFetureMap: CallerFeatureMapWithoutSVE);

    // Narrow the requirement down to the single mode the target features can
    // actually satisfy, then fall through to the generic mismatch check.
    if ((SatisfiesSVE && SatisfiesSME) ||
        (SatisfiesSVE && FnType == SemaARM::ArmStreamingCompatible))
      return false;
    else if (SatisfiesSVE)
      BuiltinType = SemaARM::ArmNonStreaming;
    else if (SatisfiesSME)
      BuiltinType = SemaARM::ArmStreaming;
    else
      // This should be diagnosed by CodeGen
      return false;
  }

  // A builtin restricted to one mode is an error when the caller may be (or
  // definitely is) in the other mode.
  if (FnType != SemaARM::ArmNonStreaming &&
      BuiltinType == SemaARM::ArmNonStreaming)
    S.Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "non-streaming" ;
  else if (FnType != SemaARM::ArmStreaming &&
           BuiltinType == SemaARM::ArmStreaming)
    S.Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "streaming" ;
  else
    return false;

  return true;
}
626 | |
/// Return the ZA/ZT0 state bits used by the given SME builtin, as recorded
/// in the generated arm_sme_builtins_za_state.inc table; builtins without
/// an entry use no SME state.
static ArmSMEState getSMEState(unsigned BuiltinID) {
  switch (BuiltinID) {
  default:
    return ArmNoState;
#define GET_SME_BUILTIN_GET_STATE
#include "clang/Basic/arm_sme_builtins_za_state.inc"
#undef GET_SME_BUILTIN_GET_STATE
  }
}
636 | |
/// Perform SME-specific semantic checks on a builtin call: streaming-mode
/// compatibility, ZA/ZT0 state availability warnings, and immediate range
/// checks. Returns true if an error was diagnosed.
bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // Streaming-mode and ZA/ZT0 checks only apply when the call appears inside
  // a function (or lambda) body.
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    // The generated table fills in BuiltinType for builtins with a
    // streaming-mode requirement; others leave it unset.
    switch (BuiltinID) {
#define GET_SME_STREAMING_ATTRS
#include "clang/Basic/arm_sme_streaming_attrs.inc"
#undef GET_SME_STREAMING_ATTRS
    }

    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;

    // Using a builtin that touches ZA or ZT0 from a function without that
    // state is only a warning, not an error.
    if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
      Diag(Loc: TheCall->getBeginLoc(),
           DiagID: diag::warn_attribute_arm_za_builtin_no_za_state)
          << TheCall->getSourceRange();

    if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
      Diag(Loc: TheCall->getBeginLoc(),
           DiagID: diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
          << TheCall->getSourceRange();
  }

  // Range check SME intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  // The generated cases populate ImmChecks; builtins with no immediate
  // operands return through the default.
  switch (BuiltinID) {
  default:
    return false;
#define GET_SME_IMMEDIATE_CHECK
#include "clang/Basic/arm_sme_sema_rangechecks.inc"
#undef GET_SME_IMMEDIATE_CHECK
  }

  return PerformSVEImmChecks(TheCall, ImmChecks);
}
677 | |
/// Perform SVE-specific semantic checks on a builtin call: streaming-mode
/// compatibility followed by immediate range checks. Returns true if an
/// error was diagnosed.
bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // Streaming-mode checks only apply when the call appears inside a function
  // (or lambda) body.
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    // The generated table fills in BuiltinType for builtins with a
    // streaming-mode requirement; others leave it unset.
    switch (BuiltinID) {
#define GET_SVE_STREAMING_ATTRS
#include "clang/Basic/arm_sve_streaming_attrs.inc"
#undef GET_SVE_STREAMING_ATTRS
    }
    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;
  }
  // Range check SVE intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  // The generated cases populate ImmChecks; builtins with no immediate
  // operands return through the default.
  switch (BuiltinID) {
  default:
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
  }

  return PerformSVEImmChecks(TheCall, ImmChecks);
}
706 | |
/// Perform NEON (and fp16) builtin semantic checks: streaming-mode
/// compatibility, validation of the overload type-code immediate, pointer
/// argument type checking, and immediate operand range checks. Returns true
/// if an error was diagnosed.
bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  // Streaming-mode checks only apply when the call appears inside a function
  // (or lambda) body.
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    switch (BuiltinID) {
    default:
      break;
#define GET_NEON_STREAMING_COMPAT_FLAG
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_STREAMING_COMPAT_FLAG
    }
    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;
  }

  // The generated cases below set 'mask' (valid type codes for overloaded
  // builtins), 'PtrArgNum'/'HasConstPtr' (pointer operand info); they stay at
  // their defaults for builtins that need neither check.
  llvm::APSInt Result;
  uint64_t mask = 0;
  int TV = -1;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_fp16.inc"
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs() - 1;
  if (mask) {
    if (SemaRef.BuiltinConstantArg(TheCall, ArgNum: ImmArg, Result))
      return true;

    // The type code is the last argument; 'mask' has a bit set for each code
    // this builtin accepts.
    TV = Result.getLimitedValue(Limit: 64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_invalid_neon_type_code)
             << TheCall->getArg(Arg: ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(Arg: PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg);
    QualType RHSTy = RHS.get()->getType();

    // Polynomial element types map to unsigned integer types on AArch64
    // targets and signed ones otherwise (see getNeonEltType).
    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy = getNeonEltType(Flags: NeonTypeFlags(TV), Context&: getASTContext(),
                                    IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = getASTContext().getPointerType(T: EltTy);
    // Diagnose as if assigning the argument to a variable of the expected
    // pointer type.
    AssignConvertType ConvTy;
    ConvTy = SemaRef.CheckSingleAssignmentConstraints(LHSType: LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (SemaRef.DiagnoseAssignmentResult(ConvTy, Loc: Arg->getBeginLoc(), DstType: LHSTy,
                                         SrcType: RHSTy, SrcExpr: RHS.get(),
                                         Action: AssignmentAction::Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  SmallVector<std::tuple<int, int, int, int>, 2> ImmChecks;
  switch (BuiltinID) {
  default:
    return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_fp16.inc"
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  return PerformNeonImmChecks(TheCall, ImmChecks, OverloadType: TV);
}
793 | |
/// Perform semantic checking for a call to an MVE builtin.  The included
/// TableGen-generated file provides one 'case' per MVE builtin that needs
/// custom checking (typically immediate-range validation); builtins without
/// an entry require no further checks.  Returns true on error.
bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_mve_builtin_sema.inc"
  }
}
802 | |
/// Perform semantic checking for a call to a CDE (Custom Datapath Extension)
/// builtin.  The generated include supplies per-builtin checks that set Err;
/// every CDE builtin additionally takes the coprocessor number as its first
/// argument, which must name a coprocessor configured for CDE.  Returns true
/// on error.
bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI,
                                          unsigned BuiltinID,
                                          CallExpr *TheCall) {
  bool Err = false;
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
  }

  if (Err)
    return true;

  // All CDE builtins take the coprocessor number in argument 0.
  return CheckARMCoprocessorImmediate(TI, CoprocArg: TheCall->getArg(Arg: 0), /*WantCDE*/ true);
}
818 | |
/// Check that a constant coprocessor-number argument is consistent with the
/// target's CDE configuration: CDE builtins (WantCDE == true) must name a
/// CDE-enabled coprocessor, while generic coprocessor builtins (WantCDE ==
/// false) must not.  Returns true (after diagnosing) on mismatch.
bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
                                           const Expr *CoprocArg,
                                           bool WantCDE) {
  ASTContext &Context = getASTContext();
  if (SemaRef.isConstantEvaluatedContext())
    return false;

  // We can't check the value of a dependent argument.
  if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
    return false;

  // The argument is already known to be an integer constant expression (it
  // was range-checked by the caller), so the evaluation cannot fail here.
  llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Ctx: Context);
  int64_t CoprocNo = CoprocNoAP.getExtValue();
  assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative" );

  // CDE can only be configured on coprocessors 0-7; the mask has one bit
  // per CDE-enabled coprocessor.
  uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
  bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));

  if (IsCDECoproc != WantCDE)
    return Diag(Loc: CoprocArg->getBeginLoc(), DiagID: diag::err_arm_invalid_coproc)
           << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();

  return false;
}
843 | |
/// Validate a call to one of the exclusive load/store builtins
/// (ldrex/ldaex/strex/stlex) on ARM or AArch64.  These builtins are declared
/// with custom type checking, so this function performs the full semantic
/// analysis: it converts the pointer argument to "[const] volatile T *",
/// copy-initializes the stored value (for strex/stlex) to T, and sets the
/// call's result type to T (loads) or int (stores).  MaxWidth is the widest
/// value the target can handle exclusively (64 for ARM, 128 for AArch64).
/// Returns true on error.
bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID,
                                           CallExpr *TheCall,
                                           unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin" );
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  ASTContext &Context = getASTContext();
  // The callee reference is used only as a diagnostic location anchor.
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(Arg: IsLdrex ? 0 : 1);
  ExprResult PointerArgRes =
      SemaRef.DefaultFunctionArrayLvalueConversion(E: PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << 0 << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(other: ValType, Ctx: getASTContext())) {
    CastNeeded = CK_BitCast;
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(T: AddrType)
        << AssignmentAction::Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(T: AddrType);
  PointerArgRes = SemaRef.ImpCastExprToType(E: PointerArg, Type: AddrType, CK: CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(Arg: IsLdrex ? 0 : 1, ArgExpr: PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << 0 << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(T: ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate" );
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ARC ownership-qualified values cannot be exclusively loaded/stored.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  // A load returns the (unqualified) pointee type.
  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(Arg: 0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, Type: ValType, /*consume*/ Consumed: false);
  ValArg = SemaRef.PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(Arg: 0, ArgExpr: ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}
962 | |
/// Top-level semantic checking for calls to ARM (AArch32) target builtins.
/// Dispatches to the specialized checkers for exclusive load/store, special
/// registers, NEON, MVE and CDE builtins, then range-checks the immediate
/// operands of the remaining builtins.  Returns true on error.
bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI,
                                          unsigned BuiltinID,
                                          CallExpr *TheCall) {
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    // AArch32 exclusives handle at most 64-bit values.
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, MaxWidth: 64);
  }

  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1);
  }

  // 64-bit special-register access: register name must have exactly three
  // fields and may not be a named register.
  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 3, AllowName: false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default:
    return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 32);
  case ARM::BI__builtin_arm_usat:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 15);
  // Coprocessor builtins: the coprocessor number (argument 0) must be a
  // 4-bit immediate and must not name a CDE coprocessor.
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 15) ||
           CheckARMCoprocessorImmediate(TI, CoprocArg: TheCall->getArg(Arg: 0),
                                        /*WantCDE*/ false);
  }
}
1043 | |
/// Top-level semantic checking for calls to AArch64 target builtins.
/// Dispatches to the specialized checkers (exclusive load/store, special
/// registers, MTE, NEON, SVE, SME), then range-checks the immediate operands
/// of the remaining builtins.  Returns true on error.
bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                              unsigned BuiltinID,
                                              CallExpr *TheCall) {
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    // AArch64 exclusives can handle 128-bit (paired) values.
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, MaxWidth: 128);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 4, Low: 0, High: 1);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr128)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return BuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg || BuiltinID == AArch64::BI__sys)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 31);

  if (BuiltinID == AArch64::BI__break)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0xffff);

  if (BuiltinID == AArch64::BI__hlt)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0xffff);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // i = argument index, [l, u + l] = permitted range of the immediate.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__dsb:
  case AArch64::BI__builtin_arm_isb:
  case AArch64::BI__isb:
    l = 0;
    u = 15;
    break;
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: i, Low: l, High: u + l);
}
1127 | |
namespace {
/// One entry in the generated builtin-alias tables: maps a builtin ID to the
/// spelling(s) that may alias it.  FullName and ShortName are byte offsets
/// into a NUL-separated string table (the IntrinNames array passed to
/// BuiltinAliasValid); ShortName is -1 when no short spelling exists.  The
/// tables are sorted by Id so they can be binary-searched.
struct IntrinToName {
  uint32_t Id;        // builtin ID (sort key)
  int32_t FullName;   // string-table offset of the full intrinsic name
  int32_t ShortName;  // string-table offset of the short name, or -1
};
} // unnamed namespace
1135 | |
1136 | static bool BuiltinAliasValid(unsigned BuiltinID, StringRef AliasName, |
1137 | ArrayRef<IntrinToName> Map, |
1138 | const char *IntrinNames) { |
1139 | AliasName.consume_front(Prefix: "__arm_" ); |
1140 | const IntrinToName *It = |
1141 | llvm::lower_bound(Range&: Map, Value&: BuiltinID, C: [](const IntrinToName &L, unsigned Id) { |
1142 | return L.Id < Id; |
1143 | }); |
1144 | if (It == Map.end() || It->Id != BuiltinID) |
1145 | return false; |
1146 | StringRef FullName(&IntrinNames[It->FullName]); |
1147 | if (AliasName == FullName) |
1148 | return true; |
1149 | if (It->ShortName == -1) |
1150 | return false; |
1151 | StringRef ShortName(&IntrinNames[It->ShortName]); |
1152 | return AliasName == ShortName; |
1153 | } |
1154 | |
/// Return true if AliasName is a valid __clang_arm_builtin_alias target for
/// the MVE builtin BuiltinID.
bool SemaARM::MveAliasValid(unsigned BuiltinID, StringRef AliasName) {
#include "clang/Basic/arm_mve_builtin_aliases.inc"
  // The included file defines:
  // - ArrayRef<IntrinToName> Map
  // - const char IntrinNames[]
  return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
}
1162 | |
/// Return true if AliasName is a valid __clang_arm_builtin_alias target for
/// the CDE builtin BuiltinID.  As with MveAliasValid, the included file
/// defines the Map and IntrinNames tables.
bool SemaARM::CdeAliasValid(unsigned BuiltinID, StringRef AliasName) {
#include "clang/Basic/arm_cde_builtin_aliases.inc"
  return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
}
1167 | |
1168 | bool SemaARM::SveAliasValid(unsigned BuiltinID, StringRef AliasName) { |
1169 | if (getASTContext().BuiltinInfo.isAuxBuiltinID(ID: BuiltinID)) |
1170 | BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(ID: BuiltinID); |
1171 | return BuiltinID >= AArch64::FirstSVEBuiltin && |
1172 | BuiltinID <= AArch64::LastSVEBuiltin; |
1173 | } |
1174 | |
1175 | bool SemaARM::SmeAliasValid(unsigned BuiltinID, StringRef AliasName) { |
1176 | if (getASTContext().BuiltinInfo.isAuxBuiltinID(ID: BuiltinID)) |
1177 | BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(ID: BuiltinID); |
1178 | return BuiltinID >= AArch64::FirstSMEBuiltin && |
1179 | BuiltinID <= AArch64::LastSMEBuiltin; |
1180 | } |
1181 | |
/// Process the __clang_arm_builtin_alias attribute: the single identifier
/// argument must name a builtin that the attributed function is allowed to
/// alias (SVE/SME on AArch64, MVE/CDE on AArch32).  On success, attaches an
/// ArmBuiltinAliasAttr to the declaration.
void SemaARM::handleBuiltinAliasAttr(Decl *D, const ParsedAttr &AL) {
  ASTContext &Context = getASTContext();
  if (!AL.isArgIdent(Arg: 0)) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_argument_n_type)
        << AL << 1 << AANT_ArgumentIdentifier;
    return;
  }

  IdentifierInfo *Ident = AL.getArgAsIdent(Arg: 0)->getIdentifierInfo();
  unsigned BuiltinID = Ident->getBuiltinID();
  StringRef AliasName = cast<FunctionDecl>(Val: D)->getIdentifier()->getName();

  // Which alias sets are legal depends on the target architecture.
  bool IsAArch64 = Context.getTargetInfo().getTriple().isAArch64();
  if ((IsAArch64 && !SveAliasValid(BuiltinID, AliasName) &&
       !SmeAliasValid(BuiltinID, AliasName)) ||
      (!IsAArch64 && !MveAliasValid(BuiltinID, AliasName) &&
       !CdeAliasValid(BuiltinID, AliasName))) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_arm_builtin_alias);
    return;
  }

  D->addAttr(A: ::new (Context) ArmBuiltinAliasAttr(Context, AL, Ident));
}
1205 | |
/// Diagnose combining __arm_new("<StateName>") with any conflicting SME state
/// attribute (__arm_in/__arm_out/__arm_inout/__arm_preserves) already present
/// on the function type.  CurrentState is the state recorded in the function
/// prototype for StateName.  Marks AL invalid and returns true on conflict.
static bool checkNewAttrMutualExclusion(
    Sema &S, const ParsedAttr &AL, const FunctionProtoType *FPT,
    FunctionType::ArmStateValue CurrentState, StringRef StateName) {
  auto CheckForIncompatibleAttr =
      [&](FunctionType::ArmStateValue IncompatibleState,
          StringRef IncompatibleStateName) {
        if (CurrentState == IncompatibleState) {
          // Build the two attribute spellings for the diagnostic text.
          S.Diag(Loc: AL.getLoc(), DiagID: diag::err_attributes_are_not_compatible)
              << (std::string("'__arm_new(\"" ) + StateName.str() + "\")'" )
              << (std::string("'" ) + IncompatibleStateName.str() + "(\"" +
                  StateName.str() + "\")'" )
              << true;
          AL.setInvalid();
        }
      };

  CheckForIncompatibleAttr(FunctionType::ARM_In, "__arm_in" );
  CheckForIncompatibleAttr(FunctionType::ARM_Out, "__arm_out" );
  CheckForIncompatibleAttr(FunctionType::ARM_InOut, "__arm_inout" );
  CheckForIncompatibleAttr(FunctionType::ARM_Preserves, "__arm_preserves" );
  return AL.isInvalid();
}
1228 | |
/// Process the __arm_new("state", ...) attribute.  Each argument must be a
/// known SME state name ("za" or "zt0"), and the new state must not conflict
/// with state attributes already on the function's type.  States from a
/// previous ArmNewAttr are merged in, and the declaration's attribute is
/// replaced with the combined set.
void SemaARM::handleNewAttr(Decl *D, const ParsedAttr &AL) {
  if (!AL.getNumArgs()) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_missing_arm_state) << AL;
    AL.setInvalid();
    return;
  }

  // Start from any states declared by an earlier __arm_new on this decl.
  std::vector<StringRef> NewState;
  if (const auto *ExistingAttr = D->getAttr<ArmNewAttr>()) {
    for (StringRef S : ExistingAttr->newArgs())
      NewState.push_back(x: S);
  }

  bool HasZA = false;
  bool HasZT0 = false;
  for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
    StringRef StateName;
    SourceLocation LiteralLoc;
    if (!SemaRef.checkStringLiteralArgumentAttr(Attr: AL, ArgNum: I, Str&: StateName, ArgLocation: &LiteralLoc))
      return;

    if (StateName == "za" )
      HasZA = true;
    else if (StateName == "zt0" )
      HasZT0 = true;
    else {
      Diag(Loc: LiteralLoc, DiagID: diag::err_unknown_arm_state) << StateName;
      AL.setInvalid();
      return;
    }

    if (!llvm::is_contained(Range&: NewState, Element: StateName)) // Avoid adding duplicates.
      NewState.push_back(x: StateName);
  }

  // __arm_new conflicts with __arm_in/out/inout/preserves for the same state.
  if (auto *FPT = dyn_cast<FunctionProtoType>(Val: D->getFunctionType())) {
    FunctionType::ArmStateValue ZAState =
        FunctionType::getArmZAState(AttrBits: FPT->getAArch64SMEAttributes());
    if (HasZA && ZAState != FunctionType::ARM_None &&
        checkNewAttrMutualExclusion(S&: SemaRef, AL, FPT, CurrentState: ZAState, StateName: "za" ))
      return;
    FunctionType::ArmStateValue ZT0State =
        FunctionType::getArmZT0State(AttrBits: FPT->getAArch64SMEAttributes());
    if (HasZT0 && ZT0State != FunctionType::ARM_None &&
        checkNewAttrMutualExclusion(S&: SemaRef, AL, FPT, CurrentState: ZT0State, StateName: "zt0" ))
      return;
  }

  // Replace any previous attribute with one holding the merged state list.
  D->dropAttr<ArmNewAttr>();
  D->addAttr(A: ::new (getASTContext()) ArmNewAttr(
      getASTContext(), AL, NewState.data(), NewState.size()));
}
1281 | |
1282 | void SemaARM::handleCmseNSEntryAttr(Decl *D, const ParsedAttr &AL) { |
1283 | if (getLangOpts().CPlusPlus && !D->getDeclContext()->isExternCContext()) { |
1284 | Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_not_clinkage) << AL; |
1285 | return; |
1286 | } |
1287 | |
1288 | const auto *FD = cast<FunctionDecl>(Val: D); |
1289 | if (!FD->isExternallyVisible()) { |
1290 | Diag(Loc: AL.getLoc(), DiagID: diag::warn_attribute_cmse_entry_static); |
1291 | return; |
1292 | } |
1293 | |
1294 | D->addAttr(A: ::new (getASTContext()) CmseNSEntryAttr(getASTContext(), AL)); |
1295 | } |
1296 | |
/// Process the ARM interrupt attribute.  Takes at most one string argument
/// naming the interrupt type (empty string means the generic handler kind).
/// Warns when the target has a VFP unit but no ARMSaveFPAttr is present,
/// since the handler may then clobber floating-point registers.
void SemaARM::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
  // Check the attribute arguments.
  if (AL.getNumArgs() > 1) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_too_many_arguments) << AL << 1;
    return;
  }

  StringRef Str;
  SourceLocation ArgLoc;

  if (AL.getNumArgs() == 0)
    Str = "" ;
  else if (!SemaRef.checkStringLiteralArgumentAttr(Attr: AL, ArgNum: 0, Str, ArgLocation: &ArgLoc))
    return;

  ARMInterruptAttr::InterruptType Kind;
  if (!ARMInterruptAttr::ConvertStrToInterruptType(Val: Str, Out&: Kind)) {
    Diag(Loc: AL.getLoc(), DiagID: diag::warn_attribute_type_not_supported)
        << AL << Str << ArgLoc;
    return;
  }

  // Without save_fp, an interrupt handler on a VFP-capable target may
  // clobber FP registers of the interrupted code; warn about that.
  if (!D->hasAttr<ARMSaveFPAttr>()) {
    const TargetInfo &TI = getASTContext().getTargetInfo();
    if (TI.hasFeature(Feature: "vfp" ))
      Diag(Loc: D->getLocation(), DiagID: diag::warn_arm_interrupt_vfp_clobber);
  }

  D->addAttr(A: ::new (getASTContext())
                 ARMInterruptAttr(getASTContext(), AL, Kind));
}
1328 | |
/// Process the interrupt("...") save_fp variant: attach ARMSaveFPAttr and
/// delegate the interrupt-kind handling to handleInterruptAttr.  The save_fp
/// attribute is removed again if the interrupt attribute failed or if the
/// target has no VFP unit to save.
void SemaARM::handleInterruptSaveFPAttr(Decl *D, const ParsedAttr &AL) {
  // Go ahead and add ARMSaveFPAttr because handleInterruptAttr() checks for
  // it when deciding to issue a diagnostic about clobbering floating point
  // registers, which ARMSaveFPAttr prevents.
  D->addAttr(A: ::new (SemaRef.Context) ARMSaveFPAttr(SemaRef.Context, AL));
  SemaRef.ARM().handleInterruptAttr(D, AL);

  // If ARM().handleInterruptAttr() failed, remove ARMSaveFPAttr.
  if (!D->hasAttr<ARMInterruptAttr>()) {
    D->dropAttr<ARMSaveFPAttr>();
    return;
  }

  // If VFP not enabled, remove ARMSaveFPAttr but leave ARMInterruptAttr.
  bool VFP = SemaRef.Context.getTargetInfo().hasFeature(Feature: "vfp" );

  if (!VFP) {
    SemaRef.Diag(Loc: D->getLocation(), DiagID: diag::warn_arm_interrupt_save_fp_without_vfp_unit);
    D->dropAttr<ARMSaveFPAttr>();
  }
}
1350 | |
1351 | // Check if the function definition uses any AArch64 SME features without |
1352 | // having the '+sme' feature enabled and warn user if sme locally streaming |
1353 | // function returns or uses arguments with VL-based types. |
1354 | void SemaARM::CheckSMEFunctionDefAttributes(const FunctionDecl *FD) { |
1355 | const auto *Attr = FD->getAttr<ArmNewAttr>(); |
1356 | bool UsesSM = FD->hasAttr<ArmLocallyStreamingAttr>(); |
1357 | bool UsesZA = Attr && Attr->isNewZA(); |
1358 | bool UsesZT0 = Attr && Attr->isNewZT0(); |
1359 | |
1360 | if (UsesZA || UsesZT0) { |
1361 | if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) { |
1362 | FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); |
1363 | if (EPI.AArch64SMEAttributes & FunctionType::SME_AgnosticZAStateMask) |
1364 | Diag(Loc: FD->getLocation(), DiagID: diag::err_sme_unsupported_agnostic_new); |
1365 | } |
1366 | } |
1367 | |
1368 | if (FD->hasAttr<ArmLocallyStreamingAttr>()) { |
1369 | if (FD->getReturnType()->isSizelessVectorType()) |
1370 | Diag(Loc: FD->getLocation(), |
1371 | DiagID: diag::warn_sme_locally_streaming_has_vl_args_returns) |
1372 | << /*IsArg=*/false; |
1373 | if (llvm::any_of(Range: FD->parameters(), P: [](ParmVarDecl *P) { |
1374 | return P->getOriginalType()->isSizelessVectorType(); |
1375 | })) |
1376 | Diag(Loc: FD->getLocation(), |
1377 | DiagID: diag::warn_sme_locally_streaming_has_vl_args_returns) |
1378 | << /*IsArg=*/true; |
1379 | } |
1380 | if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) { |
1381 | FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); |
1382 | UsesSM |= EPI.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask; |
1383 | UsesZA |= FunctionType::getArmZAState(AttrBits: EPI.AArch64SMEAttributes) != |
1384 | FunctionType::ARM_None; |
1385 | UsesZT0 |= FunctionType::getArmZT0State(AttrBits: EPI.AArch64SMEAttributes) != |
1386 | FunctionType::ARM_None; |
1387 | } |
1388 | |
1389 | ASTContext &Context = getASTContext(); |
1390 | if (UsesSM || UsesZA) { |
1391 | llvm::StringMap<bool> FeatureMap; |
1392 | Context.getFunctionFeatureMap(FeatureMap, FD); |
1393 | if (!FeatureMap.contains(Key: "sme" )) { |
1394 | if (UsesSM) |
1395 | Diag(Loc: FD->getLocation(), |
1396 | DiagID: diag::err_sme_definition_using_sm_in_non_sme_target); |
1397 | else |
1398 | Diag(Loc: FD->getLocation(), |
1399 | DiagID: diag::err_sme_definition_using_za_in_non_sme_target); |
1400 | } |
1401 | } |
1402 | if (UsesZT0) { |
1403 | llvm::StringMap<bool> FeatureMap; |
1404 | Context.getFunctionFeatureMap(FeatureMap, FD); |
1405 | if (!FeatureMap.contains(Key: "sme2" )) { |
1406 | Diag(Loc: FD->getLocation(), |
1407 | DiagID: diag::err_sme_definition_using_zt0_in_non_sme2_target); |
1408 | } |
1409 | } |
1410 | } |
1411 | |
/// getSVETypeSize - Return SVE vector or predicate register size.
/// The size is computed from the relevant vscale language option (streaming
/// or non-streaming), with one 128-bit granule per unit of vscale.
/// Predicates (and svcount) carry one bit per data byte, so their size is
/// the vector size divided by the char width.
static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty,
                               bool IsStreaming) {
  assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type" );
  uint64_t VScale = IsStreaming ? Context.getLangOpts().VScaleStreamingMin
                                : Context.getLangOpts().VScaleMin;
  if (Ty->getKind() == BuiltinType::SveBool ||
      Ty->getKind() == BuiltinType::SveCount)
    return (VScale * 128) / Context.getCharWidth();
  return VScale * 128;
}
1423 | |
/// Return true if FirstType and SecondType are compatible SVE types — one a
/// sizeless SVE builtin type and the other a fixed-length (or GNU) vector of
/// matching element type and size.  The check is symmetric in its arguments.
bool SemaARM::areCompatibleSveTypes(QualType FirstType, QualType SecondType) {
  // When streaming and non-streaming vscale settings differ, the effective
  // vector length depends on whether the current function is streaming.
  bool IsStreaming = false;
  if (getLangOpts().VScaleMin != getLangOpts().VScaleStreamingMin ||
      getLangOpts().VScaleMax != getLangOpts().VScaleStreamingMax) {
    if (const FunctionDecl *FD =
            SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
      // For streaming-compatible functions, we don't know vector length.
      if (const auto *T = FD->getType()->getAs<FunctionProtoType>()) {
        if (T->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask)
          return false;
      }

      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
        IsStreaming = true;
    }
  }

  // One-directional check: FirstType is the SVE builtin, SecondType the
  // vector type.  Called below with both orderings.
  auto IsValidCast = [&](QualType FirstType, QualType SecondType) {
    if (const auto *BT = FirstType->getAs<BuiltinType>()) {
      if (const auto *VT = SecondType->getAs<VectorType>()) {
        // Predicates have the same representation as uint8 so we also have to
        // check the kind to make these types incompatible.
        ASTContext &Context = getASTContext();
        if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
          return BT->getKind() == BuiltinType::SveBool;
        else if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
          return VT->getElementType().getCanonicalType() ==
                 FirstType->getSveEltType(Ctx: Context);
        else if (VT->getVectorKind() == VectorKind::Generic)
          return Context.getTypeSize(T: SecondType) ==
                     getSVETypeSize(Context, Ty: BT, IsStreaming) &&
                 Context.hasSameType(
                     T1: VT->getElementType(),
                     T2: Context.getBuiltinVectorTypeInfo(VecTy: BT).ElementType);
      }
    }
    return false;
  };

  return IsValidCast(FirstType, SecondType) ||
         IsValidCast(SecondType, FirstType);
}
1467 | |
/// Return true if FirstType and SecondType may be converted under the lax
/// vector conversion rules (-flax-vector-conversions) — one being a sizeless
/// SVE builtin type, the other a fixed-length or GNU vector of the same
/// size.  The check is symmetric in its arguments.
bool SemaARM::areLaxCompatibleSveTypes(QualType FirstType,
                                       QualType SecondType) {
  // As in areCompatibleSveTypes: the effective vector length depends on
  // whether the current function is streaming when the vscale settings
  // differ between streaming and non-streaming modes.
  bool IsStreaming = false;
  if (getLangOpts().VScaleMin != getLangOpts().VScaleStreamingMin ||
      getLangOpts().VScaleMax != getLangOpts().VScaleStreamingMax) {
    if (const FunctionDecl *FD =
            SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
      // For streaming-compatible functions, we don't know vector length.
      if (const auto *T = FD->getType()->getAs<FunctionProtoType>())
        if (T->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask)
          return false;

      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
        IsStreaming = true;
    }
  }

  // One-directional check: FirstType must be the SVE builtin type.
  auto IsLaxCompatible = [&](QualType FirstType, QualType SecondType) {
    const auto *BT = FirstType->getAs<BuiltinType>();
    if (!BT)
      return false;

    const auto *VecTy = SecondType->getAs<VectorType>();
    if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData ||
                  VecTy->getVectorKind() == VectorKind::Generic)) {
      const LangOptions::LaxVectorConversionKind LVCKind =
          getLangOpts().getLaxVectorConversions();
      ASTContext &Context = getASTContext();

      // Can not convert between sve predicates and sve vectors because of
      // different size.
      if (BT->getKind() == BuiltinType::SveBool &&
          VecTy->getVectorKind() == VectorKind::SveFixedLengthData)
        return false;

      // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
      // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
      // converts to VLAT and VLAT implicitly converts to GNUT."
      // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
      // predicates.
      if (VecTy->getVectorKind() == VectorKind::Generic &&
          Context.getTypeSize(T: SecondType) !=
              getSVETypeSize(Context, Ty: BT, IsStreaming))
        return false;

      // If -flax-vector-conversions=all is specified, the types are
      // certainly compatible.
      if (LVCKind == LangOptions::LaxVectorConversionKind::All)
        return true;

      // If -flax-vector-conversions=integer is specified, the types are
      // compatible if the elements are integer types.
      if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
        return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
               FirstType->getSveEltType(Ctx: Context)->isIntegerType();
    }

    return false;
  };

  return IsLaxCompatible(FirstType, SecondType) ||
         IsLaxCompatible(SecondType, FirstType);
}
1532 | |
1533 | } // namespace clang |
1534 | |