1//===------ SemaARM.cpp ---------- ARM target-specific routines -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements semantic analysis functions specific to ARM.
10//
11//===----------------------------------------------------------------------===//
12
13#include "clang/Sema/SemaARM.h"
14#include "clang/Basic/DiagnosticSema.h"
15#include "clang/Basic/TargetBuiltins.h"
16#include "clang/Basic/TargetInfo.h"
17#include "clang/Sema/Initialization.h"
18#include "clang/Sema/ParsedAttr.h"
19#include "clang/Sema/Sema.h"
20
21namespace clang {
22
/// Construct the ARM-specific semantic-analysis helper; all shared state is
/// reached through the owning Sema held by the SemaBase subobject.
SemaARM::SemaARM(Sema &S) : SemaBase(S) {}
24
/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
/// (AArch64 MTE builtins: irg, addg, gmi, ldg, stg, subp).
///
/// Type-checks the operands, inserts the usual implicit conversions, and
/// derives the call's result type from the pointer operand where appropriate.
/// Returns true after emitting a diagnostic on error, false if the call is
/// well-formed.
bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  ASTContext &Context = getASTContext();

  // irg(pointer, integer) -> pointer of the same type as the first argument.
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
      return true;
    Expr *Arg0 = TheCall->getArg(Arg: 0);
    Expr *Arg1 = TheCall->getArg(Arg: 1);

    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());

    ExprResult SecArg = SemaRef.DefaultLvalueConversion(E: Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);
    return false;
  }

  // addg(pointer, constant in [0,15]) -> pointer of the same type.
  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
      return true;

    Expr *Arg0 = TheCall->getArg(Arg: 0);
    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be an constant in range [0,15]
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
  }

  // gmi(pointer, integer) -> int.
  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
      return true;
    Expr *Arg0 = TheCall->getArg(Arg: 0);
    Expr *Arg1 = TheCall->getArg(Arg: 1);

    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    // NOTE(review): unlike the irg/addg/ldg/stg branches, the converted
    // FirstArg is not written back with setArg here — confirm intentional.

    // Second argument is type-checked on the unconverted expression.
    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  // ldg/stg(pointer); only ldg produces a (pointer-typed) result.
  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 1))
      return true;
    Expr *Arg0 = TheCall->getArg(Arg: 0);
    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  // subp(ptr-or-null, ptr-or-null) -> long long. Each operand may be a null
  // pointer constant, which then adopts the other operand's pointer type.
  // NOTE(review): no explicit checkArgCount here; presumably the builtin's
  // fixed signature guarantees two arguments — confirm.
  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    Expr *ArgA = TheCall->getArg(Arg: 0);
    Expr *ArgB = TheCall->getArg(Arg: 1);

    ExprResult ArgExprA = SemaRef.DefaultFunctionArrayLvalueConversion(E: ArgA);
    ExprResult ArgExprB = SemaRef.DefaultFunctionArrayLvalueConversion(E: ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&](Expr *E) -> bool {
      return E->isNullPointerConstant(Ctx&: Context,
                                      NPC: Expr::NPC_ValueDependentIsNotNull);
    };

    // argument should be either a pointer or null
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_null_or_pointer)
             << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_null_or_pointer)
             << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure Pointee types are compatible
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      // Compare canonical, unqualified pointee types so that qualifiers and
      // sugar do not affect the compatibility check.
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      if (!Context.typesAreCompatible(
              T1: Context.getCanonicalType(T: pointeeA).getUnqualifiedType(),
              T2: Context.getCanonicalType(T: pointeeB).getUnqualifiedType())) {
        return Diag(Loc: TheCall->getBeginLoc(),
                    DiagID: diag::err_typecheck_sub_ptr_compatible)
               << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
               << ArgB->getSourceRange();
      }
    }

    // at least one argument should be pointer type
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_any2arg_pointer)
             << ArgTypeA << ArgTypeB << ArgA->getSourceRange();

    if (isNull(ArgA)) // adopt type of the other pointer
      ArgExprA =
          SemaRef.ImpCastExprToType(E: ArgExprA.get(), Type: ArgTypeB, CK: CK_NullToPointer);

    if (isNull(ArgB))
      ArgExprB =
          SemaRef.ImpCastExprToType(E: ArgExprB.get(), Type: ArgTypeA, CK: CK_NullToPointer);

    TheCall->setArg(Arg: 0, ArgExpr: ArgExprA.get());
    TheCall->setArg(Arg: 1, ArgExpr: ArgExprB.get());
    TheCall->setType(Context.LongLongTy);
    return false;
  }
  // All MTE builtin IDs must be handled by one of the branches above.
  assert(false && "Unhandled ARM MTE intrinsic");
  return true;
}
185
/// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
///
/// The string must either be a bare register name (when \p AllowName is true)
/// or a colon-separated encoding with exactly \p ExpectedFieldNum fields,
/// whose numeric fields are range-checked below. For AArch64 single-name
/// writes, named PSTATE accesses additionally get their immediate operand
/// range-checked. Returns true after diagnosing an error, false otherwise.
bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                   int ArgNum, unsigned ExpectedFieldNum,
                                   bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(Arg: ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Val: Arg->IgnoreParenImpCasts()))
    return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Val: Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(A&: Fields, Separator: ":");

  // Either the exact field count, or a single field (a plain register name)
  // when names are allowed.
  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // 32-bit ARM encodings prefix the first field with "cp"/"p" and the
      // CRn/CRm fields with "c"; strip the prefixes before the numeric check.
      ValidString &= Fields[0].starts_with_insensitive(Prefix: "cp") ||
                     Fields[0].starts_with_insensitive(Prefix: "p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            N: Fields[0].starts_with_insensitive(Prefix: "cp") ? 2 : 1);

      ValidString &= Fields[2].starts_with_insensitive(Prefix: "c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(N: 1);

      if (FiveFields) {
        ValidString &= Fields[3].starts_with_insensitive(Prefix: "c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(N: 1);
      }
    }

    // Bit widths of each field; the first field of a five-field encoding is
    // narrower (2 bits) on AArch64 than on 32-bit ARM (4 bits).
    SmallVector<int, 5> FieldBitWidths;
    if (FiveFields)
      FieldBitWidths.append(IL: {IsAArch64Builtin ? 2 : 4, 3, 4, 4, 3});
    else
      FieldBitWidths.append(IL: {4, 3, 4});

    // Every field must parse as a decimal integer that fits its bit width.
    for (unsigned i = 0; i < Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(Radix: 10, Result&: IntField);
      ValidString &= (IntField >= 0 && IntField < (1 << FieldBitWidths[i]));
    }

    if (!ValidString)
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // This code validates writes to PSTATE registers.

    // Not a write.
    if (TheCall->getNumArgs() != 2)
      return false;

    // The 128-bit system register accesses do not touch PSTATE.
    if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == AArch64::BI__builtin_arm_wsr128)
      return false;

    // These are the named PSTATE accesses using "MSR (immediate)" instructions,
    // along with the upper limit on the immediates allowed.
    auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
                        .CaseLower(S: "spsel", Value: 15)
                        .CaseLower(S: "daifclr", Value: 15)
                        .CaseLower(S: "daifset", Value: 15)
                        .CaseLower(S: "pan", Value: 15)
                        .CaseLower(S: "uao", Value: 15)
                        .CaseLower(S: "dit", Value: 15)
                        .CaseLower(S: "ssbs", Value: 15)
                        .CaseLower(S: "tco", Value: 15)
                        .CaseLower(S: "allint", Value: 1)
                        .CaseLower(S: "pm", Value: 1)
                        .Default(Value: std::nullopt);

    // If this is not a named PSTATE, just continue without validating, as this
    // will be lowered to an "MSR (register)" instruction directly
    if (!MaxLimit)
      return false;

    // Here we only allow constants in the range for that pstate, as required by
    // the ACLE.
    //
    // While clang also accepts the names of system registers in its ACLE
    // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
    // as the value written via a register is different to the value used as an
    // immediate to have the same effect. e.g., for the instruction `msr tco,
    // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
    // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
    //
    // If a programmer wants to codegen the MSR (register) form of `msr tco,
    // xN`, they can still do so by specifying the register using five
    // colon-separated numbers in a string.
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: *MaxLimit);
  }

  return false;
}
317
318/// getNeonEltType - Return the QualType corresponding to the elements of
319/// the vector type specified by the NeonTypeFlags. This is used to check
320/// the pointer arguments for Neon load/store intrinsics.
321static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
322 bool IsPolyUnsigned, bool IsInt64Long) {
323 switch (Flags.getEltType()) {
324 case NeonTypeFlags::Int8:
325 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
326 case NeonTypeFlags::Int16:
327 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
328 case NeonTypeFlags::Int32:
329 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
330 case NeonTypeFlags::Int64:
331 if (IsInt64Long)
332 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
333 else
334 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
335 : Context.LongLongTy;
336 case NeonTypeFlags::Poly8:
337 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
338 case NeonTypeFlags::Poly16:
339 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
340 case NeonTypeFlags::Poly64:
341 if (IsInt64Long)
342 return Context.UnsignedLongTy;
343 else
344 return Context.UnsignedLongLongTy;
345 case NeonTypeFlags::Poly128:
346 break;
347 case NeonTypeFlags::Float16:
348 return Context.HalfTy;
349 case NeonTypeFlags::Float32:
350 return Context.FloatTy;
351 case NeonTypeFlags::Float64:
352 return Context.DoubleTy;
353 case NeonTypeFlags::BFloat16:
354 return Context.BFloat16Ty;
355 case NeonTypeFlags::MFloat8:
356 return Context.MFloat8Ty;
357 }
358 llvm_unreachable("Invalid NeonTypeFlag!");
359}
360
// Bit-flag encoding of the SME architectural state a builtin touches: the
// low two bits describe the ZA array, the next two the ZT0 register. Each
// two-bit pair encodes in (0b01), out (0b10), or in/out (0b11), with a
// matching mask constant to extract the pair.
enum ArmSMEState : unsigned {
  ArmNoState = 0,

  ArmInZA = 0b01,
  ArmOutZA = 0b10,
  ArmInOutZA = 0b11,
  ArmZAMask = 0b11,

  ArmInZT0 = 0b01 << 2,
  ArmOutZT0 = 0b10 << 2,
  ArmInOutZT0 = 0b11 << 2,
  ArmZT0Mask = 0b11 << 2
};
374
/// Validate the immediate operand \p ArgIdx of \p TheCall against the check
/// kind \p CheckTy (an ImmCheckType). \p EltBitWidth and \p ContainerBitWidth
/// parameterize the width-dependent checks (shifts, extracts, lane indices).
/// Returns true after emitting a diagnostic on failure, false otherwise.
bool SemaARM::CheckImmediateArg(CallExpr *TheCall, unsigned CheckTy,
                                unsigned ArgIdx, unsigned EltBitWidth,
                                unsigned ContainerBitWidth) {
  // Function that checks whether the operand (ArgIdx) is an immediate
  // that is one of a given set of values.
  auto CheckImmediateInSet = [&](std::initializer_list<int64_t> Set,
                                 int ErrDiag) -> bool {
    // We can't check the value of a dependent argument.
    Expr *Arg = TheCall->getArg(Arg: ArgIdx);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      return false;

    // Check constant-ness first.
    llvm::APSInt Imm;
    if (SemaRef.BuiltinConstantArg(TheCall, ArgNum: ArgIdx, Result&: Imm))
      return true;

    if (!llvm::is_contained(Set, Element: Imm.getSExtValue()))
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: ErrDiag) << Arg->getSourceRange();
    return false;
  };

  switch ((ImmCheckType)CheckTy) {
  // Fixed numeric ranges.
  case ImmCheckType::ImmCheck0_31:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 31))
      return true;
    break;
  case ImmCheckType::ImmCheck0_13:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 13))
      return true;
    break;
  case ImmCheckType::ImmCheck0_63:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 63))
      return true;
    break;
  case ImmCheckType::ImmCheck1_16:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 16))
      return true;
    break;
  case ImmCheckType::ImmCheck0_7:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 7))
      return true;
    break;
  case ImmCheckType::ImmCheck1_1:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 1))
      return true;
    break;
  case ImmCheckType::ImmCheck1_3:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 3))
      return true;
    break;
  case ImmCheckType::ImmCheck1_7:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 7))
      return true;
    break;
  // Ranges derived from the element/container bit widths.
  case ImmCheckType::ImmCheckExtract:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0,
                                        High: (2048 / EltBitWidth) - 1))
      return true;
    break;
  case ImmCheckType::ImmCheckCvt:
  case ImmCheckType::ImmCheckShiftRight:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: EltBitWidth))
      return true;
    break;
  case ImmCheckType::ImmCheckShiftRightNarrow:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: EltBitWidth / 2))
      return true;
    break;
  case ImmCheckType::ImmCheckShiftLeft:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: EltBitWidth - 1))
      return true;
    break;
  case ImmCheckType::ImmCheckLaneIndex:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0,
                                        High: (ContainerBitWidth / EltBitWidth) - 1))
      return true;
    break;
  case ImmCheckType::ImmCheckLaneIndexCompRotate:
    if (SemaRef.BuiltinConstantArgRange(
            TheCall, ArgNum: ArgIdx, Low: 0, High: (ContainerBitWidth / (2 * EltBitWidth)) - 1))
      return true;
    break;
  case ImmCheckType::ImmCheckLaneIndexDot:
    if (SemaRef.BuiltinConstantArgRange(
            TheCall, ArgNum: ArgIdx, Low: 0, High: (ContainerBitWidth / (4 * EltBitWidth)) - 1))
      return true;
    break;
  // Complex-arithmetic rotation immediates: restricted value sets, not ranges.
  case ImmCheckType::ImmCheckComplexRot90_270:
    if (CheckImmediateInSet({90, 270}, diag::err_rotation_argument_to_cadd))
      return true;
    break;
  case ImmCheckType::ImmCheckComplexRotAll90:
    if (CheckImmediateInSet({0, 90, 180, 270},
                            diag::err_rotation_argument_to_cmla))
      return true;
    break;
  case ImmCheckType::ImmCheck0_1:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 1))
      return true;
    break;
  case ImmCheckType::ImmCheck0_2:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 2))
      return true;
    break;
  case ImmCheckType::ImmCheck0_3:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 3))
      return true;
    break;
  case ImmCheckType::ImmCheck0_0:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 0))
      return true;
    break;
  case ImmCheckType::ImmCheck0_15:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 15))
      return true;
    break;
  case ImmCheckType::ImmCheck0_255:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 255))
      return true;
    break;
  case ImmCheckType::ImmCheck1_32:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 32))
      return true;
    break;
  case ImmCheckType::ImmCheck1_64:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 64))
      return true;
    break;
  // Must be an even value in [2,4], i.e. 2 or 4.
  case ImmCheckType::ImmCheck2_4_Mul2:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 2, High: 4) ||
        SemaRef.BuiltinConstantArgMultiple(TheCall, ArgNum: ArgIdx, Multiple: 2))
      return true;
    break;
  }
  return false;
}
512
513bool SemaARM::PerformNeonImmChecks(
514 CallExpr *TheCall,
515 SmallVectorImpl<std::tuple<int, int, int, int>> &ImmChecks,
516 int OverloadType) {
517 bool HasError = false;
518
519 for (const auto &I : ImmChecks) {
520 auto [ArgIdx, CheckTy, ElementBitWidth, VecBitWidth] = I;
521
522 if (OverloadType >= 0)
523 ElementBitWidth = NeonTypeFlags(OverloadType).getEltSizeInBits();
524
525 HasError |= CheckImmediateArg(TheCall, CheckTy, ArgIdx, EltBitWidth: ElementBitWidth,
526 ContainerBitWidth: VecBitWidth);
527 }
528
529 return HasError;
530}
531
532bool SemaARM::PerformSVEImmChecks(
533 CallExpr *TheCall, SmallVectorImpl<std::tuple<int, int, int>> &ImmChecks) {
534 bool HasError = false;
535
536 for (const auto &I : ImmChecks) {
537 auto [ArgIdx, CheckTy, ElementBitWidth] = I;
538 HasError |=
539 CheckImmediateArg(TheCall, CheckTy, ArgIdx, EltBitWidth: ElementBitWidth, ContainerBitWidth: 128);
540 }
541
542 return HasError;
543}
544
545SemaARM::ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
546 if (FD->hasAttr<ArmLocallyStreamingAttr>())
547 return SemaARM::ArmStreaming;
548 if (const Type *Ty = FD->getType().getTypePtrOrNull()) {
549 if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
550 if (FPT->getAArch64SMEAttributes() &
551 FunctionType::SME_PStateSMEnabledMask)
552 return SemaARM::ArmStreaming;
553 if (FPT->getAArch64SMEAttributes() &
554 FunctionType::SME_PStateSMCompatibleMask)
555 return SemaARM::ArmStreamingCompatible;
556 }
557 }
558 return SemaARM::ArmNonStreaming;
559}
560
/// Diagnose a builtin call whose streaming-mode requirement (\p BuiltinType)
/// conflicts with the streaming mode of the calling function \p FD. Returns
/// true when a conflict was diagnosed, false otherwise.
static bool checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
                                     const FunctionDecl *FD,
                                     SemaARM::ArmStreamingType BuiltinType,
                                     unsigned BuiltinID) {
  SemaARM::ArmStreamingType FnType = getArmStreamingFnType(FD);

  // Check if the intrinsic is available in the right mode, i.e.
  // * When compiling for SME only, the caller must be in streaming mode.
  // * When compiling for SVE only, the caller must be in non-streaming mode.
  // * When compiling for both SVE and SME, the caller can be in either mode.
  if (BuiltinType == SemaARM::VerifyRuntimeMode) {
    llvm::StringMap<bool> CallerFeatures;
    S.Context.getFunctionFeatureMap(FeatureMap&: CallerFeatures, FD);

    // Avoid emitting diagnostics for a function that can never compile.
    if (FnType == SemaARM::ArmStreaming && !CallerFeatures["sme"])
      return false;

    // Locate the top-level '|' in the builtin's required-features string,
    // skipping any '|' nested inside parentheses.
    const auto FindTopLevelPipe = [](const char *S) {
      unsigned Depth = 0;
      unsigned I = 0, E = strlen(s: S);
      for (; I < E; ++I) {
        if (S[I] == '|' && Depth == 0)
          break;
        if (S[I] == '(')
          ++Depth;
        else if (S[I] == ')')
          --Depth;
      }
      return I;
    };

    const char *RequiredFeatures =
        S.Context.BuiltinInfo.getRequiredFeatures(ID: BuiltinID);
    unsigned PipeIdx = FindTopLevelPipe(RequiredFeatures);
    assert(PipeIdx != 0 && PipeIdx != strlen(RequiredFeatures) &&
           "Expected feature string of the form 'SVE-EXPR|SME-EXPR'");
    // Split into the guard for the non-streaming (SVE) form and the guard
    // for the streaming (SME) form.
    StringRef NonStreamingBuiltinGuard = StringRef(RequiredFeatures, PipeIdx);
    StringRef StreamingBuiltinGuard = StringRef(RequiredFeatures + PipeIdx + 1);

    bool SatisfiesSVE = Builtin::evaluateRequiredTargetFeatures(
        RequiredFatures: NonStreamingBuiltinGuard, TargetFetureMap: CallerFeatures);
    bool SatisfiesSME = Builtin::evaluateRequiredTargetFeatures(
        RequiredFatures: StreamingBuiltinGuard, TargetFetureMap: CallerFeatures);

    if (SatisfiesSVE && SatisfiesSME)
      // Function type is irrelevant for streaming-agnostic builtins.
      return false;
    else if (SatisfiesSVE)
      BuiltinType = SemaARM::ArmNonStreaming;
    else if (SatisfiesSME)
      BuiltinType = SemaARM::ArmStreaming;
    else
      // This should be diagnosed by CodeGen
      return false;
  }

  // A non-streaming-only builtin used from a (possibly-)streaming caller, or
  // a streaming-only builtin used from a (possibly-)non-streaming caller.
  if (FnType != SemaARM::ArmNonStreaming &&
      BuiltinType == SemaARM::ArmNonStreaming)
    S.Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "non-streaming";
  else if (FnType != SemaARM::ArmStreaming &&
           BuiltinType == SemaARM::ArmStreaming)
    S.Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "streaming";
  else
    return false;

  return true;
}
631
/// Return the SME state flags (ZA/ZT0 in/out bits) used by builtin
/// \p BuiltinID. The mapping is generated by TableGen; builtins without an
/// entry use no SME state.
static ArmSMEState getSMEState(unsigned BuiltinID) {
  switch (BuiltinID) {
  default:
    return ArmNoState;
#define GET_SME_BUILTIN_GET_STATE
#include "clang/Basic/arm_sme_builtins_za_state.inc"
#undef GET_SME_BUILTIN_GET_STATE
  }
}
641
/// Perform SME-specific semantic checks on a builtin call: diagnose
/// streaming-mode conflicts, warn when the caller lacks the ZA/ZT0 state the
/// builtin uses, and range-check immediate operands. Returns true on error.
bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    // TableGen-generated cases populate BuiltinType for builtins with a
    // streaming-mode requirement.
    switch (BuiltinID) {
#define GET_SME_STREAMING_ATTRS
#include "clang/Basic/arm_sme_streaming_attrs.inc"
#undef GET_SME_STREAMING_ATTRS
    }

    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;

    // Using ZA/ZT0 without the corresponding state is only a warning.
    if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
      Diag(Loc: TheCall->getBeginLoc(),
           DiagID: diag::warn_attribute_arm_za_builtin_no_za_state)
          << TheCall->getSourceRange();

    if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
      Diag(Loc: TheCall->getBeginLoc(),
           DiagID: diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
          << TheCall->getSourceRange();
  }

  // Range check SME intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SME_IMMEDIATE_CHECK
#include "clang/Basic/arm_sme_sema_rangechecks.inc"
#undef GET_SME_IMMEDIATE_CHECK
  }

  return PerformSVEImmChecks(TheCall, ImmChecks);
}
682
/// Perform SVE-specific semantic checks on a builtin call: diagnose
/// streaming-mode conflicts and range-check immediate operands. Returns true
/// on error.
bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    // TableGen-generated cases populate BuiltinType for builtins with a
    // streaming-mode requirement.
    switch (BuiltinID) {
#define GET_SVE_STREAMING_ATTRS
#include "clang/Basic/arm_sve_streaming_attrs.inc"
#undef GET_SVE_STREAMING_ATTRS
    }
    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;
  }
  // Range check SVE intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
  }

  return PerformSVEImmChecks(TheCall, ImmChecks);
}
711
/// Perform NEON-specific semantic checks on a builtin call: diagnose
/// streaming-mode conflicts, validate the overload type-code immediate,
/// type-check pointer operands against the element type the type code
/// implies, and range-check immediate operands. Returns true on error.
bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    switch (BuiltinID) {
    default:
      break;
#define GET_NEON_STREAMING_COMPAT_FLAG
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_STREAMING_COMPAT_FLAG
    }
    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;
  }

  // Per-builtin overload data filled in by the generated cases below:
  // 'mask' holds the set of valid type codes, PtrArgNum the index of a
  // pointer operand to type-check (if any), HasConstPtr whether that pointer
  // is to const.
  llvm::APSInt Result;
  uint64_t mask = 0;
  int TV = -1;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_fp16.inc"
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  if (mask) {
    // The type code is always the last argument.
    unsigned ImmArg = TheCall->getNumArgs() - 1;
    if (SemaRef.BuiltinConstantArg(TheCall, ArgNum: ImmArg, Result))
      return true;

    // FIXME: This is effectively dead code. Change the logic above so that the
    // following check is actually run.
    TV = Result.getLimitedValue(Limit: 64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_invalid_neon_type_code)
             << TheCall->getArg(Arg: ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(Arg: PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    // Build the expected pointer type from the element type the type code
    // implies, then check assignment compatibility as if assigning to it.
    QualType EltTy = getNeonEltType(Flags: NeonTypeFlags(TV), Context&: getASTContext(),
                                    IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = getASTContext().getPointerType(T: EltTy);
    AssignConvertType ConvTy;
    ConvTy = SemaRef.CheckSingleAssignmentConstraints(LHSType: LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (SemaRef.DiagnoseAssignmentResult(ConvTy, Loc: Arg->getBeginLoc(), DstType: LHSTy,
                                         SrcType: RHSTy, SrcExpr: RHS.get(),
                                         Action: AssignmentAction::Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  SmallVector<std::tuple<int, int, int, int>, 2> ImmChecks;
  switch (BuiltinID) {
  default:
    return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_fp16.inc"
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  return PerformNeonImmChecks(TheCall, ImmChecks, OverloadType: TV);
}
800
/// Perform MVE-specific semantic checks on a builtin call; the per-builtin
/// checks are TableGen-generated. Returns true on error, false for builtins
/// without checks.
bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_mve_builtin_sema.inc"
  }
}
809
/// Perform CDE-specific semantic checks on a builtin call: run the
/// TableGen-generated per-builtin checks, then verify that the coprocessor
/// number (argument 0) names a CDE coprocessor. Returns true on error.
bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI,
                                          unsigned BuiltinID,
                                          CallExpr *TheCall) {
  bool Err = false;
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
  }

  if (Err)
    return true;

  return CheckARMCoprocessorImmediate(TI, CoprocArg: TheCall->getArg(Arg: 0), /*WantCDE*/ true);
}
825
826bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
827 const Expr *CoprocArg,
828 bool WantCDE) {
829 ASTContext &Context = getASTContext();
830 if (SemaRef.isConstantEvaluatedContext())
831 return false;
832
833 // We can't check the value of a dependent argument.
834 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
835 return false;
836
837 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Ctx: Context);
838 int64_t CoprocNo = CoprocNoAP.getExtValue();
839 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
840
841 uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
842 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
843
844 if (IsCDECoproc != WantCDE)
845 return Diag(Loc: CoprocArg->getBeginLoc(), DiagID: diag::err_arm_invalid_coproc)
846 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
847
848 return false;
849}
850
/// Perform semantic checking for the load/store-exclusive builtins
/// (ldrex/ldaex/strex/stlex and the ARM doubleword variants). Validates the
/// pointer argument, inserts the casts the AST needs, checks that the access
/// size is supported by the target, and sets the call's result type.
/// Returns true on error (diagnostic already emitted).
bool SemaARM::CheckARMBuiltinExclusiveCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldrexd ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_strexd ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  // Loads take one argument (the pointer); stores take two (value, pointer).
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldrexd ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;
  bool IsDoubleWord = BuiltinID == ARM::BI__builtin_arm_ldrexd ||
                      BuiltinID == ARM::BI__builtin_arm_strexd;

  ASTContext &Context = getASTContext();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(Arg: IsLdrex ? 0 : 1);
  ExprResult PointerArgRes =
      SemaRef.DefaultFunctionArrayLvalueConversion(E: PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << 0 << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(other: ValType, Ctx: getASTContext())) {
    CastNeeded = CK_BitCast;
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(T: AddrType)
        << AssignmentAction::Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(T: AddrType);
  PointerArgRes = SemaRef.ImpCastExprToType(E: PointerArg, Type: AddrType, CK: CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(Arg: IsLdrex ? 0 : 1, ArgExpr: PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << 0 << PointerArg->getSourceRange();
    return true;
  }

  // Check whether the size of the type can be handled atomically on this
  // target. AArch64 supports all power-of-two sizes, so only 32-bit ARM
  // needs the mask check below.
  if (!TI.getTriple().isAArch64()) {
    unsigned Mask = TI.getARMLDREXMask();
    unsigned Bits = Context.getTypeSize(T: ValType);
    if (IsDoubleWord) {
      // Explicit request for ldrexd/strexd means only double word sizes
      // supported if the target supports them.
      Mask &= TargetInfo::ARM_LDREX_D;
    }
    // Mask holds one bit per supported byte-size (1, 2, 4, 8).
    bool Supported =
        (llvm::isPowerOf2_64(Value: Bits)) && Bits >= 8 && (Mask & (Bits / 8));

    if (!Supported) {
      // Emit a diagnostic saying that this size isn't available. If _no_ size
      // of exclusive access is supported on this target, we emit a diagnostic
      // with special wording for that case, but otherwise, we emit
      // err_atomic_exclusive_builtin_pointer_size and loop over `Mask` to
      // control what subset of sizes it lists as legal.
      if (Mask) {
        auto D = Diag(Loc: DRE->getBeginLoc(),
                      DiagID: diag::err_atomic_exclusive_builtin_pointer_size)
                 << PointerArg->getType();
        bool Started = false;
        for (unsigned Size = 1; Size <= 8; Size <<= 1) {
          // For each of the sizes 1,2,4,8, pass two integers into the
          // diagnostic. The first selects a separator from the previous
          // number: 0 for no separator at all, 1 for a comma, 2 for " or "
          // which appears before the final number in a list of more than one.
          // The second integer just indicates whether we print this size in
          // the message at all.
          if (!(Mask & Size)) {
            // This size isn't one of the supported ones, so emit no separator
            // text and don't print the size itself.
            D << 0 << 0;
          } else {
            // This size is supported, so print it, and an appropriate
            // separator. Clearing the bit first lets `Mask` tell us whether
            // any supported size remains after this one.
            Mask &= ~Size;
            if (!Started)
              D << 0; // No separator if this is the first size we've printed
            else if (Mask)
              D << 1; // "," if there's still another size to come
            else
              D << 2; // " or " if the size we're about to print is the last
            D << 1; // print the size itself
            Started = true;
          }
        }
      } else {
        // NOTE(review): inside this branch Mask == 0, so the `!Mask` term is
        // always true; the condition reduces to IsDoubleWord plus a non-zero
        // base LDREX mask.
        bool EmitDoubleWordDiagnostic =
            IsDoubleWord && !Mask && TI.getARMLDREXMask();
        Diag(Loc: DRE->getBeginLoc(),
             DiagID: diag::err_atomic_exclusive_builtin_pointer_size_none)
            << (EmitDoubleWordDiagnostic ? 1 : 0)
            << PointerArg->getSourceRange();
      }
    }
  }

  // ARC-managed pointees cannot be accessed with raw exclusive operations.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  // ldrex returns the loaded value, i.e. the pointee type.
  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(Arg: 0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, Type: ValType, /*consume*/ Consumed: false);
  ValArg = SemaRef.PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(Arg: 0, ArgExpr: ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}
1026
/// Top-level semantic check for 32-bit ARM builtin calls: dispatches the
/// exclusive-access, special-register, NEON/MVE/CDE builtins to their
/// dedicated checkers, then range-checks immediate operands for the rest.
/// Returns true on error (diagnostic already emitted).
bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI,
                                          unsigned BuiltinID,
                                          CallExpr *TheCall) {
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldrexd ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_strexd ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(TI, BuiltinID, TheCall);
  }

  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1);
  }

  // 64-bit system registers are written as three colon-separated fields.
  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 3, AllowName: false);

  // 32-bit variants take five fields and also accept named registers.
  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  // Each checker returns false quickly for builtins outside its family, so
  // chaining them is safe.
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default:
    return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 32);
  case ARM::BI__builtin_arm_usat:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 15);
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    // Generic coprocessor intrinsics: the coprocessor number must be in
    // range AND must not name a coprocessor reserved for CDE.
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 15) ||
           CheckARMCoprocessorImmediate(TI, CoprocArg: TheCall->getArg(Arg: 0),
                                        /*WantCDE*/ false);
  }
}
1109
/// Semantic checking for __builtin_arm_atomic_store_with_stshh(ptr, value,
/// order, policy): validates the pointer/value pair, the memory order, and
/// the retention-policy immediate. Returns true on error (diagnostic
/// already emitted).
static bool CheckAArch64AtomicStoreWithStshhCall(SemaARM &S,
                                                 CallExpr *TheCall) {
  Sema &SemaRef = S.SemaRef;
  ASTContext &Context = S.getASTContext();
  // Ensure we have the proper number of arguments.
  if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 4))
    return true;

  // Normalize arg0/arg1 into value form, and check valid
  ExprResult PtrRes =
      SemaRef.DefaultFunctionArrayLvalueConversion(E: TheCall->getArg(Arg: 0));
  ExprResult ValRes =
      SemaRef.DefaultFunctionArrayLvalueConversion(E: TheCall->getArg(Arg: 1));

  if (PtrRes.isInvalid() || ValRes.isInvalid())
    return true;

  Expr *OrderArg = TheCall->getArg(Arg: 2);
  // Write back the converted arguments before any early return so the AST
  // stays consistent.
  TheCall->setArg(Arg: 0, ArgExpr: PtrRes.get());
  TheCall->setArg(Arg: 1, ArgExpr: ValRes.get());

  // Defer validation for dependent memory_order arguments.
  if (OrderArg->isValueDependent())
    return false;

  Expr *PointerArg = PtrRes.get();
  QualType PtrType = PointerArg->getType();

  // Check arg 0 is a pointer type, err out if not
  const PointerType *PointerTy = PtrType->getAs<PointerType>();
  if (!PointerTy) {
    SemaRef.Diag(Loc: PointerArg->getBeginLoc(),
                 DiagID: diag::err_atomic_builtin_must_be_pointer)
        << PtrType << 0 << PointerArg->getSourceRange();
    return true;
  }

  // Reject const-qualified pointee types
  QualType ValType = PointerTy->getPointeeType();
  if (ValType.isConstQualified()) {
    SemaRef.Diag(Loc: PointerArg->getBeginLoc(),
                 DiagID: diag::err_atomic_builtin_cannot_be_const)
        << PtrType << PointerArg->getSourceRange();
    return true;
  }

  // The pointee must be an integer of a natural power-of-two width.
  ValType = ValType.getUnqualifiedType();
  unsigned Bits = ValType->isIntegerType() ? Context.getTypeSize(T: ValType) : 0;
  if (Bits != 8 && Bits != 16 && Bits != 32 && Bits != 64) {
    SemaRef.Diag(Loc: PointerArg->getBeginLoc(),
                 DiagID: diag::err_arm_atomic_store_with_stshh_bad_type)
        << PtrType << PointerArg->getSourceRange();
    return true;
  }

  Expr *ValArg = TheCall->getArg(Arg: 1);
  QualType ValArgType = ValArg->getType().getUnqualifiedType();

  // Check value type and width: the stored value must exactly match the
  // pointee type (no implicit integer conversions).
  if (!Context.hasSameType(T1: ValArgType, T2: ValType)) {
    SemaRef.Diag(Loc: ValArg->getBeginLoc(),
                 DiagID: diag::err_arm_atomic_store_with_stshh_bad_value_type)
        << ValType << ValArg->getType() << ValArg->getSourceRange();
    return true;
  }

  // Require an order value.
  std::optional<llvm::APSInt> OrderValOpt =
      OrderArg->getIntegerConstantExpr(Ctx: Context);
  if (!OrderValOpt) {
    SemaRef.Diag(Loc: OrderArg->getBeginLoc(),
                 DiagID: diag::err_arm_atomic_store_with_stshh_bad_order)
        << OrderArg->getSourceRange();
    return true;
  }

  // __ATOMIC_RELAXED=0, __ATOMIC_RELEASE=3, __ATOMIC_SEQ_CST=5.
  // Only store-compatible orders are meaningful here.
  int64_t Order = OrderValOpt->getSExtValue();
  if (Order != 0 && Order != 3 && Order != 5) {
    SemaRef.Diag(Loc: OrderArg->getBeginLoc(),
                 DiagID: diag::err_arm_atomic_store_with_stshh_bad_order)
        << OrderArg->getSourceRange();
    return true;
  }

  // Value type already matches ValType above; apply a no-op cast for
  // consistency with other builtin argument rewriting paths.
  ExprResult ValArgRes = SemaRef.ImpCastExprToType(E: ValArg, Type: ValType, CK: CK_NoOp);
  if (ValArgRes.isInvalid())
    return true;

  TheCall->setArg(Arg: 1, ArgExpr: ValArgRes.get());

  // Arg 3 (retention policy) must be between KEEP(0) and STRM(1).
  return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 1);
}
1206
/// Top-level semantic check for AArch64 builtin calls: dispatches exclusive
/// access, MTE, special-register, prefetch, NEON/SVE/SME builtins to their
/// dedicated checkers, then range-checks the remaining immediate operands.
/// Returns true on error (diagnostic already emitted).
bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                              unsigned BuiltinID,
                                              CallExpr *TheCall) {
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(TI, BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_atomic_store_with_stshh)
    return CheckAArch64AtomicStoreWithStshhCall(S&: *this, TheCall);

  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 4, Low: 0, High: 1);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_range_prefetch_x) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: -2097152, High: 2097151) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 4, Low: 1, High: 65536) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 5, Low: -2097152, High: 2097151);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_range_prefetch) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1);
  }

  // Wide system-register accessors: five colon-separated fields, named
  // registers allowed.
  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr128)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return BuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg || BuiltinID == AArch64::BI__sys)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 31);

  if (BuiltinID == AArch64::BI__break)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0xffff);

  if (BuiltinID == AArch64::BI__hlt)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0xffff);

  // Each family checker returns false quickly when the ID is not its own.
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__dsb:
  case AArch64::BI__builtin_arm_isb:
  case AArch64::BI__isb:
    l = 0;
    u = 15;
    break;
  }

  // `u` is a width relative to `l`, hence the `u + l` upper bound.
  return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: i, Low: l, High: u + l);
}
1305
namespace {
// One entry of a table mapping a builtin ID to the spellings that may alias
// it; tables are sorted by Id (see BuiltinAliasValid below).
struct IntrinToName {
  uint32_t Id;      // builtin ID this entry describes
  int32_t FullName; // offset of the full name into an IntrinNames string table
  int32_t ShortName; // offset of the short name, or -1 if there is none
};
} // unnamed namespace
1313
1314static bool BuiltinAliasValid(unsigned BuiltinID, StringRef AliasName,
1315 ArrayRef<IntrinToName> Map,
1316 const char *IntrinNames) {
1317 AliasName.consume_front(Prefix: "__arm_");
1318 const IntrinToName *It =
1319 llvm::lower_bound(Range&: Map, Value&: BuiltinID, C: [](const IntrinToName &L, unsigned Id) {
1320 return L.Id < Id;
1321 });
1322 if (It == Map.end() || It->Id != BuiltinID)
1323 return false;
1324 StringRef FullName(&IntrinNames[It->FullName]);
1325 if (AliasName == FullName)
1326 return true;
1327 if (It->ShortName == -1)
1328 return false;
1329 StringRef ShortName(&IntrinNames[It->ShortName]);
1330 return AliasName == ShortName;
1331}
1332
/// Return true if \p AliasName may alias the MVE builtin \p BuiltinID.
bool SemaARM::MveAliasValid(unsigned BuiltinID, StringRef AliasName) {
#include "clang/Basic/arm_mve_builtin_aliases.inc"
  // The included file defines:
  // - ArrayRef<IntrinToName> Map
  // - const char IntrinNames[]
  return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
}
1340
/// Return true if \p AliasName may alias the CDE builtin \p BuiltinID.
/// The included file defines the Map / IntrinNames tables used below.
bool SemaARM::CdeAliasValid(unsigned BuiltinID, StringRef AliasName) {
#include "clang/Basic/arm_cde_builtin_aliases.inc"
  return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
}
1345
1346bool SemaARM::SveAliasValid(unsigned BuiltinID, StringRef AliasName) {
1347 if (getASTContext().BuiltinInfo.isAuxBuiltinID(ID: BuiltinID))
1348 BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(ID: BuiltinID);
1349 return BuiltinID >= AArch64::FirstSVEBuiltin &&
1350 BuiltinID <= AArch64::LastSVEBuiltin;
1351}
1352
1353bool SemaARM::SmeAliasValid(unsigned BuiltinID, StringRef AliasName) {
1354 if (getASTContext().BuiltinInfo.isAuxBuiltinID(ID: BuiltinID))
1355 BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(ID: BuiltinID);
1356 return BuiltinID >= AArch64::FirstSMEBuiltin &&
1357 BuiltinID <= AArch64::LastSMEBuiltin;
1358}
1359
/// Process the __attribute__((arm_builtin_alias(...))) attribute: validate
/// that the named builtin may be aliased by this declaration and attach an
/// ArmBuiltinAliasAttr on success.
void SemaARM::handleBuiltinAliasAttr(Decl *D, const ParsedAttr &AL) {
  ASTContext &Context = getASTContext();
  // The attribute's single argument must be an identifier naming a builtin.
  if (!AL.isArgIdent(Arg: 0)) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_argument_n_type)
        << AL << 1 << AANT_ArgumentIdentifier;
    return;
  }

  IdentifierInfo *Ident = AL.getArgAsIdent(Arg: 0)->getIdentifierInfo();
  unsigned BuiltinID = Ident->getBuiltinID();
  StringRef AliasName = cast<FunctionDecl>(Val: D)->getIdentifier()->getName();

  // AArch64 allows aliasing SVE/SME builtins; 32-bit ARM allows MVE/CDE.
  bool IsAArch64 = Context.getTargetInfo().getTriple().isAArch64();
  if ((IsAArch64 && !SveAliasValid(BuiltinID, AliasName) &&
       !SmeAliasValid(BuiltinID, AliasName)) ||
      (!IsAArch64 && !MveAliasValid(BuiltinID, AliasName) &&
       !CdeAliasValid(BuiltinID, AliasName))) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_arm_builtin_alias);
    return;
  }

  D->addAttr(A: ::new (Context) ArmBuiltinAliasAttr(Context, AL, Ident));
}
1383
/// Diagnose __arm_new("state") conflicting with an existing __arm_in/out/
/// inout/preserves("state") on the function type. Marks \p AL invalid and
/// returns true when a conflict was found.
static bool checkNewAttrMutualExclusion(
    Sema &S, const ParsedAttr &AL, const FunctionProtoType *FPT,
    FunctionType::ArmStateValue CurrentState, StringRef StateName) {
  // Emit the incompatibility diagnostic if the function type already carries
  // the given state attribute for this state name.
  auto CheckForIncompatibleAttr =
      [&](FunctionType::ArmStateValue IncompatibleState,
          StringRef IncompatibleStateName) {
        if (CurrentState == IncompatibleState) {
          S.Diag(Loc: AL.getLoc(), DiagID: diag::err_attributes_are_not_compatible)
              << (std::string("'__arm_new(\"") + StateName.str() + "\")'")
              << (std::string("'") + IncompatibleStateName.str() + "(\"" +
                  StateName.str() + "\")'")
              << true;
          AL.setInvalid();
        }
      };

  // __arm_new is mutually exclusive with each of the shared-state attributes.
  CheckForIncompatibleAttr(FunctionType::ARM_In, "__arm_in");
  CheckForIncompatibleAttr(FunctionType::ARM_Out, "__arm_out");
  CheckForIncompatibleAttr(FunctionType::ARM_InOut, "__arm_inout");
  CheckForIncompatibleAttr(FunctionType::ARM_Preserves, "__arm_preserves");
  return AL.isInvalid();
}
1406
/// Process the __arm_new("za"/"zt0", ...) attribute: validate the state
/// names, check for conflicts with shared-state attributes on the function
/// type, merge with any existing ArmNewAttr, and attach the result.
void SemaARM::handleNewAttr(Decl *D, const ParsedAttr &AL) {
  // At least one state name is required.
  if (!AL.getNumArgs()) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_missing_arm_state) << AL;
    AL.setInvalid();
    return;
  }

  // Start from the states of any previously attached __arm_new so repeated
  // attributes accumulate rather than replace.
  std::vector<StringRef> NewState;
  if (const auto *ExistingAttr = D->getAttr<ArmNewAttr>()) {
    for (StringRef S : ExistingAttr->newArgs())
      NewState.push_back(x: S);
  }

  bool HasZA = false;
  bool HasZT0 = false;
  for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
    StringRef StateName;
    SourceLocation LiteralLoc;
    if (!SemaRef.checkStringLiteralArgumentAttr(Attr: AL, ArgNum: I, Str&: StateName, ArgLocation: &LiteralLoc))
      return;

    // Only "za" and "zt0" are recognized SME state names.
    if (StateName == "za")
      HasZA = true;
    else if (StateName == "zt0")
      HasZT0 = true;
    else {
      Diag(Loc: LiteralLoc, DiagID: diag::err_unknown_arm_state) << StateName;
      AL.setInvalid();
      return;
    }

    if (!llvm::is_contained(Range&: NewState, Element: StateName)) // Avoid adding duplicates.
      NewState.push_back(x: StateName);
  }

  // __arm_new("X") conflicts with __arm_in/out/inout/preserves("X") already
  // present on the function's type.
  if (auto *FPT = dyn_cast<FunctionProtoType>(Val: D->getFunctionType())) {
    FunctionType::ArmStateValue ZAState =
        FunctionType::getArmZAState(AttrBits: FPT->getAArch64SMEAttributes());
    if (HasZA && ZAState != FunctionType::ARM_None &&
        checkNewAttrMutualExclusion(S&: SemaRef, AL, FPT, CurrentState: ZAState, StateName: "za"))
      return;
    FunctionType::ArmStateValue ZT0State =
        FunctionType::getArmZT0State(AttrBits: FPT->getAArch64SMEAttributes());
    if (HasZT0 && ZT0State != FunctionType::ARM_None &&
        checkNewAttrMutualExclusion(S&: SemaRef, AL, FPT, CurrentState: ZT0State, StateName: "zt0"))
      return;
  }

  // Replace any previous attribute with one carrying the merged state list.
  D->dropAttr<ArmNewAttr>();
  D->addAttr(A: ::new (getASTContext()) ArmNewAttr(
      getASTContext(), AL, NewState.data(), NewState.size()));
}
1459
1460void SemaARM::handleCmseNSEntryAttr(Decl *D, const ParsedAttr &AL) {
1461 if (getLangOpts().CPlusPlus && !D->getDeclContext()->isExternCContext()) {
1462 Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_not_clinkage) << AL;
1463 return;
1464 }
1465
1466 const auto *FD = cast<FunctionDecl>(Val: D);
1467 if (!FD->isExternallyVisible()) {
1468 Diag(Loc: AL.getLoc(), DiagID: diag::warn_attribute_cmse_entry_static);
1469 return;
1470 }
1471
1472 D->addAttr(A: ::new (getASTContext()) CmseNSEntryAttr(getASTContext(), AL));
1473}
1474
/// Process the ARM interrupt attribute: validate the optional interrupt-type
/// string, warn about VFP register clobbering when no save_fp attribute is
/// present, and attach ARMInterruptAttr.
void SemaARM::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
  // Check the attribute arguments.
  if (AL.getNumArgs() > 1) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_too_many_arguments) << AL << 1;
    return;
  }

  StringRef Str;
  SourceLocation ArgLoc;

  // An absent argument means the generic interrupt type.
  if (AL.getNumArgs() == 0)
    Str = "";
  else if (!SemaRef.checkStringLiteralArgumentAttr(Attr: AL, ArgNum: 0, Str, ArgLocation: &ArgLoc))
    return;

  ARMInterruptAttr::InterruptType Kind;
  if (!ARMInterruptAttr::ConvertStrToInterruptType(Val: Str, Out&: Kind)) {
    Diag(Loc: AL.getLoc(), DiagID: diag::warn_attribute_type_not_supported)
        << AL << Str << ArgLoc;
    return;
  }

  // Without save_fp, an interrupt handler on a VFP-enabled target may clobber
  // floating-point registers of the interrupted code; warn about that.
  if (!D->hasAttr<ARMSaveFPAttr>()) {
    const TargetInfo &TI = getASTContext().getTargetInfo();
    if (TI.hasFeature(Feature: "vfp"))
      Diag(Loc: D->getLocation(), DiagID: diag::warn_arm_interrupt_vfp_clobber);
  }

  D->addAttr(A: ::new (getASTContext())
                 ARMInterruptAttr(getASTContext(), AL, Kind));
}
1506
/// Process the interrupt_save_fp attribute: attach ARMSaveFPAttr first (so
/// handleInterruptAttr sees it), delegate interrupt handling, and roll the
/// save-fp attribute back if the interrupt attribute failed or VFP is absent.
void SemaARM::handleInterruptSaveFPAttr(Decl *D, const ParsedAttr &AL) {
  // Go ahead and add ARMSaveFPAttr because handleInterruptAttr() checks for
  // it when deciding to issue a diagnostic about clobbering floating point
  // registers, which ARMSaveFPAttr prevents.
  D->addAttr(A: ::new (SemaRef.Context) ARMSaveFPAttr(SemaRef.Context, AL));
  SemaRef.ARM().handleInterruptAttr(D, AL);

  // If ARM().handleInterruptAttr() failed, remove ARMSaveFPAttr.
  if (!D->hasAttr<ARMInterruptAttr>()) {
    D->dropAttr<ARMSaveFPAttr>();
    return;
  }

  // If VFP not enabled, remove ARMSaveFPAttr but leave ARMInterruptAttr.
  bool VFP = SemaRef.Context.getTargetInfo().hasFeature(Feature: "vfp");

  if (!VFP) {
    SemaRef.Diag(Loc: D->getLocation(), DiagID: diag::warn_arm_interrupt_save_fp_without_vfp_unit);
    D->dropAttr<ARMSaveFPAttr>();
  }
}
1528
1529// Check if the function definition uses any AArch64 SME features without
1530// having the '+sme' feature enabled and warn user if sme locally streaming
1531// function returns or uses arguments with VL-based types.
1532void SemaARM::CheckSMEFunctionDefAttributes(const FunctionDecl *FD) {
1533 const auto *Attr = FD->getAttr<ArmNewAttr>();
1534 bool UsesSM = FD->hasAttr<ArmLocallyStreamingAttr>();
1535 bool UsesZA = Attr && Attr->isNewZA();
1536 bool UsesZT0 = Attr && Attr->isNewZT0();
1537
1538 if (UsesZA || UsesZT0) {
1539 if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) {
1540 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
1541 if (EPI.AArch64SMEAttributes & FunctionType::SME_AgnosticZAStateMask)
1542 Diag(Loc: FD->getLocation(), DiagID: diag::err_sme_unsupported_agnostic_new);
1543 }
1544 }
1545
1546 if (FD->hasAttr<ArmLocallyStreamingAttr>()) {
1547 if (FD->getReturnType()->isSizelessVectorType())
1548 Diag(Loc: FD->getLocation(),
1549 DiagID: diag::warn_sme_locally_streaming_has_vl_args_returns)
1550 << /*IsArg=*/false;
1551 if (llvm::any_of(Range: FD->parameters(), P: [](ParmVarDecl *P) {
1552 return P->getOriginalType()->isSizelessVectorType();
1553 }))
1554 Diag(Loc: FD->getLocation(),
1555 DiagID: diag::warn_sme_locally_streaming_has_vl_args_returns)
1556 << /*IsArg=*/true;
1557 }
1558 if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) {
1559 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
1560 UsesSM |= EPI.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask;
1561 UsesZA |= FunctionType::getArmZAState(AttrBits: EPI.AArch64SMEAttributes) !=
1562 FunctionType::ARM_None;
1563 UsesZT0 |= FunctionType::getArmZT0State(AttrBits: EPI.AArch64SMEAttributes) !=
1564 FunctionType::ARM_None;
1565 }
1566
1567 ASTContext &Context = getASTContext();
1568 if (UsesSM || UsesZA) {
1569 llvm::StringMap<bool> FeatureMap;
1570 Context.getFunctionFeatureMap(FeatureMap, FD);
1571 if (!FeatureMap.contains(Key: "sme")) {
1572 if (UsesSM)
1573 Diag(Loc: FD->getLocation(),
1574 DiagID: diag::err_sme_definition_using_sm_in_non_sme_target);
1575 else
1576 Diag(Loc: FD->getLocation(),
1577 DiagID: diag::err_sme_definition_using_za_in_non_sme_target);
1578 }
1579 }
1580 if (UsesZT0) {
1581 llvm::StringMap<bool> FeatureMap;
1582 Context.getFunctionFeatureMap(FeatureMap, FD);
1583 if (!FeatureMap.contains(Key: "sme2")) {
1584 Diag(Loc: FD->getLocation(),
1585 DiagID: diag::err_sme_definition_using_zt0_in_non_sme2_target);
1586 }
1587 }
1588}
1589
1590/// getSVETypeSize - Return SVE vector or predicate register size.
1591static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty,
1592 bool IsStreaming) {
1593 assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type");
1594 uint64_t VScale = IsStreaming ? Context.getLangOpts().VScaleStreamingMin
1595 : Context.getLangOpts().VScaleMin;
1596 if (Ty->getKind() == BuiltinType::SveBool ||
1597 Ty->getKind() == BuiltinType::SveCount)
1598 return (VScale * 128) / Context.getCharWidth();
1599 return VScale * 128;
1600}
1601
/// Return true if the two types are compatible SVE types (a sizeless SVE
/// builtin type and a corresponding fixed-length vector type of matching
/// element type and size), in either order.
bool SemaARM::areCompatibleSveTypes(QualType FirstType, QualType SecondType) {
  bool IsStreaming = false;
  // Only bother classifying the current function when streaming and
  // non-streaming vscale settings actually differ.
  if (getLangOpts().VScaleMin != getLangOpts().VScaleStreamingMin ||
      getLangOpts().VScaleMax != getLangOpts().VScaleStreamingMax) {
    if (const FunctionDecl *FD =
            SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
      // For streaming-compatible functions, we don't know vector length.
      if (const auto *T = FD->getType()->getAs<FunctionProtoType>()) {
        if (T->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask)
          return false;
      }

      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
        IsStreaming = true;
    }
  }

  // Directional check: FirstType must be the SVE builtin, SecondType the
  // vector type; called below with both orders.
  auto IsValidCast = [&](QualType FirstType, QualType SecondType) {
    if (const auto *BT = FirstType->getAs<BuiltinType>()) {
      if (const auto *VT = SecondType->getAs<VectorType>()) {
        // Predicates have the same representation as uint8 so we also have to
        // check the kind to make these types incompatible.
        ASTContext &Context = getASTContext();
        if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
          return BT->getKind() == BuiltinType::SveBool;
        else if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
          return VT->getElementType().getCanonicalType() ==
                     FirstType->getSveEltType(Ctx: Context) &&
                 BT->getKind() != BuiltinType::SveBool;
        else if (VT->getVectorKind() == VectorKind::Generic)
          // GNU vectors are compatible when both size and element type match.
          return Context.getTypeSize(T: SecondType) ==
                     getSVETypeSize(Context, Ty: BT, IsStreaming) &&
                 Context.hasSameType(
                     T1: VT->getElementType(),
                     T2: Context.getBuiltinVectorTypeInfo(VecTy: BT).ElementType);
      }
    }
    return false;
  };

  return IsValidCast(FirstType, SecondType) ||
         IsValidCast(SecondType, FirstType);
}
1646
1647bool SemaARM::areLaxCompatibleSveTypes(QualType FirstType,
1648 QualType SecondType) {
1649 bool IsStreaming = false;
1650 if (getLangOpts().VScaleMin != getLangOpts().VScaleStreamingMin ||
1651 getLangOpts().VScaleMax != getLangOpts().VScaleStreamingMax) {
1652 if (const FunctionDecl *FD =
1653 SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
1654 // For streaming-compatible functions, we don't know vector length.
1655 if (const auto *T = FD->getType()->getAs<FunctionProtoType>())
1656 if (T->getAArch64SMEAttributes() &
1657 FunctionType::SME_PStateSMCompatibleMask)
1658 return false;
1659
1660 if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
1661 IsStreaming = true;
1662 }
1663 }
1664
1665 auto IsLaxCompatible = [&](QualType FirstType, QualType SecondType) {
1666 const auto *BT = FirstType->getAs<BuiltinType>();
1667 if (!BT)
1668 return false;
1669
1670 const auto *VecTy = SecondType->getAs<VectorType>();
1671 if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData ||
1672 VecTy->getVectorKind() == VectorKind::Generic)) {
1673 const LangOptions::LaxVectorConversionKind LVCKind =
1674 getLangOpts().getLaxVectorConversions();
1675 ASTContext &Context = getASTContext();
1676
1677 // Can not convert between sve predicates and sve vectors because of
1678 // different size.
1679 if (BT->getKind() == BuiltinType::SveBool &&
1680 VecTy->getVectorKind() == VectorKind::SveFixedLengthData)
1681 return false;
1682
1683 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
1684 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
1685 // converts to VLAT and VLAT implicitly converts to GNUT."
1686 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
1687 // predicates.
1688 if (VecTy->getVectorKind() == VectorKind::Generic &&
1689 Context.getTypeSize(T: SecondType) !=
1690 getSVETypeSize(Context, Ty: BT, IsStreaming))
1691 return false;
1692
1693 // If -flax-vector-conversions=all is specified, the types are
1694 // certainly compatible.
1695 if (LVCKind == LangOptions::LaxVectorConversionKind::All)
1696 return true;
1697
1698 // If -flax-vector-conversions=integer is specified, the types are
1699 // compatible if the elements are integer types.
1700 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
1701 return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
1702 FirstType->getSveEltType(Ctx: Context)->isIntegerType();
1703 }
1704
1705 return false;
1706 };
1707
1708 return IsLaxCompatible(FirstType, SecondType) ||
1709 IsLaxCompatible(SecondType, FirstType);
1710}
1711
1712static void appendFeature(StringRef Feat, SmallString<64> &Buffer) {
1713 if (!Buffer.empty())
1714 Buffer.append(RHS: "+");
1715 Buffer.append(RHS: Feat);
1716}
1717
1718static void convertPriorityString(unsigned Priority,
1719 SmallString<64> &NewParam) {
1720 StringRef PriorityString[8] = {"P0", "P1", "P2", "P3",
1721 "P4", "P5", "P6", "P7"};
1722
1723 assert(Priority > 0 && Priority < 256 && "priority out of range");
1724 // Convert priority=[1-255] -> P0 + ... + P7
1725 for (unsigned BitPos = 0; BitPos < 8; ++BitPos)
1726 if (Priority & (1U << BitPos))
1727 appendFeature(Feat: PriorityString[BitPos], Buffer&: NewParam);
1728}
1729
1730bool SemaARM::checkTargetVersionAttr(const StringRef Param,
1731 const SourceLocation Loc,
1732 SmallString<64> &NewParam) {
1733 using namespace DiagAttrParams;
1734
1735 auto [LHS, RHS] = Param.split(Separator: ';');
1736 RHS = RHS.trim();
1737 bool IsDefault = false;
1738 llvm::SmallVector<StringRef, 8> Features;
1739 LHS.split(A&: Features, Separator: '+');
1740 for (StringRef Feat : Features) {
1741 Feat = Feat.trim();
1742 if (Feat == "default")
1743 IsDefault = true;
1744 else if (!getASTContext().getTargetInfo().validateCpuSupports(Name: Feat))
1745 return Diag(Loc, DiagID: diag::warn_unsupported_target_attribute)
1746 << Unsupported << None << Feat << TargetVersion;
1747 appendFeature(Feat, Buffer&: NewParam);
1748 }
1749
1750 if (!RHS.empty() && RHS.consume_front(Prefix: "priority=")) {
1751 if (IsDefault)
1752 Diag(Loc, DiagID: diag::warn_invalid_default_version_priority);
1753 else {
1754 unsigned Digit;
1755 if (RHS.getAsInteger(Radix: 0, Result&: Digit) || Digit < 1 || Digit > 255)
1756 Diag(Loc, DiagID: diag::warn_version_priority_out_of_range) << RHS;
1757 else
1758 convertPriorityString(Priority: Digit, NewParam);
1759 }
1760 }
1761 return false;
1762}
1763
/// Validate the string parameters of an AArch64 target_clones attribute.
///
/// Each entry in \p Params has the form "feat1+feat2[;priority=N]" or
/// "default". Valid entries are canonicalized (features sorted, priority
/// encoded as P0..P7 pseudo-features) and appended to \p NewParams; invalid
/// or duplicate entries are diagnosed at the matching location in \p Locs
/// and skipped. Returns true when the attribute should be rejected outright
/// (target lacks FMV support, an empty version string is found, or no
/// non-default version survives validation).
bool SemaARM::checkTargetClonesAttr(
    SmallVectorImpl<StringRef> &Params, SmallVectorImpl<SourceLocation> &Locs,
    SmallVectorImpl<SmallString<64>> &NewParams) {
  using namespace DiagAttrParams;

  // Function multi-versioning requires the target's "fmv" feature.
  if (!getASTContext().getTargetInfo().hasFeature(Feature: "fmv"))
    return true;

  assert(Params.size() == Locs.size() &&
         "Mismatch between number of string parameters and locations");

  bool HasDefault = false;
  bool HasNonDefault = false;
  for (unsigned I = 0, E = Params.size(); I < E; ++I) {
    const StringRef Param = Params[I].trim();
    const SourceLocation &Loc = Locs[I];

    // Split "features;priority=N"; a priority suffix is optional.
    auto [LHS, RHS] = Param.split(Separator: ';');
    RHS = RHS.trim();
    bool HasPriority = !RHS.empty() && RHS.consume_front(Prefix: "priority=");

    // An empty version string invalidates the whole attribute.
    if (LHS.empty())
      return Diag(Loc, DiagID: diag::warn_unsupported_target_attribute)
             << Unsupported << None << "" << TargetClones;

    // "default" is tracked separately: only one is kept, and it may not
    // carry a priority.
    if (LHS == "default") {
      if (HasDefault)
        Diag(Loc, DiagID: diag::warn_target_clone_duplicate_options);
      else {
        if (HasPriority)
          Diag(Loc, DiagID: diag::warn_invalid_default_version_priority);
        NewParams.push_back(Elt: LHS);
        HasDefault = true;
      }
      continue;
    }

    // Filter the '+'-separated feature list down to features the target
    // recognizes, noting whether any of them affect code generation.
    bool HasCodeGenImpact = false;
    llvm::SmallVector<StringRef, 8> Features;
    llvm::SmallVector<StringRef, 8> ValidFeatures;
    LHS.split(A&: Features, Separator: '+');
    for (StringRef Feat : Features) {
      Feat = Feat.trim();
      if (!getASTContext().getTargetInfo().validateCpuSupports(Name: Feat)) {
        Diag(Loc, DiagID: diag::warn_unsupported_target_attribute)
            << Unsupported << None << Feat << TargetClones;
        continue;
      }
      if (getASTContext().getTargetInfo().doesFeatureAffectCodeGen(Feature: Feat))
        HasCodeGenImpact = true;
      ValidFeatures.push_back(Elt: Feat);
    }

    // Ignore features that don't impact code generation.
    if (!HasCodeGenImpact) {
      Diag(Loc, DiagID: diag::warn_target_clone_no_impact_options);
      continue;
    }

    if (ValidFeatures.empty())
      continue;

    // Canonicalize attribute parameter.
    // Sorting makes duplicate detection below order-insensitive.
    llvm::sort(C&: ValidFeatures);
    SmallString<64> NewParam(llvm::join(R&: ValidFeatures, Separator: "+"));
    if (llvm::is_contained(Range&: NewParams, Element: NewParam)) {
      Diag(Loc, DiagID: diag::warn_target_clone_duplicate_options);
      continue;
    }

    // A valid priority is appended to the canonical string as P0..P7
    // pseudo-features; out-of-range values are diagnosed and dropped.
    if (HasPriority) {
      unsigned Digit;
      if (RHS.getAsInteger(Radix: 0, Result&: Digit) || Digit < 1 || Digit > 255)
        Diag(Loc, DiagID: diag::warn_version_priority_out_of_range) << RHS;
      else
        convertPriorityString(Priority: Digit, NewParam);
    }

    // Valid non-default argument.
    NewParams.push_back(Elt: NewParam);
    HasNonDefault = true;
  }
  // Without at least one usable non-default version there is nothing to
  // clone, so reject the attribute.
  if (!HasNonDefault)
    return true;

  return false;
}
1851
1852bool SemaARM::checkSVETypeSupport(QualType Ty, SourceLocation Loc,
1853 const FunctionDecl *FD,
1854 const llvm::StringMap<bool> &FeatureMap) {
1855 if (!Ty->isSVESizelessBuiltinType())
1856 return false;
1857
1858 if (FeatureMap.lookup(Key: "sve"))
1859 return false;
1860
1861 // No SVE environment available.
1862 if (!FeatureMap.lookup(Key: "sme"))
1863 return Diag(Loc, DiagID: diag::err_sve_vector_in_non_sve_target) << Ty;
1864
1865 // SVE environment only available to streaming functions.
1866 if (FD && !FD->getType().isNull() &&
1867 !IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
1868 return Diag(Loc, DiagID: diag::err_sve_vector_in_non_streaming_function) << Ty;
1869
1870 return false;
1871}
1872} // namespace clang
1873