1//===------ SemaARM.cpp ---------- ARM target-specific routines -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements semantic analysis functions specific to ARM.
10//
11//===----------------------------------------------------------------------===//
12
13#include "clang/Sema/SemaARM.h"
14#include "clang/Basic/DiagnosticSema.h"
15#include "clang/Basic/TargetBuiltins.h"
16#include "clang/Basic/TargetInfo.h"
17#include "clang/Sema/Initialization.h"
18#include "clang/Sema/ParsedAttr.h"
19#include "clang/Sema/Sema.h"
20
21namespace clang {
22
// SemaARM is a thin per-target semantic-analysis helper; construction just
// records the owning Sema instance via the SemaBase base class.
SemaARM::SemaARM(Sema &S) : SemaBase(S) {}
24
25/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
26bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID,
27 CallExpr *TheCall) {
28 ASTContext &Context = getASTContext();
29
30 if (BuiltinID == AArch64::BI__builtin_arm_irg) {
31 if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
32 return true;
33 Expr *Arg0 = TheCall->getArg(Arg: 0);
34 Expr *Arg1 = TheCall->getArg(Arg: 1);
35
36 ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
37 if (FirstArg.isInvalid())
38 return true;
39 QualType FirstArgType = FirstArg.get()->getType();
40 if (!FirstArgType->isAnyPointerType())
41 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
42 << "first" << FirstArgType << Arg0->getSourceRange();
43 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
44
45 ExprResult SecArg = SemaRef.DefaultLvalueConversion(E: Arg1);
46 if (SecArg.isInvalid())
47 return true;
48 QualType SecArgType = SecArg.get()->getType();
49 if (!SecArgType->isIntegerType())
50 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_integer)
51 << "second" << SecArgType << Arg1->getSourceRange();
52
53 // Derive the return type from the pointer argument.
54 TheCall->setType(FirstArgType);
55 return false;
56 }
57
58 if (BuiltinID == AArch64::BI__builtin_arm_addg) {
59 if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
60 return true;
61
62 Expr *Arg0 = TheCall->getArg(Arg: 0);
63 ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
64 if (FirstArg.isInvalid())
65 return true;
66 QualType FirstArgType = FirstArg.get()->getType();
67 if (!FirstArgType->isAnyPointerType())
68 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
69 << "first" << FirstArgType << Arg0->getSourceRange();
70 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
71
72 // Derive the return type from the pointer argument.
73 TheCall->setType(FirstArgType);
74
75 // Second arg must be an constant in range [0,15]
76 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
77 }
78
79 if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
80 if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
81 return true;
82 Expr *Arg0 = TheCall->getArg(Arg: 0);
83 Expr *Arg1 = TheCall->getArg(Arg: 1);
84
85 ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
86 if (FirstArg.isInvalid())
87 return true;
88 QualType FirstArgType = FirstArg.get()->getType();
89 if (!FirstArgType->isAnyPointerType())
90 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
91 << "first" << FirstArgType << Arg0->getSourceRange();
92
93 QualType SecArgType = Arg1->getType();
94 if (!SecArgType->isIntegerType())
95 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_integer)
96 << "second" << SecArgType << Arg1->getSourceRange();
97 TheCall->setType(Context.IntTy);
98 return false;
99 }
100
101 if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
102 BuiltinID == AArch64::BI__builtin_arm_stg) {
103 if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 1))
104 return true;
105 Expr *Arg0 = TheCall->getArg(Arg: 0);
106 ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
107 if (FirstArg.isInvalid())
108 return true;
109
110 QualType FirstArgType = FirstArg.get()->getType();
111 if (!FirstArgType->isAnyPointerType())
112 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
113 << "first" << FirstArgType << Arg0->getSourceRange();
114 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
115
116 // Derive the return type from the pointer argument.
117 if (BuiltinID == AArch64::BI__builtin_arm_ldg)
118 TheCall->setType(FirstArgType);
119 return false;
120 }
121
122 if (BuiltinID == AArch64::BI__builtin_arm_subp) {
123 Expr *ArgA = TheCall->getArg(Arg: 0);
124 Expr *ArgB = TheCall->getArg(Arg: 1);
125
126 ExprResult ArgExprA = SemaRef.DefaultFunctionArrayLvalueConversion(E: ArgA);
127 ExprResult ArgExprB = SemaRef.DefaultFunctionArrayLvalueConversion(E: ArgB);
128
129 if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
130 return true;
131
132 QualType ArgTypeA = ArgExprA.get()->getType();
133 QualType ArgTypeB = ArgExprB.get()->getType();
134
135 auto isNull = [&](Expr *E) -> bool {
136 return E->isNullPointerConstant(Ctx&: Context,
137 NPC: Expr::NPC_ValueDependentIsNotNull);
138 };
139
140 // argument should be either a pointer or null
141 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
142 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_null_or_pointer)
143 << "first" << ArgTypeA << ArgA->getSourceRange();
144
145 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
146 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_null_or_pointer)
147 << "second" << ArgTypeB << ArgB->getSourceRange();
148
149 // Ensure Pointee types are compatible
150 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
151 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
152 QualType pointeeA = ArgTypeA->getPointeeType();
153 QualType pointeeB = ArgTypeB->getPointeeType();
154 if (!Context.typesAreCompatible(
155 T1: Context.getCanonicalType(T: pointeeA).getUnqualifiedType(),
156 T2: Context.getCanonicalType(T: pointeeB).getUnqualifiedType())) {
157 return Diag(Loc: TheCall->getBeginLoc(),
158 DiagID: diag::err_typecheck_sub_ptr_compatible)
159 << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
160 << ArgB->getSourceRange();
161 }
162 }
163
164 // at least one argument should be pointer type
165 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
166 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_any2arg_pointer)
167 << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
168
169 if (isNull(ArgA)) // adopt type of the other pointer
170 ArgExprA =
171 SemaRef.ImpCastExprToType(E: ArgExprA.get(), Type: ArgTypeB, CK: CK_NullToPointer);
172
173 if (isNull(ArgB))
174 ArgExprB =
175 SemaRef.ImpCastExprToType(E: ArgExprB.get(), Type: ArgTypeA, CK: CK_NullToPointer);
176
177 TheCall->setArg(Arg: 0, ArgExpr: ArgExprA.get());
178 TheCall->setArg(Arg: 1, ArgExpr: ArgExprB.get());
179 TheCall->setType(Context.LongLongTy);
180 return false;
181 }
182 assert(false && "Unhandled ARM MTE intrinsic");
183 return true;
184}
185
/// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
/// ExpectedFieldNum is the number of colon-separated fields required when
/// the register is given by encoding; AllowName additionally accepts a
/// single-field named register. Returns true on error (after emitting a
/// diagnostic), false otherwise.
bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                   int ArgNum, unsigned ExpectedFieldNum,
                                   bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(Arg: ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Val: Arg->IgnoreParenImpCasts()))
    return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Val: Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(A&: Fields, Separator: ":");

  // Either the exact encoding field count, or a single field when a plain
  // register name is permitted.
  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // On 32-bit ARM the first field carries a "cp"/"p" coprocessor prefix
      // and the CRn/CRm fields a "c" prefix; strip them before the numeric
      // range checks below.
      ValidString &= Fields[0].starts_with_insensitive(Prefix: "cp") ||
                     Fields[0].starts_with_insensitive(Prefix: "p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            N: Fields[0].starts_with_insensitive(Prefix: "cp") ? 2 : 1);

      ValidString &= Fields[2].starts_with_insensitive(Prefix: "c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(N: 1);

      if (FiveFields) {
        ValidString &= Fields[3].starts_with_insensitive(Prefix: "c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(N: 1);
      }
    }

    // Per-field bit-widths of the encoding; the leading field of the
    // five-field form is 2 bits on AArch64 but 4 bits on 32-bit ARM.
    SmallVector<int, 5> FieldBitWidths;
    if (FiveFields)
      FieldBitWidths.append(IL: {IsAArch64Builtin ? 2 : 4, 3, 4, 4, 3});
    else
      FieldBitWidths.append(IL: {4, 3, 4});

    // Every field must parse as a decimal integer that fits its bit-width.
    for (unsigned i = 0; i < Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(Radix: 10, Result&: IntField);
      ValidString &= (IntField >= 0 && IntField < (1 << FieldBitWidths[i]));
    }

    if (!ValidString)
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // This code validates writes to PSTATE registers.

    // Not a write.
    if (TheCall->getNumArgs() != 2)
      return false;

    // The 128-bit system register accesses do not touch PSTATE.
    if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == AArch64::BI__builtin_arm_wsr128)
      return false;

    // These are the named PSTATE accesses using "MSR (immediate)" instructions,
    // along with the upper limit on the immediates allowed.
    auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
                        .CaseLower(S: "spsel", Value: 15)
                        .CaseLower(S: "daifclr", Value: 15)
                        .CaseLower(S: "daifset", Value: 15)
                        .CaseLower(S: "pan", Value: 15)
                        .CaseLower(S: "uao", Value: 15)
                        .CaseLower(S: "dit", Value: 15)
                        .CaseLower(S: "ssbs", Value: 15)
                        .CaseLower(S: "tco", Value: 15)
                        .CaseLower(S: "allint", Value: 1)
                        .CaseLower(S: "pm", Value: 1)
                        .Default(Value: std::nullopt);

    // If this is not a named PSTATE, just continue without validating, as this
    // will be lowered to an "MSR (register)" instruction directly
    if (!MaxLimit)
      return false;

    // Here we only allow constants in the range for that pstate, as required by
    // the ACLE.
    //
    // While clang also accepts the names of system registers in its ACLE
    // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
    // as the value written via a register is different to the value used as an
    // immediate to have the same effect. e.g., for the instruction `msr tco,
    // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
    // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
    //
    // If a programmer wants to codegen the MSR (register) form of `msr tco,
    // xN`, they can still do so by specifying the register using five
    // colon-separated numbers in a string.
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: *MaxLimit);
  }

  return false;
}
317
318/// getNeonEltType - Return the QualType corresponding to the elements of
319/// the vector type specified by the NeonTypeFlags. This is used to check
320/// the pointer arguments for Neon load/store intrinsics.
321static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
322 bool IsPolyUnsigned, bool IsInt64Long) {
323 switch (Flags.getEltType()) {
324 case NeonTypeFlags::Int8:
325 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
326 case NeonTypeFlags::Int16:
327 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
328 case NeonTypeFlags::Int32:
329 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
330 case NeonTypeFlags::Int64:
331 if (IsInt64Long)
332 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
333 else
334 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
335 : Context.LongLongTy;
336 case NeonTypeFlags::Poly8:
337 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
338 case NeonTypeFlags::Poly16:
339 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
340 case NeonTypeFlags::Poly64:
341 if (IsInt64Long)
342 return Context.UnsignedLongTy;
343 else
344 return Context.UnsignedLongLongTy;
345 case NeonTypeFlags::Poly128:
346 break;
347 case NeonTypeFlags::Float16:
348 return Context.HalfTy;
349 case NeonTypeFlags::Float32:
350 return Context.FloatTy;
351 case NeonTypeFlags::Float64:
352 return Context.DoubleTy;
353 case NeonTypeFlags::BFloat16:
354 return Context.BFloat16Ty;
355 case NeonTypeFlags::MFloat8:
356 return Context.MFloat8Ty;
357 }
358 llvm_unreachable("Invalid NeonTypeFlag!");
359}
360
// Bitmask describing how an SME builtin interacts with the ZA and ZT0
// architectural state. Each piece of state gets a 2-bit field encoding
// in (0b01), out (0b10), or in/out (0b11) access, plus a mask covering
// the whole field for "any access" tests.
enum ArmSMEState : unsigned {
  ArmNoState = 0,

  // ZA accesses occupy bits [1:0].
  ArmInZA = 0b01,
  ArmOutZA = 0b10,
  ArmInOutZA = 0b11,
  ArmZAMask = 0b11,

  // ZT0 accesses occupy bits [3:2].
  ArmInZT0 = 0b01 << 2,
  ArmOutZT0 = 0b10 << 2,
  ArmInOutZT0 = 0b11 << 2,
  ArmZT0Mask = 0b11 << 2
};
374
/// Check that operand ArgIdx of TheCall satisfies the immediate constraint
/// described by CheckTy (an ImmCheckType, shared by the NEON/SVE/SME
/// checkers). EltBitWidth and ContainerBitWidth parameterize the
/// width-dependent checks (shifts, extracts, lane indices).
/// \returns true on error (a diagnostic has been emitted), false otherwise.
bool SemaARM::CheckImmediateArg(CallExpr *TheCall, unsigned CheckTy,
                                unsigned ArgIdx, unsigned EltBitWidth,
                                unsigned ContainerBitWidth) {
  // Function that checks whether the operand (ArgIdx) is an immediate
  // that is one of a given set of values.
  auto CheckImmediateInSet = [&](std::initializer_list<int64_t> Set,
                                 int ErrDiag) -> bool {
    // We can't check the value of a dependent argument.
    Expr *Arg = TheCall->getArg(Arg: ArgIdx);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      return false;

    // Check constant-ness first.
    llvm::APSInt Imm;
    if (SemaRef.BuiltinConstantArg(TheCall, ArgNum: ArgIdx, Result&: Imm))
      return true;

    if (!llvm::is_contained(Set, Element: Imm.getSExtValue()))
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: ErrDiag) << Arg->getSourceRange();
    return false;
  };

  switch ((ImmCheckType)CheckTy) {
  // Fixed-range immediate checks.
  case ImmCheckType::ImmCheck0_31:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 31))
      return true;
    break;
  case ImmCheckType::ImmCheck0_13:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 13))
      return true;
    break;
  case ImmCheckType::ImmCheck0_63:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 63))
      return true;
    break;
  case ImmCheckType::ImmCheck1_16:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 16))
      return true;
    break;
  case ImmCheckType::ImmCheck0_7:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 7))
      return true;
    break;
  case ImmCheckType::ImmCheck1_1:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 1))
      return true;
    break;
  case ImmCheckType::ImmCheck1_3:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 3))
      return true;
    break;
  case ImmCheckType::ImmCheck1_7:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 7))
      return true;
    break;
  // Ranges that scale with the element and/or container bit-widths.
  case ImmCheckType::ImmCheckExtract:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0,
                                        High: (2048 / EltBitWidth) - 1))
      return true;
    break;
  case ImmCheckType::ImmCheckCvt:
  case ImmCheckType::ImmCheckShiftRight:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: EltBitWidth))
      return true;
    break;
  case ImmCheckType::ImmCheckShiftRightNarrow:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: EltBitWidth / 2))
      return true;
    break;
  case ImmCheckType::ImmCheckShiftLeft:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: EltBitWidth - 1))
      return true;
    break;
  case ImmCheckType::ImmCheckLaneIndex:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0,
                                        High: (ContainerBitWidth / EltBitWidth) - 1))
      return true;
    break;
  case ImmCheckType::ImmCheckLaneIndexCompRotate:
    if (SemaRef.BuiltinConstantArgRange(
            TheCall, ArgNum: ArgIdx, Low: 0, High: (ContainerBitWidth / (2 * EltBitWidth)) - 1))
      return true;
    break;
  case ImmCheckType::ImmCheckLaneIndexDot:
    if (SemaRef.BuiltinConstantArgRange(
            TheCall, ArgNum: ArgIdx, Low: 0, High: (ContainerBitWidth / (4 * EltBitWidth)) - 1))
      return true;
    break;
  // Complex rotation immediates must come from a fixed set of angles.
  case ImmCheckType::ImmCheckComplexRot90_270:
    if (CheckImmediateInSet({90, 270}, diag::err_rotation_argument_to_cadd))
      return true;
    break;
  case ImmCheckType::ImmCheckComplexRotAll90:
    if (CheckImmediateInSet({0, 90, 180, 270},
                            diag::err_rotation_argument_to_cmla))
      return true;
    break;
  case ImmCheckType::ImmCheck0_1:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 1))
      return true;
    break;
  case ImmCheckType::ImmCheck0_2:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 2))
      return true;
    break;
  case ImmCheckType::ImmCheck0_3:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 3))
      return true;
    break;
  case ImmCheckType::ImmCheck0_0:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 0))
      return true;
    break;
  case ImmCheckType::ImmCheck0_15:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 15))
      return true;
    break;
  case ImmCheckType::ImmCheck0_255:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 255))
      return true;
    break;
  case ImmCheckType::ImmCheck1_32:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 32))
      return true;
    break;
  case ImmCheckType::ImmCheck1_64:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 64))
      return true;
    break;
  // Even values in [2, 4].
  case ImmCheckType::ImmCheck2_4_Mul2:
    if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 2, High: 4) ||
        SemaRef.BuiltinConstantArgMultiple(TheCall, ArgNum: ArgIdx, Multiple: 2))
      return true;
    break;
  }
  return false;
}
512
513bool SemaARM::PerformNeonImmChecks(
514 CallExpr *TheCall,
515 SmallVectorImpl<std::tuple<int, int, int, int>> &ImmChecks,
516 int OverloadType) {
517 bool HasError = false;
518
519 for (const auto &I : ImmChecks) {
520 auto [ArgIdx, CheckTy, ElementBitWidth, VecBitWidth] = I;
521
522 if (OverloadType >= 0)
523 ElementBitWidth = NeonTypeFlags(OverloadType).getEltSizeInBits();
524
525 HasError |= CheckImmediateArg(TheCall, CheckTy, ArgIdx, EltBitWidth: ElementBitWidth,
526 ContainerBitWidth: VecBitWidth);
527 }
528
529 return HasError;
530}
531
532bool SemaARM::PerformSVEImmChecks(
533 CallExpr *TheCall, SmallVectorImpl<std::tuple<int, int, int>> &ImmChecks) {
534 bool HasError = false;
535
536 for (const auto &I : ImmChecks) {
537 auto [ArgIdx, CheckTy, ElementBitWidth] = I;
538 HasError |=
539 CheckImmediateArg(TheCall, CheckTy, ArgIdx, EltBitWidth: ElementBitWidth, ContainerBitWidth: 128);
540 }
541
542 return HasError;
543}
544
545SemaARM::ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
546 if (FD->hasAttr<ArmLocallyStreamingAttr>())
547 return SemaARM::ArmStreaming;
548 if (const Type *Ty = FD->getType().getTypePtrOrNull()) {
549 if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
550 if (FPT->getAArch64SMEAttributes() &
551 FunctionType::SME_PStateSMEnabledMask)
552 return SemaARM::ArmStreaming;
553 if (FPT->getAArch64SMEAttributes() &
554 FunctionType::SME_PStateSMCompatibleMask)
555 return SemaARM::ArmStreamingCompatible;
556 }
557 }
558 return SemaARM::ArmNonStreaming;
559}
560
/// Check that a builtin with the given streaming requirement (BuiltinType)
/// may be called from function FD given FD's streaming mode. For
/// VerifyRuntimeMode builtins (available in both an SVE and an SME form),
/// the caller's target features decide which requirement applies.
/// \returns true if an incompatibility diagnostic was emitted.
static bool checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
                                     const FunctionDecl *FD,
                                     SemaARM::ArmStreamingType BuiltinType,
                                     unsigned BuiltinID) {
  SemaARM::ArmStreamingType FnType = getArmStreamingFnType(FD);

  // Check if the intrinsic is available in the right mode, i.e.
  // * When compiling for SME only, the caller must be in streaming mode.
  // * When compiling for SVE only, the caller must be in non-streaming mode.
  // * When compiling for both SVE and SME, the caller can be in either mode.
  if (BuiltinType == SemaARM::VerifyRuntimeMode) {
    llvm::StringMap<bool> CallerFeatures;
    S.Context.getFunctionFeatureMap(FeatureMap&: CallerFeatures, FD);

    // Avoid emitting diagnostics for a function that can never compile.
    if (FnType == SemaARM::ArmStreaming && !CallerFeatures["sme"])
      return false;

    // Find the '|' separating the SVE and SME guards in the builtin's
    // required-features string, skipping any '|' nested inside parentheses.
    const auto FindTopLevelPipe = [](const char *S) {
      unsigned Depth = 0;
      unsigned I = 0, E = strlen(s: S);
      for (; I < E; ++I) {
        if (S[I] == '|' && Depth == 0)
          break;
        if (S[I] == '(')
          ++Depth;
        else if (S[I] == ')')
          --Depth;
      }
      return I;
    };

    const char *RequiredFeatures =
        S.Context.BuiltinInfo.getRequiredFeatures(ID: BuiltinID);
    unsigned PipeIdx = FindTopLevelPipe(RequiredFeatures);
    assert(PipeIdx != 0 && PipeIdx != strlen(RequiredFeatures) &&
           "Expected feature string of the form 'SVE-EXPR|SME-EXPR'");
    StringRef NonStreamingBuiltinGuard = StringRef(RequiredFeatures, PipeIdx);
    StringRef StreamingBuiltinGuard = StringRef(RequiredFeatures + PipeIdx + 1);

    // Evaluate each guard against the caller's feature set to decide which
    // form(s) of the builtin the caller can use.
    bool SatisfiesSVE = Builtin::evaluateRequiredTargetFeatures(
        RequiredFatures: NonStreamingBuiltinGuard, TargetFetureMap: CallerFeatures);
    bool SatisfiesSME = Builtin::evaluateRequiredTargetFeatures(
        RequiredFatures: StreamingBuiltinGuard, TargetFetureMap: CallerFeatures);

    if (SatisfiesSVE && SatisfiesSME)
      // Function type is irrelevant for streaming-agnostic builtins.
      return false;
    else if (SatisfiesSVE)
      BuiltinType = SemaARM::ArmNonStreaming;
    else if (SatisfiesSME)
      BuiltinType = SemaARM::ArmStreaming;
    else
      // This should be diagnosed by CodeGen
      return false;
  }

  // Diagnose a mismatch between the builtin's requirement and the caller's
  // mode. Note that streaming-compatible callers fail both checks below:
  // they may be in either mode at run time, so mode-specific builtins are
  // never safe to call from them.
  if (FnType != SemaARM::ArmNonStreaming &&
      BuiltinType == SemaARM::ArmNonStreaming)
    S.Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "non-streaming";
  else if (FnType != SemaARM::ArmStreaming &&
           BuiltinType == SemaARM::ArmStreaming)
    S.Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "streaming";
  else
    return false;

  return true;
}
631
// Look up the ZA/ZT0 state usage of an SME builtin from the generated
// table; builtins not listed there touch neither ZA nor ZT0.
static ArmSMEState getSMEState(unsigned BuiltinID) {
  switch (BuiltinID) {
  default:
    return ArmNoState;
#define GET_SME_BUILTIN_GET_STATE
#include "clang/Basic/arm_sme_builtins_za_state.inc"
#undef GET_SME_BUILTIN_GET_STATE
  }
}
641
/// Semantic checking for SME builtin calls: verifies the caller's streaming
/// mode is compatible with the builtin, warns when the builtin uses ZA/ZT0
/// state the caller does not have, and range-checks immediate operands.
/// \returns true on error, false otherwise.
bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    // The generated table sets BuiltinType for builtins that have streaming
    // requirements; for all others it is left unset.
    switch (BuiltinID) {
#define GET_SME_STREAMING_ATTRS
#include "clang/Basic/arm_sme_streaming_attrs.inc"
#undef GET_SME_STREAMING_ATTRS
    }

    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;

    // Warn (not error) when the builtin reads or writes ZA/ZT0 but the
    // calling function has no such state.
    if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
      Diag(Loc: TheCall->getBeginLoc(),
           DiagID: diag::warn_attribute_arm_za_builtin_no_za_state)
          << TheCall->getSourceRange();

    if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
      Diag(Loc: TheCall->getBeginLoc(),
           DiagID: diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
          << TheCall->getSourceRange();
  }

  // Range check SME intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SME_IMMEDIATE_CHECK
#include "clang/Basic/arm_sme_sema_rangechecks.inc"
#undef GET_SME_IMMEDIATE_CHECK
  }

  return PerformSVEImmChecks(TheCall, ImmChecks);
}
682
/// Semantic checking for SVE builtin calls: verifies the caller's streaming
/// mode is compatible with the builtin and range-checks immediate operands.
/// \returns true on error, false otherwise.
bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    // The generated table sets BuiltinType for builtins that have streaming
    // requirements; for all others it is left unset.
    switch (BuiltinID) {
#define GET_SVE_STREAMING_ATTRS
#include "clang/Basic/arm_sve_streaming_attrs.inc"
#undef GET_SVE_STREAMING_ATTRS
    }
    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;
  }
  // Range check SVE intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
  }

  return PerformSVEImmChecks(TheCall, ImmChecks);
}
711
/// Semantic checking for NEON builtin calls: checks streaming-mode
/// compatibility, validates the type-overload immediate, type-checks any
/// pointer argument, and range-checks immediate operands.
/// \returns true on error, false otherwise.
bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (const FunctionDecl *FD =
          SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
    std::optional<ArmStreamingType> BuiltinType;

    // The generated table sets BuiltinType for builtins that have streaming
    // requirements; for all others it is left unset.
    switch (BuiltinID) {
    default:
      break;
#define GET_NEON_STREAMING_COMPAT_FLAG
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_STREAMING_COMPAT_FLAG
    }
    if (BuiltinType &&
        checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
      return true;
  }

  // The generated overload-check table fills these in for overloaded
  // builtins: 'mask' has one bit set per valid type code, PtrArgNum is the
  // index of a pointer argument to type-check (-1 if none), and HasConstPtr
  // records whether that pointee should be const-qualified.
  llvm::APSInt Result;
  uint64_t mask = 0;
  int TV = -1;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_fp16.inc"
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  if (mask) {
    // The type code is always the trailing argument.
    unsigned ImmArg = TheCall->getNumArgs() - 1;
    if (SemaRef.BuiltinConstantArg(TheCall, ArgNum: ImmArg, Result))
      return true;

    // FIXME: This is effectively dead code. Change the logic above so that the
    // following check is actually run.
    TV = Result.getLimitedValue(Limit: 64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_invalid_neon_type_code)
             << TheCall->getArg(Arg: ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(Arg: PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    // Polynomial element types map to unsigned integers on AArch64 targets
    // and to signed ones on 32-bit ARM (see getNeonEltType).
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy = getNeonEltType(Flags: NeonTypeFlags(TV), Context&: getASTContext(),
                                    IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = getASTContext().getPointerType(T: EltTy);
    AssignConvertType ConvTy;
    ConvTy = SemaRef.CheckSingleAssignmentConstraints(LHSType: LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (SemaRef.DiagnoseAssignmentResult(ConvTy, Loc: Arg->getBeginLoc(), DstType: LHSTy,
                                         SrcType: RHSTy, SrcExpr: RHS.get(),
                                         Action: AssignmentAction::Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  SmallVector<std::tuple<int, int, int, int>, 2> ImmChecks;
  switch (BuiltinID) {
  default:
    return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_fp16.inc"
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  return PerformNeonImmChecks(TheCall, ImmChecks, OverloadType: TV);
}
800
/// Semantic checking for MVE builtin calls; all per-builtin checks are
/// generated into the included .inc file. Returns true on error.
bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_mve_builtin_sema.inc"
  }
}
809
/// Semantic checking for CDE builtin calls: runs the generated per-builtin
/// checks, then validates that the coprocessor immediate (argument 0)
/// names a coprocessor configured for CDE. Returns true on error.
bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI,
                                          unsigned BuiltinID,
                                          CallExpr *TheCall) {
  bool Err = false;
  // The generated checks set Err on failure; unknown IDs need no checking.
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
  }

  if (Err)
    return true;

  return CheckARMCoprocessorImmediate(TI, CoprocArg: TheCall->getArg(Arg: 0), /*WantCDE*/ true);
}
825
826bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
827 const Expr *CoprocArg,
828 bool WantCDE) {
829 ASTContext &Context = getASTContext();
830 if (SemaRef.isConstantEvaluatedContext())
831 return false;
832
833 // We can't check the value of a dependent argument.
834 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
835 return false;
836
837 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Ctx: Context);
838 int64_t CoprocNo = CoprocNoAP.getExtValue();
839 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
840
841 uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
842 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
843
844 if (IsCDECoproc != WantCDE)
845 return Diag(Loc: CoprocArg->getBeginLoc(), DiagID: diag::err_arm_invalid_coproc)
846 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
847
848 return false;
849}
850
/// Perform custom semantic checking for the ldrex/strex (and ldaex/stlex)
/// family of exclusive-access builtins, on both ARM and AArch64.
///
/// Verifies the argument count, validates and canonicalizes the pointer
/// argument (inserting the appropriate qualification cast), checks that the
/// pointee type can be accessed atomically on this target, and finally sets
/// the call's result type (the pointee type for loads, int for stores).
/// Returns true on error (diagnostic already emitted).
bool SemaARM::CheckARMBuiltinExclusiveCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldrexd ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_strexd ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  // Loads take one argument (the pointer); stores take two (value, pointer).
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldrexd ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;
  // ldrexd/strexd explicitly request a double-word access (ARM only).
  bool IsDoubleWord = BuiltinID == ARM::BI__builtin_arm_ldrexd ||
                      BuiltinID == ARM::BI__builtin_arm_strexd;

  ASTContext &Context = getASTContext();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(Arg: IsLdrex ? 0 : 1);
  ExprResult PointerArgRes =
      SemaRef.DefaultFunctionArrayLvalueConversion(E: PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << 0 << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(other: ValType, Ctx: getASTContext())) {
    CastNeeded = CK_BitCast;
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(T: AddrType)
        << AssignmentAction::Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(T: AddrType);
  PointerArgRes = SemaRef.ImpCastExprToType(E: PointerArg, Type: AddrType, CK: CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(Arg: IsLdrex ? 0 : 1, ArgExpr: PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << 0 << PointerArg->getSourceRange();
    return true;
  }

  // Check whether the size of the type can be handled atomically on this
  // target.
  if (!TI.getTriple().isAArch64()) {
    // On 32-bit ARM the supported exclusive-access widths are a per-target
    // bitmask (bit set for each supported size in bytes).
    unsigned Mask = TI.getARMLDREXMask();
    unsigned Bits = Context.getTypeSize(T: ValType);
    if (IsDoubleWord) {
      // Explicit request for ldrexd/strexd means only double word sizes
      // supported if the target supports them.
      Mask &= TargetInfo::ARM_LDREX_D;
    }
    bool Supported =
        (llvm::isPowerOf2_64(Value: Bits)) && Bits >= 8 && (Mask & (Bits / 8));

    if (!Supported) {
      // Emit a diagnostic saying that this size isn't available. If _no_ size
      // of exclusive access is supported on this target, we emit a diagnostic
      // with special wording for that case, but otherwise, we emit
      // err_atomic_exclusive_builtin_pointer_size and loop over `Mask` to
      // control what subset of sizes it lists as legal.
      if (Mask) {
        auto D = Diag(Loc: DRE->getBeginLoc(),
                      DiagID: diag::err_atomic_exclusive_builtin_pointer_size)
                 << PointerArg->getType();
        bool Started = false;
        for (unsigned Size = 1; Size <= 8; Size <<= 1) {
          // For each of the sizes 1,2,4,8, pass two integers into the
          // diagnostic. The first selects a separator from the previous
          // number: 0 for no separator at all, 1 for a comma, 2 for " or "
          // which appears before the final number in a list of more than one.
          // The second integer just indicates whether we print this size in
          // the message at all.
          if (!(Mask & Size)) {
            // This size isn't one of the supported ones, so emit no separator
            // text and don't print the size itself.
            D << 0 << 0;
          } else {
            // This size is supported, so print it, and an appropriate
            // separator.
            Mask &= ~Size;
            if (!Started)
              D << 0; // No separator if this is the first size we've printed
            else if (Mask)
              D << 1; // "," if there's still another size to come
            else
              D << 2; // " or " if the size we're about to print is the last
            D << 1; // print the size itself
            Started = true;
          }
        }
      } else {
        bool EmitDoubleWordDiagnostic =
            IsDoubleWord && !Mask && TI.getARMLDREXMask();
        Diag(Loc: DRE->getBeginLoc(),
             DiagID: diag::err_atomic_exclusive_builtin_pointer_size_none)
            << (EmitDoubleWordDiagnostic ? 1 : 0)
            << PointerArg->getSourceRange();
      }
    }
  }

  // ARC-managed pointees can't be accessed through raw exclusive loads/stores.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  // An exclusive load yields the pointee value.
  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(Arg: 0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, Type: ValType, /*consume*/ Consumed: false);
  ValArg = SemaRef.PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(Arg: 0, ArgExpr: ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}
1026
/// Perform semantic checking of a call to a 32-bit ARM target builtin.
///
/// Dispatches exclusive-access builtins and special-register accessors to
/// their dedicated checkers, then runs the NEON/MVE/CDE TableGen-driven
/// checks, and finally range-checks immediates for the remaining builtins.
/// Returns true on error (diagnostic already emitted).
bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI,
                                          unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // Exclusive load/store builtins need fully custom type checking.
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldrexd ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_strexd ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(TI, BuiltinID, TheCall);
  }

  // __builtin_arm_prefetch(addr, rw, cache): rw and cache are 0/1 immediates.
  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1);
  }

  // 64-bit system-register accessors take a three-field register spec and
  // don't accept named registers.
  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 3, AllowName: false);

  // 32-bit system-register accessors take a five-field spec or a register name.
  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  // TableGen-generated checks for the vector extensions.
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default:
    return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 32);
  case ARM::BI__builtin_arm_usat:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__isb:
  case ARM::BI__builtin_arm_dbg:
    // Barrier option / debug immediate: 4-bit field.
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 15);
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    // Generic coprocessor builtins: coprocessor number is a 4-bit immediate
    // and must not name a coprocessor reserved for CDE.
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 15) ||
           CheckARMCoprocessorImmediate(TI, CoprocArg: TheCall->getArg(Arg: 0),
                                        /*WantCDE*/ false);
  }
}
1109
/// Perform semantic checking of a call to an AArch64 target builtin.
///
/// Dispatches exclusive-access, prefetch, special-register, and MTE builtins
/// to their dedicated checkers, runs the NEON/SVE/SME TableGen-driven checks,
/// and range-checks the remaining immediate operands.
/// Returns true on error (diagnostic already emitted).
bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                              unsigned BuiltinID,
                                              CallExpr *TheCall) {
  // Exclusive load/store builtins need fully custom type checking.
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(TI, BuiltinID, TheCall);
  }

  // __builtin_arm_prefetch(addr, rw, target, policy, isdata).
  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 4, Low: 0, High: 1);
  }

  // Range prefetch with explicit stride/count/length immediates.
  if (BuiltinID == AArch64::BI__builtin_arm_range_prefetch_x) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: -2097152, High: 2097151) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 4, Low: 1, High: 65536) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 5, Low: -2097152, High: 2097151);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_range_prefetch) {
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1);
  }

  // Wide system-register accessors take a five-field spec or a register name.
  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr128)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return BuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg || BuiltinID == AArch64::BI__sys)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0x7fff);

  // __getReg selects one of the 32 general-purpose registers.
  if (BuiltinID == AArch64::BI__getReg)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 31);

  // brk / hlt take a 16-bit immediate.
  if (BuiltinID == AArch64::BI__break)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0xffff);

  if (BuiltinID == AArch64::BI__hlt)
    return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0xffff);

  // TableGen-generated checks for the vector/matrix extensions.
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__dsb:
  case AArch64::BI__builtin_arm_isb:
  case AArch64::BI__isb:
    // Barrier option: 4-bit field.
    l = 0;
    u = 15;
    break;
  }

  return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: i, Low: l, High: u + l);
}
1205
namespace {
/// Maps a builtin ID to the spelling(s) of its intrinsic alias inside a
/// TableGen-emitted string table. Tables of these are sorted by Id so they
/// can be binary-searched (see BuiltinAliasValid).
struct IntrinToName {
  uint32_t Id;       // Builtin ID this entry describes.
  int32_t FullName;  // Offset of the full intrinsic name in the string table.
  int32_t ShortName; // Offset of the short name, or -1 if there is none.
};
} // unnamed namespace
1213
1214static bool BuiltinAliasValid(unsigned BuiltinID, StringRef AliasName,
1215 ArrayRef<IntrinToName> Map,
1216 const char *IntrinNames) {
1217 AliasName.consume_front(Prefix: "__arm_");
1218 const IntrinToName *It =
1219 llvm::lower_bound(Range&: Map, Value&: BuiltinID, C: [](const IntrinToName &L, unsigned Id) {
1220 return L.Id < Id;
1221 });
1222 if (It == Map.end() || It->Id != BuiltinID)
1223 return false;
1224 StringRef FullName(&IntrinNames[It->FullName]);
1225 if (AliasName == FullName)
1226 return true;
1227 if (It->ShortName == -1)
1228 return false;
1229 StringRef ShortName(&IntrinNames[It->ShortName]);
1230 return AliasName == ShortName;
1231}
1232
/// Return true if \p AliasName is a valid builtin-alias spelling for the MVE
/// builtin \p BuiltinID.
bool SemaARM::MveAliasValid(unsigned BuiltinID, StringRef AliasName) {
#include "clang/Basic/arm_mve_builtin_aliases.inc"
  // The included file defines:
  // - ArrayRef<IntrinToName> Map
  // - const char IntrinNames[]
  return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
}
1240
/// Return true if \p AliasName is a valid builtin-alias spelling for the CDE
/// builtin \p BuiltinID. The included file defines Map and IntrinNames, as in
/// MveAliasValid.
bool SemaARM::CdeAliasValid(unsigned BuiltinID, StringRef AliasName) {
#include "clang/Basic/arm_cde_builtin_aliases.inc"
  return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
}
1245
1246bool SemaARM::SveAliasValid(unsigned BuiltinID, StringRef AliasName) {
1247 if (getASTContext().BuiltinInfo.isAuxBuiltinID(ID: BuiltinID))
1248 BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(ID: BuiltinID);
1249 return BuiltinID >= AArch64::FirstSVEBuiltin &&
1250 BuiltinID <= AArch64::LastSVEBuiltin;
1251}
1252
1253bool SemaARM::SmeAliasValid(unsigned BuiltinID, StringRef AliasName) {
1254 if (getASTContext().BuiltinInfo.isAuxBuiltinID(ID: BuiltinID))
1255 BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(ID: BuiltinID);
1256 return BuiltinID >= AArch64::FirstSMEBuiltin &&
1257 BuiltinID <= AArch64::LastSMEBuiltin;
1258}
1259
/// Process the __clang_arm_builtin_alias attribute: validate that the named
/// builtin may be aliased by this function (SVE/SME on AArch64, MVE/CDE on
/// 32-bit ARM) and attach the attribute on success.
void SemaARM::handleBuiltinAliasAttr(Decl *D, const ParsedAttr &AL) {
  ASTContext &Context = getASTContext();
  // The attribute's single argument must be the builtin's identifier.
  if (!AL.isArgIdent(Arg: 0)) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_argument_n_type)
        << AL << 1 << AANT_ArgumentIdentifier;
    return;
  }

  IdentifierInfo *Ident = AL.getArgAsIdent(Arg: 0)->getIdentifierInfo();
  unsigned BuiltinID = Ident->getBuiltinID();
  StringRef AliasName = cast<FunctionDecl>(Val: D)->getIdentifier()->getName();

  // Which builtin families are aliasable depends on the target architecture.
  bool IsAArch64 = Context.getTargetInfo().getTriple().isAArch64();
  if ((IsAArch64 && !SveAliasValid(BuiltinID, AliasName) &&
       !SmeAliasValid(BuiltinID, AliasName)) ||
      (!IsAArch64 && !MveAliasValid(BuiltinID, AliasName) &&
       !CdeAliasValid(BuiltinID, AliasName))) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_arm_builtin_alias);
    return;
  }

  D->addAttr(A: ::new (Context) ArmBuiltinAliasAttr(Context, AL, Ident));
}
1283
1284static bool checkNewAttrMutualExclusion(
1285 Sema &S, const ParsedAttr &AL, const FunctionProtoType *FPT,
1286 FunctionType::ArmStateValue CurrentState, StringRef StateName) {
1287 auto CheckForIncompatibleAttr =
1288 [&](FunctionType::ArmStateValue IncompatibleState,
1289 StringRef IncompatibleStateName) {
1290 if (CurrentState == IncompatibleState) {
1291 S.Diag(Loc: AL.getLoc(), DiagID: diag::err_attributes_are_not_compatible)
1292 << (std::string("'__arm_new(\"") + StateName.str() + "\")'")
1293 << (std::string("'") + IncompatibleStateName.str() + "(\"" +
1294 StateName.str() + "\")'")
1295 << true;
1296 AL.setInvalid();
1297 }
1298 };
1299
1300 CheckForIncompatibleAttr(FunctionType::ARM_In, "__arm_in");
1301 CheckForIncompatibleAttr(FunctionType::ARM_Out, "__arm_out");
1302 CheckForIncompatibleAttr(FunctionType::ARM_InOut, "__arm_inout");
1303 CheckForIncompatibleAttr(FunctionType::ARM_Preserves, "__arm_preserves");
1304 return AL.isInvalid();
1305}
1306
/// Process the __arm_new("za"/"zt0", ...) attribute: validate its string
/// arguments, merge them with any previously attached ArmNewAttr (without
/// duplicates), and reject combinations that conflict with in/out/inout/
/// preserves state on the function's type.
void SemaARM::handleNewAttr(Decl *D, const ParsedAttr &AL) {
  // __arm_new requires at least one state name.
  if (!AL.getNumArgs()) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_missing_arm_state) << AL;
    AL.setInvalid();
    return;
  }

  // Start from the states of any attribute already on the declaration so the
  // replacement attribute accumulates rather than overwrites.
  std::vector<StringRef> NewState;
  if (const auto *ExistingAttr = D->getAttr<ArmNewAttr>()) {
    for (StringRef S : ExistingAttr->newArgs())
      NewState.push_back(x: S);
  }

  bool HasZA = false;
  bool HasZT0 = false;
  for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
    StringRef StateName;
    SourceLocation LiteralLoc;
    if (!SemaRef.checkStringLiteralArgumentAttr(Attr: AL, ArgNum: I, Str&: StateName, ArgLocation: &LiteralLoc))
      return;

    // Only "za" and "zt0" are recognized state names.
    if (StateName == "za")
      HasZA = true;
    else if (StateName == "zt0")
      HasZT0 = true;
    else {
      Diag(Loc: LiteralLoc, DiagID: diag::err_unknown_arm_state) << StateName;
      AL.setInvalid();
      return;
    }

    if (!llvm::is_contained(Range&: NewState, Element: StateName)) // Avoid adding duplicates.
      NewState.push_back(x: StateName);
  }

  // __arm_new("s") conflicts with __arm_in/out/inout/preserves("s") on the
  // function's own type.
  if (auto *FPT = dyn_cast<FunctionProtoType>(Val: D->getFunctionType())) {
    FunctionType::ArmStateValue ZAState =
        FunctionType::getArmZAState(AttrBits: FPT->getAArch64SMEAttributes());
    if (HasZA && ZAState != FunctionType::ARM_None &&
        checkNewAttrMutualExclusion(S&: SemaRef, AL, FPT, CurrentState: ZAState, StateName: "za"))
      return;
    FunctionType::ArmStateValue ZT0State =
        FunctionType::getArmZT0State(AttrBits: FPT->getAArch64SMEAttributes());
    if (HasZT0 && ZT0State != FunctionType::ARM_None &&
        checkNewAttrMutualExclusion(S&: SemaRef, AL, FPT, CurrentState: ZT0State, StateName: "zt0"))
      return;
  }

  // Replace any prior attribute with one holding the merged state list.
  D->dropAttr<ArmNewAttr>();
  D->addAttr(A: ::new (getASTContext()) ArmNewAttr(
      getASTContext(), AL, NewState.data(), NewState.size()));
}
1359
1360void SemaARM::handleCmseNSEntryAttr(Decl *D, const ParsedAttr &AL) {
1361 if (getLangOpts().CPlusPlus && !D->getDeclContext()->isExternCContext()) {
1362 Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_not_clinkage) << AL;
1363 return;
1364 }
1365
1366 const auto *FD = cast<FunctionDecl>(Val: D);
1367 if (!FD->isExternallyVisible()) {
1368 Diag(Loc: AL.getLoc(), DiagID: diag::warn_attribute_cmse_entry_static);
1369 return;
1370 }
1371
1372 D->addAttr(A: ::new (getASTContext()) CmseNSEntryAttr(getASTContext(), AL));
1373}
1374
/// Process the ARM interrupt attribute: validate the optional interrupt-kind
/// string ("IRQ", "FIQ", etc.; empty means generic) and attach the attribute.
/// Warns when a VFP-enabled target lacks the save_fp variant, since the
/// handler could then clobber floating point registers.
void SemaARM::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
  // Check the attribute arguments.
  if (AL.getNumArgs() > 1) {
    Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_too_many_arguments) << AL << 1;
    return;
  }

  StringRef Str;
  SourceLocation ArgLoc;

  // No argument means the default (unspecified) interrupt kind.
  if (AL.getNumArgs() == 0)
    Str = "";
  else if (!SemaRef.checkStringLiteralArgumentAttr(Attr: AL, ArgNum: 0, Str, ArgLocation: &ArgLoc))
    return;

  ARMInterruptAttr::InterruptType Kind;
  if (!ARMInterruptAttr::ConvertStrToInterruptType(Val: Str, Out&: Kind)) {
    Diag(Loc: AL.getLoc(), DiagID: diag::warn_attribute_type_not_supported)
        << AL << Str << ArgLoc;
    return;
  }

  // Without ARMSaveFPAttr (added by the save_fp spelling before this runs),
  // an interrupt handler on a VFP target may clobber FP registers.
  if (!D->hasAttr<ARMSaveFPAttr>()) {
    const TargetInfo &TI = getASTContext().getTargetInfo();
    if (TI.hasFeature(Feature: "vfp"))
      Diag(Loc: D->getLocation(), DiagID: diag::warn_arm_interrupt_vfp_clobber);
  }

  D->addAttr(A: ::new (getASTContext())
                 ARMInterruptAttr(getASTContext(), AL, Kind));
}
1406
1407void SemaARM::handleInterruptSaveFPAttr(Decl *D, const ParsedAttr &AL) {
1408 // Go ahead and add ARMSaveFPAttr because handleInterruptAttr() checks for
1409 // it when deciding to issue a diagnostic about clobbering floating point
1410 // registers, which ARMSaveFPAttr prevents.
1411 D->addAttr(A: ::new (SemaRef.Context) ARMSaveFPAttr(SemaRef.Context, AL));
1412 SemaRef.ARM().handleInterruptAttr(D, AL);
1413
1414 // If ARM().handleInterruptAttr() failed, remove ARMSaveFPAttr.
1415 if (!D->hasAttr<ARMInterruptAttr>()) {
1416 D->dropAttr<ARMSaveFPAttr>();
1417 return;
1418 }
1419
1420 // If VFP not enabled, remove ARMSaveFPAttr but leave ARMInterruptAttr.
1421 bool VFP = SemaRef.Context.getTargetInfo().hasFeature(Feature: "vfp");
1422
1423 if (!VFP) {
1424 SemaRef.Diag(Loc: D->getLocation(), DiagID: diag::warn_arm_interrupt_save_fp_without_vfp_unit);
1425 D->dropAttr<ARMSaveFPAttr>();
1426 }
1427}
1428
1429// Check if the function definition uses any AArch64 SME features without
1430// having the '+sme' feature enabled and warn user if sme locally streaming
1431// function returns or uses arguments with VL-based types.
1432void SemaARM::CheckSMEFunctionDefAttributes(const FunctionDecl *FD) {
1433 const auto *Attr = FD->getAttr<ArmNewAttr>();
1434 bool UsesSM = FD->hasAttr<ArmLocallyStreamingAttr>();
1435 bool UsesZA = Attr && Attr->isNewZA();
1436 bool UsesZT0 = Attr && Attr->isNewZT0();
1437
1438 if (UsesZA || UsesZT0) {
1439 if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) {
1440 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
1441 if (EPI.AArch64SMEAttributes & FunctionType::SME_AgnosticZAStateMask)
1442 Diag(Loc: FD->getLocation(), DiagID: diag::err_sme_unsupported_agnostic_new);
1443 }
1444 }
1445
1446 if (FD->hasAttr<ArmLocallyStreamingAttr>()) {
1447 if (FD->getReturnType()->isSizelessVectorType())
1448 Diag(Loc: FD->getLocation(),
1449 DiagID: diag::warn_sme_locally_streaming_has_vl_args_returns)
1450 << /*IsArg=*/false;
1451 if (llvm::any_of(Range: FD->parameters(), P: [](ParmVarDecl *P) {
1452 return P->getOriginalType()->isSizelessVectorType();
1453 }))
1454 Diag(Loc: FD->getLocation(),
1455 DiagID: diag::warn_sme_locally_streaming_has_vl_args_returns)
1456 << /*IsArg=*/true;
1457 }
1458 if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) {
1459 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
1460 UsesSM |= EPI.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask;
1461 UsesZA |= FunctionType::getArmZAState(AttrBits: EPI.AArch64SMEAttributes) !=
1462 FunctionType::ARM_None;
1463 UsesZT0 |= FunctionType::getArmZT0State(AttrBits: EPI.AArch64SMEAttributes) !=
1464 FunctionType::ARM_None;
1465 }
1466
1467 ASTContext &Context = getASTContext();
1468 if (UsesSM || UsesZA) {
1469 llvm::StringMap<bool> FeatureMap;
1470 Context.getFunctionFeatureMap(FeatureMap, FD);
1471 if (!FeatureMap.contains(Key: "sme")) {
1472 if (UsesSM)
1473 Diag(Loc: FD->getLocation(),
1474 DiagID: diag::err_sme_definition_using_sm_in_non_sme_target);
1475 else
1476 Diag(Loc: FD->getLocation(),
1477 DiagID: diag::err_sme_definition_using_za_in_non_sme_target);
1478 }
1479 }
1480 if (UsesZT0) {
1481 llvm::StringMap<bool> FeatureMap;
1482 Context.getFunctionFeatureMap(FeatureMap, FD);
1483 if (!FeatureMap.contains(Key: "sme2")) {
1484 Diag(Loc: FD->getLocation(),
1485 DiagID: diag::err_sme_definition_using_zt0_in_non_sme2_target);
1486 }
1487 }
1488}
1489
/// getSVETypeSize - Return SVE vector or predicate register size.
///
/// The size is derived from the vscale minimum for the current mode
/// (streaming vs non-streaming). Data vectors are VScale*128 bits; predicates
/// and svcount are an eighth of that (VScale*128 divided by the char width),
/// matching their one-bit-per-byte representation.
static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty,
                               bool IsStreaming) {
  assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type");
  // Streaming functions may have a different vector length (vscale) than
  // non-streaming ones.
  uint64_t VScale = IsStreaming ? Context.getLangOpts().VScaleStreamingMin
                                : Context.getLangOpts().VScaleMin;
  if (Ty->getKind() == BuiltinType::SveBool ||
      Ty->getKind() == BuiltinType::SveCount)
    return (VScale * 128) / Context.getCharWidth();
  return VScale * 128;
}
1501
/// Return true if one of the given types is an SVE scalable builtin type and
/// the other is a fixed-length vector type compatible with it (in either
/// direction), per the ACLE fixed-length SVE rules.
bool SemaARM::areCompatibleSveTypes(QualType FirstType, QualType SecondType) {
  bool IsStreaming = false;
  // Only bother working out the streaming mode when streaming and
  // non-streaming vscale settings actually differ.
  if (getLangOpts().VScaleMin != getLangOpts().VScaleStreamingMin ||
      getLangOpts().VScaleMax != getLangOpts().VScaleStreamingMax) {
    if (const FunctionDecl *FD =
            SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
      // For streaming-compatible functions, we don't know vector length.
      if (const auto *T = FD->getType()->getAs<FunctionProtoType>()) {
        if (T->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask)
          return false;
      }

      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
        IsStreaming = true;
    }
  }

  // One-directional check; called below with the arguments in both orders.
  auto IsValidCast = [&](QualType FirstType, QualType SecondType) {
    if (const auto *BT = FirstType->getAs<BuiltinType>()) {
      if (const auto *VT = SecondType->getAs<VectorType>()) {
        // Predicates have the same representation as uint8 so we also have to
        // check the kind to make these types incompatible.
        ASTContext &Context = getASTContext();
        if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
          return BT->getKind() == BuiltinType::SveBool;
        else if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
          return VT->getElementType().getCanonicalType() ==
                 FirstType->getSveEltType(Ctx: Context);
        else if (VT->getVectorKind() == VectorKind::Generic)
          // A GNU vector is compatible when both its total size and its
          // element type match the scalable type's.
          return Context.getTypeSize(T: SecondType) ==
                     getSVETypeSize(Context, Ty: BT, IsStreaming) &&
                 Context.hasSameType(
                     T1: VT->getElementType(),
                     T2: Context.getBuiltinVectorTypeInfo(VecTy: BT).ElementType);
      }
    }
    return false;
  };

  return IsValidCast(FirstType, SecondType) ||
         IsValidCast(SecondType, FirstType);
}
1545
/// Return true if the two types are convertible under the lax vector
/// conversion rules (-flax-vector-conversions), where one is an SVE scalable
/// builtin type and the other a fixed-length vector type, in either order.
bool SemaARM::areLaxCompatibleSveTypes(QualType FirstType,
                                       QualType SecondType) {
  bool IsStreaming = false;
  // Only bother working out the streaming mode when streaming and
  // non-streaming vscale settings actually differ.
  if (getLangOpts().VScaleMin != getLangOpts().VScaleStreamingMin ||
      getLangOpts().VScaleMax != getLangOpts().VScaleStreamingMax) {
    if (const FunctionDecl *FD =
            SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
      // For streaming-compatible functions, we don't know vector length.
      if (const auto *T = FD->getType()->getAs<FunctionProtoType>())
        if (T->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask)
          return false;

      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
        IsStreaming = true;
    }
  }

  // One-directional check; called below with the arguments in both orders.
  auto IsLaxCompatible = [&](QualType FirstType, QualType SecondType) {
    const auto *BT = FirstType->getAs<BuiltinType>();
    if (!BT)
      return false;

    const auto *VecTy = SecondType->getAs<VectorType>();
    if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData ||
                  VecTy->getVectorKind() == VectorKind::Generic)) {
      const LangOptions::LaxVectorConversionKind LVCKind =
          getLangOpts().getLaxVectorConversions();
      ASTContext &Context = getASTContext();

      // Can not convert between sve predicates and sve vectors because of
      // different size.
      if (BT->getKind() == BuiltinType::SveBool &&
          VecTy->getVectorKind() == VectorKind::SveFixedLengthData)
        return false;

      // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
      // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
      // converts to VLAT and VLAT implicitly converts to GNUT."
      // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
      // predicates.
      if (VecTy->getVectorKind() == VectorKind::Generic &&
          Context.getTypeSize(T: SecondType) !=
              getSVETypeSize(Context, Ty: BT, IsStreaming))
        return false;

      // If -flax-vector-conversions=all is specified, the types are
      // certainly compatible.
      if (LVCKind == LangOptions::LaxVectorConversionKind::All)
        return true;

      // If -flax-vector-conversions=integer is specified, the types are
      // compatible if the elements are integer types.
      if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
        return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
               FirstType->getSveEltType(Ctx: Context)->isIntegerType();
    }

    return false;
  };

  return IsLaxCompatible(FirstType, SecondType) ||
         IsLaxCompatible(SecondType, FirstType);
}
1610
1611static void appendFeature(StringRef Feat, SmallString<64> &Buffer) {
1612 if (!Buffer.empty())
1613 Buffer.append(RHS: "+");
1614 Buffer.append(RHS: Feat);
1615}
1616
1617static void convertPriorityString(unsigned Priority,
1618 SmallString<64> &NewParam) {
1619 StringRef PriorityString[8] = {"P0", "P1", "P2", "P3",
1620 "P4", "P5", "P6", "P7"};
1621
1622 assert(Priority > 0 && Priority < 256 && "priority out of range");
1623 // Convert priority=[1-255] -> P0 + ... + P7
1624 for (unsigned BitPos = 0; BitPos < 8; ++BitPos)
1625 if (Priority & (1U << BitPos))
1626 appendFeature(Feat: PriorityString[BitPos], Buffer&: NewParam);
1627}
1628
/// Validate one target_version attribute string of the form
/// "feat1+feat2[;priority=N]" and rebuild it into \p NewParam as a normalized
/// '+'-separated feature list, with the priority encoded as P0..P7 pseudo-
/// features. Returns true on error (a warning diagnostic was emitted).
bool SemaARM::checkTargetVersionAttr(const StringRef Param,
                                     const SourceLocation Loc,
                                     SmallString<64> &NewParam) {
  using namespace DiagAttrParams;

  // Split "features;priority=N" into the feature list and the trailer.
  auto [LHS, RHS] = Param.split(Separator: ';');
  RHS = RHS.trim();
  bool IsDefault = false;
  llvm::SmallVector<StringRef, 8> Features;
  LHS.split(A&: Features, Separator: '+');
  for (StringRef Feat : Features) {
    Feat = Feat.trim();
    if (Feat == "default")
      IsDefault = true;
    else if (!getASTContext().getTargetInfo().validateCpuSupports(Name: Feat))
      // Not a feature the target can detect at runtime.
      return Diag(Loc, DiagID: diag::warn_unsupported_target_attribute)
             << Unsupported << None << Feat << TargetVersion;
    appendFeature(Feat, Buffer&: NewParam);
  }

  // The optional priority suffix is meaningless on the default version.
  if (!RHS.empty() && RHS.consume_front(Prefix: "priority=")) {
    if (IsDefault)
      Diag(Loc, DiagID: diag::warn_invalid_default_version_priority);
    else {
      unsigned Digit;
      if (RHS.getAsInteger(Radix: 0, Result&: Digit) || Digit < 1 || Digit > 255)
        Diag(Loc, DiagID: diag::warn_version_priority_out_of_range) << RHS;
      else
        // Valid priority: fold it into the feature string as P0..P7 bits.
        convertPriorityString(Priority: Digit, NewParam);
    }
  }
  return false;
}
1662
/// Validate the string arguments of a target_clones attribute and build the
/// canonicalized parameter list in \p NewParams.
///
/// Each entry of \p Params has the shape "feat1+feat2[;priority=N]" or
/// "default". Features are validated against the target, sorted, and
/// de-duplicated; a valid priority (1-255) is appended as "Pi"
/// pseudo-features (see convertPriorityString).
///
/// \returns true when the attribute must be dropped entirely: the target
/// lacks FMV support, a clone spec is empty, or no valid non-default clone
/// remains. Per-entry problems (unknown feature, duplicate, bad priority)
/// are warnings that merely skip or trim the offending entry.
bool SemaARM::checkTargetClonesAttr(
    SmallVectorImpl<StringRef> &Params, SmallVectorImpl<SourceLocation> &Locs,
    SmallVectorImpl<SmallString<64>> &NewParams) {
  using namespace DiagAttrParams;

  // Without function multi-versioning support the attribute is meaningless.
  if (!getASTContext().getTargetInfo().hasFeature(Feature: "fmv"))
    return true;

  assert(Params.size() == Locs.size() &&
         "Mismatch between number of string parameters and locations");

  bool HasDefault = false;
  bool HasNonDefault = false;
  for (unsigned I = 0, E = Params.size(); I < E; ++I) {
    const StringRef Param = Params[I].trim();
    const SourceLocation &Loc = Locs[I];

    // Split off an optional trailing ";priority=N" clause.
    auto [LHS, RHS] = Param.split(Separator: ';');
    RHS = RHS.trim();
    bool HasPriority = !RHS.empty() && RHS.consume_front(Prefix: "priority=");

    // An empty clone specification invalidates the whole attribute.
    if (LHS.empty())
      return Diag(Loc, DiagID: diag::warn_unsupported_target_attribute)
             << Unsupported << None << "" << TargetClones;

    if (LHS == "default") {
      // Keep only the first "default"; it may not carry a priority.
      if (HasDefault)
        Diag(Loc, DiagID: diag::warn_target_clone_duplicate_options);
      else {
        if (HasPriority)
          Diag(Loc, DiagID: diag::warn_invalid_default_version_priority);
        NewParams.push_back(Elt: LHS);
        HasDefault = true;
      }
      continue;
    }

    // Validate each '+'-separated feature; remember whether any of them
    // actually affects code generation.
    bool HasCodeGenImpact = false;
    llvm::SmallVector<StringRef, 8> Features;
    llvm::SmallVector<StringRef, 8> ValidFeatures;
    LHS.split(A&: Features, Separator: '+');
    for (StringRef Feat : Features) {
      Feat = Feat.trim();
      if (!getASTContext().getTargetInfo().validateCpuSupports(Name: Feat)) {
        // Unknown feature: warn and drop it, but keep the rest of the entry.
        Diag(Loc, DiagID: diag::warn_unsupported_target_attribute)
            << Unsupported << None << Feat << TargetClones;
        continue;
      }
      if (getASTContext().getTargetInfo().doesFeatureAffectCodeGen(Feature: Feat))
        HasCodeGenImpact = true;
      ValidFeatures.push_back(Elt: Feat);
    }

    // Ignore features that don't impact code generation.
    if (!HasCodeGenImpact) {
      Diag(Loc, DiagID: diag::warn_target_clone_no_impact_options);
      continue;
    }

    if (ValidFeatures.empty())
      continue;

    // Canonicalize attribute parameter.
    // Sorting makes equivalent feature sets compare equal for the
    // duplicate check below.
    llvm::sort(C&: ValidFeatures);
    SmallString<64> NewParam(llvm::join(R&: ValidFeatures, Separator: "+"));
    if (llvm::is_contained(Range&: NewParams, Element: NewParam)) {
      Diag(Loc, DiagID: diag::warn_target_clone_duplicate_options);
      continue;
    }

    if (HasPriority) {
      unsigned Digit;
      if (RHS.getAsInteger(Radix: 0, Result&: Digit) || Digit < 1 || Digit > 255)
        Diag(Loc, DiagID: diag::warn_version_priority_out_of_range) << RHS;
      else
        convertPriorityString(Priority: Digit, NewParam);
    }

    // Valid non-default argument.
    NewParams.push_back(Elt: NewParam);
    HasNonDefault = true;
  }
  // At least one usable non-default clone is required for the attribute to
  // make sense.
  if (!HasNonDefault)
    return true;

  return false;
}
1750
1751bool SemaARM::checkSVETypeSupport(QualType Ty, SourceLocation Loc,
1752 const FunctionDecl *FD,
1753 const llvm::StringMap<bool> &FeatureMap) {
1754 if (!Ty->isSVESizelessBuiltinType())
1755 return false;
1756
1757 if (FeatureMap.lookup(Key: "sve"))
1758 return false;
1759
1760 // No SVE environment available.
1761 if (!FeatureMap.lookup(Key: "sme"))
1762 return Diag(Loc, DiagID: diag::err_sve_vector_in_non_sve_target) << Ty;
1763
1764 // SVE environment only available to streaming functions.
1765 if (FD && !FD->getType().isNull() &&
1766 !IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
1767 return Diag(Loc, DiagID: diag::err_sve_vector_in_non_streaming_function) << Ty;
1768
1769 return false;
1770}
1771} // namespace clang
1772