//===------ SemaARM.cpp ---------- ARM target-specific routines -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis functions specific to ARM.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/SemaARM.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Sema.h"

namespace clang {

SemaARM::SemaARM(Sema &S) : SemaBase(S) {}

25/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
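/// (MTE) builtins: __builtin_arm_irg, __builtin_arm_addg, __builtin_arm_gmi,
/// __builtin_arm_ldg, __builtin_arm_stg and __builtin_arm_subp. For example
/// (illustrative usage), `__builtin_arm_irg(p, m)` takes a pointer `p` and an
/// integer `m` and yields a result with the same pointer type as `p`, as
/// encoded by the per-builtin checks below.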
26bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID,
27 CallExpr *TheCall) {
28 ASTContext &Context = getASTContext();
29
30 if (BuiltinID == AArch64::BI__builtin_arm_irg) {
31 if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
32 return true;
33 Expr *Arg0 = TheCall->getArg(Arg: 0);
34 Expr *Arg1 = TheCall->getArg(Arg: 1);
35
36 ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
37 if (FirstArg.isInvalid())
38 return true;
39 QualType FirstArgType = FirstArg.get()->getType();
40 if (!FirstArgType->isAnyPointerType())
41 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
42 << "first" << FirstArgType << Arg0->getSourceRange();
43 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
44
45 ExprResult SecArg = SemaRef.DefaultLvalueConversion(E: Arg1);
46 if (SecArg.isInvalid())
47 return true;
48 QualType SecArgType = SecArg.get()->getType();
49 if (!SecArgType->isIntegerType())
50 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_integer)
51 << "second" << SecArgType << Arg1->getSourceRange();
52
53 // Derive the return type from the pointer argument.
54 TheCall->setType(FirstArgType);
55 return false;
56 }
57
58 if (BuiltinID == AArch64::BI__builtin_arm_addg) {
59 if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
60 return true;
61
62 Expr *Arg0 = TheCall->getArg(Arg: 0);
63 ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
64 if (FirstArg.isInvalid())
65 return true;
66 QualType FirstArgType = FirstArg.get()->getType();
67 if (!FirstArgType->isAnyPointerType())
68 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
69 << "first" << FirstArgType << Arg0->getSourceRange();
70 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
71
72 // Derive the return type from the pointer argument.
73 TheCall->setType(FirstArgType);
74
    // The second argument must be a constant in the range [0, 15].
76 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
77 }
78
79 if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
80 if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 2))
81 return true;
82 Expr *Arg0 = TheCall->getArg(Arg: 0);
83 Expr *Arg1 = TheCall->getArg(Arg: 1);
84
85 ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
86 if (FirstArg.isInvalid())
87 return true;
88 QualType FirstArgType = FirstArg.get()->getType();
89 if (!FirstArgType->isAnyPointerType())
90 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
91 << "first" << FirstArgType << Arg0->getSourceRange();
92
93 QualType SecArgType = Arg1->getType();
94 if (!SecArgType->isIntegerType())
95 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_integer)
96 << "second" << SecArgType << Arg1->getSourceRange();
97 TheCall->setType(Context.IntTy);
98 return false;
99 }
100
101 if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
102 BuiltinID == AArch64::BI__builtin_arm_stg) {
103 if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: 1))
104 return true;
105 Expr *Arg0 = TheCall->getArg(Arg: 0);
106 ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg0);
107 if (FirstArg.isInvalid())
108 return true;
109
110 QualType FirstArgType = FirstArg.get()->getType();
111 if (!FirstArgType->isAnyPointerType())
112 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_must_be_pointer)
113 << "first" << FirstArgType << Arg0->getSourceRange();
114 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
115
116 // Derive the return type from the pointer argument.
117 if (BuiltinID == AArch64::BI__builtin_arm_ldg)
118 TheCall->setType(FirstArgType);
119 return false;
120 }
121
122 if (BuiltinID == AArch64::BI__builtin_arm_subp) {
123 Expr *ArgA = TheCall->getArg(Arg: 0);
124 Expr *ArgB = TheCall->getArg(Arg: 1);
125
126 ExprResult ArgExprA = SemaRef.DefaultFunctionArrayLvalueConversion(E: ArgA);
127 ExprResult ArgExprB = SemaRef.DefaultFunctionArrayLvalueConversion(E: ArgB);
128
129 if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
130 return true;
131
132 QualType ArgTypeA = ArgExprA.get()->getType();
133 QualType ArgTypeB = ArgExprB.get()->getType();
134
135 auto isNull = [&](Expr *E) -> bool {
136 return E->isNullPointerConstant(Ctx&: Context,
137 NPC: Expr::NPC_ValueDependentIsNotNull);
138 };
139
    // Each argument must be either a pointer or a null pointer constant.
141 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
142 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_null_or_pointer)
143 << "first" << ArgTypeA << ArgA->getSourceRange();
144
145 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
146 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_arg_null_or_pointer)
147 << "second" << ArgTypeB << ArgB->getSourceRange();
148
    // Ensure the pointee types are compatible.
150 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
151 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
152 QualType pointeeA = ArgTypeA->getPointeeType();
153 QualType pointeeB = ArgTypeB->getPointeeType();
154 if (!Context.typesAreCompatible(
155 T1: Context.getCanonicalType(T: pointeeA).getUnqualifiedType(),
156 T2: Context.getCanonicalType(T: pointeeB).getUnqualifiedType())) {
157 return Diag(Loc: TheCall->getBeginLoc(),
158 DiagID: diag::err_typecheck_sub_ptr_compatible)
159 << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
160 << ArgB->getSourceRange();
161 }
162 }
163
    // At least one of the arguments must be of pointer type.
165 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
166 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_memtag_any2arg_pointer)
167 << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
168
169 if (isNull(ArgA)) // adopt type of the other pointer
170 ArgExprA =
171 SemaRef.ImpCastExprToType(E: ArgExprA.get(), Type: ArgTypeB, CK: CK_NullToPointer);
172
173 if (isNull(ArgB))
174 ArgExprB =
175 SemaRef.ImpCastExprToType(E: ArgExprB.get(), Type: ArgTypeA, CK: CK_NullToPointer);
176
177 TheCall->setArg(Arg: 0, ArgExpr: ArgExprA.get());
178 TheCall->setArg(Arg: 1, ArgExpr: ArgExprB.get());
179 TheCall->setType(Context.LongLongTy);
180 return false;
181 }
182 assert(false && "Unhandled ARM MTE intrinsic");
183 return true;
184}
185
186/// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
187/// TheCall is an ARM/AArch64 special register string literal.
188bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
189 int ArgNum, unsigned ExpectedFieldNum,
190 bool AllowName) {
191 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
192 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
193 BuiltinID == ARM::BI__builtin_arm_rsr ||
194 BuiltinID == ARM::BI__builtin_arm_rsrp ||
195 BuiltinID == ARM::BI__builtin_arm_wsr ||
196 BuiltinID == ARM::BI__builtin_arm_wsrp;
197 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
198 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
199 BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
200 BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
201 BuiltinID == AArch64::BI__builtin_arm_rsr ||
202 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
203 BuiltinID == AArch64::BI__builtin_arm_wsr ||
204 BuiltinID == AArch64::BI__builtin_arm_wsrp;
205 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
206
207 // We can't check the value of a dependent argument.
208 Expr *Arg = TheCall->getArg(Arg: ArgNum);
209 if (Arg->isTypeDependent() || Arg->isValueDependent())
210 return false;
211
212 // Check if the argument is a string literal.
213 if (!isa<StringLiteral>(Val: Arg->IgnoreParenImpCasts()))
214 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_expr_not_string_literal)
215 << Arg->getSourceRange();
216
217 // Check the type of special register given.
218 StringRef Reg = cast<StringLiteral>(Val: Arg->IgnoreParenImpCasts())->getString();
219 SmallVector<StringRef, 6> Fields;
220 Reg.split(A&: Fields, Separator: ":");
221
222 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
223 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_arm_invalid_specialreg)
224 << Arg->getSourceRange();
225
  // If the string is the name of a register we cannot check its validity here,
  // but if it is in one of the forms described in the ACLE we can check that
  // the supplied fields are integers and lie within the valid ranges.
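  //
  // For example (illustrative): an AArch64 register may be spelled as five
  // colon-separated numbers such as "1:2:3:4:5" (op0:op1:CRn:CRm:op2), the ARM
  // coprocessor form uses prefixed fields such as "cp15:1:c2:c3:4" (the
  // "cp"/"p" and "c" prefixes are stripped below), and either target may
  // instead name the register directly when AllowName is true.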
230 if (Fields.size() > 1) {
231 bool FiveFields = Fields.size() == 5;
232
233 bool ValidString = true;
234 if (IsARMBuiltin) {
235 ValidString &= Fields[0].starts_with_insensitive(Prefix: "cp") ||
236 Fields[0].starts_with_insensitive(Prefix: "p");
237 if (ValidString)
238 Fields[0] = Fields[0].drop_front(
239 N: Fields[0].starts_with_insensitive(Prefix: "cp") ? 2 : 1);
240
241 ValidString &= Fields[2].starts_with_insensitive(Prefix: "c");
242 if (ValidString)
243 Fields[2] = Fields[2].drop_front(N: 1);
244
245 if (FiveFields) {
246 ValidString &= Fields[3].starts_with_insensitive(Prefix: "c");
247 if (ValidString)
248 Fields[3] = Fields[3].drop_front(N: 1);
249 }
250 }
251
252 SmallVector<int, 5> FieldBitWidths;
253 if (FiveFields)
254 FieldBitWidths.append(IL: {IsAArch64Builtin ? 2 : 4, 3, 4, 4, 3});
255 else
256 FieldBitWidths.append(IL: {4, 3, 4});
257
258 for (unsigned i = 0; i < Fields.size(); ++i) {
259 int IntField;
260 ValidString &= !Fields[i].getAsInteger(Radix: 10, Result&: IntField);
261 ValidString &= (IntField >= 0 && IntField < (1 << FieldBitWidths[i]));
262 }
263
264 if (!ValidString)
265 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_arm_invalid_specialreg)
266 << Arg->getSourceRange();
267 } else if (IsAArch64Builtin && Fields.size() == 1) {
268 // This code validates writes to PSTATE registers.
269
270 // Not a write.
271 if (TheCall->getNumArgs() != 2)
272 return false;
273
274 // The 128-bit system register accesses do not touch PSTATE.
275 if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
276 BuiltinID == AArch64::BI__builtin_arm_wsr128)
277 return false;
278
279 // These are the named PSTATE accesses using "MSR (immediate)" instructions,
280 // along with the upper limit on the immediates allowed.
281 auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
282 .CaseLower(S: "spsel", Value: 15)
283 .CaseLower(S: "daifclr", Value: 15)
284 .CaseLower(S: "daifset", Value: 15)
285 .CaseLower(S: "pan", Value: 15)
286 .CaseLower(S: "uao", Value: 15)
287 .CaseLower(S: "dit", Value: 15)
288 .CaseLower(S: "ssbs", Value: 15)
289 .CaseLower(S: "tco", Value: 15)
290 .CaseLower(S: "allint", Value: 1)
291 .CaseLower(S: "pm", Value: 1)
292 .Default(Value: std::nullopt);
293
294 // If this is not a named PSTATE, just continue without validating, as this
295 // will be lowered to an "MSR (register)" instruction directly
296 if (!MaxLimit)
297 return false;
298
299 // Here we only allow constants in the range for that pstate, as required by
300 // the ACLE.
301 //
    // While clang also accepts the names of system registers in its ACLE
    // intrinsics, we disallow that for the PSTATE names used with MSR
    // (immediate), because the value written via a register differs from the
    // value used as an immediate to achieve the same effect. E.g., for the
    // instruction `msr tco, x0`, it is bit 25 of register x0 that is written
    // into PSTATE.TCO, but with `msr tco, #imm`, it is bit 0 of the immediate
    // that is written into PSTATE.TCO.
308 //
309 // If a programmer wants to codegen the MSR (register) form of `msr tco,
310 // xN`, they can still do so by specifying the register using five
311 // colon-separated numbers in a string.
312 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: *MaxLimit);
313 }
314
315 return false;
316}
317
318/// getNeonEltType - Return the QualType corresponding to the elements of
319/// the vector type specified by the NeonTypeFlags. This is used to check
320/// the pointer arguments for Neon load/store intrinsics.
321static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
322 bool IsPolyUnsigned, bool IsInt64Long) {
323 switch (Flags.getEltType()) {
324 case NeonTypeFlags::Int8:
325 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
326 case NeonTypeFlags::Int16:
327 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
328 case NeonTypeFlags::Int32:
329 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
330 case NeonTypeFlags::Int64:
331 if (IsInt64Long)
332 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
333 else
334 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
335 : Context.LongLongTy;
336 case NeonTypeFlags::Poly8:
337 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
338 case NeonTypeFlags::Poly16:
339 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
340 case NeonTypeFlags::Poly64:
341 if (IsInt64Long)
342 return Context.UnsignedLongTy;
343 else
344 return Context.UnsignedLongLongTy;
345 case NeonTypeFlags::Poly128:
346 break;
347 case NeonTypeFlags::Float16:
348 return Context.HalfTy;
349 case NeonTypeFlags::Float32:
350 return Context.FloatTy;
351 case NeonTypeFlags::Float64:
352 return Context.DoubleTy;
353 case NeonTypeFlags::BFloat16:
354 return Context.BFloat16Ty;
355 case NeonTypeFlags::MFloat8:
356 return Context.MFloat8Ty;
357 }
358 llvm_unreachable("Invalid NeonTypeFlag!");
359}
360
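// Encodes how an SME builtin uses ZA and ZT0 state: bits [1:0] describe ZA
// (in/out/inout) and bits [3:2] describe ZT0. CheckSMEBuiltinFunctionCall
// masks the value returned by getSMEState() with ArmZAMask / ArmZT0Mask to
// decide whether the caller is required to have ZA or ZT0 state.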
361enum ArmSMEState : unsigned {
362 ArmNoState = 0,
363
364 ArmInZA = 0b01,
365 ArmOutZA = 0b10,
366 ArmInOutZA = 0b11,
367 ArmZAMask = 0b11,
368
369 ArmInZT0 = 0b01 << 2,
370 ArmOutZT0 = 0b10 << 2,
371 ArmInOutZT0 = 0b11 << 2,
372 ArmZT0Mask = 0b11 << 2
373};
374
375bool SemaARM::CheckImmediateArg(CallExpr *TheCall, unsigned CheckTy,
376 unsigned ArgIdx, unsigned EltBitWidth,
377 unsigned ContainerBitWidth) {
378 // Function that checks whether the operand (ArgIdx) is an immediate
379 // that is one of a given set of values.
380 auto CheckImmediateInSet = [&](std::initializer_list<int64_t> Set,
381 int ErrDiag) -> bool {
382 // We can't check the value of a dependent argument.
383 Expr *Arg = TheCall->getArg(Arg: ArgIdx);
384 if (Arg->isTypeDependent() || Arg->isValueDependent())
385 return false;
386
387 // Check constant-ness first.
388 llvm::APSInt Imm;
389 if (SemaRef.BuiltinConstantArg(TheCall, ArgNum: ArgIdx, Result&: Imm))
390 return true;
391
392 if (!llvm::is_contained(Set, Element: Imm.getSExtValue()))
393 return Diag(Loc: TheCall->getBeginLoc(), DiagID: ErrDiag) << Arg->getSourceRange();
394 return false;
395 };
396
397 switch ((ImmCheckType)CheckTy) {
398 case ImmCheckType::ImmCheck0_31:
399 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 31))
400 return true;
401 break;
402 case ImmCheckType::ImmCheck0_13:
403 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 13))
404 return true;
405 break;
406 case ImmCheckType::ImmCheck0_63:
407 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 63))
408 return true;
409 break;
410 case ImmCheckType::ImmCheck1_16:
411 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 16))
412 return true;
413 break;
414 case ImmCheckType::ImmCheck0_7:
415 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 7))
416 return true;
417 break;
418 case ImmCheckType::ImmCheck1_1:
419 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 1))
420 return true;
421 break;
422 case ImmCheckType::ImmCheck1_3:
423 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 3))
424 return true;
425 break;
426 case ImmCheckType::ImmCheck1_7:
427 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 7))
428 return true;
429 break;
430 case ImmCheckType::ImmCheckExtract:
431 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0,
432 High: (2048 / EltBitWidth) - 1))
433 return true;
434 break;
435 case ImmCheckType::ImmCheckCvt:
436 case ImmCheckType::ImmCheckShiftRight:
437 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: EltBitWidth))
438 return true;
439 break;
440 case ImmCheckType::ImmCheckShiftRightNarrow:
441 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: EltBitWidth / 2))
442 return true;
443 break;
444 case ImmCheckType::ImmCheckShiftLeft:
445 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: EltBitWidth - 1))
446 return true;
447 break;
448 case ImmCheckType::ImmCheckLaneIndex:
449 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0,
450 High: (ContainerBitWidth / EltBitWidth) - 1))
451 return true;
452 break;
453 case ImmCheckType::ImmCheckLaneIndexCompRotate:
454 if (SemaRef.BuiltinConstantArgRange(
455 TheCall, ArgNum: ArgIdx, Low: 0, High: (ContainerBitWidth / (2 * EltBitWidth)) - 1))
456 return true;
457 break;
458 case ImmCheckType::ImmCheckLaneIndexDot:
459 if (SemaRef.BuiltinConstantArgRange(
460 TheCall, ArgNum: ArgIdx, Low: 0, High: (ContainerBitWidth / (4 * EltBitWidth)) - 1))
461 return true;
462 break;
463 case ImmCheckType::ImmCheckComplexRot90_270:
464 if (CheckImmediateInSet({90, 270}, diag::err_rotation_argument_to_cadd))
465 return true;
466 break;
467 case ImmCheckType::ImmCheckComplexRotAll90:
468 if (CheckImmediateInSet({0, 90, 180, 270},
469 diag::err_rotation_argument_to_cmla))
470 return true;
471 break;
472 case ImmCheckType::ImmCheck0_1:
473 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 1))
474 return true;
475 break;
476 case ImmCheckType::ImmCheck0_2:
477 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 2))
478 return true;
479 break;
480 case ImmCheckType::ImmCheck0_3:
481 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 3))
482 return true;
483 break;
484 case ImmCheckType::ImmCheck0_0:
485 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 0))
486 return true;
487 break;
488 case ImmCheckType::ImmCheck0_15:
489 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 15))
490 return true;
491 break;
492 case ImmCheckType::ImmCheck0_255:
493 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 0, High: 255))
494 return true;
495 break;
496 case ImmCheckType::ImmCheck1_32:
497 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 32))
498 return true;
499 break;
500 case ImmCheckType::ImmCheck1_64:
501 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 1, High: 64))
502 return true;
503 break;
504 case ImmCheckType::ImmCheck2_4_Mul2:
505 if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: ArgIdx, Low: 2, High: 4) ||
506 SemaRef.BuiltinConstantArgMultiple(TheCall, ArgNum: ArgIdx, Multiple: 2))
507 return true;
508 break;
509 }
510 return false;
511}
512
513bool SemaARM::PerformNeonImmChecks(
514 CallExpr *TheCall,
515 SmallVectorImpl<std::tuple<int, int, int, int>> &ImmChecks,
516 int OverloadType) {
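  // Each check tuple is (argument index, check kind, element bit width,
  // container/vector bit width). When an overload type is supplied, the
  // element width is taken from the NeonTypeFlags instead.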
517 bool HasError = false;
518
519 for (const auto &I : ImmChecks) {
520 auto [ArgIdx, CheckTy, ElementBitWidth, VecBitWidth] = I;
521
522 if (OverloadType >= 0)
523 ElementBitWidth = NeonTypeFlags(OverloadType).getEltSizeInBits();
524
525 HasError |= CheckImmediateArg(TheCall, CheckTy, ArgIdx, EltBitWidth: ElementBitWidth,
526 ContainerBitWidth: VecBitWidth);
527 }
528
529 return HasError;
530}
531
532bool SemaARM::PerformSVEImmChecks(
533 CallExpr *TheCall, SmallVectorImpl<std::tuple<int, int, int>> &ImmChecks) {
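  // SVE immediates are checked against a 128-bit container, the minimum
  // scalable vector granule.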
534 bool HasError = false;
535
536 for (const auto &I : ImmChecks) {
537 auto [ArgIdx, CheckTy, ElementBitWidth] = I;
538 HasError |=
539 CheckImmediateArg(TheCall, CheckTy, ArgIdx, EltBitWidth: ElementBitWidth, ContainerBitWidth: 128);
540 }
541
542 return HasError;
543}
544
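// Classify a function as streaming, streaming-compatible or non-streaming,
// based on the __arm_locally_streaming attribute and the SME attributes on its
// function type (i.e. __arm_streaming / __arm_streaming_compatible).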
545SemaARM::ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
546 if (FD->hasAttr<ArmLocallyStreamingAttr>())
547 return SemaARM::ArmStreaming;
548 if (const Type *Ty = FD->getType().getTypePtrOrNull()) {
549 if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
550 if (FPT->getAArch64SMEAttributes() &
551 FunctionType::SME_PStateSMEnabledMask)
552 return SemaARM::ArmStreaming;
553 if (FPT->getAArch64SMEAttributes() &
554 FunctionType::SME_PStateSMCompatibleMask)
555 return SemaARM::ArmStreamingCompatible;
556 }
557 }
558 return SemaARM::ArmNonStreaming;
559}
560
561static bool checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
562 const FunctionDecl *FD,
563 SemaARM::ArmStreamingType BuiltinType,
564 unsigned BuiltinID) {
565 SemaARM::ArmStreamingType FnType = getArmStreamingFnType(FD);
566
567 // Check if the intrinsic is available in the right mode, i.e.
568 // * When compiling for SME only, the caller must be in streaming mode.
569 // * When compiling for SVE only, the caller must be in non-streaming mode.
570 // * When compiling for both SVE and SME, the caller can be in either mode.
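  //
  // For VerifyRuntimeMode builtins, the required-features string is expected
  // to have the form "<non-streaming guard>|<streaming guard>" (for example
  // "sve2|sme2", illustrative); it is split at the top-level '|' below and
  // each half is evaluated against the caller's feature map.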
571 if (BuiltinType == SemaARM::VerifyRuntimeMode) {
572 llvm::StringMap<bool> CallerFeatures;
573 S.Context.getFunctionFeatureMap(FeatureMap&: CallerFeatures, FD);
574
575 // Avoid emitting diagnostics for a function that can never compile.
576 if (FnType == SemaARM::ArmStreaming && !CallerFeatures["sme"])
577 return false;
578
579 const auto FindTopLevelPipe = [](const char *S) {
580 unsigned Depth = 0;
581 unsigned I = 0, E = strlen(s: S);
582 for (; I < E; ++I) {
583 if (S[I] == '|' && Depth == 0)
584 break;
585 if (S[I] == '(')
586 ++Depth;
587 else if (S[I] == ')')
588 --Depth;
589 }
590 return I;
591 };
592
593 const char *RequiredFeatures =
594 S.Context.BuiltinInfo.getRequiredFeatures(ID: BuiltinID);
595 unsigned PipeIdx = FindTopLevelPipe(RequiredFeatures);
596 assert(PipeIdx != 0 && PipeIdx != strlen(RequiredFeatures) &&
597 "Expected feature string of the form 'SVE-EXPR|SME-EXPR'");
598 StringRef NonStreamingBuiltinGuard = StringRef(RequiredFeatures, PipeIdx);
599 StringRef StreamingBuiltinGuard = StringRef(RequiredFeatures + PipeIdx + 1);
600
601 bool SatisfiesSVE = Builtin::evaluateRequiredTargetFeatures(
602 RequiredFatures: NonStreamingBuiltinGuard, TargetFetureMap: CallerFeatures);
603 bool SatisfiesSME = Builtin::evaluateRequiredTargetFeatures(
604 RequiredFatures: StreamingBuiltinGuard, TargetFetureMap: CallerFeatures);
605
606 if (SatisfiesSVE && SatisfiesSME)
607 // Function type is irrelevant for streaming-agnostic builtins.
608 return false;
609 else if (SatisfiesSVE)
610 BuiltinType = SemaARM::ArmNonStreaming;
611 else if (SatisfiesSME)
612 BuiltinType = SemaARM::ArmStreaming;
613 else
614 // This should be diagnosed by CodeGen
615 return false;
616 }
617
618 if (FnType != SemaARM::ArmNonStreaming &&
619 BuiltinType == SemaARM::ArmNonStreaming)
620 S.Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_attribute_arm_sm_incompat_builtin)
621 << TheCall->getSourceRange() << "non-streaming";
622 else if (FnType != SemaARM::ArmStreaming &&
623 BuiltinType == SemaARM::ArmStreaming)
624 S.Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_attribute_arm_sm_incompat_builtin)
625 << TheCall->getSourceRange() << "streaming";
626 else
627 return false;
628
629 return true;
630}
631
632static ArmSMEState getSMEState(unsigned BuiltinID) {
633 switch (BuiltinID) {
634 default:
635 return ArmNoState;
636#define GET_SME_BUILTIN_GET_STATE
637#include "clang/Basic/arm_sme_builtins_za_state.inc"
638#undef GET_SME_BUILTIN_GET_STATE
639 }
640}
641
642bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID,
643 CallExpr *TheCall) {
644 if (const FunctionDecl *FD =
645 SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
646 std::optional<ArmStreamingType> BuiltinType;
647
648 switch (BuiltinID) {
649#define GET_SME_STREAMING_ATTRS
650#include "clang/Basic/arm_sme_streaming_attrs.inc"
651#undef GET_SME_STREAMING_ATTRS
652 }
653
654 if (BuiltinType &&
655 checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
656 return true;
657
658 if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
659 Diag(Loc: TheCall->getBeginLoc(),
660 DiagID: diag::warn_attribute_arm_za_builtin_no_za_state)
661 << TheCall->getSourceRange();
662
663 if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
664 Diag(Loc: TheCall->getBeginLoc(),
665 DiagID: diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
666 << TheCall->getSourceRange();
667 }
668
669 // Range check SME intrinsics that take immediate values.
670 SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
671
672 switch (BuiltinID) {
673 default:
674 return false;
675#define GET_SME_IMMEDIATE_CHECK
676#include "clang/Basic/arm_sme_sema_rangechecks.inc"
677#undef GET_SME_IMMEDIATE_CHECK
678 }
679
680 return PerformSVEImmChecks(TheCall, ImmChecks);
681}
682
683bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID,
684 CallExpr *TheCall) {
685 if (const FunctionDecl *FD =
686 SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
687 std::optional<ArmStreamingType> BuiltinType;
688
689 switch (BuiltinID) {
690#define GET_SVE_STREAMING_ATTRS
691#include "clang/Basic/arm_sve_streaming_attrs.inc"
692#undef GET_SVE_STREAMING_ATTRS
693 }
694 if (BuiltinType &&
695 checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
696 return true;
697 }
698 // Range check SVE intrinsics that take immediate values.
699 SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
700
701 switch (BuiltinID) {
702 default:
703 return false;
704#define GET_SVE_IMMEDIATE_CHECK
705#include "clang/Basic/arm_sve_sema_rangechecks.inc"
706#undef GET_SVE_IMMEDIATE_CHECK
707 }
708
709 return PerformSVEImmChecks(TheCall, ImmChecks);
710}
711
712bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
713 unsigned BuiltinID,
714 CallExpr *TheCall) {
715 if (const FunctionDecl *FD =
716 SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
717 std::optional<ArmStreamingType> BuiltinType;
718
719 switch (BuiltinID) {
720 default:
721 break;
722#define GET_NEON_STREAMING_COMPAT_FLAG
723#include "clang/Basic/arm_neon.inc"
724#undef GET_NEON_STREAMING_COMPAT_FLAG
725 }
726 if (BuiltinType &&
727 checkArmStreamingBuiltin(S&: SemaRef, TheCall, FD, BuiltinType: *BuiltinType, BuiltinID))
728 return true;
729 }
730
731 llvm::APSInt Result;
732 uint64_t mask = 0;
733 int TV = -1;
734 int PtrArgNum = -1;
735 bool HasConstPtr = false;
736 switch (BuiltinID) {
737#define GET_NEON_OVERLOAD_CHECK
738#include "clang/Basic/arm_fp16.inc"
739#include "clang/Basic/arm_neon.inc"
740#undef GET_NEON_OVERLOAD_CHECK
741 }
742
743 // For NEON intrinsics which are overloaded on vector element type, validate
744 // the immediate which specifies which variant to emit.
745 unsigned ImmArg = TheCall->getNumArgs() - 1;
746 if (mask) {
747 if (SemaRef.BuiltinConstantArg(TheCall, ArgNum: ImmArg, Result))
748 return true;
749
750 TV = Result.getLimitedValue(Limit: 64);
751 if ((TV > 63) || (mask & (1ULL << TV)) == 0)
752 return Diag(Loc: TheCall->getBeginLoc(), DiagID: diag::err_invalid_neon_type_code)
753 << TheCall->getArg(Arg: ImmArg)->getSourceRange();
754 }
755
756 if (PtrArgNum >= 0) {
757 // Check that pointer arguments have the specified type.
758 Expr *Arg = TheCall->getArg(Arg: PtrArgNum);
759 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: Arg))
760 Arg = ICE->getSubExpr();
761 ExprResult RHS = SemaRef.DefaultFunctionArrayLvalueConversion(E: Arg);
762 QualType RHSTy = RHS.get()->getType();
763
764 llvm::Triple::ArchType Arch = TI.getTriple().getArch();
765 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
766 Arch == llvm::Triple::aarch64_32 ||
767 Arch == llvm::Triple::aarch64_be;
768 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
769 QualType EltTy = getNeonEltType(Flags: NeonTypeFlags(TV), Context&: getASTContext(),
770 IsPolyUnsigned, IsInt64Long);
771 if (HasConstPtr)
772 EltTy = EltTy.withConst();
773 QualType LHSTy = getASTContext().getPointerType(T: EltTy);
774 AssignConvertType ConvTy;
775 ConvTy = SemaRef.CheckSingleAssignmentConstraints(LHSType: LHSTy, RHS);
776 if (RHS.isInvalid())
777 return true;
778 if (SemaRef.DiagnoseAssignmentResult(ConvTy, Loc: Arg->getBeginLoc(), DstType: LHSTy,
779 SrcType: RHSTy, SrcExpr: RHS.get(),
780 Action: AssignmentAction::Assigning))
781 return true;
782 }
783
784 // For NEON intrinsics which take an immediate value as part of the
785 // instruction, range check them here.
786 SmallVector<std::tuple<int, int, int, int>, 2> ImmChecks;
787 switch (BuiltinID) {
788 default:
789 return false;
790#define GET_NEON_IMMEDIATE_CHECK
791#include "clang/Basic/arm_fp16.inc"
792#include "clang/Basic/arm_neon.inc"
793#undef GET_NEON_IMMEDIATE_CHECK
794 }
795
796 return PerformNeonImmChecks(TheCall, ImmChecks, OverloadType: TV);
797}
798
799bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID,
800 CallExpr *TheCall) {
801 switch (BuiltinID) {
802 default:
803 return false;
804#include "clang/Basic/arm_mve_builtin_sema.inc"
805 }
806}
807
808bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI,
809 unsigned BuiltinID,
810 CallExpr *TheCall) {
811 bool Err = false;
812 switch (BuiltinID) {
813 default:
814 return false;
815#include "clang/Basic/arm_cde_builtin_sema.inc"
816 }
817
818 if (Err)
819 return true;
820
821 return CheckARMCoprocessorImmediate(TI, CoprocArg: TheCall->getArg(Arg: 0), /*WantCDE*/ true);
822}
823
824bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
825 const Expr *CoprocArg,
826 bool WantCDE) {
827 ASTContext &Context = getASTContext();
828 if (SemaRef.isConstantEvaluatedContext())
829 return false;
830
831 // We can't check the value of a dependent argument.
832 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
833 return false;
834
835 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Ctx: Context);
836 int64_t CoprocNo = CoprocNoAP.getExtValue();
837 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
838
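  // Coprocessor numbers 0-7 whose bit is set in the target's CDE coprocessor
  // mask (populated from the target features, e.g. "+cdecp0", illustrative)
  // may only be used with CDE intrinsics, and conversely the traditional
  // coprocessor intrinsics may only name non-CDE coprocessors.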
839 uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
840 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
841
842 if (IsCDECoproc != WantCDE)
843 return Diag(Loc: CoprocArg->getBeginLoc(), DiagID: diag::err_arm_invalid_coproc)
844 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
845
846 return false;
847}
848
849bool SemaARM::CheckARMBuiltinExclusiveCall(const TargetInfo &TI,
850 unsigned BuiltinID,
851 CallExpr *TheCall) {
852 assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
853 BuiltinID == ARM::BI__builtin_arm_ldrexd ||
854 BuiltinID == ARM::BI__builtin_arm_ldaex ||
855 BuiltinID == ARM::BI__builtin_arm_strex ||
856 BuiltinID == ARM::BI__builtin_arm_strexd ||
857 BuiltinID == ARM::BI__builtin_arm_stlex ||
858 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
859 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
860 BuiltinID == AArch64::BI__builtin_arm_strex ||
861 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
862 "unexpected ARM builtin");
863 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
864 BuiltinID == ARM::BI__builtin_arm_ldrexd ||
865 BuiltinID == ARM::BI__builtin_arm_ldaex ||
866 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
867 BuiltinID == AArch64::BI__builtin_arm_ldaex;
868 bool IsDoubleWord = BuiltinID == ARM::BI__builtin_arm_ldrexd ||
869 BuiltinID == ARM::BI__builtin_arm_strexd;
870
871 ASTContext &Context = getASTContext();
872 DeclRefExpr *DRE =
873 cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());
874
875 // Ensure that we have the proper number of arguments.
876 if (SemaRef.checkArgCount(Call: TheCall, DesiredArgCount: IsLdrex ? 1 : 2))
877 return true;
878
879 // Inspect the pointer argument of the atomic builtin. This should always be
880 // a pointer type, whose element is an integral scalar or pointer type.
881 // Because it is a pointer type, we don't have to worry about any implicit
882 // casts here.
883 Expr *PointerArg = TheCall->getArg(Arg: IsLdrex ? 0 : 1);
884 ExprResult PointerArgRes =
885 SemaRef.DefaultFunctionArrayLvalueConversion(E: PointerArg);
886 if (PointerArgRes.isInvalid())
887 return true;
888 PointerArg = PointerArgRes.get();
889
890 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
891 if (!pointerType) {
892 Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_builtin_must_be_pointer)
893 << PointerArg->getType() << 0 << PointerArg->getSourceRange();
894 return true;
895 }
896
897 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
898 // task is to insert the appropriate casts into the AST. First work out just
899 // what the appropriate type is.
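  // For example (illustrative), for `__builtin_arm_ldrex((int *)p)` ValType is
  // 'int' and the pointer argument is cast to 'const volatile int *'; for the
  // corresponding strex the cast target would be 'volatile int *'.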
900 QualType ValType = pointerType->getPointeeType();
901 QualType AddrType = ValType.getUnqualifiedType().withVolatile();
902 if (IsLdrex)
903 AddrType.addConst();
904
905 // Issue a warning if the cast is dodgy.
906 CastKind CastNeeded = CK_NoOp;
907 if (!AddrType.isAtLeastAsQualifiedAs(other: ValType, Ctx: getASTContext())) {
908 CastNeeded = CK_BitCast;
909 Diag(Loc: DRE->getBeginLoc(), DiagID: diag::ext_typecheck_convert_discards_qualifiers)
910 << PointerArg->getType() << Context.getPointerType(T: AddrType)
911 << AssignmentAction::Passing << PointerArg->getSourceRange();
912 }
913
914 // Finally, do the cast and replace the argument with the corrected version.
915 AddrType = Context.getPointerType(T: AddrType);
916 PointerArgRes = SemaRef.ImpCastExprToType(E: PointerArg, Type: AddrType, CK: CastNeeded);
917 if (PointerArgRes.isInvalid())
918 return true;
919 PointerArg = PointerArgRes.get();
920
921 TheCall->setArg(Arg: IsLdrex ? 0 : 1, ArgExpr: PointerArg);
922
923 // In general, we allow ints, floats and pointers to be loaded and stored.
924 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
925 !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
926 Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_atomic_builtin_must_be_pointer_intfltptr)
927 << PointerArg->getType() << 0 << PointerArg->getSourceRange();
928 return true;
929 }
930
931 // Check whether the size of the type can be handled atomically on this
932 // target.
933 if (!TI.getTriple().isAArch64()) {
934 unsigned Mask = TI.getARMLDREXMask();
935 unsigned Bits = Context.getTypeSize(T: ValType);
936 if (IsDoubleWord) {
      // An explicit request for ldrexd/strexd means only the double-word size
      // is allowed, and only if the target supports it.
939 Mask &= TargetInfo::ARM_LDREX_D;
940 }
941 bool Supported =
942 (llvm::isPowerOf2_64(Value: Bits)) && Bits >= 8 && (Mask & (Bits / 8));
943
944 if (!Supported) {
945 // Emit a diagnostic saying that this size isn't available. If _no_ size
946 // of exclusive access is supported on this target, we emit a diagnostic
947 // with special wording for that case, but otherwise, we emit
948 // err_atomic_exclusive_builtin_pointer_size and loop over `Mask` to
949 // control what subset of sizes it lists as legal.
950 if (Mask) {
951 auto D = Diag(Loc: DRE->getBeginLoc(),
952 DiagID: diag::err_atomic_exclusive_builtin_pointer_size)
953 << PointerArg->getType();
954 bool Started = false;
955 for (unsigned Size = 1; Size <= 8; Size <<= 1) {
956 // For each of the sizes 1,2,4,8, pass two integers into the
957 // diagnostic. The first selects a separator from the previous
958 // number: 0 for no separator at all, 1 for a comma, 2 for " or "
959 // which appears before the final number in a list of more than one.
960 // The second integer just indicates whether we print this size in
961 // the message at all.
962 if (!(Mask & Size)) {
963 // This size isn't one of the supported ones, so emit no separator
964 // text and don't print the size itself.
965 D << 0 << 0;
966 } else {
967 // This size is supported, so print it, and an appropriate
968 // separator.
969 Mask &= ~Size;
970 if (!Started)
971 D << 0; // No separator if this is the first size we've printed
972 else if (Mask)
973 D << 1; // "," if there's still another size to come
974 else
975 D << 2; // " or " if the size we're about to print is the last
976 D << 1; // print the size itself
977 Started = true;
978 }
979 }
980 } else {
981 bool EmitDoubleWordDiagnostic =
982 IsDoubleWord && !Mask && TI.getARMLDREXMask();
983 Diag(Loc: DRE->getBeginLoc(),
984 DiagID: diag::err_atomic_exclusive_builtin_pointer_size_none)
985 << (EmitDoubleWordDiagnostic ? 1 : 0)
986 << PointerArg->getSourceRange();
987 }
988 }
989 }
990
991 switch (ValType.getObjCLifetime()) {
992 case Qualifiers::OCL_None:
993 case Qualifiers::OCL_ExplicitNone:
994 // okay
995 break;
996
997 case Qualifiers::OCL_Weak:
998 case Qualifiers::OCL_Strong:
999 case Qualifiers::OCL_Autoreleasing:
1000 Diag(Loc: DRE->getBeginLoc(), DiagID: diag::err_arc_atomic_ownership)
1001 << ValType << PointerArg->getSourceRange();
1002 return true;
1003 }
1004
1005 if (IsLdrex) {
1006 TheCall->setType(ValType);
1007 return false;
1008 }
1009
1010 // Initialize the argument to be stored.
1011 ExprResult ValArg = TheCall->getArg(Arg: 0);
1012 InitializedEntity Entity = InitializedEntity::InitializeParameter(
1013 Context, Type: ValType, /*consume*/ Consumed: false);
1014 ValArg = SemaRef.PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: ValArg);
1015 if (ValArg.isInvalid())
1016 return true;
1017 TheCall->setArg(Arg: 0, ArgExpr: ValArg.get());
1018
1019 // __builtin_arm_strex always returns an int. It's marked as such in the .def,
1020 // but the custom checker bypasses all default analysis.
1021 TheCall->setType(Context.IntTy);
1022 return false;
1023}
1024
1025bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI,
1026 unsigned BuiltinID,
1027 CallExpr *TheCall) {
1028 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
1029 BuiltinID == ARM::BI__builtin_arm_ldrexd ||
1030 BuiltinID == ARM::BI__builtin_arm_ldaex ||
1031 BuiltinID == ARM::BI__builtin_arm_strex ||
1032 BuiltinID == ARM::BI__builtin_arm_strexd ||
1033 BuiltinID == ARM::BI__builtin_arm_stlex) {
1034 return CheckARMBuiltinExclusiveCall(TI, BuiltinID, TheCall);
1035 }
1036
1037 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
1038 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
1039 SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1);
1040 }
1041
1042 if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
1043 BuiltinID == ARM::BI__builtin_arm_wsr64)
1044 return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 3, AllowName: false);
1045
1046 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
1047 BuiltinID == ARM::BI__builtin_arm_rsrp ||
1048 BuiltinID == ARM::BI__builtin_arm_wsr ||
1049 BuiltinID == ARM::BI__builtin_arm_wsrp)
1050 return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);
1051
1052 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
1053 return true;
1054 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
1055 return true;
1056 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
1057 return true;
1058
1059 // For intrinsics which take an immediate value as part of the instruction,
1060 // range check them here.
1061 // FIXME: VFP Intrinsics should error if VFP not present.
1062 switch (BuiltinID) {
1063 default:
1064 return false;
1065 case ARM::BI__builtin_arm_ssat:
1066 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 32);
1067 case ARM::BI__builtin_arm_usat:
1068 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
1069 case ARM::BI__builtin_arm_ssat16:
1070 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 16);
1071 case ARM::BI__builtin_arm_usat16:
1072 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
1073 case ARM::BI__builtin_arm_vcvtr_f:
1074 case ARM::BI__builtin_arm_vcvtr_d:
1075 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
1076 case ARM::BI__builtin_arm_dmb:
1077 case ARM::BI__dmb:
1078 case ARM::BI__builtin_arm_dsb:
1079 case ARM::BI__dsb:
1080 case ARM::BI__builtin_arm_isb:
1081 case ARM::BI__isb:
1082 case ARM::BI__builtin_arm_dbg:
1083 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 15);
1084 case ARM::BI__builtin_arm_cdp:
1085 case ARM::BI__builtin_arm_cdp2:
1086 case ARM::BI__builtin_arm_mcr:
1087 case ARM::BI__builtin_arm_mcr2:
1088 case ARM::BI__builtin_arm_mrc:
1089 case ARM::BI__builtin_arm_mrc2:
1090 case ARM::BI__builtin_arm_mcrr:
1091 case ARM::BI__builtin_arm_mcrr2:
1092 case ARM::BI__builtin_arm_mrrc:
1093 case ARM::BI__builtin_arm_mrrc2:
1094 case ARM::BI__builtin_arm_ldc:
1095 case ARM::BI__builtin_arm_ldcl:
1096 case ARM::BI__builtin_arm_ldc2:
1097 case ARM::BI__builtin_arm_ldc2l:
1098 case ARM::BI__builtin_arm_stc:
1099 case ARM::BI__builtin_arm_stcl:
1100 case ARM::BI__builtin_arm_stc2:
1101 case ARM::BI__builtin_arm_stc2l:
1102 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 15) ||
1103 CheckARMCoprocessorImmediate(TI, CoprocArg: TheCall->getArg(Arg: 0),
1104 /*WantCDE*/ false);
1105 }
1106}
1107
1108bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
1109 unsigned BuiltinID,
1110 CallExpr *TheCall) {
1111 if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
1112 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
1113 BuiltinID == AArch64::BI__builtin_arm_strex ||
1114 BuiltinID == AArch64::BI__builtin_arm_stlex) {
1115 return CheckARMBuiltinExclusiveCall(TI, BuiltinID, TheCall);
1116 }
1117
1118 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
1119 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
1120 SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3) ||
1121 SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 1) ||
1122 SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 4, Low: 0, High: 1);
1123 }
1124
1125 if (BuiltinID == AArch64::BI__builtin_arm_range_prefetch_x) {
1126 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
1127 SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1) ||
1128 SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 3, Low: -2097152, High: 2097151) ||
1129 SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 4, Low: 1, High: 65536) ||
1130 SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 5, Low: -2097152, High: 2097151);
1131 }
1132
1133 if (BuiltinID == AArch64::BI__builtin_arm_range_prefetch) {
1134 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
1135 SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1);
1136 }
1137
1138 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
1139 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
1140 BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
1141 BuiltinID == AArch64::BI__builtin_arm_wsr128)
1142 return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);
1143
1144 // Memory Tagging Extensions (MTE) Intrinsics
1145 if (BuiltinID == AArch64::BI__builtin_arm_irg ||
1146 BuiltinID == AArch64::BI__builtin_arm_addg ||
1147 BuiltinID == AArch64::BI__builtin_arm_gmi ||
1148 BuiltinID == AArch64::BI__builtin_arm_ldg ||
1149 BuiltinID == AArch64::BI__builtin_arm_stg ||
1150 BuiltinID == AArch64::BI__builtin_arm_subp) {
1151 return BuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
1152 }
1153
1154 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
1155 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
1156 BuiltinID == AArch64::BI__builtin_arm_wsr ||
1157 BuiltinID == AArch64::BI__builtin_arm_wsrp)
1158 return BuiltinARMSpecialReg(BuiltinID, TheCall, ArgNum: 0, ExpectedFieldNum: 5, AllowName: true);
1159
1160 // Only check the valid encoding range. Any constant in this range would be
1161 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
1162 // an exception for incorrect registers. This matches MSVC behavior.
1163 if (BuiltinID == AArch64::BI_ReadStatusReg ||
1164 BuiltinID == AArch64::BI_WriteStatusReg || BuiltinID == AArch64::BI__sys)
1165 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0x7fff);
1166
1167 if (BuiltinID == AArch64::BI__getReg)
1168 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 31);
1169
1170 if (BuiltinID == AArch64::BI__break)
1171 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0xffff);
1172
1173 if (BuiltinID == AArch64::BI__hlt)
1174 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0xffff);
1175
1176 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
1177 return true;
1178
1179 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
1180 return true;
1181
1182 if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
1183 return true;
1184
1185 // For intrinsics which take an immediate value as part of the instruction,
1186 // range check them here.
1187 unsigned i = 0, l = 0, u = 0;
1188 switch (BuiltinID) {
1189 default: return false;
1190 case AArch64::BI__builtin_arm_dmb:
1191 case AArch64::BI__dmb:
1192 case AArch64::BI__builtin_arm_dsb:
1193 case AArch64::BI__dsb:
1194 case AArch64::BI__builtin_arm_isb:
1195 case AArch64::BI__isb:
1196 l = 0;
1197 u = 15;
1198 break;
1199 }
1200
1201 return SemaRef.BuiltinConstantArgRange(TheCall, ArgNum: i, Low: l, High: u + l);
1202}
1203
1204namespace {
1205struct IntrinToName {
1206 uint32_t Id;
1207 int32_t FullName;
1208 int32_t ShortName;
1209};
1210} // unnamed namespace
1211
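// Returns true if AliasName (with any "__arm_" prefix stripped) matches the
// full or short intrinsic name recorded for BuiltinID. Map must be sorted by
// builtin ID for the binary search below; e.g. (illustrative) both
// "__arm_vaddq" and "vaddq" would resolve to the same MVE builtin entry.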
1212static bool BuiltinAliasValid(unsigned BuiltinID, StringRef AliasName,
1213 ArrayRef<IntrinToName> Map,
1214 const char *IntrinNames) {
1215 AliasName.consume_front(Prefix: "__arm_");
1216 const IntrinToName *It =
1217 llvm::lower_bound(Range&: Map, Value&: BuiltinID, C: [](const IntrinToName &L, unsigned Id) {
1218 return L.Id < Id;
1219 });
1220 if (It == Map.end() || It->Id != BuiltinID)
1221 return false;
1222 StringRef FullName(&IntrinNames[It->FullName]);
1223 if (AliasName == FullName)
1224 return true;
1225 if (It->ShortName == -1)
1226 return false;
1227 StringRef ShortName(&IntrinNames[It->ShortName]);
1228 return AliasName == ShortName;
1229}
1230
1231bool SemaARM::MveAliasValid(unsigned BuiltinID, StringRef AliasName) {
1232#include "clang/Basic/arm_mve_builtin_aliases.inc"
1233 // The included file defines:
1234 // - ArrayRef<IntrinToName> Map
1235 // - const char IntrinNames[]
1236 return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
1237}
1238
1239bool SemaARM::CdeAliasValid(unsigned BuiltinID, StringRef AliasName) {
1240#include "clang/Basic/arm_cde_builtin_aliases.inc"
1241 return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
1242}
1243
1244bool SemaARM::SveAliasValid(unsigned BuiltinID, StringRef AliasName) {
1245 if (getASTContext().BuiltinInfo.isAuxBuiltinID(ID: BuiltinID))
1246 BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(ID: BuiltinID);
1247 return BuiltinID >= AArch64::FirstSVEBuiltin &&
1248 BuiltinID <= AArch64::LastSVEBuiltin;
1249}
1250
1251bool SemaARM::SmeAliasValid(unsigned BuiltinID, StringRef AliasName) {
1252 if (getASTContext().BuiltinInfo.isAuxBuiltinID(ID: BuiltinID))
1253 BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(ID: BuiltinID);
1254 return BuiltinID >= AArch64::FirstSMEBuiltin &&
1255 BuiltinID <= AArch64::LastSMEBuiltin;
1256}
1257
1258void SemaARM::handleBuiltinAliasAttr(Decl *D, const ParsedAttr &AL) {
1259 ASTContext &Context = getASTContext();
1260 if (!AL.isArgIdent(Arg: 0)) {
1261 Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_argument_n_type)
1262 << AL << 1 << AANT_ArgumentIdentifier;
1263 return;
1264 }
1265
1266 IdentifierInfo *Ident = AL.getArgAsIdent(Arg: 0)->getIdentifierInfo();
1267 unsigned BuiltinID = Ident->getBuiltinID();
1268 StringRef AliasName = cast<FunctionDecl>(Val: D)->getIdentifier()->getName();
1269
1270 bool IsAArch64 = Context.getTargetInfo().getTriple().isAArch64();
1271 if ((IsAArch64 && !SveAliasValid(BuiltinID, AliasName) &&
1272 !SmeAliasValid(BuiltinID, AliasName)) ||
1273 (!IsAArch64 && !MveAliasValid(BuiltinID, AliasName) &&
1274 !CdeAliasValid(BuiltinID, AliasName))) {
1275 Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_arm_builtin_alias);
1276 return;
1277 }
1278
1279 D->addAttr(A: ::new (Context) ArmBuiltinAliasAttr(Context, AL, Ident));
1280}
1281
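// Diagnose a use of '__arm_new("<state>")' on a declaration whose function
// type already carries a conflicting state attribute, e.g. (illustrative)
// '__arm_new("za")' combined with '__arm_in("za")' or '__arm_out("za")'.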
1282static bool checkNewAttrMutualExclusion(
1283 Sema &S, const ParsedAttr &AL, const FunctionProtoType *FPT,
1284 FunctionType::ArmStateValue CurrentState, StringRef StateName) {
1285 auto CheckForIncompatibleAttr =
1286 [&](FunctionType::ArmStateValue IncompatibleState,
1287 StringRef IncompatibleStateName) {
1288 if (CurrentState == IncompatibleState) {
1289 S.Diag(Loc: AL.getLoc(), DiagID: diag::err_attributes_are_not_compatible)
1290 << (std::string("'__arm_new(\"") + StateName.str() + "\")'")
1291 << (std::string("'") + IncompatibleStateName.str() + "(\"" +
1292 StateName.str() + "\")'")
1293 << true;
1294 AL.setInvalid();
1295 }
1296 };
1297
1298 CheckForIncompatibleAttr(FunctionType::ARM_In, "__arm_in");
1299 CheckForIncompatibleAttr(FunctionType::ARM_Out, "__arm_out");
1300 CheckForIncompatibleAttr(FunctionType::ARM_InOut, "__arm_inout");
1301 CheckForIncompatibleAttr(FunctionType::ARM_Preserves, "__arm_preserves");
1302 return AL.isInvalid();
1303}
1304
1305void SemaARM::handleNewAttr(Decl *D, const ParsedAttr &AL) {
1306 if (!AL.getNumArgs()) {
1307 Diag(Loc: AL.getLoc(), DiagID: diag::err_missing_arm_state) << AL;
1308 AL.setInvalid();
1309 return;
1310 }
1311
1312 std::vector<StringRef> NewState;
1313 if (const auto *ExistingAttr = D->getAttr<ArmNewAttr>()) {
1314 for (StringRef S : ExistingAttr->newArgs())
1315 NewState.push_back(x: S);
1316 }
1317
1318 bool HasZA = false;
1319 bool HasZT0 = false;
1320 for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
1321 StringRef StateName;
1322 SourceLocation LiteralLoc;
1323 if (!SemaRef.checkStringLiteralArgumentAttr(Attr: AL, ArgNum: I, Str&: StateName, ArgLocation: &LiteralLoc))
1324 return;
1325
1326 if (StateName == "za")
1327 HasZA = true;
1328 else if (StateName == "zt0")
1329 HasZT0 = true;
1330 else {
1331 Diag(Loc: LiteralLoc, DiagID: diag::err_unknown_arm_state) << StateName;
1332 AL.setInvalid();
1333 return;
1334 }
1335
1336 if (!llvm::is_contained(Range&: NewState, Element: StateName)) // Avoid adding duplicates.
1337 NewState.push_back(x: StateName);
1338 }
1339
1340 if (auto *FPT = dyn_cast<FunctionProtoType>(Val: D->getFunctionType())) {
1341 FunctionType::ArmStateValue ZAState =
1342 FunctionType::getArmZAState(AttrBits: FPT->getAArch64SMEAttributes());
1343 if (HasZA && ZAState != FunctionType::ARM_None &&
1344 checkNewAttrMutualExclusion(S&: SemaRef, AL, FPT, CurrentState: ZAState, StateName: "za"))
1345 return;
1346 FunctionType::ArmStateValue ZT0State =
1347 FunctionType::getArmZT0State(AttrBits: FPT->getAArch64SMEAttributes());
1348 if (HasZT0 && ZT0State != FunctionType::ARM_None &&
1349 checkNewAttrMutualExclusion(S&: SemaRef, AL, FPT, CurrentState: ZT0State, StateName: "zt0"))
1350 return;
1351 }
1352
1353 D->dropAttr<ArmNewAttr>();
1354 D->addAttr(A: ::new (getASTContext()) ArmNewAttr(
1355 getASTContext(), AL, NewState.data(), NewState.size()));
1356}
1357
1358void SemaARM::handleCmseNSEntryAttr(Decl *D, const ParsedAttr &AL) {
1359 if (getLangOpts().CPlusPlus && !D->getDeclContext()->isExternCContext()) {
1360 Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_not_clinkage) << AL;
1361 return;
1362 }
1363
1364 const auto *FD = cast<FunctionDecl>(Val: D);
1365 if (!FD->isExternallyVisible()) {
1366 Diag(Loc: AL.getLoc(), DiagID: diag::warn_attribute_cmse_entry_static);
1367 return;
1368 }
1369
1370 D->addAttr(A: ::new (getASTContext()) CmseNSEntryAttr(getASTContext(), AL));
1371}
1372
1373void SemaARM::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
1374 // Check the attribute arguments.
1375 if (AL.getNumArgs() > 1) {
1376 Diag(Loc: AL.getLoc(), DiagID: diag::err_attribute_too_many_arguments) << AL << 1;
1377 return;
1378 }
1379
1380 StringRef Str;
1381 SourceLocation ArgLoc;
1382
1383 if (AL.getNumArgs() == 0)
1384 Str = "";
1385 else if (!SemaRef.checkStringLiteralArgumentAttr(Attr: AL, ArgNum: 0, Str, ArgLocation: &ArgLoc))
1386 return;
1387
1388 ARMInterruptAttr::InterruptType Kind;
1389 if (!ARMInterruptAttr::ConvertStrToInterruptType(Val: Str, Out&: Kind)) {
1390 Diag(Loc: AL.getLoc(), DiagID: diag::warn_attribute_type_not_supported)
1391 << AL << Str << ArgLoc;
1392 return;
1393 }
1394
1395 if (!D->hasAttr<ARMSaveFPAttr>()) {
1396 const TargetInfo &TI = getASTContext().getTargetInfo();
1397 if (TI.hasFeature(Feature: "vfp"))
1398 Diag(Loc: D->getLocation(), DiagID: diag::warn_arm_interrupt_vfp_clobber);
1399 }
1400
1401 D->addAttr(A: ::new (getASTContext())
1402 ARMInterruptAttr(getASTContext(), AL, Kind));
1403}
1404
1405void SemaARM::handleInterruptSaveFPAttr(Decl *D, const ParsedAttr &AL) {
1406 // Go ahead and add ARMSaveFPAttr because handleInterruptAttr() checks for
1407 // it when deciding to issue a diagnostic about clobbering floating point
1408 // registers, which ARMSaveFPAttr prevents.
1409 D->addAttr(A: ::new (SemaRef.Context) ARMSaveFPAttr(SemaRef.Context, AL));
1410 SemaRef.ARM().handleInterruptAttr(D, AL);
1411
1412 // If ARM().handleInterruptAttr() failed, remove ARMSaveFPAttr.
1413 if (!D->hasAttr<ARMInterruptAttr>()) {
1414 D->dropAttr<ARMSaveFPAttr>();
1415 return;
1416 }
1417
1418 // If VFP not enabled, remove ARMSaveFPAttr but leave ARMInterruptAttr.
1419 bool VFP = SemaRef.Context.getTargetInfo().hasFeature(Feature: "vfp");
1420
1421 if (!VFP) {
1422 SemaRef.Diag(Loc: D->getLocation(), DiagID: diag::warn_arm_interrupt_save_fp_without_vfp_unit);
1423 D->dropAttr<ARMSaveFPAttr>();
1424 }
1425}
1426
// Check that a function definition does not use AArch64 SME features without
// the '+sme' target feature enabled, and warn if a locally streaming function
// returns or takes arguments of scalable (VL-dependent) vector types.
1430void SemaARM::CheckSMEFunctionDefAttributes(const FunctionDecl *FD) {
1431 const auto *Attr = FD->getAttr<ArmNewAttr>();
1432 bool UsesSM = FD->hasAttr<ArmLocallyStreamingAttr>();
1433 bool UsesZA = Attr && Attr->isNewZA();
1434 bool UsesZT0 = Attr && Attr->isNewZT0();
1435
1436 if (UsesZA || UsesZT0) {
1437 if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) {
1438 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
1439 if (EPI.AArch64SMEAttributes & FunctionType::SME_AgnosticZAStateMask)
1440 Diag(Loc: FD->getLocation(), DiagID: diag::err_sme_unsupported_agnostic_new);
1441 }
1442 }
1443
1444 if (FD->hasAttr<ArmLocallyStreamingAttr>()) {
1445 if (FD->getReturnType()->isSizelessVectorType())
1446 Diag(Loc: FD->getLocation(),
1447 DiagID: diag::warn_sme_locally_streaming_has_vl_args_returns)
1448 << /*IsArg=*/false;
1449 if (llvm::any_of(Range: FD->parameters(), P: [](ParmVarDecl *P) {
1450 return P->getOriginalType()->isSizelessVectorType();
1451 }))
1452 Diag(Loc: FD->getLocation(),
1453 DiagID: diag::warn_sme_locally_streaming_has_vl_args_returns)
1454 << /*IsArg=*/true;
1455 }
1456 if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) {
1457 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
1458 UsesSM |= EPI.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask;
1459 UsesZA |= FunctionType::getArmZAState(AttrBits: EPI.AArch64SMEAttributes) !=
1460 FunctionType::ARM_None;
1461 UsesZT0 |= FunctionType::getArmZT0State(AttrBits: EPI.AArch64SMEAttributes) !=
1462 FunctionType::ARM_None;
1463 }
1464
1465 ASTContext &Context = getASTContext();
1466 if (UsesSM || UsesZA) {
1467 llvm::StringMap<bool> FeatureMap;
1468 Context.getFunctionFeatureMap(FeatureMap, FD);
1469 if (!FeatureMap.contains(Key: "sme")) {
1470 if (UsesSM)
1471 Diag(Loc: FD->getLocation(),
1472 DiagID: diag::err_sme_definition_using_sm_in_non_sme_target);
1473 else
1474 Diag(Loc: FD->getLocation(),
1475 DiagID: diag::err_sme_definition_using_za_in_non_sme_target);
1476 }
1477 }
1478 if (UsesZT0) {
1479 llvm::StringMap<bool> FeatureMap;
1480 Context.getFunctionFeatureMap(FeatureMap, FD);
1481 if (!FeatureMap.contains(Key: "sme2")) {
1482 Diag(Loc: FD->getLocation(),
1483 DiagID: diag::err_sme_definition_using_zt0_in_non_sme2_target);
1484 }
1485 }
1486}
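// Illustrative examples (not part of the original source), assuming the ACLE
// keyword spellings __arm_locally_streaming and __arm_new:
//   __arm_locally_streaming svint32_t f(svint32_t x);
// warns for both the VL-dependent return type and the VL-dependent argument,
// while
//   __arm_new("za") void g(void) {}
// is an error if the definition is compiled without the +sme feature.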

/// getSVETypeSize - Return SVE vector or predicate register size in bits.
static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty,
                               bool IsStreaming) {
  assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type");
  uint64_t VScale = IsStreaming ? Context.getLangOpts().VScaleStreamingMin
                                : Context.getLangOpts().VScaleMin;
  if (Ty->getKind() == BuiltinType::SveBool ||
      Ty->getKind() == BuiltinType::SveCount)
    return (VScale * 128) / Context.getCharWidth();
  return VScale * 128;
}
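// Worked example (not from the original source): with a minimum vscale of 4,
// i.e. a 512-bit SVE configuration, a data vector such as svint32_t is
// treated as 4 * 128 = 512 bits, while svbool_t and svcount_t come out as
// 512 / 8 = 64 bits (one predicate bit per vector byte).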

bool SemaARM::areCompatibleSveTypes(QualType FirstType, QualType SecondType) {
  bool IsStreaming = false;
  if (getLangOpts().VScaleMin != getLangOpts().VScaleStreamingMin ||
      getLangOpts().VScaleMax != getLangOpts().VScaleStreamingMax) {
    if (const FunctionDecl *FD =
            SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
      // For streaming-compatible functions, we don't know the vector length.
      if (const auto *T = FD->getType()->getAs<FunctionProtoType>()) {
        if (T->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask)
          return false;
      }

      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
        IsStreaming = true;
    }
  }

  auto IsValidCast = [&](QualType FirstType, QualType SecondType) {
    if (const auto *BT = FirstType->getAs<BuiltinType>()) {
      if (const auto *VT = SecondType->getAs<VectorType>()) {
        // Predicates have the same representation as uint8 so we also have to
        // check the kind to make these types incompatible.
        ASTContext &Context = getASTContext();
        if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
          return BT->getKind() == BuiltinType::SveBool;
        else if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
          return VT->getElementType().getCanonicalType() ==
                 FirstType->getSveEltType(Context);
        else if (VT->getVectorKind() == VectorKind::Generic)
          return Context.getTypeSize(SecondType) ==
                     getSVETypeSize(Context, BT, IsStreaming) &&
                 Context.hasSameType(
                     VT->getElementType(),
                     Context.getBuiltinVectorTypeInfo(BT).ElementType);
      }
    }
    return false;
  };

  return IsValidCast(FirstType, SecondType) ||
         IsValidCast(SecondType, FirstType);
}
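// Illustrative example (not from the original source): when
// __ARM_FEATURE_SVE_BITS == 512, the fixed-length typedefs
//   typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(512)));
//   typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(512)));
// are compatible with svint32_t and svbool_t respectively, and a 512-bit GNU
// vector of int (vector_size(64)) is compatible with svint32_t because both
// the overall size and the element type match.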

bool SemaARM::areLaxCompatibleSveTypes(QualType FirstType,
                                       QualType SecondType) {
  bool IsStreaming = false;
  if (getLangOpts().VScaleMin != getLangOpts().VScaleStreamingMin ||
      getLangOpts().VScaleMax != getLangOpts().VScaleStreamingMax) {
    if (const FunctionDecl *FD =
            SemaRef.getCurFunctionDecl(/*AllowLambda=*/true)) {
      // For streaming-compatible functions, we don't know the vector length.
      if (const auto *T = FD->getType()->getAs<FunctionProtoType>())
        if (T->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask)
          return false;

      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
        IsStreaming = true;
    }
  }

  auto IsLaxCompatible = [&](QualType FirstType, QualType SecondType) {
    const auto *BT = FirstType->getAs<BuiltinType>();
    if (!BT)
      return false;

    const auto *VecTy = SecondType->getAs<VectorType>();
    if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData ||
                  VecTy->getVectorKind() == VectorKind::Generic)) {
      const LangOptions::LaxVectorConversionKind LVCKind =
          getLangOpts().getLaxVectorConversions();
      ASTContext &Context = getASTContext();

      // Cannot convert between SVE predicates and SVE data vectors because
      // they have different sizes.
      if (BT->getKind() == BuiltinType::SveBool &&
          VecTy->getVectorKind() == VectorKind::SveFixedLengthData)
        return false;

      // If __ARM_FEATURE_SVE_BITS != N, do not allow GNU vector lax
      // conversion. "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
      // converts to VLAT and VLAT implicitly converts to GNUT."
      // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
      // predicates.
      if (VecTy->getVectorKind() == VectorKind::Generic &&
          Context.getTypeSize(SecondType) !=
              getSVETypeSize(Context, BT, IsStreaming))
        return false;

      // If -flax-vector-conversions=all is specified, the types are
      // certainly compatible.
      if (LVCKind == LangOptions::LaxVectorConversionKind::All)
        return true;

      // If -flax-vector-conversions=integer is specified, the types are
      // compatible if the elements are integer types.
      if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
        return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
               FirstType->getSveEltType(Context)->isIntegerType();
    }

    return false;
  };

  return IsLaxCompatible(FirstType, SecondType) ||
         IsLaxCompatible(SecondType, FirstType);
}
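// Illustrative example (not from the original source): with
// -msve-vector-bits=512 and -flax-vector-conversions=integer, a GNU vector
// such as
//   typedef int gnu_int32x16_t __attribute__((vector_size(64)));
// is laxly convertible to and from svint32_t (sizes match, integer elements),
// but not to svfloat32_t, and a GNU vector whose total size differs from the
// SVE register size is rejected by the size check above.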

static void appendFeature(StringRef Feat, SmallString<64> &Buffer) {
  if (!Buffer.empty())
    Buffer.append("+");
  Buffer.append(Feat);
}

static void convertPriorityString(unsigned Priority,
                                  SmallString<64> &NewParam) {
  StringRef PriorityString[8] = {"P0", "P1", "P2", "P3",
                                 "P4", "P5", "P6", "P7"};

  assert(Priority > 0 && Priority < 256 && "priority out of range");
  // Convert priority=[1-255] -> P0 + ... + P7
  for (unsigned BitPos = 0; BitPos < 8; ++BitPos)
    if (Priority & (1U << BitPos))
      appendFeature(PriorityString[BitPos], NewParam);
}
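// Worked example (not from the original source): Priority = 5 is 0b101, so
// bits 0 and 2 are set and "P0" and "P2" are appended, yielding "...+P0+P2".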

bool SemaARM::checkTargetVersionAttr(const StringRef Param,
                                     const SourceLocation Loc,
                                     SmallString<64> &NewParam) {
  using namespace DiagAttrParams;

  auto [LHS, RHS] = Param.split(';');
  RHS = RHS.trim();
  bool IsDefault = false;
  llvm::SmallVector<StringRef, 8> Features;
  LHS.split(Features, '+');
  for (StringRef Feat : Features) {
    Feat = Feat.trim();
    if (Feat == "default")
      IsDefault = true;
    else if (!getASTContext().getTargetInfo().validateCpuSupports(Feat))
      return Diag(Loc, diag::warn_unsupported_target_attribute)
             << Unsupported << None << Feat << TargetVersion;
    appendFeature(Feat, NewParam);
  }

  if (!RHS.empty() && RHS.consume_front("priority=")) {
    if (IsDefault)
      Diag(Loc, diag::warn_invalid_default_version_priority);
    else {
      unsigned Digit;
      if (RHS.getAsInteger(0, Digit) || Digit < 1 || Digit > 255)
        Diag(Loc, diag::warn_version_priority_out_of_range) << RHS;
      else
        convertPriorityString(Digit, NewParam);
    }
  }
  return false;
}
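// Illustrative example (not from the original source): for an attribute
// string such as "sve2+dotprod;priority=5", NewParam becomes
// "sve2+dotprod+P0+P2"; a "default;priority=2" string instead warns with
// warn_invalid_default_version_priority.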

bool SemaARM::checkTargetClonesAttr(
    SmallVectorImpl<StringRef> &Params, SmallVectorImpl<SourceLocation> &Locs,
    SmallVectorImpl<SmallString<64>> &NewParams) {
  using namespace DiagAttrParams;

  if (!getASTContext().getTargetInfo().hasFeature("fmv"))
    return true;

  assert(Params.size() == Locs.size() &&
         "Mismatch between number of string parameters and locations");

  bool HasDefault = false;
  bool HasNonDefault = false;
  for (unsigned I = 0, E = Params.size(); I < E; ++I) {
    const StringRef Param = Params[I].trim();
    const SourceLocation &Loc = Locs[I];

    auto [LHS, RHS] = Param.split(';');
    RHS = RHS.trim();
    bool HasPriority = !RHS.empty() && RHS.consume_front("priority=");

    if (LHS.empty())
      return Diag(Loc, diag::warn_unsupported_target_attribute)
             << Unsupported << None << "" << TargetClones;

    if (LHS == "default") {
      if (HasDefault)
        Diag(Loc, diag::warn_target_clone_duplicate_options);
      else {
        if (HasPriority)
          Diag(Loc, diag::warn_invalid_default_version_priority);
        NewParams.push_back(LHS);
        HasDefault = true;
      }
      continue;
    }

    bool HasCodeGenImpact = false;
    llvm::SmallVector<StringRef, 8> Features;
    llvm::SmallVector<StringRef, 8> ValidFeatures;
    LHS.split(Features, '+');
    for (StringRef Feat : Features) {
      Feat = Feat.trim();
      if (!getASTContext().getTargetInfo().validateCpuSupports(Feat)) {
        Diag(Loc, diag::warn_unsupported_target_attribute)
            << Unsupported << None << Feat << TargetClones;
        continue;
      }
      if (getASTContext().getTargetInfo().doesFeatureAffectCodeGen(Feat))
        HasCodeGenImpact = true;
      ValidFeatures.push_back(Feat);
    }

    // Ignore variants whose features do not impact code generation.
    if (!HasCodeGenImpact) {
      Diag(Loc, diag::warn_target_clone_no_impact_options);
      continue;
    }

    if (ValidFeatures.empty())
      continue;

    // Canonicalize the attribute parameter.
    llvm::sort(ValidFeatures);
    SmallString<64> NewParam(llvm::join(ValidFeatures, "+"));
    if (llvm::is_contained(NewParams, NewParam)) {
      Diag(Loc, diag::warn_target_clone_duplicate_options);
      continue;
    }

    if (HasPriority) {
      unsigned Digit;
      if (RHS.getAsInteger(0, Digit) || Digit < 1 || Digit > 255)
        Diag(Loc, diag::warn_version_priority_out_of_range) << RHS;
      else
        convertPriorityString(Digit, NewParam);
    }

    // Valid non-default argument.
    NewParams.push_back(NewParam);
    HasNonDefault = true;
  }
  if (!HasNonDefault)
    return true;

  return false;
}
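// Illustrative example (not from the original source): for
//   __attribute__((target_clones("default", "sve2+dotprod;priority=3")))
// the second variant is canonicalized by sorting its features and appending
// the encoded priority bits (3 = 0b011), so NewParams ends up holding
// "default" and "dotprod+sve2+P0+P1".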

bool SemaARM::checkSVETypeSupport(QualType Ty, SourceLocation Loc,
                                  const FunctionDecl *FD,
                                  const llvm::StringMap<bool> &FeatureMap) {
  if (!Ty->isSVESizelessBuiltinType())
    return false;

  if (FeatureMap.lookup("sve"))
    return false;

  // No SVE environment available.
  if (!FeatureMap.lookup("sme"))
    return Diag(Loc, diag::err_sve_vector_in_non_sve_target) << Ty;

  // SVE environment only available to streaming functions.
  if (FD && !FD->getType().isNull() &&
      !IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true))
    return Diag(Loc, diag::err_sve_vector_in_non_streaming_function) << Ty;

  return false;
}
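// Illustrative example (not from the original source): in a translation unit
// built with +sme but without +sve, a use of an SVE type such as
//   svint32_t v;
// inside an ordinary (non-streaming) function is diagnosed with
// err_sve_vector_in_non_streaming_function, whereas the same use in a
// streaming function (e.g. one marked __arm_streaming) is accepted.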
} // namespace clang