//===------ SemaARM.cpp ---------- ARM target-specific routines -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis functions specific to ARM.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/SemaARM.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Sema.h"

namespace clang {

SemaARM::SemaARM(Sema &S) : SemaBase(S) {}

/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
/// (MTE) builtins: __builtin_arm_irg, __builtin_arm_addg, __builtin_arm_gmi,
/// __builtin_arm_ldg, __builtin_arm_stg and __builtin_arm_subp.
/// Performs the custom per-builtin argument checking (the builtins are
/// declared with custom type checking, so the default call checks do not
/// apply), inserts the necessary implicit conversions via setArg(), and
/// derives the call's result type. Returns true on error (after emitting a
/// diagnostic), false otherwise.
bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  ASTContext &Context = getASTContext();

  // __builtin_arm_irg(ptr, mask): insert a random tag. First argument must be
  // a pointer, second an integer; result has the pointer argument's type.
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (SemaRef.checkArgCount(TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    ExprResult SecArg = SemaRef.DefaultLvalueConversion(Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_addg(ptr, imm): add to tag. First argument must be a
  // pointer; the second is a compile-time constant tag offset in [0,15].
  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (SemaRef.checkArgCount(TheCall, 2))
      return true;

    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be an constant in range [0,15]
    return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  // __builtin_arm_gmi(ptr, mask): tag-mask insert. Result is always int.
  // Note: unlike irg/addg, neither argument is rewritten with setArg() here.
  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (SemaRef.checkArgCount(TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();

    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  // __builtin_arm_ldg(ptr) / __builtin_arm_stg(ptr): load/store allocation
  // tag. Single pointer argument; only ldg produces a (pointer-typed) result.
  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (SemaRef.checkArgCount(TheCall, 1))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_subp(a, b): pointer subtraction ignoring tags. Each operand
  // may be a pointer or a null pointer constant; at least one must be a
  // pointer, pointee types must be compatible, and the result is long long.
  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    Expr *ArgA = TheCall->getArg(0);
    Expr *ArgB = TheCall->getArg(1);

    ExprResult ArgExprA = SemaRef.DefaultFunctionArrayLvalueConversion(ArgA);
    ExprResult ArgExprB = SemaRef.DefaultFunctionArrayLvalueConversion(ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&](Expr *E) -> bool {
      return E->isNullPointerConstant(Context,
                                      Expr::NPC_ValueDependentIsNotNull);
    };

    // argument should be either a pointer or null
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure Pointee types are compatible. Compared after canonicalization
    // and with qualifiers stripped, so e.g. const int* vs int* is accepted.
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      if (!Context.typesAreCompatible(
              Context.getCanonicalType(pointeeA).getUnqualifiedType(),
              Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
        return Diag(TheCall->getBeginLoc(),
                    diag::err_typecheck_sub_ptr_compatible)
               << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
               << ArgB->getSourceRange();
      }
    }

    // at least one argument should be pointer type
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
             << ArgTypeA << ArgTypeB << ArgA->getSourceRange();

    if (isNull(ArgA)) // adopt type of the other pointer
      ArgExprA =
          SemaRef.ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);

    if (isNull(ArgB))
      ArgExprB =
          SemaRef.ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);

    TheCall->setArg(0, ArgExprA.get());
    TheCall->setArg(1, ArgExprB.get());
    TheCall->setType(Context.LongLongTy);
    return false;
  }
  // Callers must only dispatch known MTE builtin IDs here.
  assert(false && "Unhandled ARM MTE intrinsic");
  return true;
}

/// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                   int ArgNum, unsigned ExpectedFieldNum,
                                   bool AllowName) {
  // Classify the builtin so we know which register-string grammar applies.
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given. The encoded form is a
  // colon-separated field list (e.g. "cp15:0:c13:c0:3" on ARM).
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  // Either the exact encoded field count, or (when AllowName) a single
  // free-form register name, is accepted.
  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // ARM encodings carry "cp"/"p" and "c" prefixes on some fields; strip
      // them (case-insensitively) before the numeric range checks below.
      ValidString &= Fields[0].starts_with_insensitive("cp") ||
                     Fields[0].starts_with_insensitive("p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            Fields[0].starts_with_insensitive("cp") ? 2 : 1);

      ValidString &= Fields[2].starts_with_insensitive("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].starts_with_insensitive("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    // Per-field upper bounds (lower bound is always 0). AArch64's first field
    // (op0) is capped at 1 here; ARM's coprocessor number goes up to 15.
    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    for (unsigned i = 0; i < Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // This code validates writes to PSTATE registers.

    // Not a write.
    if (TheCall->getNumArgs() != 2)
      return false;

    // The 128-bit system register accesses do not touch PSTATE.
    if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == AArch64::BI__builtin_arm_wsr128)
      return false;

    // These are the named PSTATE accesses using "MSR (immediate)" instructions,
    // along with the upper limit on the immediates allowed.
    auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
                        .CaseLower("spsel", 15)
                        .CaseLower("daifclr", 15)
                        .CaseLower("daifset", 15)
                        .CaseLower("pan", 15)
                        .CaseLower("uao", 15)
                        .CaseLower("dit", 15)
                        .CaseLower("ssbs", 15)
                        .CaseLower("tco", 15)
                        .CaseLower("allint", 1)
                        .CaseLower("pm", 1)
                        .Default(std::nullopt);

    // If this is not a named PSTATE, just continue without validating, as this
    // will be lowered to an "MSR (register)" instruction directly
    if (!MaxLimit)
      return false;

    // Here we only allow constants in the range for that pstate, as required by
    // the ACLE.
    //
    // While clang also accepts the names of system registers in its ACLE
    // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
    // as the value written via a register is different to the value used as an
    // immediate to have the same effect. e.g., for the instruction `msr tco,
    // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
    // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
    //
    // If a programmer wants to codegen the MSR (register) form of `msr tco,
    // xN`, they can still do so by specifying the register using five
    // colon-separated numbers in a string.
    return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
  }

  return false;
}

// Get the valid immediate range for the specified NEON type code.
// 'shift' selects the shift-amount limit (element bit-width based) instead of
// the lane-count limit; 'ForceQuad' pretends the type is a 128-bit Q register.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ? 31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ? 127 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Float16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  case NeonTypeFlags::Float32:
    assert(!shift && "cannot shift float types!");
    return (2 << IsQuad) - 1;
  case NeonTypeFlags::Float64:
    assert(!shift && "cannot shift float types!");
    return (1 << IsQuad) - 1;
  case NeonTypeFlags::BFloat16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
                               bool IsPolyUnsigned, bool IsInt64Long) {
  switch (Flags.getEltType()) {
  case NeonTypeFlags::Int8:
    return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Int16:
    return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Int32:
    return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
  case NeonTypeFlags::Int64:
    // IsInt64Long distinguishes LP64-style targets (int64_t == long) from
    // targets where int64_t is long long.
    if (IsInt64Long)
      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
    else
      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
                                : Context.LongLongTy;
  case NeonTypeFlags::Poly8:
    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Poly16:
    return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Poly64:
    if (IsInt64Long)
      return Context.UnsignedLongTy;
    else
      return Context.UnsignedLongLongTy;
  case NeonTypeFlags::Poly128:
    // No element QualType for 128-bit poly; fall through to unreachable.
    break;
  case NeonTypeFlags::Float16:
    return Context.HalfTy;
  case NeonTypeFlags::Float32:
    return Context.FloatTy;
  case NeonTypeFlags::Float64:
    return Context.DoubleTy;
  case NeonTypeFlags::BFloat16:
    return Context.BFloat16Ty;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

// Bitmask describing which SME array state (ZA / ZT0) a builtin reads and/or
// writes. Bits 0-1 cover ZA, bits 2-3 cover ZT0.
enum ArmSMEState : unsigned {
  ArmNoState = 0,

  ArmInZA = 0b01,
  ArmOutZA = 0b10,
  ArmInOutZA = 0b11,
  ArmZAMask = 0b11,

  ArmInZT0 = 0b01 << 2,
  ArmOutZT0 = 0b10 << 2,
  ArmInOutZT0 = 0b11 << 2,
  ArmZT0Mask = 0b11 << 2
};

/// ParseSVEImmChecks - Perform the immediate-argument range checks for an
/// SVE/SME builtin call. Each tuple in ImmChecks is (argument index, check
/// kind, element size in bits). Returns true if any check failed; all checks
/// are run so every bad immediate is diagnosed.
bool SemaARM::ParseSVEImmChecks(
    CallExpr *TheCall, SmallVector<std::tuple<int, int, int>, 3> &ImmChecks) {
  // Perform all the immediate checks for this builtin call.
  bool HasError = false;
  for (auto &I : ImmChecks) {
    int ArgNum, CheckTy, ElementSizeInBits;
    std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;

    typedef bool (*OptionSetCheckFnTy)(int64_t Value);

    // Function that checks whether the operand (ArgNum) is an immediate
    // that is one of the predefined values.
    auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
                                   int ErrDiag) -> bool {
      // We can't check the value of a dependent argument.
      Expr *Arg = TheCall->getArg(ArgNum);
      if (Arg->isTypeDependent() || Arg->isValueDependent())
        return false;

      // Check constant-ness first.
      llvm::APSInt Imm;
      if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Imm))
        return true;

      if (!CheckImm(Imm.getSExtValue()))
        return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
      return false;
    };

    switch ((SVETypeFlags::ImmCheckType)CheckTy) {
    case SVETypeFlags::ImmCheck0_31:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_13:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_16:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_7:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_1:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_3:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 3))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_7:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 7))
        HasError = true;
      break;
    // Element extracts index within a 2048-bit (max SVE VL) vector.
    case SVETypeFlags::ImmCheckExtract:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
                                          (2048 / ElementSizeInBits) - 1))
        HasError = true;
      break;
    // Right shifts allow [1, element width]; left shifts [0, width - 1].
    case SVETypeFlags::ImmCheckShiftRight:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1,
                                          ElementSizeInBits))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRightNarrow:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1,
                                          ElementSizeInBits / 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftLeft:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
                                          ElementSizeInBits - 1))
        HasError = true;
      break;
    // Lane indices are relative to a 128-bit vector segment; the CompRotate
    // and Dot variants index pairs / quadruples of elements respectively.
    case SVETypeFlags::ImmCheckLaneIndex:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
                                          (128 / (1 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexCompRotate:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
                                          (128 / (2 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexDot:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
                                          (128 / (4 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRot90_270:
      if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
                              diag::err_rotation_argument_to_cadd))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRotAll90:
      if (CheckImmediateInSet(
              [](int64_t V) {
                return V == 0 || V == 90 || V == 180 || V == 270;
              },
              diag::err_rotation_argument_to_cmla))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_1:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_2:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_3:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_0:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 0))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_15:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 15))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_255:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 255))
        HasError = true;
      break;
    // Must be an even value in [2, 4], i.e. 2 or 4.
    case SVETypeFlags::ImmCheck2_4_Mul2:
      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 2, 4) ||
          SemaRef.BuiltinConstantArgMultiple(TheCall, ArgNum, 2))
        HasError = true;
      break;
    }
  }

  return HasError;
}

/// getArmStreamingFnType - Classify the SME streaming mode of function FD:
/// streaming (locally-streaming attribute or __arm_streaming prototype),
/// streaming-compatible, or non-streaming.
SemaARM::ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
  if (FD->hasAttr<ArmLocallyStreamingAttr>())
    return SemaARM::ArmStreaming;
  if (const Type *Ty = FD->getType().getTypePtrOrNull()) {
    if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
      if (FPT->getAArch64SMEAttributes() &
          FunctionType::SME_PStateSMEnabledMask)
        return SemaARM::ArmStreaming;
      if (FPT->getAArch64SMEAttributes() &
          FunctionType::SME_PStateSMCompatibleMask)
        return SemaARM::ArmStreamingCompatible;
    }
  }
  return SemaARM::ArmNonStreaming;
}

/// checkArmStreamingBuiltin - Diagnose a builtin call whose required
/// streaming mode (BuiltinType) conflicts with the streaming mode of the
/// caller FD. Returns true iff a diagnostic was emitted.
static bool checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
                                     const FunctionDecl *FD,
                                     SemaARM::ArmStreamingType BuiltinType,
                                     unsigned BuiltinID) {
  SemaARM::ArmStreamingType FnType = getArmStreamingFnType(FD);

  // Check if the intrinsic is available in the right mode, i.e.
  // * When compiling for SME only, the caller must be in streaming mode.
  // * When compiling for SVE only, the caller must be in non-streaming mode.
  // * When compiling for both SVE and SME, the caller can be in either mode.
  if (BuiltinType == SemaARM::VerifyRuntimeMode) {
    // Turn off every target feature whose name starts with prefix S in Map.
    auto DisableFeatures = [](llvm::StringMap<bool> &Map, StringRef S) {
      for (StringRef K : Map.keys())
        if (K.starts_with(S))
          Map[K] = false;
    };

    llvm::StringMap<bool> CallerFeatureMapWithoutSVE;
    S.Context.getFunctionFeatureMap(CallerFeatureMapWithoutSVE, FD);
    DisableFeatures(CallerFeatureMapWithoutSVE, "sve");

    // Avoid emitting diagnostics for a function that can never compile.
    if (FnType == SemaARM::ArmStreaming && !CallerFeatureMapWithoutSVE["sme"])
      return false;

    llvm::StringMap<bool> CallerFeatureMapWithoutSME;
    S.Context.getFunctionFeatureMap(CallerFeatureMapWithoutSME, FD);
    DisableFeatures(CallerFeatureMapWithoutSME, "sme");

    // We know the builtin requires either some combination of SVE flags, or
    // some combination of SME flags, but we need to figure out which part
    // of the required features is satisfied by the target features.
    //
    // For a builtin with target guard 'sve2p1|sme2', if we compile with
    // '+sve2p1,+sme', then we know that it satisfies the 'sve2p1' part if we
    // evaluate the features for '+sve2p1,+sme,+nosme'.
    //
    // Similarly, if we compile with '+sve2,+sme2', then we know it satisfies
    // the 'sme2' part if we evaluate the features for '+sve2,+sme2,+nosve'.
    StringRef BuiltinTargetGuards(
        S.Context.BuiltinInfo.getRequiredFeatures(BuiltinID));
    bool SatisfiesSVE = Builtin::evaluateRequiredTargetFeatures(
        BuiltinTargetGuards, CallerFeatureMapWithoutSME);
    bool SatisfiesSME = Builtin::evaluateRequiredTargetFeatures(
        BuiltinTargetGuards, CallerFeatureMapWithoutSVE);

    if ((SatisfiesSVE && SatisfiesSME) ||
        (SatisfiesSVE && FnType == SemaARM::ArmStreamingCompatible))
      return false;
    else if (SatisfiesSVE)
      BuiltinType = SemaARM::ArmNonStreaming;
    else if (SatisfiesSME)
      BuiltinType = SemaARM::ArmStreaming;
    else
      // This should be diagnosed by CodeGen
      return false;
  }

  if (FnType != SemaARM::ArmNonStreaming &&
      BuiltinType == SemaARM::ArmNonStreaming)
    S.Diag(TheCall->getBeginLoc(), diag::err_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "non-streaming";
  else if (FnType != SemaARM::ArmStreaming &&
           BuiltinType == SemaARM::ArmStreaming)
    S.Diag(TheCall->getBeginLoc(), diag::err_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "streaming";
  else
    return false;

  return true;
}

// Returns true if FD has ZA state, either from its prototype's SME attributes
// or from an __arm_new("za") attribute.
static bool hasArmZAState(const FunctionDecl *FD) {
  const auto *T = FD->getType()->getAs<FunctionProtoType>();
  return (T && FunctionType::getArmZAState(T->getAArch64SMEAttributes()) !=
                   FunctionType::ARM_None) ||
         (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZA());
}

// Returns true if FD has ZT0 state, either from its prototype's SME attributes
// or from an __arm_new("zt0") attribute.
static bool hasArmZT0State(const FunctionDecl *FD) {
  const auto *T = FD->getType()->getAs<FunctionProtoType>();
  return (T && FunctionType::getArmZT0State(T->getAArch64SMEAttributes()) !=
                   FunctionType::ARM_None) ||
         (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZT0());
}

// Returns the ZA/ZT0 state used by the given SME builtin, as generated from
// the arm_sme.td tablegen data.
static ArmSMEState getSMEState(unsigned BuiltinID) {
  switch (BuiltinID) {
  default:
    return ArmNoState;
#define GET_SME_BUILTIN_GET_STATE
#include "clang/Basic/arm_sme_builtins_za_state.inc"
#undef GET_SME_BUILTIN_GET_STATE
  }
}

/// CheckSMEBuiltinFunctionCall - Validate an SME builtin call: streaming-mode
/// compatibility with the caller, ZA/ZT0 state availability, and immediate
/// argument ranges. Returns true on error.
bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
    std::optional<ArmStreamingType> BuiltinType;

    switch (BuiltinID) {
#define GET_SME_STREAMING_ATTRS
#include "clang/Basic/arm_sme_streaming_attrs.inc"
#undef GET_SME_STREAMING_ATTRS
    }

    if (BuiltinType &&
        checkArmStreamingBuiltin(SemaRef, TheCall, FD, *BuiltinType, BuiltinID))
      return true;

    // Using a ZA/ZT0 builtin from a function without that state is only a
    // warning: the state may be provided dynamically at runtime.
    if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
      Diag(TheCall->getBeginLoc(),
           diag::warn_attribute_arm_za_builtin_no_za_state)
          << TheCall->getSourceRange();

    if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
      Diag(TheCall->getBeginLoc(),
           diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
          << TheCall->getSourceRange();
  }

  // Range check SME intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SME_IMMEDIATE_CHECK
#include "clang/Basic/arm_sme_sema_rangechecks.inc"
#undef GET_SME_IMMEDIATE_CHECK
  }

  return ParseSVEImmChecks(TheCall, ImmChecks);
}

/// CheckSVEBuiltinFunctionCall - Validate an SVE builtin call: streaming-mode
/// compatibility with the caller and immediate argument ranges. Returns true
/// on error.
bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
    std::optional<ArmStreamingType> BuiltinType;

    switch (BuiltinID) {
#define GET_SVE_STREAMING_ATTRS
#include "clang/Basic/arm_sve_streaming_attrs.inc"
#undef GET_SVE_STREAMING_ATTRS
    }
    if (BuiltinType &&
        checkArmStreamingBuiltin(SemaRef, TheCall, FD, *BuiltinType, BuiltinID))
      return true;
  }
  // Range check SVE intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
  }

  return ParseSVEImmChecks(TheCall, ImmChecks);
}

/// CheckNeonBuiltinFunctionCall - Validate a NEON builtin call: streaming-mode
/// restriction (NEON is non-streaming), the overload type-code immediate,
/// pointer argument types for load/store intrinsics, and immediate ranges.
/// Returns true on error.
bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {

    // All NEON builtins are non-streaming.
    switch (BuiltinID) {
    default:
      break;
#define GET_NEON_BUILTINS
#define TARGET_BUILTIN(id, ...) case NEON::BI##id:
#define BUILTIN(id, ...) case NEON::BI##id:
#include "clang/Basic/arm_neon.inc"
      if (checkArmStreamingBuiltin(SemaRef, TheCall, FD, ArmNonStreaming,
                                   BuiltinID))
        return true;
      break;
#undef TARGET_BUILTIN
#undef BUILTIN
#undef GET_NEON_BUILTINS
    }
  }

  llvm::APSInt Result;
  uint64_t mask = 0;   // bitmask of valid NeonTypeFlags codes; 0 = no overload
  unsigned TV = 0;     // the type-code value actually supplied
  int PtrArgNum = -1;  // index of the pointer argument to type-check, if any
  bool HasConstPtr = false;
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_fp16.inc"
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs() - 1;
  if (mask) {
    if (SemaRef.BuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = SemaRef.DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy = getNeonEltType(NeonTypeFlags(TV), getASTContext(),
                                    IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = getASTContext().getPointerType(EltTy);
    // Diagnose the pointer argument as if it were being assigned to a
    // variable of the expected pointer-to-element type.
    Sema::AssignConvertType ConvTy;
    ConvTy = SemaRef.CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (SemaRef.DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy,
                                         RHSTy, RHS.get(), Sema::AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_fp16.inc"
#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  // The table encodes [l, u + l]; the upper bound is an offset from l.
  return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u + l);
}

/// CheckMVEBuiltinFunctionCall - Validate an MVE builtin call using the
/// tablegen-generated per-builtin checks. Returns true on error.
bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_mve_builtin_sema.inc"
  }
}

/// CheckCDEBuiltinFunctionCall - Validate a CDE builtin call: the generated
/// per-builtin checks, then that the coprocessor argument names a CDE
/// coprocessor. Returns true on error.
bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI,
                                          unsigned BuiltinID,
                                          CallExpr *TheCall) {
  bool Err = false;
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
  }

  if (Err)
    return true;

  return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
}

/// CheckARMCoprocessorImmediate - Check that CoprocArg names a coprocessor
/// whose CDE-ness matches WantCDE: CDE builtins must target a CDE coprocessor
/// and traditional coprocessor builtins must not. Returns true on error.
bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
                                           const Expr *CoprocArg,
                                           bool WantCDE) {
  ASTContext &Context = getASTContext();
  if (SemaRef.isConstantEvaluatedContext())
    return false;

  // We can't check the value of a dependent argument.
  if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
    return false;

  // The generated per-builtin checks have already required a constant here.
  llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
  int64_t CoprocNo = CoprocNoAP.getExtValue();
  assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");

  uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
  bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));

  if (IsCDECoproc != WantCDE)
    return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
           << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();

  return false;
}

/// CheckARMBuiltinExclusiveCall - Validate ldrex/ldaex/strex/stlex builtin
/// calls (ARM and AArch64): argument count, pointer argument type and
/// qualifiers, pointee category and size (at most MaxWidth bits), and ObjC
/// lifetime; rewrites arguments with the required implicit casts and sets the
/// call's result type. Returns true on error.
bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID,
                                           CallExpr *TheCall,
                                           unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  ASTContext &Context = getASTContext();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (SemaRef.checkArgCount(TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes =
      SemaRef.DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << 0 << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << Sema::AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = SemaRef.ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << 0 << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = SemaRef.PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
981 TheCall->setType(Context.IntTy); 982 return false; 983 } 984 985 bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI, 986 unsigned BuiltinID, 987 CallExpr *TheCall) { 988 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 989 BuiltinID == ARM::BI__builtin_arm_ldaex || 990 BuiltinID == ARM::BI__builtin_arm_strex || 991 BuiltinID == ARM::BI__builtin_arm_stlex) { 992 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 993 } 994 995 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 996 return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1) || 997 SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 1); 998 } 999 1000 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 1001 BuiltinID == ARM::BI__builtin_arm_wsr64) 1002 return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 1003 1004 if (BuiltinID == ARM::BI__builtin_arm_rsr || 1005 BuiltinID == ARM::BI__builtin_arm_rsrp || 1006 BuiltinID == ARM::BI__builtin_arm_wsr || 1007 BuiltinID == ARM::BI__builtin_arm_wsrp) 1008 return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 1009 1010 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 1011 return true; 1012 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 1013 return true; 1014 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 1015 return true; 1016 1017 // For intrinsics which take an immediate value as part of the instruction, 1018 // range check them here. 1019 // FIXME: VFP Intrinsics should error if VFP not present. 
1020 switch (BuiltinID) { 1021 default: 1022 return false; 1023 case ARM::BI__builtin_arm_ssat: 1024 return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 32); 1025 case ARM::BI__builtin_arm_usat: 1026 return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31); 1027 case ARM::BI__builtin_arm_ssat16: 1028 return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 16); 1029 case ARM::BI__builtin_arm_usat16: 1030 return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15); 1031 case ARM::BI__builtin_arm_vcvtr_f: 1032 case ARM::BI__builtin_arm_vcvtr_d: 1033 return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1); 1034 case ARM::BI__builtin_arm_dmb: 1035 case ARM::BI__builtin_arm_dsb: 1036 case ARM::BI__builtin_arm_isb: 1037 case ARM::BI__builtin_arm_dbg: 1038 return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15); 1039 case ARM::BI__builtin_arm_cdp: 1040 case ARM::BI__builtin_arm_cdp2: 1041 case ARM::BI__builtin_arm_mcr: 1042 case ARM::BI__builtin_arm_mcr2: 1043 case ARM::BI__builtin_arm_mrc: 1044 case ARM::BI__builtin_arm_mrc2: 1045 case ARM::BI__builtin_arm_mcrr: 1046 case ARM::BI__builtin_arm_mcrr2: 1047 case ARM::BI__builtin_arm_mrrc: 1048 case ARM::BI__builtin_arm_mrrc2: 1049 case ARM::BI__builtin_arm_ldc: 1050 case ARM::BI__builtin_arm_ldcl: 1051 case ARM::BI__builtin_arm_ldc2: 1052 case ARM::BI__builtin_arm_ldc2l: 1053 case ARM::BI__builtin_arm_stc: 1054 case ARM::BI__builtin_arm_stcl: 1055 case ARM::BI__builtin_arm_stc2: 1056 case ARM::BI__builtin_arm_stc2l: 1057 return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15) || 1058 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 1059 /*WantCDE*/ false); 1060 } 1061 } 1062 1063 bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 1064 unsigned BuiltinID, 1065 CallExpr *TheCall) { 1066 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 1067 BuiltinID == AArch64::BI__builtin_arm_ldaex || 1068 BuiltinID == AArch64::BI__builtin_arm_strex || 1069 BuiltinID == AArch64::BI__builtin_arm_stlex) { 1070 return 
CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 1071 } 1072 1073 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 1074 return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1) || 1075 SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3) || 1076 SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 1) || 1077 SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 1); 1078 } 1079 1080 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 1081 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 1082 BuiltinID == AArch64::BI__builtin_arm_rsr128 || 1083 BuiltinID == AArch64::BI__builtin_arm_wsr128) 1084 return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 1085 1086 // Memory Tagging Extensions (MTE) Intrinsics 1087 if (BuiltinID == AArch64::BI__builtin_arm_irg || 1088 BuiltinID == AArch64::BI__builtin_arm_addg || 1089 BuiltinID == AArch64::BI__builtin_arm_gmi || 1090 BuiltinID == AArch64::BI__builtin_arm_ldg || 1091 BuiltinID == AArch64::BI__builtin_arm_stg || 1092 BuiltinID == AArch64::BI__builtin_arm_subp) { 1093 return BuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 1094 } 1095 1096 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 1097 BuiltinID == AArch64::BI__builtin_arm_rsrp || 1098 BuiltinID == AArch64::BI__builtin_arm_wsr || 1099 BuiltinID == AArch64::BI__builtin_arm_wsrp) 1100 return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 1101 1102 // Only check the valid encoding range. Any constant in this range would be 1103 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 1104 // an exception for incorrect registers. This matches MSVC behavior. 
1105 if (BuiltinID == AArch64::BI_ReadStatusReg || 1106 BuiltinID == AArch64::BI_WriteStatusReg) 1107 return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 1108 1109 if (BuiltinID == AArch64::BI__getReg) 1110 return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 31); 1111 1112 if (BuiltinID == AArch64::BI__break) 1113 return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 0xffff); 1114 1115 if (BuiltinID == AArch64::BI__hlt) 1116 return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 0xffff); 1117 1118 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 1119 return true; 1120 1121 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 1122 return true; 1123 1124 if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall)) 1125 return true; 1126 1127 // For intrinsics which take an immediate value as part of the instruction, 1128 // range check them here. 1129 unsigned i = 0, l = 0, u = 0; 1130 switch (BuiltinID) { 1131 default: return false; 1132 case AArch64::BI__builtin_arm_dmb: 1133 case AArch64::BI__builtin_arm_dsb: 1134 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 1135 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 1136 } 1137 1138 return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u + l); 1139 } 1140 1141 namespace { 1142 struct IntrinToName { 1143 uint32_t Id; 1144 int32_t FullName; 1145 int32_t ShortName; 1146 }; 1147 } // unnamed namespace 1148 1149 static bool BuiltinAliasValid(unsigned BuiltinID, StringRef AliasName, 1150 ArrayRef<IntrinToName> Map, 1151 const char *IntrinNames) { 1152 AliasName.consume_front("__arm_"); 1153 const IntrinToName *It = 1154 llvm::lower_bound(Map, BuiltinID, [](const IntrinToName &L, unsigned Id) { 1155 return L.Id < Id; 1156 }); 1157 if (It == Map.end() || It->Id != BuiltinID) 1158 return false; 1159 StringRef FullName(&IntrinNames[It->FullName]); 1160 if (AliasName == FullName) 1161 return true; 1162 if (It->ShortName == -1) 1163 return false; 1164 StringRef 
ShortName(&IntrinNames[It->ShortName]); 1165 return AliasName == ShortName; 1166 } 1167 1168 bool SemaARM::MveAliasValid(unsigned BuiltinID, StringRef AliasName) { 1169 #include "clang/Basic/arm_mve_builtin_aliases.inc" 1170 // The included file defines: 1171 // - ArrayRef<IntrinToName> Map 1172 // - const char IntrinNames[] 1173 return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames); 1174 } 1175 1176 bool SemaARM::CdeAliasValid(unsigned BuiltinID, StringRef AliasName) { 1177 #include "clang/Basic/arm_cde_builtin_aliases.inc" 1178 return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames); 1179 } 1180 1181 bool SemaARM::SveAliasValid(unsigned BuiltinID, StringRef AliasName) { 1182 if (getASTContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) 1183 BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(BuiltinID); 1184 return BuiltinID >= AArch64::FirstSVEBuiltin && 1185 BuiltinID <= AArch64::LastSVEBuiltin; 1186 } 1187 1188 bool SemaARM::SmeAliasValid(unsigned BuiltinID, StringRef AliasName) { 1189 if (getASTContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) 1190 BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(BuiltinID); 1191 return BuiltinID >= AArch64::FirstSMEBuiltin && 1192 BuiltinID <= AArch64::LastSMEBuiltin; 1193 } 1194 1195 void SemaARM::handleBuiltinAliasAttr(Decl *D, const ParsedAttr &AL) { 1196 ASTContext &Context = getASTContext(); 1197 if (!AL.isArgIdent(0)) { 1198 Diag(AL.getLoc(), diag::err_attribute_argument_n_type) 1199 << AL << 1 << AANT_ArgumentIdentifier; 1200 return; 1201 } 1202 1203 IdentifierInfo *Ident = AL.getArgAsIdent(0)->Ident; 1204 unsigned BuiltinID = Ident->getBuiltinID(); 1205 StringRef AliasName = cast<FunctionDecl>(D)->getIdentifier()->getName(); 1206 1207 bool IsAArch64 = Context.getTargetInfo().getTriple().isAArch64(); 1208 if ((IsAArch64 && !SveAliasValid(BuiltinID, AliasName) && 1209 !SmeAliasValid(BuiltinID, AliasName)) || 1210 (!IsAArch64 && !MveAliasValid(BuiltinID, AliasName) && 1211 
!CdeAliasValid(BuiltinID, AliasName))) { 1212 Diag(AL.getLoc(), diag::err_attribute_arm_builtin_alias); 1213 return; 1214 } 1215 1216 D->addAttr(::new (Context) ArmBuiltinAliasAttr(Context, AL, Ident)); 1217 } 1218 1219 static bool checkNewAttrMutualExclusion( 1220 Sema &S, const ParsedAttr &AL, const FunctionProtoType *FPT, 1221 FunctionType::ArmStateValue CurrentState, StringRef StateName) { 1222 auto CheckForIncompatibleAttr = 1223 [&](FunctionType::ArmStateValue IncompatibleState, 1224 StringRef IncompatibleStateName) { 1225 if (CurrentState == IncompatibleState) { 1226 S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible) 1227 << (std::string("'__arm_new(\"") + StateName.str() + "\")'") 1228 << (std::string("'") + IncompatibleStateName.str() + "(\"" + 1229 StateName.str() + "\")'") 1230 << true; 1231 AL.setInvalid(); 1232 } 1233 }; 1234 1235 CheckForIncompatibleAttr(FunctionType::ARM_In, "__arm_in"); 1236 CheckForIncompatibleAttr(FunctionType::ARM_Out, "__arm_out"); 1237 CheckForIncompatibleAttr(FunctionType::ARM_InOut, "__arm_inout"); 1238 CheckForIncompatibleAttr(FunctionType::ARM_Preserves, "__arm_preserves"); 1239 return AL.isInvalid(); 1240 } 1241 1242 void SemaARM::handleNewAttr(Decl *D, const ParsedAttr &AL) { 1243 if (!AL.getNumArgs()) { 1244 Diag(AL.getLoc(), diag::err_missing_arm_state) << AL; 1245 AL.setInvalid(); 1246 return; 1247 } 1248 1249 std::vector<StringRef> NewState; 1250 if (const auto *ExistingAttr = D->getAttr<ArmNewAttr>()) { 1251 for (StringRef S : ExistingAttr->newArgs()) 1252 NewState.push_back(S); 1253 } 1254 1255 bool HasZA = false; 1256 bool HasZT0 = false; 1257 for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) { 1258 StringRef StateName; 1259 SourceLocation LiteralLoc; 1260 if (!SemaRef.checkStringLiteralArgumentAttr(AL, I, StateName, &LiteralLoc)) 1261 return; 1262 1263 if (StateName == "za") 1264 HasZA = true; 1265 else if (StateName == "zt0") 1266 HasZT0 = true; 1267 else { 1268 Diag(LiteralLoc, 
diag::err_unknown_arm_state) << StateName; 1269 AL.setInvalid(); 1270 return; 1271 } 1272 1273 if (!llvm::is_contained(NewState, StateName)) // Avoid adding duplicates. 1274 NewState.push_back(StateName); 1275 } 1276 1277 if (auto *FPT = dyn_cast<FunctionProtoType>(D->getFunctionType())) { 1278 FunctionType::ArmStateValue ZAState = 1279 FunctionType::getArmZAState(FPT->getAArch64SMEAttributes()); 1280 if (HasZA && ZAState != FunctionType::ARM_None && 1281 checkNewAttrMutualExclusion(SemaRef, AL, FPT, ZAState, "za")) 1282 return; 1283 FunctionType::ArmStateValue ZT0State = 1284 FunctionType::getArmZT0State(FPT->getAArch64SMEAttributes()); 1285 if (HasZT0 && ZT0State != FunctionType::ARM_None && 1286 checkNewAttrMutualExclusion(SemaRef, AL, FPT, ZT0State, "zt0")) 1287 return; 1288 } 1289 1290 D->dropAttr<ArmNewAttr>(); 1291 D->addAttr(::new (getASTContext()) ArmNewAttr( 1292 getASTContext(), AL, NewState.data(), NewState.size())); 1293 } 1294 1295 void SemaARM::handleCmseNSEntryAttr(Decl *D, const ParsedAttr &AL) { 1296 if (getLangOpts().CPlusPlus && !D->getDeclContext()->isExternCContext()) { 1297 Diag(AL.getLoc(), diag::err_attribute_not_clinkage) << AL; 1298 return; 1299 } 1300 1301 const auto *FD = cast<FunctionDecl>(D); 1302 if (!FD->isExternallyVisible()) { 1303 Diag(AL.getLoc(), diag::warn_attribute_cmse_entry_static); 1304 return; 1305 } 1306 1307 D->addAttr(::new (getASTContext()) CmseNSEntryAttr(getASTContext(), AL)); 1308 } 1309 1310 void SemaARM::handleInterruptAttr(Decl *D, const ParsedAttr &AL) { 1311 // Check the attribute arguments. 
1312 if (AL.getNumArgs() > 1) { 1313 Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 1; 1314 return; 1315 } 1316 1317 StringRef Str; 1318 SourceLocation ArgLoc; 1319 1320 if (AL.getNumArgs() == 0) 1321 Str = ""; 1322 else if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc)) 1323 return; 1324 1325 ARMInterruptAttr::InterruptType Kind; 1326 if (!ARMInterruptAttr::ConvertStrToInterruptType(Str, Kind)) { 1327 Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) 1328 << AL << Str << ArgLoc; 1329 return; 1330 } 1331 1332 const TargetInfo &TI = getASTContext().getTargetInfo(); 1333 if (TI.hasFeature("vfp")) 1334 Diag(D->getLocation(), diag::warn_arm_interrupt_vfp_clobber); 1335 1336 D->addAttr(::new (getASTContext()) 1337 ARMInterruptAttr(getASTContext(), AL, Kind)); 1338 } 1339 1340 } // namespace clang 1341