//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

/// Map \p ByteNo (an offset into the value of \p SL, e.g. a position within a
/// format string) back to the source location of that byte, taking escape
/// sequences and string-literal concatenation into account.
SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Pack two format-argument-passing kinds into a single value (A in the high
/// byte, B in the low byte) so a pair of kinds can be matched with one
/// comparison or switch case.
static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
                                            Sema::FormatArgumentPassingKind B) {
  return (A << 8) | B;
}

/// Checks that a call expression's argument count is at least the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
                                 unsigned MinArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount >= MinArgCount)
    return false;

  return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
         << 0 /*function call*/ << MinArgCount << ArgCount
         << /*is non object*/ 0 << Call->getSourceRange();
}

/// Checks that a call expression's argument count is at most the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount <= MaxArgCount)
    return false;
  return S.Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
         << 0 /*function call*/ << MaxArgCount << ArgCount
         << /*is non object*/ 0 << Call->getSourceRange();
}

/// Checks that a call expression's argument count is in the desired range. This
/// is useful when doing custom type-checking on a variadic function. Returns
/// true on error.
static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount,
                               unsigned MaxArgCount) {
  // Short-circuits: at most one of the two diagnostics can fire.
  return checkArgCountAtLeast(S, Call, MinArgCount) ||
         checkArgCountAtMost(S, Call, MaxArgCount);
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
156 static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) { 157 unsigned ArgCount = Call->getNumArgs(); 158 if (ArgCount == DesiredArgCount) 159 return false; 160 161 if (checkArgCountAtLeast(S, Call, DesiredArgCount)) 162 return true; 163 assert(ArgCount > DesiredArgCount && "should have diagnosed this"); 164 165 // Highlight all the excess arguments. 166 SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(), 167 Call->getArg(ArgCount - 1)->getEndLoc()); 168 169 return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args) 170 << 0 /*function call*/ << DesiredArgCount << ArgCount 171 << /*is non object*/ 0 << Call->getArg(1)->getSourceRange(); 172 } 173 174 static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) { 175 if (Value->isTypeDependent()) 176 return false; 177 178 InitializedEntity Entity = 179 InitializedEntity::InitializeParameter(S.Context, Ty, false); 180 ExprResult Result = 181 S.PerformCopyInitialization(Entity, SourceLocation(), Value); 182 if (Result.isInvalid()) 183 return true; 184 Value = Result.get(); 185 return false; 186 } 187 188 /// Check that the first argument to __builtin_annotation is an integer 189 /// and the second argument is a non-wide string literal. 190 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 191 if (checkArgCount(S, TheCall, 2)) 192 return true; 193 194 // First argument should be an integer. 195 Expr *ValArg = TheCall->getArg(0); 196 QualType Ty = ValArg->getType(); 197 if (!Ty->isIntegerType()) { 198 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 199 << ValArg->getSourceRange(); 200 return true; 201 } 202 203 // Second argument should be a constant string. 
204 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); 205 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); 206 if (!Literal || !Literal->isOrdinary()) { 207 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg) 208 << StrArg->getSourceRange(); 209 return true; 210 } 211 212 TheCall->setType(Ty); 213 return false; 214 } 215 216 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) { 217 // We need at least one argument. 218 if (TheCall->getNumArgs() < 1) { 219 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 220 << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0 221 << TheCall->getCallee()->getSourceRange(); 222 return true; 223 } 224 225 // All arguments should be wide string literals. 226 for (Expr *Arg : TheCall->arguments()) { 227 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts()); 228 if (!Literal || !Literal->isWide()) { 229 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str) 230 << Arg->getSourceRange(); 231 return true; 232 } 233 } 234 235 return false; 236 } 237 238 /// Check that the argument to __builtin_addressof is a glvalue, and set the 239 /// result type to the corresponding pointer type. 240 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { 241 if (checkArgCount(S, TheCall, 1)) 242 return true; 243 244 ExprResult Arg(TheCall->getArg(0)); 245 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc()); 246 if (ResultType.isNull()) 247 return true; 248 249 TheCall->setArg(0, Arg.get()); 250 TheCall->setType(ResultType); 251 return false; 252 } 253 254 /// Check that the argument to __builtin_function_start is a function. 
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  // The argument must fold to a reference to a specific function declaration.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  // checkAddressOfFunctionIsAvailable returns true when taking the address is
  // allowed; this helper's contract is "true on error", hence the negation.
  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  // Plain char, bool and enums are rejected even though they are integral.
  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  // Largest accepted alignment is 2^(width(SrcTy) - 1): the sign/top bit set.
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      // Alignment of 1 is always satisfied/a no-op: warn but don't error.
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

/// Check __builtin_{add,sub,mul}_overflow (also reachable via the C23
/// ckd_add/ckd_sub/ckd_mul macros): two integer operands plus a pointer to a
/// non-const integer that receives the result.
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  std::pair<unsigned, const char *> Builtins[] = {
    { Builtin::BI__builtin_add_overflow, "ckd_add" },
    { Builtin::BI__builtin_sub_overflow, "ckd_sub" },
    { Builtin::BI__builtin_mul_overflow, "ckd_mul" },
  };

  // The call counts as a ckd_* operation only if it was spelled via the
  // correspondingly-named macro; that enables the stricter type rules below.
  bool CkdOperation = llvm::any_of(Builtins, [&](const std::pair<unsigned,
                                                 const char *> &P) {
    return BuiltinID == P.first && TheCall->getExprLoc().isMacroID() &&
           Lexer::getImmediateMacroName(TheCall->getExprLoc(),
                                        S.getSourceManager(),
                                        S.getLangOpts()) == P.second;
  });

  auto ValidCkdIntType = [](QualType QT) {
    // A valid checked integer type is an integer type other than a plain char,
    // bool, a bit-precise type, or an enumeration type.
    if (const auto *BT = QT.getCanonicalType()->getAs<BuiltinType>())
      return (BT->getKind() >= BuiltinType::Short &&
              BT->getKind() <= BuiltinType::Int128) || (
              BT->getKind() >= BuiltinType::UShort &&
              BT->getKind() <= BuiltinType::UInt128) ||
              BT->getKind() == BuiltinType::UChar ||
              BT->getKind() == BuiltinType::SChar;
    return false;
  };

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    bool IsValid = CkdOperation ? ValidCkdIntType(Ty) : Ty->isIntegerType();
    if (!IsValid) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << CkdOperation << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        (!ValidCkdIntType(PtrTy->getPointeeType()) && CkdOperation) ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << CkdOperation << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}

namespace {
/// Helper that lowers a __builtin_dump_struct call into a sequence of calls to
/// the user-supplied print function, collected in Actions and finally wrapped
/// in a PseudoObjectExpr (see buildWrapper below in this file).
struct BuiltinDumpStructGenerator {
  Sema &S;
  CallExpr *TheCall;
  SourceLocation Loc = TheCall->getBeginLoc();
  // Synthesized expressions (opaque values and print calls), in order.
  SmallVector<Expr *, 32> Actions;
  // Tracks whether any error diagnostic fired while building the calls.
  DiagnosticErrorTrap ErrorTracker;
  PrintingPolicy Policy;

  BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
      : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
        Policy(S.Context.getPrintingPolicy()) {
    Policy.AnonymousTagLocations = false;
  }

  // Wrap Inner in an OpaqueValueExpr so it can be referenced repeatedly
  // without being re-evaluated; the OVE is recorded as an action.
  Expr *makeOpaqueValueExpr(Expr *Inner) {
    auto *OVE = new (S.Context)
        OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
                        Inner->getObjectKind(), Inner);
    Actions.push_back(OVE);
    return OVE;
  }

  Expr *getStringLiteral(llvm::StringRef Str) {
    Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str);
    // Wrap the literal in parentheses to attach a source location.
    return new (S.Context) ParenExpr(Loc, Loc, Lit);
  }

  // Build a call to the user's print function (argument 1 of the builtin),
  // forwarding any extra builtin arguments, then Format, then Exprs.
  bool callPrintFunction(llvm::StringRef Format,
                         llvm::ArrayRef<Expr *> Exprs = {}) {
    SmallVector<Expr *, 8> Args;
    assert(TheCall->getNumArgs() >= 2);
    Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
    Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
    Args.push_back(getStringLiteral(Format));
    Args.insert(Args.end(), Exprs.begin(), Exprs.end());

    // Register a note to explain why we're performing the call.
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
    Ctx.PointOfInstantiation = Loc;
    Ctx.CallArgs = Args.data();
    Ctx.NumCallArgs = Args.size();
    S.pushCodeSynthesisContext(Ctx);

    ExprResult RealCall =
        S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
                        TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());

    S.popCodeSynthesisContext();
    if (!RealCall.isInvalid())
      Actions.push_back(RealCall.get());
    // Bail out if we've hit any errors, even if we managed to build the
    // call. We don't want to produce more than one error.
    return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
  }

  // Build a string literal of Depth indentation levels, or null for depth 0.
  Expr *getIndentString(unsigned Depth) {
    if (!Depth)
      return nullptr;

    llvm::SmallString<32> Indent;
    Indent.resize(Depth * Policy.Indentation, ' ');
    return getStringLiteral(Indent);
  }

  Expr *getTypeString(QualType T) {
    return getStringLiteral(T.getAsString(Policy));
  }

  // Append a printf conversion specifier suitable for type T to Str.
  // Returns false if no specifier for T could be determined.
  bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
    llvm::raw_svector_ostream OS(Str);

    // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
    // than trying to print a single character.
    if (auto *BT = T->getAs<BuiltinType>()) {
      switch (BT->getKind()) {
      case BuiltinType::Bool:
        OS << "%d";
        return true;
      case BuiltinType::Char_U:
      case BuiltinType::UChar:
        OS << "%hhu";
        return true;
      case BuiltinType::Char_S:
      case BuiltinType::SChar:
        OS << "%hhd";
        return true;
      default:
        break;
      }
    }

    analyze_printf::PrintfSpecifier Specifier;
    if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) {
      // We were able to guess how to format this.
      if (Specifier.getConversionSpecifier().getKind() ==
          analyze_printf::PrintfConversionSpecifier::sArg) {
        // Wrap double-quotes around a '%s' specifier and limit its maximum
        // length. Ideally we'd also somehow escape special characters in the
        // contents but printf doesn't support that.
        // FIXME: '%s' formatting is not safe in general.
        OS << '"';
        Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
        Specifier.toString(OS);
        OS << '"';
        // FIXME: It would be nice to include a '...' if the string doesn't fit
        // in the length limit.
      } else {
        Specifier.toString(OS);
      }
      return true;
    }

    if (T->isPointerType()) {
      // Format all pointers with '%p'.
      OS << "%p";
      return true;
    }

    return false;
  }

  // Print "<indent><record type>" then the record's value.
  bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
    Expr *IndentLit = getIndentString(Depth);
    Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
    if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
                  : callPrintFunction("%s", {TypeLit}))
      return true;

    return dumpRecordValue(RD, E, IndentLit, Depth);
  }

  // Dump a record value. E should be a pointer or lvalue referring to an RD.
  bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
                       unsigned Depth) {
    // FIXME: Decide what to do if RD is a union. At least we should probably
    // turn off printing `const char*` members with `%s`, because that is very
    // likely to crash if that's not the active member. Whatever we decide, we
    // should document it.

    // Build an OpaqueValueExpr so we can refer to E more than once without
    // triggering re-evaluation.
    Expr *RecordArg = makeOpaqueValueExpr(E);
    bool RecordArgIsPtr = RecordArg->getType()->isPointerType();

    if (callPrintFunction(" {\n"))
      return true;

    // Dump each base class, regardless of whether they're aggregates.
    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &Base : CXXRD->bases()) {
        // Cast the record pointer/lvalue to the base type so the base's
        // fields are dumped against the right object.
        QualType BaseType =
            RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
                           : S.Context.getLValueReferenceType(Base.getType());
        ExprResult BasePtr = S.BuildCStyleCastExpr(
            Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
            RecordArg);
        if (BasePtr.isInvalid() ||
            dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
                              Depth + 1))
          return true;
      }
    }

    Expr *FieldIndentArg = getIndentString(Depth + 1);

    // Dump each field.
    for (auto *D : RD->decls()) {
      auto *IFD = dyn_cast<IndirectFieldDecl>(D);
      auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
      if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
        continue;

      // Line shape: "<indent><type> <name> [: <bitwidth> ]=<value>".
      llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
      llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
                                           getTypeString(FD->getType()),
                                           getStringLiteral(FD->getName())};

      if (FD->isBitField()) {
        Format += ": %zu ";
        QualType SizeT = S.Context.getSizeType();
        llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
                             FD->getBitWidthValue(S.Context));
        Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
      }

      Format += "=";

      ExprResult Field =
          IFD ? S.BuildAnonymousStructUnionMemberReference(
                    CXXScopeSpec(), Loc, IFD,
                    DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
              : S.BuildFieldReferenceExpr(
                    RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
                    DeclAccessPair::make(FD, AS_public),
                    DeclarationNameInfo(FD->getDeclName(), Loc));
      if (Field.isInvalid())
        return true;

      auto *InnerRD = FD->getType()->getAsRecordDecl();
      auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
      if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
        // Recursively print the values of members of aggregate record type.
        if (callPrintFunction(Format, Args) ||
            dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
          return true;
      } else {
        Format += " ";
        if (appendFormatSpecifier(FD->getType(), Format)) {
          // We know how to print this field.
          Args.push_back(Field.get());
        } else {
          // We don't know how to print this field. Print out its address
          // with a format specifier that a smart tool will be able to
          // recognize and treat specially.
          Format += "*%p";
          ExprResult FieldAddr =
              S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
          if (FieldAddr.isInvalid())
            return true;
          Args.push_back(FieldAddr.get());
        }
        Format += "\n";
        if (callPrintFunction(Format, Args))
          return true;
      }
    }

    return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
                        : callPrintFunction("}\n");
  }

  // Package all recorded actions into a PseudoObjectExpr that replaces the
  // original __builtin_dump_struct call (which stays as the syntactic form).
  Expr *buildWrapper() {
    auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
                                             PseudoObjectExpr::NoResult);
    TheCall->setType(Wrapper->getType());
    TheCall->setValueKind(Wrapper->getValueKind());
    return Wrapper;
  }
};
} // namespace

/// Validate __builtin_dump_struct(&obj, printfn, extra...) and expand it into
/// a sequence of calls to printfn via BuiltinDumpStructGenerator.
static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
  if (checkArgCountAtLeast(S, TheCall, 2))
    return ExprError();

  ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
  if (PtrArgResult.isInvalid())
    return ExprError();
  TheCall->setArg(0, PtrArgResult.get());

  // First argument should be a pointer to a struct.
  QualType PtrArgType = PtrArgResult.get()->getType();
  if (!PtrArgType->isPointerType() ||
      !PtrArgType->getPointeeType()->isRecordType()) {
    S.Diag(PtrArgResult.get()->getBeginLoc(),
           diag::err_expected_struct_pointer_argument)
        << 1 << TheCall->getDirectCallee() << PtrArgType;
    return ExprError();
  }
  QualType Pointee = PtrArgType->getPointeeType();
  const RecordDecl *RD = Pointee->getAsRecordDecl();
  // Try to instantiate the class template as appropriate; otherwise, access to
  // its data() may lead to a crash.
  if (S.RequireCompleteType(PtrArgResult.get()->getBeginLoc(), Pointee,
                            diag::err_incomplete_type))
    return ExprError();
  // Second argument is a callable, but we can't fully validate it until we try
  // calling it.
  QualType FnArgType = TheCall->getArg(1)->getType();
  if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
      !FnArgType->isBlockPointerType() &&
      !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
    auto *BT = FnArgType->getAs<BuiltinType>();
    switch (BT ? BT->getKind() : BuiltinType::Void) {
    case BuiltinType::Dependent:
    case BuiltinType::Overload:
    case BuiltinType::BoundMember:
    case BuiltinType::PseudoObject:
    case BuiltinType::UnknownAny:
    case BuiltinType::BuiltinFn:
      // This might be a callable.
      break;

    default:
      S.Diag(TheCall->getArg(1)->getBeginLoc(),
             diag::err_expected_callable_argument)
          << 2 << TheCall->getDirectCallee() << FnArgType;
      return ExprError();
    }
  }

  BuiltinDumpStructGenerator Generator(S, TheCall);

  // Wrap parentheses around the given pointer. This is not necessary for
  // correct code generation, but it means that when we pretty-print the call
  // arguments in our diagnostics we will produce '(&s)->n' instead of the
  // incorrect '&s->n'.
  Expr *PtrArg = PtrArgResult.get();
  PtrArg = new (S.Context)
      ParenExpr(PtrArg->getBeginLoc(),
                S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
  if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
    return ExprError();

  return Generator.buildWrapper();
}

/// Validate __builtin_call_with_static_chain(call, chain): the first argument
/// must be a plain (non-block, non-builtin, non-pseudo-destructor) call and
/// the second a pointer used as the static chain.
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  // Retype the builtin itself as "ReturnTy (*)(ReturnTy, ChainTy)" so the
  // call through it type-checks.
  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy =
      S.Context.getFunctionType(ReturnTy, ArgTys,
                                FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  // The builtin call mirrors the wrapped call's result characteristics.
  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

/// Walks a scanf format string and, for each %s/%c/scanlist specifier with a
/// constant field width, compares the bytes scanf may write against the size
/// of the destination argument, invoking Diagnose on overflow.
class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of the
  // argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size).
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    // %s and scanlists append a terminating NUL byte; %c does not.
    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    // Only a literal, constant field width bounds what scanf writes.
    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    std::optional<llvm::APSInt> DestSizeAPS =
        ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Running lower-bound estimate of the formatted output size in bytes.
  size_t Size;
  /// Whether the format string contains Linux kernel's format specifier
  /// extension.
  bool IsKernelCompatible = true;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
914 case analyze_format_string::ConversionSpecifier::dArg: 915 case analyze_format_string::ConversionSpecifier::DArg: 916 case analyze_format_string::ConversionSpecifier::iArg: 917 case analyze_format_string::ConversionSpecifier::oArg: 918 case analyze_format_string::ConversionSpecifier::OArg: 919 case analyze_format_string::ConversionSpecifier::uArg: 920 case analyze_format_string::ConversionSpecifier::UArg: 921 case analyze_format_string::ConversionSpecifier::xArg: 922 case analyze_format_string::ConversionSpecifier::XArg: 923 Size += std::max(FieldWidth, Precision); 924 break; 925 926 // %g style conversion switches between %f or %e style dynamically. 927 // %g removes trailing zeros, and does not print decimal point if there are 928 // no digits that follow it. Thus %g can print a single digit. 929 // FIXME: If it is alternative form: 930 // For g and G conversions, trailing zeros are not removed from the result. 931 case analyze_format_string::ConversionSpecifier::gArg: 932 case analyze_format_string::ConversionSpecifier::GArg: 933 Size += 1; 934 break; 935 936 // Floating point number in the form '[+]ddd.ddd'. 937 case analyze_format_string::ConversionSpecifier::fArg: 938 case analyze_format_string::ConversionSpecifier::FArg: 939 Size += std::max(FieldWidth, 1 /* integer part */ + 940 (Precision ? 1 + Precision 941 : 0) /* period + decimal */); 942 break; 943 944 // Floating point number in the form '[-]d.ddde[+-]dd'. 945 case analyze_format_string::ConversionSpecifier::eArg: 946 case analyze_format_string::ConversionSpecifier::EArg: 947 Size += 948 std::max(FieldWidth, 949 1 /* integer part */ + 950 (Precision ? 1 + Precision : 0) /* period + decimal */ + 951 1 /* e or E letter */ + 2 /* exponent */); 952 break; 953 954 // Floating point number in the form '[-]0xh.hhhhp±dd'. 
955 case analyze_format_string::ConversionSpecifier::aArg: 956 case analyze_format_string::ConversionSpecifier::AArg: 957 Size += 958 std::max(FieldWidth, 959 2 /* 0x */ + 1 /* integer part */ + 960 (Precision ? 1 + Precision : 0) /* period + decimal */ + 961 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */); 962 break; 963 964 // Just a string. 965 case analyze_format_string::ConversionSpecifier::sArg: 966 case analyze_format_string::ConversionSpecifier::SArg: 967 Size += FieldWidth; 968 break; 969 970 // Just a pointer in the form '0xddd'. 971 case analyze_format_string::ConversionSpecifier::pArg: 972 // Linux kernel has its own extesion for `%p` specifier. 973 // Kernel Document: 974 // https://docs.kernel.org/core-api/printk-formats.html#pointer-types 975 IsKernelCompatible = false; 976 Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision); 977 break; 978 979 // A plain percent. 980 case analyze_format_string::ConversionSpecifier::PercentArg: 981 Size += 1; 982 break; 983 984 default: 985 break; 986 } 987 988 Size += FS.hasPlusPrefix() || FS.hasSpacePrefix(); 989 990 if (FS.hasAlternativeForm()) { 991 switch (FS.getConversionSpecifier().getKind()) { 992 // For o conversion, it increases the precision, if and only if necessary, 993 // to force the first digit of the result to be a zero 994 // (if the value and precision are both 0, a single 0 is printed) 995 case analyze_format_string::ConversionSpecifier::oArg: 996 // For b conversion, a nonzero result has 0b prefixed to it. 997 case analyze_format_string::ConversionSpecifier::bArg: 998 // For x (or X) conversion, a nonzero result has 0x (or 0X) prefixed to 999 // it. 1000 case analyze_format_string::ConversionSpecifier::xArg: 1001 case analyze_format_string::ConversionSpecifier::XArg: 1002 // Note: even when the prefix is added, if 1003 // (prefix_width <= FieldWidth - formatted_length) holds, 1004 // the prefix does not increase the format 1005 // size. 
e.g.(("%#3x", 0xf) is "0xf") 1006 1007 // If the result is zero, o, b, x, X adds nothing. 1008 break; 1009 // For a, A, e, E, f, F, g, and G conversions, 1010 // the result of converting a floating-point number always contains a 1011 // decimal-point 1012 case analyze_format_string::ConversionSpecifier::aArg: 1013 case analyze_format_string::ConversionSpecifier::AArg: 1014 case analyze_format_string::ConversionSpecifier::eArg: 1015 case analyze_format_string::ConversionSpecifier::EArg: 1016 case analyze_format_string::ConversionSpecifier::fArg: 1017 case analyze_format_string::ConversionSpecifier::FArg: 1018 case analyze_format_string::ConversionSpecifier::gArg: 1019 case analyze_format_string::ConversionSpecifier::GArg: 1020 Size += (Precision ? 0 : 1); 1021 break; 1022 // For other conversions, the behavior is undefined. 1023 default: 1024 break; 1025 } 1026 } 1027 assert(SpecifierLen <= Size && "no underflow"); 1028 Size -= SpecifierLen; 1029 return true; 1030 } 1031 1032 size_t getSizeLowerBound() const { return Size; } 1033 bool isKernelCompatible() const { return IsKernelCompatible; } 1034 1035 private: 1036 static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) { 1037 const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth(); 1038 size_t FieldWidth = 0; 1039 if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant) 1040 FieldWidth = FW.getConstantAmount(); 1041 return FieldWidth; 1042 } 1043 1044 static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) { 1045 const analyze_format_string::OptionalAmount &FW = FS.getPrecision(); 1046 size_t Precision = 0; 1047 1048 // See man 3 printf for default precision value based on the specifier. 
1049 switch (FW.getHowSpecified()) { 1050 case analyze_format_string::OptionalAmount::NotSpecified: 1051 switch (FS.getConversionSpecifier().getKind()) { 1052 default: 1053 break; 1054 case analyze_format_string::ConversionSpecifier::dArg: // %d 1055 case analyze_format_string::ConversionSpecifier::DArg: // %D 1056 case analyze_format_string::ConversionSpecifier::iArg: // %i 1057 Precision = 1; 1058 break; 1059 case analyze_format_string::ConversionSpecifier::oArg: // %d 1060 case analyze_format_string::ConversionSpecifier::OArg: // %D 1061 case analyze_format_string::ConversionSpecifier::uArg: // %d 1062 case analyze_format_string::ConversionSpecifier::UArg: // %D 1063 case analyze_format_string::ConversionSpecifier::xArg: // %d 1064 case analyze_format_string::ConversionSpecifier::XArg: // %D 1065 Precision = 1; 1066 break; 1067 case analyze_format_string::ConversionSpecifier::fArg: // %f 1068 case analyze_format_string::ConversionSpecifier::FArg: // %F 1069 case analyze_format_string::ConversionSpecifier::eArg: // %e 1070 case analyze_format_string::ConversionSpecifier::EArg: // %E 1071 case analyze_format_string::ConversionSpecifier::gArg: // %g 1072 case analyze_format_string::ConversionSpecifier::GArg: // %G 1073 Precision = 6; 1074 break; 1075 case analyze_format_string::ConversionSpecifier::pArg: // %d 1076 Precision = 1; 1077 break; 1078 } 1079 break; 1080 case analyze_format_string::OptionalAmount::Constant: 1081 Precision = FW.getConstantAmount(); 1082 break; 1083 default: 1084 break; 1085 } 1086 return Precision; 1087 } 1088 }; 1089 1090 } // namespace 1091 1092 static bool ProcessFormatStringLiteral(const Expr *FormatExpr, 1093 StringRef &FormatStrRef, size_t &StrLen, 1094 ASTContext &Context) { 1095 if (const auto *Format = dyn_cast<StringLiteral>(FormatExpr); 1096 Format && (Format->isOrdinary() || Format->isUTF8())) { 1097 FormatStrRef = Format->getString(); 1098 const ConstantArrayType *T = 1099 Context.getAsConstantArrayType(Format->getType()); 
    assert(T && "String literal not of constant array type!");
    size_t TypeSize = T->getSize().getZExtValue();
    // In case there's a null byte somewhere.
    StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
    return true;
  }
  return false;
}

/// Diagnose calls to fortifiable memory/string builtins (memcpy, strcpy,
/// sprintf, the __builtin___*_chk variants, scanf-family, ...) whose
/// statically-computable source size exceeds the destination object's size.
/// Emits -Wfortify-source style warnings via DiagRuntimeBehavior.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // Sizes cannot be computed for dependent or constant-evaluated calls.
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluatedContext())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  // A diagnose_as_builtin attribute redirects checking to another builtin,
  // with an argument-index remapping (see TranslateIndex below).
  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ? DABAttr->argIndices_begin()[Index]
                            : Index - DABIndices + FD->getNumParams();
    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;
    return NewIndex;
  };

  // Size taken from an explicit integer argument (e.g. the size operand of
  // the *_chk builtins), constant-evaluated.
  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(NewIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return std::nullopt;
    llvm::APSInt Integer = Result.Val.getInt();
    Integer.setIsUnsigned(true);
    return Integer;
  };

  // Size derived from the pointed-to object (__builtin_object_size style).
  auto ComputeSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return std::nullopt;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  // Size derived from the compile-time strlen of a string argument.
  auto ComputeStrLenArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return std::nullopt;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  std::optional<llvm::APSInt> SourceSize;
  std::optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else {
      FunctionName.consume_front("__builtin_");
    }
    return FunctionName;
  };

  // Per builtin: pick the diagnostic and how the source/destination sizes
  // are computed. Cases that fall out of the switch are diagnosed below.
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    // scanf has no stream/string operand, so its format and data arguments
    // sit one position earlier than fscanf/sscanf's.
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    StringRef FormatStrRef;
    size_t StrLen;
    if (!ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context))
      return;

    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    analyze_format_string::ParseScanfString(H, FormatBytes,
                                            FormatBytes + StrLen, getLangOpts(),
                                            Context.getTargetInfo());

    // Unlike the other cases, in this one we have already issued the diagnostic
    // here, so no need to continue (because unlike the other cases, here the
    // diagnostic refers to the argument number).
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    StringRef FormatStrRef;
    size_t StrLen;
    if (ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = H.isKernelCompatible()
                     ? diag::warn_format_overflow
                     : diag::warn_format_overflow_non_kprintf;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    // For the *_chk family, the last two arguments are the copy length and
    // the (compiler-computed) destination object size.
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    const auto *FormatExpr = TheCall->getArg(2)->IgnoreParenImpCasts();
    StringRef FormatStrRef;
    size_t StrLen;
    // Additionally warn about truncation: the format string needs more bytes
    // than the explicit size argument allows (size 0 is a legitimate probe).
    if (SourceSize &&
        ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), /*isFreeBSDKPrintf=*/false)) {
        llvm::APSInt FormatSize =
            llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                .extOrTrunc(SizeTypeWidth);
        if (FormatSize > *SourceSize && *SourceSize != 0) {
          unsigned TruncationDiagID =
              H.isKernelCompatible() ? diag::warn_format_truncation
                                     : diag::warn_format_truncation_non_kprintf;
          SmallString<16> SpecifiedSizeStr;
          SmallString<16> FormatSizeStr;
          SourceSize->toString(SpecifiedSizeStr, /*Radix=*/10);
          FormatSize.toString(FormatSizeStr, /*Radix=*/10);
          DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                              PDiag(TruncationDiagID)
                                  << GetFunctionName() << SpecifiedSizeStr
                                  << FormatSizeStr);
        }
      }
    }
    DestinationSize = ComputeSizeArgument(0);
  }
  }

  // Only warn when both sizes are known and the source is strictly larger.
  if (!SourceSize || !DestinationSize ||
      llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0)
    return;

  StringRef FunctionName = GetFunctionName();

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}

/// Checks that a builtin is used inside a scope with the required SEH flags
/// (e.g. only valid lexically within a __except block); emits \p DiagID and
/// returns true otherwise.
static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  // Walk up to the innermost SEH __except scope, then verify it carries the
  // scope flags this builtin requires.
  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

/// Returns true if \p Arg has block pointer type.
static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

/// Diagnoses use of a subgroup builtin when neither the cl_khr_subgroups
/// extension nor the __opencl_c_subgroups feature is supported.
static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  // OpenCL device can support extension but not the feature as extension
  // requires subgroup independent forward progress, but subgroup independent
  // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
      !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
                                        S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee()
        << "cl_khr_subgroups or __opencl_c_subgroups";
    return true;
  }
  return false;
}

/// Checks a builtin taking exactly (ndrange_t, block) arguments: the subgroup
/// extension must be available, the first argument must be an ndrange_t, and
/// the block's parameters must all be 'local void*'.
static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

/// Checks that arguments [Start, End] (inclusive) of \p TheCall are integers
/// convertible to the target's size_t; returns true if any is not.
static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameter of passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  // Every overload takes at least (queue_t, flags, ndrange, ...).
  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs << /*is non object*/ 0;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific case has been detected, give generic error
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument of \p Call is not a pipe, or its
/// access qualifier (read_only/write_only) does not match the direction of
/// the pipe builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since return type of reserve_read/write_pipe built-in function is
  // reserve_id_t, which is not defined in the builtin def file, we use int
  // as return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
1899 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1900 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1901 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1902 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1903 return true; 1904 } 1905 1906 return false; 1907 } 1908 1909 // Performs a semantic analysis on the call to built-in Pipe 1910 // Query Functions. 1911 // \param S Reference to the semantic analyzer. 1912 // \param Call The call to the builtin function to be analyzed. 1913 // \return True if a semantic error was found, false otherwise. 1914 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) { 1915 if (checkArgCount(S, Call, 1)) 1916 return true; 1917 1918 if (!Call->getArg(0)->getType()->isPipeType()) { 1919 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg) 1920 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange(); 1921 return true; 1922 } 1923 1924 return false; 1925 } 1926 1927 // OpenCL v2.0 s6.13.9 - Address space qualifier functions. 1928 // Performs semantic analysis for the to_global/local/private call. 1929 // \param S Reference to the semantic analyzer. 1930 // \param BuiltinID ID of the builtin function. 1931 // \param Call A pointer to the builtin call. 1932 // \return True if a semantic error has been found, false otherwise. 
1933 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, 1934 CallExpr *Call) { 1935 if (checkArgCount(S, Call, 1)) 1936 return true; 1937 1938 auto RT = Call->getArg(0)->getType(); 1939 if (!RT->isPointerType() || RT->getPointeeType() 1940 .getAddressSpace() == LangAS::opencl_constant) { 1941 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg) 1942 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); 1943 return true; 1944 } 1945 1946 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) { 1947 S.Diag(Call->getArg(0)->getBeginLoc(), 1948 diag::warn_opencl_generic_address_space_arg) 1949 << Call->getDirectCallee()->getNameInfo().getAsString() 1950 << Call->getArg(0)->getSourceRange(); 1951 } 1952 1953 RT = RT->getPointeeType(); 1954 auto Qual = RT.getQualifiers(); 1955 switch (BuiltinID) { 1956 case Builtin::BIto_global: 1957 Qual.setAddressSpace(LangAS::opencl_global); 1958 break; 1959 case Builtin::BIto_local: 1960 Qual.setAddressSpace(LangAS::opencl_local); 1961 break; 1962 case Builtin::BIto_private: 1963 Qual.setAddressSpace(LangAS::opencl_private); 1964 break; 1965 default: 1966 llvm_unreachable("Invalid builtin function"); 1967 } 1968 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( 1969 RT.getUnqualifiedType(), Qual))); 1970 1971 return false; 1972 } 1973 1974 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) { 1975 if (checkArgCount(S, TheCall, 1)) 1976 return ExprError(); 1977 1978 // Compute __builtin_launder's parameter type from the argument. 1979 // The parameter type is: 1980 // * The type of the argument if it's not an array or function type, 1981 // Otherwise, 1982 // * The decayed argument type. 
1983 QualType ParamTy = [&]() { 1984 QualType ArgTy = TheCall->getArg(0)->getType(); 1985 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe()) 1986 return S.Context.getPointerType(Ty->getElementType()); 1987 if (ArgTy->isFunctionType()) { 1988 return S.Context.getPointerType(ArgTy); 1989 } 1990 return ArgTy; 1991 }(); 1992 1993 TheCall->setType(ParamTy); 1994 1995 auto DiagSelect = [&]() -> std::optional<unsigned> { 1996 if (!ParamTy->isPointerType()) 1997 return 0; 1998 if (ParamTy->isFunctionPointerType()) 1999 return 1; 2000 if (ParamTy->isVoidPointerType()) 2001 return 2; 2002 return std::optional<unsigned>{}; 2003 }(); 2004 if (DiagSelect) { 2005 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg) 2006 << *DiagSelect << TheCall->getSourceRange(); 2007 return ExprError(); 2008 } 2009 2010 // We either have an incomplete class type, or we have a class template 2011 // whose instantiation has not been forced. Example: 2012 // 2013 // template <class T> struct Foo { T value; }; 2014 // Foo<int> *p = nullptr; 2015 // auto *d = __builtin_launder(p); 2016 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(), 2017 diag::err_incomplete_type)) 2018 return ExprError(); 2019 2020 assert(ParamTy->getPointeeType()->isObjectType() && 2021 "Unhandled non-object pointer case"); 2022 2023 InitializedEntity Entity = 2024 InitializedEntity::InitializeParameter(S.Context, ParamTy, false); 2025 ExprResult Arg = 2026 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0)); 2027 if (Arg.isInvalid()) 2028 return ExprError(); 2029 TheCall->setArg(0, Arg.get()); 2030 2031 return TheCall; 2032 } 2033 2034 // Emit an error and return true if the current object format type is in the 2035 // list of unsupported types. 
2036 static bool CheckBuiltinTargetNotInUnsupported( 2037 Sema &S, unsigned BuiltinID, CallExpr *TheCall, 2038 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) { 2039 llvm::Triple::ObjectFormatType CurObjFormat = 2040 S.getASTContext().getTargetInfo().getTriple().getObjectFormat(); 2041 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) { 2042 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 2043 << TheCall->getSourceRange(); 2044 return true; 2045 } 2046 return false; 2047 } 2048 2049 // Emit an error and return true if the current architecture is not in the list 2050 // of supported architectures. 2051 static bool 2052 CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall, 2053 ArrayRef<llvm::Triple::ArchType> SupportedArchs) { 2054 llvm::Triple::ArchType CurArch = 2055 S.getASTContext().getTargetInfo().getTriple().getArch(); 2056 if (llvm::is_contained(SupportedArchs, CurArch)) 2057 return false; 2058 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 2059 << TheCall->getSourceRange(); 2060 return true; 2061 } 2062 2063 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, 2064 SourceLocation CallSiteLoc); 2065 2066 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2067 CallExpr *TheCall) { 2068 switch (TI.getTriple().getArch()) { 2069 default: 2070 // Some builtins don't require additional checking, so just consider these 2071 // acceptable. 
2072 return false; 2073 case llvm::Triple::arm: 2074 case llvm::Triple::armeb: 2075 case llvm::Triple::thumb: 2076 case llvm::Triple::thumbeb: 2077 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 2078 case llvm::Triple::aarch64: 2079 case llvm::Triple::aarch64_32: 2080 case llvm::Triple::aarch64_be: 2081 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 2082 case llvm::Triple::bpfeb: 2083 case llvm::Triple::bpfel: 2084 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 2085 case llvm::Triple::hexagon: 2086 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 2087 case llvm::Triple::mips: 2088 case llvm::Triple::mipsel: 2089 case llvm::Triple::mips64: 2090 case llvm::Triple::mips64el: 2091 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 2092 case llvm::Triple::systemz: 2093 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 2094 case llvm::Triple::x86: 2095 case llvm::Triple::x86_64: 2096 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 2097 case llvm::Triple::ppc: 2098 case llvm::Triple::ppcle: 2099 case llvm::Triple::ppc64: 2100 case llvm::Triple::ppc64le: 2101 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 2102 case llvm::Triple::amdgcn: 2103 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 2104 case llvm::Triple::riscv32: 2105 case llvm::Triple::riscv64: 2106 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall); 2107 case llvm::Triple::loongarch32: 2108 case llvm::Triple::loongarch64: 2109 return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall); 2110 case llvm::Triple::wasm32: 2111 case llvm::Triple::wasm64: 2112 return CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall); 2113 case llvm::Triple::nvptx: 2114 case llvm::Triple::nvptx64: 2115 return CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall); 2116 } 2117 } 2118 2119 // Check if \p Ty is a valid type for the elementwise math builtins. 
// If it is not a valid type (neither a vector nor a valid matrix element
// type), emit an error message and return true. Otherwise return false.
static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                        QualType Ty) {
  if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) {
    return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
           << 1 << /* vector, integer or float ty*/ 0 << Ty;
  }

  return false;
}

// Check that \p ArgTy is a real floating-point type, or a vector of such.
// Emits a diagnostic (mentioning argument \p ArgIndex) and returns true
// otherwise.
static bool checkFPMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                          QualType ArgTy, int ArgIndex) {
  // For vectors, validate the element type rather than the vector itself.
  QualType EltTy = ArgTy;
  if (auto *VecTy = EltTy->getAs<VectorType>())
    EltTy = VecTy->getElementType();

  if (!EltTy->isRealFloatingType()) {
    return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
           << ArgIndex << /* vector or float ty*/ 5 << ArgTy;
  }

  return false;
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    // If we don't have enough arguments, continue so we can issue better
    // diagnostic in checkArgCount(...)
2166 if (ArgNo < TheCall->getNumArgs() && 2167 SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 2168 return true; 2169 ICEArguments &= ~(1 << ArgNo); 2170 } 2171 2172 FPOptions FPO; 2173 switch (BuiltinID) { 2174 case Builtin::BI__builtin___CFStringMakeConstantString: 2175 // CFStringMakeConstantString is currently not implemented for GOFF (i.e., 2176 // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported 2177 if (CheckBuiltinTargetNotInUnsupported( 2178 *this, BuiltinID, TheCall, 2179 {llvm::Triple::GOFF, llvm::Triple::XCOFF})) 2180 return ExprError(); 2181 assert(TheCall->getNumArgs() == 1 && 2182 "Wrong # arguments to builtin CFStringMakeConstantString"); 2183 if (CheckObjCString(TheCall->getArg(0))) 2184 return ExprError(); 2185 break; 2186 case Builtin::BI__builtin_ms_va_start: 2187 case Builtin::BI__builtin_stdarg_start: 2188 case Builtin::BI__builtin_va_start: 2189 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2190 return ExprError(); 2191 break; 2192 case Builtin::BI__va_start: { 2193 switch (Context.getTargetInfo().getTriple().getArch()) { 2194 case llvm::Triple::aarch64: 2195 case llvm::Triple::arm: 2196 case llvm::Triple::thumb: 2197 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 2198 return ExprError(); 2199 break; 2200 default: 2201 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2202 return ExprError(); 2203 break; 2204 } 2205 break; 2206 } 2207 2208 // The acquire, release, and no fence variants are ARM and AArch64 only. 
2209 case Builtin::BI_interlockedbittestandset_acq: 2210 case Builtin::BI_interlockedbittestandset_rel: 2211 case Builtin::BI_interlockedbittestandset_nf: 2212 case Builtin::BI_interlockedbittestandreset_acq: 2213 case Builtin::BI_interlockedbittestandreset_rel: 2214 case Builtin::BI_interlockedbittestandreset_nf: 2215 if (CheckBuiltinTargetInSupported( 2216 *this, BuiltinID, TheCall, 2217 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 2218 return ExprError(); 2219 break; 2220 2221 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 2222 case Builtin::BI_bittest64: 2223 case Builtin::BI_bittestandcomplement64: 2224 case Builtin::BI_bittestandreset64: 2225 case Builtin::BI_bittestandset64: 2226 case Builtin::BI_interlockedbittestandreset64: 2227 case Builtin::BI_interlockedbittestandset64: 2228 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 2229 {llvm::Triple::x86_64, llvm::Triple::arm, 2230 llvm::Triple::thumb, 2231 llvm::Triple::aarch64})) 2232 return ExprError(); 2233 break; 2234 2235 case Builtin::BI__builtin_set_flt_rounds: 2236 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 2237 {llvm::Triple::x86, llvm::Triple::x86_64, 2238 llvm::Triple::arm, llvm::Triple::thumb, 2239 llvm::Triple::aarch64})) 2240 return ExprError(); 2241 break; 2242 2243 case Builtin::BI__builtin_isgreater: 2244 case Builtin::BI__builtin_isgreaterequal: 2245 case Builtin::BI__builtin_isless: 2246 case Builtin::BI__builtin_islessequal: 2247 case Builtin::BI__builtin_islessgreater: 2248 case Builtin::BI__builtin_isunordered: 2249 if (SemaBuiltinUnorderedCompare(TheCall, BuiltinID)) 2250 return ExprError(); 2251 break; 2252 case Builtin::BI__builtin_fpclassify: 2253 if (SemaBuiltinFPClassification(TheCall, 6, BuiltinID)) 2254 return ExprError(); 2255 break; 2256 case Builtin::BI__builtin_isfpclass: 2257 if (SemaBuiltinFPClassification(TheCall, 2, BuiltinID)) 2258 return ExprError(); 2259 break; 2260 case Builtin::BI__builtin_isfinite: 
2261 case Builtin::BI__builtin_isinf: 2262 case Builtin::BI__builtin_isinf_sign: 2263 case Builtin::BI__builtin_isnan: 2264 case Builtin::BI__builtin_issignaling: 2265 case Builtin::BI__builtin_isnormal: 2266 case Builtin::BI__builtin_issubnormal: 2267 case Builtin::BI__builtin_iszero: 2268 case Builtin::BI__builtin_signbit: 2269 case Builtin::BI__builtin_signbitf: 2270 case Builtin::BI__builtin_signbitl: 2271 if (SemaBuiltinFPClassification(TheCall, 1, BuiltinID)) 2272 return ExprError(); 2273 break; 2274 case Builtin::BI__builtin_shufflevector: 2275 return SemaBuiltinShuffleVector(TheCall); 2276 // TheCall will be freed by the smart pointer here, but that's fine, since 2277 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 2278 case Builtin::BI__builtin_prefetch: 2279 if (SemaBuiltinPrefetch(TheCall)) 2280 return ExprError(); 2281 break; 2282 case Builtin::BI__builtin_alloca_with_align: 2283 case Builtin::BI__builtin_alloca_with_align_uninitialized: 2284 if (SemaBuiltinAllocaWithAlign(TheCall)) 2285 return ExprError(); 2286 [[fallthrough]]; 2287 case Builtin::BI__builtin_alloca: 2288 case Builtin::BI__builtin_alloca_uninitialized: 2289 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 2290 << TheCall->getDirectCallee(); 2291 break; 2292 case Builtin::BI__arithmetic_fence: 2293 if (SemaBuiltinArithmeticFence(TheCall)) 2294 return ExprError(); 2295 break; 2296 case Builtin::BI__assume: 2297 case Builtin::BI__builtin_assume: 2298 if (SemaBuiltinAssume(TheCall)) 2299 return ExprError(); 2300 break; 2301 case Builtin::BI__builtin_assume_aligned: 2302 if (SemaBuiltinAssumeAligned(TheCall)) 2303 return ExprError(); 2304 break; 2305 case Builtin::BI__builtin_dynamic_object_size: 2306 case Builtin::BI__builtin_object_size: 2307 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 2308 return ExprError(); 2309 break; 2310 case Builtin::BI__builtin_longjmp: 2311 if (SemaBuiltinLongjmp(TheCall)) 2312 return ExprError(); 2313 break; 2314 case 
Builtin::BI__builtin_setjmp: 2315 if (SemaBuiltinSetjmp(TheCall)) 2316 return ExprError(); 2317 break; 2318 case Builtin::BI__builtin_classify_type: 2319 if (checkArgCount(*this, TheCall, 1)) return true; 2320 TheCall->setType(Context.IntTy); 2321 break; 2322 case Builtin::BI__builtin_complex: 2323 if (SemaBuiltinComplex(TheCall)) 2324 return ExprError(); 2325 break; 2326 case Builtin::BI__builtin_constant_p: { 2327 if (checkArgCount(*this, TheCall, 1)) return true; 2328 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 2329 if (Arg.isInvalid()) return true; 2330 TheCall->setArg(0, Arg.get()); 2331 TheCall->setType(Context.IntTy); 2332 break; 2333 } 2334 case Builtin::BI__builtin_launder: 2335 return SemaBuiltinLaunder(*this, TheCall); 2336 case Builtin::BI__sync_fetch_and_add: 2337 case Builtin::BI__sync_fetch_and_add_1: 2338 case Builtin::BI__sync_fetch_and_add_2: 2339 case Builtin::BI__sync_fetch_and_add_4: 2340 case Builtin::BI__sync_fetch_and_add_8: 2341 case Builtin::BI__sync_fetch_and_add_16: 2342 case Builtin::BI__sync_fetch_and_sub: 2343 case Builtin::BI__sync_fetch_and_sub_1: 2344 case Builtin::BI__sync_fetch_and_sub_2: 2345 case Builtin::BI__sync_fetch_and_sub_4: 2346 case Builtin::BI__sync_fetch_and_sub_8: 2347 case Builtin::BI__sync_fetch_and_sub_16: 2348 case Builtin::BI__sync_fetch_and_or: 2349 case Builtin::BI__sync_fetch_and_or_1: 2350 case Builtin::BI__sync_fetch_and_or_2: 2351 case Builtin::BI__sync_fetch_and_or_4: 2352 case Builtin::BI__sync_fetch_and_or_8: 2353 case Builtin::BI__sync_fetch_and_or_16: 2354 case Builtin::BI__sync_fetch_and_and: 2355 case Builtin::BI__sync_fetch_and_and_1: 2356 case Builtin::BI__sync_fetch_and_and_2: 2357 case Builtin::BI__sync_fetch_and_and_4: 2358 case Builtin::BI__sync_fetch_and_and_8: 2359 case Builtin::BI__sync_fetch_and_and_16: 2360 case Builtin::BI__sync_fetch_and_xor: 2361 case Builtin::BI__sync_fetch_and_xor_1: 2362 case Builtin::BI__sync_fetch_and_xor_2: 2363 case 
Builtin::BI__sync_fetch_and_xor_4: 2364 case Builtin::BI__sync_fetch_and_xor_8: 2365 case Builtin::BI__sync_fetch_and_xor_16: 2366 case Builtin::BI__sync_fetch_and_nand: 2367 case Builtin::BI__sync_fetch_and_nand_1: 2368 case Builtin::BI__sync_fetch_and_nand_2: 2369 case Builtin::BI__sync_fetch_and_nand_4: 2370 case Builtin::BI__sync_fetch_and_nand_8: 2371 case Builtin::BI__sync_fetch_and_nand_16: 2372 case Builtin::BI__sync_add_and_fetch: 2373 case Builtin::BI__sync_add_and_fetch_1: 2374 case Builtin::BI__sync_add_and_fetch_2: 2375 case Builtin::BI__sync_add_and_fetch_4: 2376 case Builtin::BI__sync_add_and_fetch_8: 2377 case Builtin::BI__sync_add_and_fetch_16: 2378 case Builtin::BI__sync_sub_and_fetch: 2379 case Builtin::BI__sync_sub_and_fetch_1: 2380 case Builtin::BI__sync_sub_and_fetch_2: 2381 case Builtin::BI__sync_sub_and_fetch_4: 2382 case Builtin::BI__sync_sub_and_fetch_8: 2383 case Builtin::BI__sync_sub_and_fetch_16: 2384 case Builtin::BI__sync_and_and_fetch: 2385 case Builtin::BI__sync_and_and_fetch_1: 2386 case Builtin::BI__sync_and_and_fetch_2: 2387 case Builtin::BI__sync_and_and_fetch_4: 2388 case Builtin::BI__sync_and_and_fetch_8: 2389 case Builtin::BI__sync_and_and_fetch_16: 2390 case Builtin::BI__sync_or_and_fetch: 2391 case Builtin::BI__sync_or_and_fetch_1: 2392 case Builtin::BI__sync_or_and_fetch_2: 2393 case Builtin::BI__sync_or_and_fetch_4: 2394 case Builtin::BI__sync_or_and_fetch_8: 2395 case Builtin::BI__sync_or_and_fetch_16: 2396 case Builtin::BI__sync_xor_and_fetch: 2397 case Builtin::BI__sync_xor_and_fetch_1: 2398 case Builtin::BI__sync_xor_and_fetch_2: 2399 case Builtin::BI__sync_xor_and_fetch_4: 2400 case Builtin::BI__sync_xor_and_fetch_8: 2401 case Builtin::BI__sync_xor_and_fetch_16: 2402 case Builtin::BI__sync_nand_and_fetch: 2403 case Builtin::BI__sync_nand_and_fetch_1: 2404 case Builtin::BI__sync_nand_and_fetch_2: 2405 case Builtin::BI__sync_nand_and_fetch_4: 2406 case Builtin::BI__sync_nand_and_fetch_8: 2407 case 
Builtin::BI__sync_nand_and_fetch_16: 2408 case Builtin::BI__sync_val_compare_and_swap: 2409 case Builtin::BI__sync_val_compare_and_swap_1: 2410 case Builtin::BI__sync_val_compare_and_swap_2: 2411 case Builtin::BI__sync_val_compare_and_swap_4: 2412 case Builtin::BI__sync_val_compare_and_swap_8: 2413 case Builtin::BI__sync_val_compare_and_swap_16: 2414 case Builtin::BI__sync_bool_compare_and_swap: 2415 case Builtin::BI__sync_bool_compare_and_swap_1: 2416 case Builtin::BI__sync_bool_compare_and_swap_2: 2417 case Builtin::BI__sync_bool_compare_and_swap_4: 2418 case Builtin::BI__sync_bool_compare_and_swap_8: 2419 case Builtin::BI__sync_bool_compare_and_swap_16: 2420 case Builtin::BI__sync_lock_test_and_set: 2421 case Builtin::BI__sync_lock_test_and_set_1: 2422 case Builtin::BI__sync_lock_test_and_set_2: 2423 case Builtin::BI__sync_lock_test_and_set_4: 2424 case Builtin::BI__sync_lock_test_and_set_8: 2425 case Builtin::BI__sync_lock_test_and_set_16: 2426 case Builtin::BI__sync_lock_release: 2427 case Builtin::BI__sync_lock_release_1: 2428 case Builtin::BI__sync_lock_release_2: 2429 case Builtin::BI__sync_lock_release_4: 2430 case Builtin::BI__sync_lock_release_8: 2431 case Builtin::BI__sync_lock_release_16: 2432 case Builtin::BI__sync_swap: 2433 case Builtin::BI__sync_swap_1: 2434 case Builtin::BI__sync_swap_2: 2435 case Builtin::BI__sync_swap_4: 2436 case Builtin::BI__sync_swap_8: 2437 case Builtin::BI__sync_swap_16: 2438 return SemaBuiltinAtomicOverloaded(TheCallResult); 2439 case Builtin::BI__sync_synchronize: 2440 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 2441 << TheCall->getCallee()->getSourceRange(); 2442 break; 2443 case Builtin::BI__builtin_nontemporal_load: 2444 case Builtin::BI__builtin_nontemporal_store: 2445 return SemaBuiltinNontemporalOverloaded(TheCallResult); 2446 case Builtin::BI__builtin_memcpy_inline: { 2447 clang::Expr *SizeOp = TheCall->getArg(2); 2448 // We warn about copying to or from `nullptr` pointers when `size` is 2449 
// greater than 0. When `size` is value dependent we cannot evaluate its 2450 // value so we bail out. 2451 if (SizeOp->isValueDependent()) 2452 break; 2453 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 2454 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2455 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 2456 } 2457 break; 2458 } 2459 case Builtin::BI__builtin_memset_inline: { 2460 clang::Expr *SizeOp = TheCall->getArg(2); 2461 // We warn about filling to `nullptr` pointers when `size` is greater than 2462 // 0. When `size` is value dependent we cannot evaluate its value so we bail 2463 // out. 2464 if (SizeOp->isValueDependent()) 2465 break; 2466 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) 2467 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2468 break; 2469 } 2470 #define BUILTIN(ID, TYPE, ATTRS) 2471 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 2472 case Builtin::BI##ID: \ 2473 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 2474 #include "clang/Basic/Builtins.def" 2475 case Builtin::BI__annotation: 2476 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 2477 return ExprError(); 2478 break; 2479 case Builtin::BI__builtin_annotation: 2480 if (SemaBuiltinAnnotation(*this, TheCall)) 2481 return ExprError(); 2482 break; 2483 case Builtin::BI__builtin_addressof: 2484 if (SemaBuiltinAddressof(*this, TheCall)) 2485 return ExprError(); 2486 break; 2487 case Builtin::BI__builtin_function_start: 2488 if (SemaBuiltinFunctionStart(*this, TheCall)) 2489 return ExprError(); 2490 break; 2491 case Builtin::BI__builtin_is_aligned: 2492 case Builtin::BI__builtin_align_up: 2493 case Builtin::BI__builtin_align_down: 2494 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 2495 return ExprError(); 2496 break; 2497 case Builtin::BI__builtin_add_overflow: 2498 case Builtin::BI__builtin_sub_overflow: 2499 case Builtin::BI__builtin_mul_overflow: 2500 if (SemaBuiltinOverflow(*this, 
TheCall, BuiltinID)) 2501 return ExprError(); 2502 break; 2503 case Builtin::BI__builtin_operator_new: 2504 case Builtin::BI__builtin_operator_delete: { 2505 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 2506 ExprResult Res = 2507 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 2508 if (Res.isInvalid()) 2509 CorrectDelayedTyposInExpr(TheCallResult.get()); 2510 return Res; 2511 } 2512 case Builtin::BI__builtin_dump_struct: 2513 return SemaBuiltinDumpStruct(*this, TheCall); 2514 case Builtin::BI__builtin_expect_with_probability: { 2515 // We first want to ensure we are called with 3 arguments 2516 if (checkArgCount(*this, TheCall, 3)) 2517 return ExprError(); 2518 // then check probability is constant float in range [0.0, 1.0] 2519 const Expr *ProbArg = TheCall->getArg(2); 2520 SmallVector<PartialDiagnosticAt, 8> Notes; 2521 Expr::EvalResult Eval; 2522 Eval.Diag = &Notes; 2523 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 2524 !Eval.Val.isFloat()) { 2525 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 2526 << ProbArg->getSourceRange(); 2527 for (const PartialDiagnosticAt &PDiag : Notes) 2528 Diag(PDiag.first, PDiag.second); 2529 return ExprError(); 2530 } 2531 llvm::APFloat Probability = Eval.Val.getFloat(); 2532 bool LoseInfo = false; 2533 Probability.convert(llvm::APFloat::IEEEdouble(), 2534 llvm::RoundingMode::Dynamic, &LoseInfo); 2535 if (!(Probability >= llvm::APFloat(0.0) && 2536 Probability <= llvm::APFloat(1.0))) { 2537 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 2538 << ProbArg->getSourceRange(); 2539 return ExprError(); 2540 } 2541 break; 2542 } 2543 case Builtin::BI__builtin_preserve_access_index: 2544 if (SemaBuiltinPreserveAI(*this, TheCall)) 2545 return ExprError(); 2546 break; 2547 case Builtin::BI__builtin_call_with_static_chain: 2548 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 2549 return ExprError(); 2550 break; 2551 case Builtin::BI__exception_code: 
2552 case Builtin::BI_exception_code: 2553 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 2554 diag::err_seh___except_block)) 2555 return ExprError(); 2556 break; 2557 case Builtin::BI__exception_info: 2558 case Builtin::BI_exception_info: 2559 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2560 diag::err_seh___except_filter)) 2561 return ExprError(); 2562 break; 2563 case Builtin::BI__GetExceptionInfo: 2564 if (checkArgCount(*this, TheCall, 1)) 2565 return ExprError(); 2566 2567 if (CheckCXXThrowOperand( 2568 TheCall->getBeginLoc(), 2569 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2570 TheCall)) 2571 return ExprError(); 2572 2573 TheCall->setType(Context.VoidPtrTy); 2574 break; 2575 case Builtin::BIaddressof: 2576 case Builtin::BI__addressof: 2577 case Builtin::BIforward: 2578 case Builtin::BIforward_like: 2579 case Builtin::BImove: 2580 case Builtin::BImove_if_noexcept: 2581 case Builtin::BIas_const: { 2582 // These are all expected to be of the form 2583 // T &/&&/* f(U &/&&) 2584 // where T and U only differ in qualification. 2585 if (checkArgCount(*this, TheCall, 1)) 2586 return ExprError(); 2587 QualType Param = FDecl->getParamDecl(0)->getType(); 2588 QualType Result = FDecl->getReturnType(); 2589 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof || 2590 BuiltinID == Builtin::BI__addressof; 2591 if (!(Param->isReferenceType() && 2592 (ReturnsPointer ? Result->isAnyPointerType() 2593 : Result->isReferenceType()) && 2594 Context.hasSameUnqualifiedType(Param->getPointeeType(), 2595 Result->getPointeeType()))) { 2596 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported) 2597 << FDecl; 2598 return ExprError(); 2599 } 2600 break; 2601 } 2602 // OpenCL v2.0, s6.13.16 - Pipe functions 2603 case Builtin::BIread_pipe: 2604 case Builtin::BIwrite_pipe: 2605 // Since those two functions are declared with var args, we need a semantic 2606 // check for the argument. 
2607 if (SemaBuiltinRWPipe(*this, TheCall)) 2608 return ExprError(); 2609 break; 2610 case Builtin::BIreserve_read_pipe: 2611 case Builtin::BIreserve_write_pipe: 2612 case Builtin::BIwork_group_reserve_read_pipe: 2613 case Builtin::BIwork_group_reserve_write_pipe: 2614 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2615 return ExprError(); 2616 break; 2617 case Builtin::BIsub_group_reserve_read_pipe: 2618 case Builtin::BIsub_group_reserve_write_pipe: 2619 if (checkOpenCLSubgroupExt(*this, TheCall) || 2620 SemaBuiltinReserveRWPipe(*this, TheCall)) 2621 return ExprError(); 2622 break; 2623 case Builtin::BIcommit_read_pipe: 2624 case Builtin::BIcommit_write_pipe: 2625 case Builtin::BIwork_group_commit_read_pipe: 2626 case Builtin::BIwork_group_commit_write_pipe: 2627 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2628 return ExprError(); 2629 break; 2630 case Builtin::BIsub_group_commit_read_pipe: 2631 case Builtin::BIsub_group_commit_write_pipe: 2632 if (checkOpenCLSubgroupExt(*this, TheCall) || 2633 SemaBuiltinCommitRWPipe(*this, TheCall)) 2634 return ExprError(); 2635 break; 2636 case Builtin::BIget_pipe_num_packets: 2637 case Builtin::BIget_pipe_max_packets: 2638 if (SemaBuiltinPipePackets(*this, TheCall)) 2639 return ExprError(); 2640 break; 2641 case Builtin::BIto_global: 2642 case Builtin::BIto_local: 2643 case Builtin::BIto_private: 2644 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2645 return ExprError(); 2646 break; 2647 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 
2648 case Builtin::BIenqueue_kernel: 2649 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 2650 return ExprError(); 2651 break; 2652 case Builtin::BIget_kernel_work_group_size: 2653 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 2654 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 2655 return ExprError(); 2656 break; 2657 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 2658 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 2659 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 2660 return ExprError(); 2661 break; 2662 case Builtin::BI__builtin_os_log_format: 2663 Cleanup.setExprNeedsCleanups(true); 2664 [[fallthrough]]; 2665 case Builtin::BI__builtin_os_log_format_buffer_size: 2666 if (SemaBuiltinOSLogFormat(TheCall)) 2667 return ExprError(); 2668 break; 2669 case Builtin::BI__builtin_frame_address: 2670 case Builtin::BI__builtin_return_address: { 2671 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 2672 return ExprError(); 2673 2674 // -Wframe-address warning if non-zero passed to builtin 2675 // return/frame address. 2676 Expr::EvalResult Result; 2677 if (!TheCall->getArg(0)->isValueDependent() && 2678 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 2679 Result.Val.getInt() != 0) 2680 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 2681 << ((BuiltinID == Builtin::BI__builtin_return_address) 2682 ? "__builtin_return_address" 2683 : "__builtin_frame_address") 2684 << TheCall->getSourceRange(); 2685 break; 2686 } 2687 2688 case Builtin::BI__builtin_nondeterministic_value: { 2689 if (SemaBuiltinNonDeterministicValue(TheCall)) 2690 return ExprError(); 2691 break; 2692 } 2693 2694 // __builtin_elementwise_abs restricts the element type to signed integers or 2695 // floating point types only. 
2696 case Builtin::BI__builtin_elementwise_abs: { 2697 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2698 return ExprError(); 2699 2700 QualType ArgTy = TheCall->getArg(0)->getType(); 2701 QualType EltTy = ArgTy; 2702 2703 if (auto *VecTy = EltTy->getAs<VectorType>()) 2704 EltTy = VecTy->getElementType(); 2705 if (EltTy->isUnsignedIntegerType()) { 2706 Diag(TheCall->getArg(0)->getBeginLoc(), 2707 diag::err_builtin_invalid_arg_type) 2708 << 1 << /* signed integer or float ty*/ 3 << ArgTy; 2709 return ExprError(); 2710 } 2711 break; 2712 } 2713 2714 // These builtins restrict the element type to floating point 2715 // types only. 2716 case Builtin::BI__builtin_elementwise_ceil: 2717 case Builtin::BI__builtin_elementwise_cos: 2718 case Builtin::BI__builtin_elementwise_exp: 2719 case Builtin::BI__builtin_elementwise_exp2: 2720 case Builtin::BI__builtin_elementwise_floor: 2721 case Builtin::BI__builtin_elementwise_log: 2722 case Builtin::BI__builtin_elementwise_log2: 2723 case Builtin::BI__builtin_elementwise_log10: 2724 case Builtin::BI__builtin_elementwise_roundeven: 2725 case Builtin::BI__builtin_elementwise_round: 2726 case Builtin::BI__builtin_elementwise_rint: 2727 case Builtin::BI__builtin_elementwise_nearbyint: 2728 case Builtin::BI__builtin_elementwise_sin: 2729 case Builtin::BI__builtin_elementwise_sqrt: 2730 case Builtin::BI__builtin_elementwise_trunc: 2731 case Builtin::BI__builtin_elementwise_canonicalize: { 2732 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2733 return ExprError(); 2734 2735 QualType ArgTy = TheCall->getArg(0)->getType(); 2736 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(), 2737 ArgTy, 1)) 2738 return ExprError(); 2739 break; 2740 } 2741 case Builtin::BI__builtin_elementwise_fma: { 2742 if (SemaBuiltinElementwiseTernaryMath(TheCall)) 2743 return ExprError(); 2744 break; 2745 } 2746 2747 // These builtins restrict the element type to floating point 2748 // types only, and take in two arguments. 
2749 case Builtin::BI__builtin_elementwise_pow: { 2750 if (SemaBuiltinElementwiseMath(TheCall)) 2751 return ExprError(); 2752 2753 QualType ArgTy = TheCall->getArg(0)->getType(); 2754 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(), 2755 ArgTy, 1) || 2756 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(), 2757 ArgTy, 2)) 2758 return ExprError(); 2759 break; 2760 } 2761 2762 // These builtins restrict the element type to integer 2763 // types only. 2764 case Builtin::BI__builtin_elementwise_add_sat: 2765 case Builtin::BI__builtin_elementwise_sub_sat: { 2766 if (SemaBuiltinElementwiseMath(TheCall)) 2767 return ExprError(); 2768 2769 const Expr *Arg = TheCall->getArg(0); 2770 QualType ArgTy = Arg->getType(); 2771 QualType EltTy = ArgTy; 2772 2773 if (auto *VecTy = EltTy->getAs<VectorType>()) 2774 EltTy = VecTy->getElementType(); 2775 2776 if (!EltTy->isIntegerType()) { 2777 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2778 << 1 << /* integer ty */ 6 << ArgTy; 2779 return ExprError(); 2780 } 2781 break; 2782 } 2783 2784 case Builtin::BI__builtin_elementwise_min: 2785 case Builtin::BI__builtin_elementwise_max: 2786 if (SemaBuiltinElementwiseMath(TheCall)) 2787 return ExprError(); 2788 break; 2789 2790 case Builtin::BI__builtin_elementwise_bitreverse: { 2791 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2792 return ExprError(); 2793 2794 const Expr *Arg = TheCall->getArg(0); 2795 QualType ArgTy = Arg->getType(); 2796 QualType EltTy = ArgTy; 2797 2798 if (auto *VecTy = EltTy->getAs<VectorType>()) 2799 EltTy = VecTy->getElementType(); 2800 2801 if (!EltTy->isIntegerType()) { 2802 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2803 << 1 << /* integer ty */ 6 << ArgTy; 2804 return ExprError(); 2805 } 2806 break; 2807 } 2808 2809 case Builtin::BI__builtin_elementwise_copysign: { 2810 if (checkArgCount(*this, TheCall, 2)) 2811 return ExprError(); 2812 2813 ExprResult Magnitude = 
UsualUnaryConversions(TheCall->getArg(0)); 2814 ExprResult Sign = UsualUnaryConversions(TheCall->getArg(1)); 2815 if (Magnitude.isInvalid() || Sign.isInvalid()) 2816 return ExprError(); 2817 2818 QualType MagnitudeTy = Magnitude.get()->getType(); 2819 QualType SignTy = Sign.get()->getType(); 2820 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(), 2821 MagnitudeTy, 1) || 2822 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(), 2823 SignTy, 2)) { 2824 return ExprError(); 2825 } 2826 2827 if (MagnitudeTy.getCanonicalType() != SignTy.getCanonicalType()) { 2828 return Diag(Sign.get()->getBeginLoc(), 2829 diag::err_typecheck_call_different_arg_types) 2830 << MagnitudeTy << SignTy; 2831 } 2832 2833 TheCall->setArg(0, Magnitude.get()); 2834 TheCall->setArg(1, Sign.get()); 2835 TheCall->setType(Magnitude.get()->getType()); 2836 break; 2837 } 2838 case Builtin::BI__builtin_reduce_max: 2839 case Builtin::BI__builtin_reduce_min: { 2840 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2841 return ExprError(); 2842 2843 const Expr *Arg = TheCall->getArg(0); 2844 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2845 if (!TyA) { 2846 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2847 << 1 << /* vector ty*/ 4 << Arg->getType(); 2848 return ExprError(); 2849 } 2850 2851 TheCall->setType(TyA->getElementType()); 2852 break; 2853 } 2854 2855 // These builtins support vectors of integers only. 2856 // TODO: ADD/MUL should support floating-point types. 
2857 case Builtin::BI__builtin_reduce_add: 2858 case Builtin::BI__builtin_reduce_mul: 2859 case Builtin::BI__builtin_reduce_xor: 2860 case Builtin::BI__builtin_reduce_or: 2861 case Builtin::BI__builtin_reduce_and: { 2862 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2863 return ExprError(); 2864 2865 const Expr *Arg = TheCall->getArg(0); 2866 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2867 if (!TyA || !TyA->getElementType()->isIntegerType()) { 2868 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2869 << 1 << /* vector of integers */ 6 << Arg->getType(); 2870 return ExprError(); 2871 } 2872 TheCall->setType(TyA->getElementType()); 2873 break; 2874 } 2875 2876 case Builtin::BI__builtin_matrix_transpose: 2877 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 2878 2879 case Builtin::BI__builtin_matrix_column_major_load: 2880 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 2881 2882 case Builtin::BI__builtin_matrix_column_major_store: 2883 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 2884 2885 case Builtin::BI__builtin_get_device_side_mangled_name: { 2886 auto Check = [](CallExpr *TheCall) { 2887 if (TheCall->getNumArgs() != 1) 2888 return false; 2889 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 2890 if (!DRE) 2891 return false; 2892 auto *D = DRE->getDecl(); 2893 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 2894 return false; 2895 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 2896 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2897 }; 2898 if (!Check(TheCall)) { 2899 Diag(TheCall->getBeginLoc(), 2900 diag::err_hip_invalid_args_builtin_mangled_name); 2901 return ExprError(); 2902 } 2903 } 2904 } 2905 2906 // Since the target specific builtins for each arch overlap, only check those 2907 // of the arch we are compiling for. 
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
      assert(Context.getAuxTargetInfo() &&
             "Aux Target Builtin, but not an aux target?");

      // The builtin belongs to the aux target; check it against the aux
      // target's info using the aux target's own builtin ID.
      if (CheckTSBuiltinFunctionCall(
              *Context.getAuxTargetInfo(),
              Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
        return ExprError();
    } else {
      if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
                                     TheCall))
        return ExprError();
    }
  }

  return TheCallResult;
}

// Get the valid immediate range for the specified NEON type code.
// For shifts the result is the element size in bits minus one (the largest
// legal shift amount); otherwise it is the highest valid lane index, i.e. the
// lane count minus one, with the lane count doubled for quad vectors.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ? 31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ? 127 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Float16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  case NeonTypeFlags::Float32:
    assert(!shift && "cannot shift float types!");
    return (2 << IsQuad) - 1;
  case NeonTypeFlags::Float64:
    assert(!shift && "cannot shift float types!");
    return (1 << IsQuad) - 1;
  case NeonTypeFlags::BFloat16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
                               bool IsPolyUnsigned, bool IsInt64Long) {
  switch (Flags.getEltType()) {
  case NeonTypeFlags::Int8:
    return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Int16:
    return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Int32:
    return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
  case NeonTypeFlags::Int64:
    // Whether a 64-bit element is 'long' or 'long long' depends on the
    // target's int64_t type (IsInt64Long).
    if (IsInt64Long)
      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
    else
      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
                                : Context.LongLongTy;
  case NeonTypeFlags::Poly8:
    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Poly16:
    return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Poly64:
    if (IsInt64Long)
      return Context.UnsignedLongTy;
    else
      return Context.UnsignedLongLongTy;
  case NeonTypeFlags::Poly128:
    // No scalar element type corresponds to Poly128; fall out of the switch
    // to the unreachable below.
    break;
  case NeonTypeFlags::Float16:
    return Context.HalfTy;
  case NeonTypeFlags::Float32:
    return Context.FloatTy;
  case NeonTypeFlags::Float64:
    return Context.DoubleTy;
  case NeonTypeFlags::BFloat16:
    return Context.BFloat16Ty;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

// Streaming-mode requirement of an SME/SVE builtin, compared against the
// calling function's streaming interface by checkArmStreamingBuiltin.
enum ArmStreamingType {
  ArmNonStreaming,
  ArmStreaming,
  ArmStreamingCompatible,
  ArmStreamingOrSVE2p1
};

// Bitmask describing which pieces of SME architectural state (ZA in the low
// two bits, ZT0 in the next two) a builtin reads and/or writes.
enum ArmSMEState : unsigned {
  ArmNoState = 0,

  ArmInZA = 0b01,
  ArmOutZA = 0b10,
  ArmInOutZA = 0b11,
  ArmZAMask = 0b11,

  ArmInZT0 = 0b01 << 2,
  ArmOutZT0 = 0b10 << 2,
  ArmInOutZT0 = 0b11 << 2,
  ArmZT0Mask = 0b11 << 2
};

bool Sema::ParseSVEImmChecks(
    CallExpr *TheCall,
SmallVector<std::tuple<int, int, int>, 3> &ImmChecks) {
  // Perform all the immediate checks for this builtin call.
  // Each tuple is (argument index, SVETypeFlags::ImmCheckType, element size
  // in bits); any failing check emits a diagnostic and sets HasError.
  bool HasError = false;
  for (auto &I : ImmChecks) {
    int ArgNum, CheckTy, ElementSizeInBits;
    std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;

    typedef bool (*OptionSetCheckFnTy)(int64_t Value);

    // Function that checks whether the operand (ArgNum) is an immediate
    // that is one of the predefined values.
    auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
                                   int ErrDiag) -> bool {
      // We can't check the value of a dependent argument.
      Expr *Arg = TheCall->getArg(ArgNum);
      if (Arg->isTypeDependent() || Arg->isValueDependent())
        return false;

      // Check constant-ness first.
      llvm::APSInt Imm;
      if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
        return true;

      if (!CheckImm(Imm.getSExtValue()))
        return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
      return false;
    };

    switch ((SVETypeFlags::ImmCheckType)CheckTy) {
    case SVETypeFlags::ImmCheck0_31:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_13:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_16:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_7:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_1:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_3:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 3))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_7:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 7))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckExtract:
      // Extract index: bounded by the number of elements in a 2048-bit
      // (maximum-width) vector.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (2048 / ElementSizeInBits) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRight:
      // Right shifts allow 1..element-size (inclusive).
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRightNarrow:
      // Narrowing right shifts are bounded by the narrowed (half) element.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
                                      ElementSizeInBits / 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftLeft:
      // Left shifts allow 0..element-size-1.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      ElementSizeInBits - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndex:
      // Lane indices are relative to a 128-bit vector segment.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (1 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexCompRotate:
      // Complex-rotate forms index pairs of elements.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (2 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexDot:
      // Dot-product forms index groups of four elements.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (4 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRot90_270:
      if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
                              diag::err_rotation_argument_to_cadd))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRotAll90:
      if (CheckImmediateInSet(
              [](int64_t V) {
                return V == 0 || V == 90 || V == 180 || V == 270;
              },
              diag::err_rotation_argument_to_cmla))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_1:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_2:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_3:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_0:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 0))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_15:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 15))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_255:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 255))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck2_4_Mul2:
      // Must be 2 or 4 (in range [2,4] and a multiple of 2).
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 2, 4) ||
          SemaBuiltinConstantArgMultiple(TheCall, ArgNum, 2))
        HasError = true;
      break;
    }
  }

  return HasError;
}

// Determine the streaming interface of FD: an __arm_locally_streaming
// attribute or an SM-enabled function type makes it streaming; an
// SM-compatible function type makes it streaming-compatible; otherwise it is
// non-streaming.
static ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
  if (FD->hasAttr<ArmLocallyStreamingAttr>())
    return ArmStreaming;
  if (const auto *T = FD->getType()->getAs<FunctionProtoType>()) {
    if (T->getAArch64SMEAttributes() & FunctionType::SME_PStateSMEnabledMask)
      return ArmStreaming;
    if (T->getAArch64SMEAttributes() & FunctionType::SME_PStateSMCompatibleMask)
      return ArmStreamingCompatible;
  }
  return ArmNonStreaming;
}

// Warn when a builtin's streaming-mode requirement (BuiltinType) is
// incompatible with the streaming interface of the calling function FD.
static void checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
                                     const FunctionDecl *FD,
                                     ArmStreamingType BuiltinType) {
  ArmStreamingType FnType = getArmStreamingFnType(FD);
  if (BuiltinType == ArmStreamingOrSVE2p1) {
    // Check intrinsics that are available in [sve2p1 or sme/sme2].
    // With the sve2p1 feature the builtin is usable in any mode; otherwise it
    // requires streaming mode (sme/sme2).
    llvm::StringMap<bool> CallerFeatureMap;
    S.Context.getFunctionFeatureMap(CallerFeatureMap, FD);
    if (Builtin::evaluateRequiredTargetFeatures("sve2p1", CallerFeatureMap))
      BuiltinType = ArmStreamingCompatible;
    else
      BuiltinType = ArmStreaming;
  }

  if (FnType == ArmStreaming && BuiltinType == ArmNonStreaming) {
    S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "streaming";
  }

  if (FnType == ArmStreamingCompatible &&
      BuiltinType != ArmStreamingCompatible) {
    S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "streaming compatible";
    return;
  }

  if (FnType == ArmNonStreaming && BuiltinType == ArmStreaming) {
    S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
        << TheCall->getSourceRange() << "non-streaming";
  }
}

// True if FD has ZA state, either via its SME function-type attributes or an
// ArmNew attribute that creates ZA state.
static bool hasArmZAState(const FunctionDecl *FD) {
  const auto *T = FD->getType()->getAs<FunctionProtoType>();
  return (T && FunctionType::getArmZAState(T->getAArch64SMEAttributes()) !=
                   FunctionType::ARM_None) ||
         (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZA());
}

// True if FD has ZT0 state, either via its SME function-type attributes or an
// ArmNew attribute that creates ZT0 state.
static bool hasArmZT0State(const FunctionDecl *FD) {
  const auto *T = FD->getType()->getAs<FunctionProtoType>();
  return (T && FunctionType::getArmZT0State(T->getAArch64SMEAttributes()) !=
                   FunctionType::ARM_None) ||
         (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZT0());
}

// Look up the ZA/ZT0 state used by an SME builtin from the TableGen-generated
// table; returns ArmNoState for builtins that touch neither.
static ArmSMEState getSMEState(unsigned BuiltinID) {
  switch (BuiltinID) {
  default:
    return ArmNoState;
#define GET_SME_BUILTIN_GET_STATE
#include "clang/Basic/arm_sme_builtins_za_state.inc"
#undef GET_SME_BUILTIN_GET_STATE
  }
}

bool Sema::CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  if (const FunctionDecl *FD = getCurFunctionDecl()) {
    std::optional<ArmStreamingType> BuiltinType;

    // The generated table sets BuiltinType for builtins with a streaming
    // requirement; others leave it unset.
    switch (BuiltinID) {
#define GET_SME_STREAMING_ATTRS
#include "clang/Basic/arm_sme_streaming_attrs.inc"
#undef GET_SME_STREAMING_ATTRS
    }

    if (BuiltinType)
      checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);

    // Warn when a builtin that uses ZA/ZT0 is called from a function without
    // that state.
    if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
      Diag(TheCall->getBeginLoc(),
           diag::warn_attribute_arm_za_builtin_no_za_state)
          << TheCall->getSourceRange();

    if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
      Diag(TheCall->getBeginLoc(),
           diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
          << TheCall->getSourceRange();
  }

  // Range check SME intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SME_IMMEDIATE_CHECK
#include "clang/Basic/arm_sme_sema_rangechecks.inc"
#undef GET_SME_IMMEDIATE_CHECK
  }

  return ParseSVEImmChecks(TheCall, ImmChecks);
}

bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  if (const FunctionDecl *FD = getCurFunctionDecl()) {
    std::optional<ArmStreamingType> BuiltinType;

    switch (BuiltinID) {
#define GET_SVE_STREAMING_ATTRS
#include "clang/Basic/arm_sve_streaming_attrs.inc"
#undef GET_SVE_STREAMING_ATTRS
    }
    if (BuiltinType)
      checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);
  }
  // Range check SVE intrinsics that take immediate values.
  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
  }

  return ParseSVEImmChecks(TheCall, ImmChecks);
}

bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  if (const FunctionDecl *FD = getCurFunctionDecl()) {

    // NEON builtins are treated as non-streaming: warn if one is used from a
    // streaming or streaming-compatible function.
    switch (BuiltinID) {
    default:
      break;
#define GET_NEON_BUILTINS
#define TARGET_BUILTIN(id, ...) case NEON::BI##id:
#define BUILTIN(id, ...) case NEON::BI##id:
#include "clang/Basic/arm_neon.inc"
      checkArmStreamingBuiltin(*this, TheCall, FD, ArmNonStreaming);
      break;
#undef TARGET_BUILTIN
#undef BUILTIN
#undef GET_NEON_BUILTINS
    }
  }

  // The generated overload-check table fills in 'mask' (the set of legal type
  // codes), 'PtrArgNum' and 'HasConstPtr' for overloaded builtins.
  llvm::APSInt Result;
  uint64_t mask = 0;
  unsigned TV = 0;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // All MVE checking is TableGen-generated; non-MVE builtins fall through to
  // the default and report no error.
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_mve_builtin_sema.inc"
  }
}

bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  bool Err = false;
  // The generated checks set Err; non-CDE builtins return false immediately.
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
  }

  if (Err)
    return true;

  // CDE builtins additionally require their coprocessor argument to name a
  // coprocessor reserved for CDE on this target.
  return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
}

bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
                                        const Expr *CoprocArg, bool WantCDE) {
  if (isConstantEvaluatedContext())
    return false;

  // We can't check the value of a dependent argument.
  if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
    return false;

  llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
  int64_t CoprocNo = CoprocNoAP.getExtValue();
  assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");

  // Coprocessors 0-7 may be reserved for CDE by the target; diagnose when the
  // builtin family (CDE vs. traditional) disagrees with that reservation.
  uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
  bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));

  if (IsCDECoproc != WantCDE)
    return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
           << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();

  return false;
}

bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // Note: for ldrex the pointer is argument 0; for strex it is argument 1
  // (argument 0 is the value to store).
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // Reject ARC-managed ObjC lifetimes; exclusive load/store cannot honor
  // ownership semantics.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}

bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    // AArch32 exclusives are limited to 64-bit accesses.
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Delegate to the NEON/MVE/CDE checkers; each ignores builtins outside its
  // family.
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default: return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    // Coprocessor builtins: range check the coprocessor number and verify it
    // is not one reserved for CDE.
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
           CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
                                        /*WantCDE*/ false);
  }
}

bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return
CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 3626 } 3627 3628 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 3629 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3630 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3) || 3631 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 3632 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 3633 } 3634 3635 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 3636 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 3637 BuiltinID == AArch64::BI__builtin_arm_rsr128 || 3638 BuiltinID == AArch64::BI__builtin_arm_wsr128) 3639 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3640 3641 // Memory Tagging Extensions (MTE) Intrinsics 3642 if (BuiltinID == AArch64::BI__builtin_arm_irg || 3643 BuiltinID == AArch64::BI__builtin_arm_addg || 3644 BuiltinID == AArch64::BI__builtin_arm_gmi || 3645 BuiltinID == AArch64::BI__builtin_arm_ldg || 3646 BuiltinID == AArch64::BI__builtin_arm_stg || 3647 BuiltinID == AArch64::BI__builtin_arm_subp) { 3648 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 3649 } 3650 3651 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 3652 BuiltinID == AArch64::BI__builtin_arm_rsrp || 3653 BuiltinID == AArch64::BI__builtin_arm_wsr || 3654 BuiltinID == AArch64::BI__builtin_arm_wsrp) 3655 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3656 3657 // Only check the valid encoding range. Any constant in this range would be 3658 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 3659 // an exception for incorrect registers. This matches MSVC behavior. 
3660 if (BuiltinID == AArch64::BI_ReadStatusReg || 3661 BuiltinID == AArch64::BI_WriteStatusReg) 3662 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 3663 3664 if (BuiltinID == AArch64::BI__getReg) 3665 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3666 3667 if (BuiltinID == AArch64::BI__break) 3668 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff); 3669 3670 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3671 return true; 3672 3673 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 3674 return true; 3675 3676 if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall)) 3677 return true; 3678 3679 // For intrinsics which take an immediate value as part of the instruction, 3680 // range check them here. 3681 unsigned i = 0, l = 0, u = 0; 3682 switch (BuiltinID) { 3683 default: return false; 3684 case AArch64::BI__builtin_arm_dmb: 3685 case AArch64::BI__builtin_arm_dsb: 3686 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 3687 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 3688 } 3689 3690 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 3691 } 3692 3693 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 3694 if (Arg->getType()->getAsPlaceholderType()) 3695 return false; 3696 3697 // The first argument needs to be a record field access. 3698 // If it is an array element access, we delay decision 3699 // to BPF backend to check whether the access is a 3700 // field access or not. 3701 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 3702 isa<MemberExpr>(Arg->IgnoreParens()) || 3703 isa<ArraySubscriptExpr>(Arg->IgnoreParens())); 3704 } 3705 3706 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 3707 QualType ArgType = Arg->getType(); 3708 if (ArgType->getAsPlaceholderType()) 3709 return false; 3710 3711 // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type 3712 // format: 3713 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 3714 // 2. 
<type> var; 3715 // __builtin_preserve_type_info(var, flag); 3716 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) && 3717 !isa<UnaryOperator>(Arg->IgnoreParens())) 3718 return false; 3719 3720 // Typedef type. 3721 if (ArgType->getAs<TypedefType>()) 3722 return true; 3723 3724 // Record type or Enum type. 3725 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3726 if (const auto *RT = Ty->getAs<RecordType>()) { 3727 if (!RT->getDecl()->getDeclName().isEmpty()) 3728 return true; 3729 } else if (const auto *ET = Ty->getAs<EnumType>()) { 3730 if (!ET->getDecl()->getDeclName().isEmpty()) 3731 return true; 3732 } 3733 3734 return false; 3735 } 3736 3737 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 3738 QualType ArgType = Arg->getType(); 3739 if (ArgType->getAsPlaceholderType()) 3740 return false; 3741 3742 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 3743 // format: 3744 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 3745 // flag); 3746 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 3747 if (!UO) 3748 return false; 3749 3750 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 3751 if (!CE) 3752 return false; 3753 if (CE->getCastKind() != CK_IntegralToPointer && 3754 CE->getCastKind() != CK_NullToPointer) 3755 return false; 3756 3757 // The integer must be from an EnumConstantDecl. 3758 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 3759 if (!DR) 3760 return false; 3761 3762 const EnumConstantDecl *Enumerator = 3763 dyn_cast<EnumConstantDecl>(DR->getDecl()); 3764 if (!Enumerator) 3765 return false; 3766 3767 // The type must be EnumType. 3768 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3769 const auto *ET = Ty->getAs<EnumType>(); 3770 if (!ET) 3771 return false; 3772 3773 // The enum value must be supported. 
3774 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); 3775 } 3776 3777 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 3778 CallExpr *TheCall) { 3779 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 3780 BuiltinID == BPF::BI__builtin_btf_type_id || 3781 BuiltinID == BPF::BI__builtin_preserve_type_info || 3782 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 3783 "unexpected BPF builtin"); 3784 3785 if (checkArgCount(*this, TheCall, 2)) 3786 return true; 3787 3788 // The second argument needs to be a constant int 3789 Expr *Arg = TheCall->getArg(1); 3790 std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 3791 diag::kind kind; 3792 if (!Value) { 3793 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 3794 kind = diag::err_preserve_field_info_not_const; 3795 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 3796 kind = diag::err_btf_type_id_not_const; 3797 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 3798 kind = diag::err_preserve_type_info_not_const; 3799 else 3800 kind = diag::err_preserve_enum_value_not_const; 3801 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 3802 return true; 3803 } 3804 3805 // The first argument 3806 Arg = TheCall->getArg(0); 3807 bool InvalidArg = false; 3808 bool ReturnUnsignedInt = true; 3809 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 3810 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 3811 InvalidArg = true; 3812 kind = diag::err_preserve_field_info_not_field; 3813 } 3814 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 3815 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 3816 InvalidArg = true; 3817 kind = diag::err_preserve_type_info_invalid; 3818 } 3819 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 3820 if (!isValidBPFPreserveEnumValueArg(Arg)) { 3821 InvalidArg = true; 3822 kind = diag::err_preserve_enum_value_invalid; 3823 } 3824 ReturnUnsignedInt = false; 3825 } else if (BuiltinID == 
BPF::BI__builtin_btf_type_id) { 3826 ReturnUnsignedInt = false; 3827 } 3828 3829 if (InvalidArg) { 3830 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3831 return true; 3832 } 3833 3834 if (ReturnUnsignedInt) 3835 TheCall->setType(Context.UnsignedIntTy); 3836 else 3837 TheCall->setType(Context.UnsignedLongTy); 3838 return false; 3839 } 3840 3841 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3842 struct ArgInfo { 3843 uint8_t OpNum; 3844 bool IsSigned; 3845 uint8_t BitWidth; 3846 uint8_t Align; 3847 }; 3848 struct BuiltinInfo { 3849 unsigned BuiltinID; 3850 ArgInfo Infos[2]; 3851 }; 3852 3853 static BuiltinInfo Infos[] = { 3854 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3855 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3856 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3857 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3858 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3859 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3860 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3861 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3862 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3863 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3864 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3865 3866 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3867 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3868 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3869 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3870 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3871 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3872 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3873 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3874 { 
Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3875 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3876 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3877 3878 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3879 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3880 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3881 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3882 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3883 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3884 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3885 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3886 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3887 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3888 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3889 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3890 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3891 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3892 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3893 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3894 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3895 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3896 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3897 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3898 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3899 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3900 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3901 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3902 { 
Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3903 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3904 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3905 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3906 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3907 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3908 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3909 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3910 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3911 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3912 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3913 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3914 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3915 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3916 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3917 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3918 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3919 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3920 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3921 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3922 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3923 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3924 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3925 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3926 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3927 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3928 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, 
false, 6, 0 }} }, 3929 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3930 {{ 1, false, 6, 0 }} }, 3931 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3932 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3933 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3934 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3935 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3936 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3937 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3938 {{ 1, false, 5, 0 }} }, 3939 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3940 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3941 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3942 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3943 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3944 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3945 { 2, false, 5, 0 }} }, 3946 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 3947 { 2, false, 6, 0 }} }, 3948 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3949 { 3, false, 5, 0 }} }, 3950 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3951 { 3, false, 6, 0 }} }, 3952 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3953 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3954 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3955 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3956 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3957 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3958 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3959 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 
2, false, 5, 0 }} }, 3960 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3961 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3962 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3963 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3964 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3965 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3966 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 3967 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3968 {{ 2, false, 4, 0 }, 3969 { 3, false, 5, 0 }} }, 3970 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3971 {{ 2, false, 4, 0 }, 3972 { 3, false, 5, 0 }} }, 3973 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3974 {{ 2, false, 4, 0 }, 3975 { 3, false, 5, 0 }} }, 3976 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3977 {{ 2, false, 4, 0 }, 3978 { 3, false, 5, 0 }} }, 3979 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3980 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3981 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3982 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3983 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3984 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3985 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3986 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3987 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3988 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3989 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3990 { 2, false, 5, 0 }} }, 3991 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3992 { 2, false, 6, 0 }} }, 3993 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 
0 }} }, 3994 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3995 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3996 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3997 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3998 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3999 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 4000 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 4001 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 4002 {{ 1, false, 4, 0 }} }, 4003 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 4004 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 4005 {{ 1, false, 4, 0 }} }, 4006 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 4007 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 4008 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 4009 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 4010 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 4011 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 4012 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 4013 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 4014 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 4015 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 4016 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 4017 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 4018 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 4019 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 4020 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 4021 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} 
}, 4022 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 4023 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 4024 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 4025 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 4026 {{ 3, false, 1, 0 }} }, 4027 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 4028 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 4029 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 4030 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 4031 {{ 3, false, 1, 0 }} }, 4032 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 4033 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 4034 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 4035 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 4036 {{ 3, false, 1, 0 }} }, 4037 4038 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, {{ 2, false, 2, 0 }} }, 4039 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B, 4040 {{ 2, false, 2, 0 }} }, 4041 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx, 4042 {{ 3, false, 2, 0 }} }, 4043 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B, 4044 {{ 3, false, 2, 0 }} }, 4045 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, {{ 2, false, 2, 0 }} }, 4046 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B, 4047 {{ 2, false, 2, 0 }} }, 4048 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx, 4049 {{ 3, false, 2, 0 }} }, 4050 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B, 4051 {{ 3, false, 2, 0 }} }, 4052 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {{ 2, false, 3, 0 }} }, 4053 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {{ 2, false, 3, 0 }} }, 4054 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} }, 4055 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, 4056 {{ 3, false, 3, 0 }} }, 4057 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, 
{{ 2, false, 3, 0 }} }, 4058 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {{ 2, false, 3, 0 }} }, 4059 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {{ 3, false, 3, 0 }} }, 4060 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, 4061 {{ 3, false, 3, 0 }} }, 4062 }; 4063 4064 // Use a dynamically initialized static to sort the table exactly once on 4065 // first run. 4066 static const bool SortOnce = 4067 (llvm::sort(Infos, 4068 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 4069 return LHS.BuiltinID < RHS.BuiltinID; 4070 }), 4071 true); 4072 (void)SortOnce; 4073 4074 const BuiltinInfo *F = llvm::partition_point( 4075 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 4076 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 4077 return false; 4078 4079 bool Error = false; 4080 4081 for (const ArgInfo &A : F->Infos) { 4082 // Ignore empty ArgInfo elements. 4083 if (A.BitWidth == 0) 4084 continue; 4085 4086 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 4087 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1; 4088 if (!A.Align) { 4089 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 4090 } else { 4091 unsigned M = 1 << A.Align; 4092 Min *= M; 4093 Max *= M; 4094 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 4095 Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 4096 } 4097 } 4098 return Error; 4099 } 4100 4101 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 4102 CallExpr *TheCall) { 4103 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 4104 } 4105 4106 bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI, 4107 unsigned BuiltinID, 4108 CallExpr *TheCall) { 4109 switch (BuiltinID) { 4110 default: 4111 break; 4112 // Basic intrinsics. 
4113 case LoongArch::BI__builtin_loongarch_cacop_d: 4114 case LoongArch::BI__builtin_loongarch_cacop_w: { 4115 SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5)); 4116 SemaBuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12), 4117 llvm::maxIntN(12)); 4118 break; 4119 } 4120 case LoongArch::BI__builtin_loongarch_break: 4121 case LoongArch::BI__builtin_loongarch_dbar: 4122 case LoongArch::BI__builtin_loongarch_ibar: 4123 case LoongArch::BI__builtin_loongarch_syscall: 4124 // Check if immediate is in [0, 32767]. 4125 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 32767); 4126 case LoongArch::BI__builtin_loongarch_csrrd_w: 4127 case LoongArch::BI__builtin_loongarch_csrrd_d: 4128 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383); 4129 case LoongArch::BI__builtin_loongarch_csrwr_w: 4130 case LoongArch::BI__builtin_loongarch_csrwr_d: 4131 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383); 4132 case LoongArch::BI__builtin_loongarch_csrxchg_w: 4133 case LoongArch::BI__builtin_loongarch_csrxchg_d: 4134 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383); 4135 case LoongArch::BI__builtin_loongarch_lddir_d: 4136 case LoongArch::BI__builtin_loongarch_ldpte_d: 4137 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 4138 case LoongArch::BI__builtin_loongarch_movfcsr2gr: 4139 case LoongArch::BI__builtin_loongarch_movgr2fcsr: 4140 return SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2)); 4141 4142 // LSX intrinsics. 
4143 case LoongArch::BI__builtin_lsx_vbitclri_b: 4144 case LoongArch::BI__builtin_lsx_vbitrevi_b: 4145 case LoongArch::BI__builtin_lsx_vbitseti_b: 4146 case LoongArch::BI__builtin_lsx_vsat_b: 4147 case LoongArch::BI__builtin_lsx_vsat_bu: 4148 case LoongArch::BI__builtin_lsx_vslli_b: 4149 case LoongArch::BI__builtin_lsx_vsrai_b: 4150 case LoongArch::BI__builtin_lsx_vsrari_b: 4151 case LoongArch::BI__builtin_lsx_vsrli_b: 4152 case LoongArch::BI__builtin_lsx_vsllwil_h_b: 4153 case LoongArch::BI__builtin_lsx_vsllwil_hu_bu: 4154 case LoongArch::BI__builtin_lsx_vrotri_b: 4155 case LoongArch::BI__builtin_lsx_vsrlri_b: 4156 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7); 4157 case LoongArch::BI__builtin_lsx_vbitclri_h: 4158 case LoongArch::BI__builtin_lsx_vbitrevi_h: 4159 case LoongArch::BI__builtin_lsx_vbitseti_h: 4160 case LoongArch::BI__builtin_lsx_vsat_h: 4161 case LoongArch::BI__builtin_lsx_vsat_hu: 4162 case LoongArch::BI__builtin_lsx_vslli_h: 4163 case LoongArch::BI__builtin_lsx_vsrai_h: 4164 case LoongArch::BI__builtin_lsx_vsrari_h: 4165 case LoongArch::BI__builtin_lsx_vsrli_h: 4166 case LoongArch::BI__builtin_lsx_vsllwil_w_h: 4167 case LoongArch::BI__builtin_lsx_vsllwil_wu_hu: 4168 case LoongArch::BI__builtin_lsx_vrotri_h: 4169 case LoongArch::BI__builtin_lsx_vsrlri_h: 4170 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 4171 case LoongArch::BI__builtin_lsx_vssrarni_b_h: 4172 case LoongArch::BI__builtin_lsx_vssrarni_bu_h: 4173 case LoongArch::BI__builtin_lsx_vssrani_b_h: 4174 case LoongArch::BI__builtin_lsx_vssrani_bu_h: 4175 case LoongArch::BI__builtin_lsx_vsrarni_b_h: 4176 case LoongArch::BI__builtin_lsx_vsrlni_b_h: 4177 case LoongArch::BI__builtin_lsx_vsrlrni_b_h: 4178 case LoongArch::BI__builtin_lsx_vssrlni_b_h: 4179 case LoongArch::BI__builtin_lsx_vssrlni_bu_h: 4180 case LoongArch::BI__builtin_lsx_vssrlrni_b_h: 4181 case LoongArch::BI__builtin_lsx_vssrlrni_bu_h: 4182 case LoongArch::BI__builtin_lsx_vsrani_b_h: 4183 return 
SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4184 case LoongArch::BI__builtin_lsx_vslei_bu: 4185 case LoongArch::BI__builtin_lsx_vslei_hu: 4186 case LoongArch::BI__builtin_lsx_vslei_wu: 4187 case LoongArch::BI__builtin_lsx_vslei_du: 4188 case LoongArch::BI__builtin_lsx_vslti_bu: 4189 case LoongArch::BI__builtin_lsx_vslti_hu: 4190 case LoongArch::BI__builtin_lsx_vslti_wu: 4191 case LoongArch::BI__builtin_lsx_vslti_du: 4192 case LoongArch::BI__builtin_lsx_vmaxi_bu: 4193 case LoongArch::BI__builtin_lsx_vmaxi_hu: 4194 case LoongArch::BI__builtin_lsx_vmaxi_wu: 4195 case LoongArch::BI__builtin_lsx_vmaxi_du: 4196 case LoongArch::BI__builtin_lsx_vmini_bu: 4197 case LoongArch::BI__builtin_lsx_vmini_hu: 4198 case LoongArch::BI__builtin_lsx_vmini_wu: 4199 case LoongArch::BI__builtin_lsx_vmini_du: 4200 case LoongArch::BI__builtin_lsx_vaddi_bu: 4201 case LoongArch::BI__builtin_lsx_vaddi_hu: 4202 case LoongArch::BI__builtin_lsx_vaddi_wu: 4203 case LoongArch::BI__builtin_lsx_vaddi_du: 4204 case LoongArch::BI__builtin_lsx_vbitclri_w: 4205 case LoongArch::BI__builtin_lsx_vbitrevi_w: 4206 case LoongArch::BI__builtin_lsx_vbitseti_w: 4207 case LoongArch::BI__builtin_lsx_vsat_w: 4208 case LoongArch::BI__builtin_lsx_vsat_wu: 4209 case LoongArch::BI__builtin_lsx_vslli_w: 4210 case LoongArch::BI__builtin_lsx_vsrai_w: 4211 case LoongArch::BI__builtin_lsx_vsrari_w: 4212 case LoongArch::BI__builtin_lsx_vsrli_w: 4213 case LoongArch::BI__builtin_lsx_vsllwil_d_w: 4214 case LoongArch::BI__builtin_lsx_vsllwil_du_wu: 4215 case LoongArch::BI__builtin_lsx_vsrlri_w: 4216 case LoongArch::BI__builtin_lsx_vrotri_w: 4217 case LoongArch::BI__builtin_lsx_vsubi_bu: 4218 case LoongArch::BI__builtin_lsx_vsubi_hu: 4219 case LoongArch::BI__builtin_lsx_vbsrl_v: 4220 case LoongArch::BI__builtin_lsx_vbsll_v: 4221 case LoongArch::BI__builtin_lsx_vsubi_wu: 4222 case LoongArch::BI__builtin_lsx_vsubi_du: 4223 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 4224 case LoongArch::BI__builtin_lsx_vssrarni_h_w: 
4225 case LoongArch::BI__builtin_lsx_vssrarni_hu_w: 4226 case LoongArch::BI__builtin_lsx_vssrani_h_w: 4227 case LoongArch::BI__builtin_lsx_vssrani_hu_w: 4228 case LoongArch::BI__builtin_lsx_vsrarni_h_w: 4229 case LoongArch::BI__builtin_lsx_vsrani_h_w: 4230 case LoongArch::BI__builtin_lsx_vfrstpi_b: 4231 case LoongArch::BI__builtin_lsx_vfrstpi_h: 4232 case LoongArch::BI__builtin_lsx_vsrlni_h_w: 4233 case LoongArch::BI__builtin_lsx_vsrlrni_h_w: 4234 case LoongArch::BI__builtin_lsx_vssrlni_h_w: 4235 case LoongArch::BI__builtin_lsx_vssrlni_hu_w: 4236 case LoongArch::BI__builtin_lsx_vssrlrni_h_w: 4237 case LoongArch::BI__builtin_lsx_vssrlrni_hu_w: 4238 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 4239 case LoongArch::BI__builtin_lsx_vbitclri_d: 4240 case LoongArch::BI__builtin_lsx_vbitrevi_d: 4241 case LoongArch::BI__builtin_lsx_vbitseti_d: 4242 case LoongArch::BI__builtin_lsx_vsat_d: 4243 case LoongArch::BI__builtin_lsx_vsat_du: 4244 case LoongArch::BI__builtin_lsx_vslli_d: 4245 case LoongArch::BI__builtin_lsx_vsrai_d: 4246 case LoongArch::BI__builtin_lsx_vsrli_d: 4247 case LoongArch::BI__builtin_lsx_vsrari_d: 4248 case LoongArch::BI__builtin_lsx_vrotri_d: 4249 case LoongArch::BI__builtin_lsx_vsrlri_d: 4250 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63); 4251 case LoongArch::BI__builtin_lsx_vssrarni_w_d: 4252 case LoongArch::BI__builtin_lsx_vssrarni_wu_d: 4253 case LoongArch::BI__builtin_lsx_vssrani_w_d: 4254 case LoongArch::BI__builtin_lsx_vssrani_wu_d: 4255 case LoongArch::BI__builtin_lsx_vsrarni_w_d: 4256 case LoongArch::BI__builtin_lsx_vsrlni_w_d: 4257 case LoongArch::BI__builtin_lsx_vsrlrni_w_d: 4258 case LoongArch::BI__builtin_lsx_vssrlni_w_d: 4259 case LoongArch::BI__builtin_lsx_vssrlni_wu_d: 4260 case LoongArch::BI__builtin_lsx_vssrlrni_w_d: 4261 case LoongArch::BI__builtin_lsx_vssrlrni_wu_d: 4262 case LoongArch::BI__builtin_lsx_vsrani_w_d: 4263 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 63); 4264 case 
LoongArch::BI__builtin_lsx_vssrarni_d_q: 4265 case LoongArch::BI__builtin_lsx_vssrarni_du_q: 4266 case LoongArch::BI__builtin_lsx_vssrani_d_q: 4267 case LoongArch::BI__builtin_lsx_vssrani_du_q: 4268 case LoongArch::BI__builtin_lsx_vsrarni_d_q: 4269 case LoongArch::BI__builtin_lsx_vssrlni_d_q: 4270 case LoongArch::BI__builtin_lsx_vssrlni_du_q: 4271 case LoongArch::BI__builtin_lsx_vssrlrni_d_q: 4272 case LoongArch::BI__builtin_lsx_vssrlrni_du_q: 4273 case LoongArch::BI__builtin_lsx_vsrani_d_q: 4274 case LoongArch::BI__builtin_lsx_vsrlrni_d_q: 4275 case LoongArch::BI__builtin_lsx_vsrlni_d_q: 4276 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 127); 4277 case LoongArch::BI__builtin_lsx_vseqi_b: 4278 case LoongArch::BI__builtin_lsx_vseqi_h: 4279 case LoongArch::BI__builtin_lsx_vseqi_w: 4280 case LoongArch::BI__builtin_lsx_vseqi_d: 4281 case LoongArch::BI__builtin_lsx_vslti_b: 4282 case LoongArch::BI__builtin_lsx_vslti_h: 4283 case LoongArch::BI__builtin_lsx_vslti_w: 4284 case LoongArch::BI__builtin_lsx_vslti_d: 4285 case LoongArch::BI__builtin_lsx_vslei_b: 4286 case LoongArch::BI__builtin_lsx_vslei_h: 4287 case LoongArch::BI__builtin_lsx_vslei_w: 4288 case LoongArch::BI__builtin_lsx_vslei_d: 4289 case LoongArch::BI__builtin_lsx_vmaxi_b: 4290 case LoongArch::BI__builtin_lsx_vmaxi_h: 4291 case LoongArch::BI__builtin_lsx_vmaxi_w: 4292 case LoongArch::BI__builtin_lsx_vmaxi_d: 4293 case LoongArch::BI__builtin_lsx_vmini_b: 4294 case LoongArch::BI__builtin_lsx_vmini_h: 4295 case LoongArch::BI__builtin_lsx_vmini_w: 4296 case LoongArch::BI__builtin_lsx_vmini_d: 4297 return SemaBuiltinConstantArgRange(TheCall, 1, -16, 15); 4298 case LoongArch::BI__builtin_lsx_vandi_b: 4299 case LoongArch::BI__builtin_lsx_vnori_b: 4300 case LoongArch::BI__builtin_lsx_vori_b: 4301 case LoongArch::BI__builtin_lsx_vshuf4i_b: 4302 case LoongArch::BI__builtin_lsx_vshuf4i_h: 4303 case LoongArch::BI__builtin_lsx_vshuf4i_w: 4304 case LoongArch::BI__builtin_lsx_vxori_b: 4305 return 
SemaBuiltinConstantArgRange(TheCall, 1, 0, 255); 4306 case LoongArch::BI__builtin_lsx_vbitseli_b: 4307 case LoongArch::BI__builtin_lsx_vshuf4i_d: 4308 case LoongArch::BI__builtin_lsx_vextrins_b: 4309 case LoongArch::BI__builtin_lsx_vextrins_h: 4310 case LoongArch::BI__builtin_lsx_vextrins_w: 4311 case LoongArch::BI__builtin_lsx_vextrins_d: 4312 case LoongArch::BI__builtin_lsx_vpermi_w: 4313 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 255); 4314 case LoongArch::BI__builtin_lsx_vpickve2gr_b: 4315 case LoongArch::BI__builtin_lsx_vpickve2gr_bu: 4316 case LoongArch::BI__builtin_lsx_vreplvei_b: 4317 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 4318 case LoongArch::BI__builtin_lsx_vinsgr2vr_b: 4319 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4320 case LoongArch::BI__builtin_lsx_vpickve2gr_h: 4321 case LoongArch::BI__builtin_lsx_vpickve2gr_hu: 4322 case LoongArch::BI__builtin_lsx_vreplvei_h: 4323 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7); 4324 case LoongArch::BI__builtin_lsx_vinsgr2vr_h: 4325 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4326 case LoongArch::BI__builtin_lsx_vpickve2gr_w: 4327 case LoongArch::BI__builtin_lsx_vpickve2gr_wu: 4328 case LoongArch::BI__builtin_lsx_vreplvei_w: 4329 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 4330 case LoongArch::BI__builtin_lsx_vinsgr2vr_w: 4331 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4332 case LoongArch::BI__builtin_lsx_vpickve2gr_d: 4333 case LoongArch::BI__builtin_lsx_vpickve2gr_du: 4334 case LoongArch::BI__builtin_lsx_vreplvei_d: 4335 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4336 case LoongArch::BI__builtin_lsx_vinsgr2vr_d: 4337 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 4338 case LoongArch::BI__builtin_lsx_vstelm_b: 4339 return SemaBuiltinConstantArgRange(TheCall, 2, -128, 127) || 4340 SemaBuiltinConstantArgRange(TheCall, 3, 0, 15); 4341 case LoongArch::BI__builtin_lsx_vstelm_h: 4342 return SemaBuiltinConstantArgRange(TheCall, 2, 
-256, 254) || 4343 SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 4344 case LoongArch::BI__builtin_lsx_vstelm_w: 4345 return SemaBuiltinConstantArgRange(TheCall, 2, -512, 508) || 4346 SemaBuiltinConstantArgRange(TheCall, 3, 0, 3); 4347 case LoongArch::BI__builtin_lsx_vstelm_d: 4348 return SemaBuiltinConstantArgRange(TheCall, 2, -1024, 1016) || 4349 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1); 4350 case LoongArch::BI__builtin_lsx_vldrepl_b: 4351 case LoongArch::BI__builtin_lsx_vld: 4352 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2047); 4353 case LoongArch::BI__builtin_lsx_vldrepl_h: 4354 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2046); 4355 case LoongArch::BI__builtin_lsx_vldrepl_w: 4356 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2044); 4357 case LoongArch::BI__builtin_lsx_vldrepl_d: 4358 return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2040); 4359 case LoongArch::BI__builtin_lsx_vst: 4360 return SemaBuiltinConstantArgRange(TheCall, 2, -2048, 2047); 4361 case LoongArch::BI__builtin_lsx_vldi: 4362 return SemaBuiltinConstantArgRange(TheCall, 0, -4096, 4095); 4363 case LoongArch::BI__builtin_lsx_vrepli_b: 4364 case LoongArch::BI__builtin_lsx_vrepli_h: 4365 case LoongArch::BI__builtin_lsx_vrepli_w: 4366 case LoongArch::BI__builtin_lsx_vrepli_d: 4367 return SemaBuiltinConstantArgRange(TheCall, 0, -512, 511); 4368 4369 // LASX intrinsics. 
4370 case LoongArch::BI__builtin_lasx_xvbitclri_b: 4371 case LoongArch::BI__builtin_lasx_xvbitrevi_b: 4372 case LoongArch::BI__builtin_lasx_xvbitseti_b: 4373 case LoongArch::BI__builtin_lasx_xvsat_b: 4374 case LoongArch::BI__builtin_lasx_xvsat_bu: 4375 case LoongArch::BI__builtin_lasx_xvslli_b: 4376 case LoongArch::BI__builtin_lasx_xvsrai_b: 4377 case LoongArch::BI__builtin_lasx_xvsrari_b: 4378 case LoongArch::BI__builtin_lasx_xvsrli_b: 4379 case LoongArch::BI__builtin_lasx_xvsllwil_h_b: 4380 case LoongArch::BI__builtin_lasx_xvsllwil_hu_bu: 4381 case LoongArch::BI__builtin_lasx_xvrotri_b: 4382 case LoongArch::BI__builtin_lasx_xvsrlri_b: 4383 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7); 4384 case LoongArch::BI__builtin_lasx_xvbitclri_h: 4385 case LoongArch::BI__builtin_lasx_xvbitrevi_h: 4386 case LoongArch::BI__builtin_lasx_xvbitseti_h: 4387 case LoongArch::BI__builtin_lasx_xvsat_h: 4388 case LoongArch::BI__builtin_lasx_xvsat_hu: 4389 case LoongArch::BI__builtin_lasx_xvslli_h: 4390 case LoongArch::BI__builtin_lasx_xvsrai_h: 4391 case LoongArch::BI__builtin_lasx_xvsrari_h: 4392 case LoongArch::BI__builtin_lasx_xvsrli_h: 4393 case LoongArch::BI__builtin_lasx_xvsllwil_w_h: 4394 case LoongArch::BI__builtin_lasx_xvsllwil_wu_hu: 4395 case LoongArch::BI__builtin_lasx_xvrotri_h: 4396 case LoongArch::BI__builtin_lasx_xvsrlri_h: 4397 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 4398 case LoongArch::BI__builtin_lasx_xvssrarni_b_h: 4399 case LoongArch::BI__builtin_lasx_xvssrarni_bu_h: 4400 case LoongArch::BI__builtin_lasx_xvssrani_b_h: 4401 case LoongArch::BI__builtin_lasx_xvssrani_bu_h: 4402 case LoongArch::BI__builtin_lasx_xvsrarni_b_h: 4403 case LoongArch::BI__builtin_lasx_xvsrlni_b_h: 4404 case LoongArch::BI__builtin_lasx_xvsrlrni_b_h: 4405 case LoongArch::BI__builtin_lasx_xvssrlni_b_h: 4406 case LoongArch::BI__builtin_lasx_xvssrlni_bu_h: 4407 case LoongArch::BI__builtin_lasx_xvssrlrni_b_h: 4408 case LoongArch::BI__builtin_lasx_xvssrlrni_bu_h: 4409 case 
LoongArch::BI__builtin_lasx_xvsrani_b_h: 4410 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4411 case LoongArch::BI__builtin_lasx_xvslei_bu: 4412 case LoongArch::BI__builtin_lasx_xvslei_hu: 4413 case LoongArch::BI__builtin_lasx_xvslei_wu: 4414 case LoongArch::BI__builtin_lasx_xvslei_du: 4415 case LoongArch::BI__builtin_lasx_xvslti_bu: 4416 case LoongArch::BI__builtin_lasx_xvslti_hu: 4417 case LoongArch::BI__builtin_lasx_xvslti_wu: 4418 case LoongArch::BI__builtin_lasx_xvslti_du: 4419 case LoongArch::BI__builtin_lasx_xvmaxi_bu: 4420 case LoongArch::BI__builtin_lasx_xvmaxi_hu: 4421 case LoongArch::BI__builtin_lasx_xvmaxi_wu: 4422 case LoongArch::BI__builtin_lasx_xvmaxi_du: 4423 case LoongArch::BI__builtin_lasx_xvmini_bu: 4424 case LoongArch::BI__builtin_lasx_xvmini_hu: 4425 case LoongArch::BI__builtin_lasx_xvmini_wu: 4426 case LoongArch::BI__builtin_lasx_xvmini_du: 4427 case LoongArch::BI__builtin_lasx_xvaddi_bu: 4428 case LoongArch::BI__builtin_lasx_xvaddi_hu: 4429 case LoongArch::BI__builtin_lasx_xvaddi_wu: 4430 case LoongArch::BI__builtin_lasx_xvaddi_du: 4431 case LoongArch::BI__builtin_lasx_xvbitclri_w: 4432 case LoongArch::BI__builtin_lasx_xvbitrevi_w: 4433 case LoongArch::BI__builtin_lasx_xvbitseti_w: 4434 case LoongArch::BI__builtin_lasx_xvsat_w: 4435 case LoongArch::BI__builtin_lasx_xvsat_wu: 4436 case LoongArch::BI__builtin_lasx_xvslli_w: 4437 case LoongArch::BI__builtin_lasx_xvsrai_w: 4438 case LoongArch::BI__builtin_lasx_xvsrari_w: 4439 case LoongArch::BI__builtin_lasx_xvsrli_w: 4440 case LoongArch::BI__builtin_lasx_xvsllwil_d_w: 4441 case LoongArch::BI__builtin_lasx_xvsllwil_du_wu: 4442 case LoongArch::BI__builtin_lasx_xvsrlri_w: 4443 case LoongArch::BI__builtin_lasx_xvrotri_w: 4444 case LoongArch::BI__builtin_lasx_xvsubi_bu: 4445 case LoongArch::BI__builtin_lasx_xvsubi_hu: 4446 case LoongArch::BI__builtin_lasx_xvsubi_wu: 4447 case LoongArch::BI__builtin_lasx_xvsubi_du: 4448 case LoongArch::BI__builtin_lasx_xvbsrl_v: 4449 case 
LoongArch::BI__builtin_lasx_xvbsll_v: 4450 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 4451 case LoongArch::BI__builtin_lasx_xvssrarni_h_w: 4452 case LoongArch::BI__builtin_lasx_xvssrarni_hu_w: 4453 case LoongArch::BI__builtin_lasx_xvssrani_h_w: 4454 case LoongArch::BI__builtin_lasx_xvssrani_hu_w: 4455 case LoongArch::BI__builtin_lasx_xvsrarni_h_w: 4456 case LoongArch::BI__builtin_lasx_xvsrani_h_w: 4457 case LoongArch::BI__builtin_lasx_xvfrstpi_b: 4458 case LoongArch::BI__builtin_lasx_xvfrstpi_h: 4459 case LoongArch::BI__builtin_lasx_xvsrlni_h_w: 4460 case LoongArch::BI__builtin_lasx_xvsrlrni_h_w: 4461 case LoongArch::BI__builtin_lasx_xvssrlni_h_w: 4462 case LoongArch::BI__builtin_lasx_xvssrlni_hu_w: 4463 case LoongArch::BI__builtin_lasx_xvssrlrni_h_w: 4464 case LoongArch::BI__builtin_lasx_xvssrlrni_hu_w: 4465 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 4466 case LoongArch::BI__builtin_lasx_xvbitclri_d: 4467 case LoongArch::BI__builtin_lasx_xvbitrevi_d: 4468 case LoongArch::BI__builtin_lasx_xvbitseti_d: 4469 case LoongArch::BI__builtin_lasx_xvsat_d: 4470 case LoongArch::BI__builtin_lasx_xvsat_du: 4471 case LoongArch::BI__builtin_lasx_xvslli_d: 4472 case LoongArch::BI__builtin_lasx_xvsrai_d: 4473 case LoongArch::BI__builtin_lasx_xvsrli_d: 4474 case LoongArch::BI__builtin_lasx_xvsrari_d: 4475 case LoongArch::BI__builtin_lasx_xvrotri_d: 4476 case LoongArch::BI__builtin_lasx_xvsrlri_d: 4477 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63); 4478 case LoongArch::BI__builtin_lasx_xvssrarni_w_d: 4479 case LoongArch::BI__builtin_lasx_xvssrarni_wu_d: 4480 case LoongArch::BI__builtin_lasx_xvssrani_w_d: 4481 case LoongArch::BI__builtin_lasx_xvssrani_wu_d: 4482 case LoongArch::BI__builtin_lasx_xvsrarni_w_d: 4483 case LoongArch::BI__builtin_lasx_xvsrlni_w_d: 4484 case LoongArch::BI__builtin_lasx_xvsrlrni_w_d: 4485 case LoongArch::BI__builtin_lasx_xvssrlni_w_d: 4486 case LoongArch::BI__builtin_lasx_xvssrlni_wu_d: 4487 case 
LoongArch::BI__builtin_lasx_xvssrlrni_w_d: 4488 case LoongArch::BI__builtin_lasx_xvssrlrni_wu_d: 4489 case LoongArch::BI__builtin_lasx_xvsrani_w_d: 4490 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 63); 4491 case LoongArch::BI__builtin_lasx_xvssrarni_d_q: 4492 case LoongArch::BI__builtin_lasx_xvssrarni_du_q: 4493 case LoongArch::BI__builtin_lasx_xvssrani_d_q: 4494 case LoongArch::BI__builtin_lasx_xvssrani_du_q: 4495 case LoongArch::BI__builtin_lasx_xvsrarni_d_q: 4496 case LoongArch::BI__builtin_lasx_xvssrlni_d_q: 4497 case LoongArch::BI__builtin_lasx_xvssrlni_du_q: 4498 case LoongArch::BI__builtin_lasx_xvssrlrni_d_q: 4499 case LoongArch::BI__builtin_lasx_xvssrlrni_du_q: 4500 case LoongArch::BI__builtin_lasx_xvsrani_d_q: 4501 case LoongArch::BI__builtin_lasx_xvsrlni_d_q: 4502 case LoongArch::BI__builtin_lasx_xvsrlrni_d_q: 4503 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 127); 4504 case LoongArch::BI__builtin_lasx_xvseqi_b: 4505 case LoongArch::BI__builtin_lasx_xvseqi_h: 4506 case LoongArch::BI__builtin_lasx_xvseqi_w: 4507 case LoongArch::BI__builtin_lasx_xvseqi_d: 4508 case LoongArch::BI__builtin_lasx_xvslti_b: 4509 case LoongArch::BI__builtin_lasx_xvslti_h: 4510 case LoongArch::BI__builtin_lasx_xvslti_w: 4511 case LoongArch::BI__builtin_lasx_xvslti_d: 4512 case LoongArch::BI__builtin_lasx_xvslei_b: 4513 case LoongArch::BI__builtin_lasx_xvslei_h: 4514 case LoongArch::BI__builtin_lasx_xvslei_w: 4515 case LoongArch::BI__builtin_lasx_xvslei_d: 4516 case LoongArch::BI__builtin_lasx_xvmaxi_b: 4517 case LoongArch::BI__builtin_lasx_xvmaxi_h: 4518 case LoongArch::BI__builtin_lasx_xvmaxi_w: 4519 case LoongArch::BI__builtin_lasx_xvmaxi_d: 4520 case LoongArch::BI__builtin_lasx_xvmini_b: 4521 case LoongArch::BI__builtin_lasx_xvmini_h: 4522 case LoongArch::BI__builtin_lasx_xvmini_w: 4523 case LoongArch::BI__builtin_lasx_xvmini_d: 4524 return SemaBuiltinConstantArgRange(TheCall, 1, -16, 15); 4525 case LoongArch::BI__builtin_lasx_xvandi_b: 4526 case 
LoongArch::BI__builtin_lasx_xvnori_b: 4527 case LoongArch::BI__builtin_lasx_xvori_b: 4528 case LoongArch::BI__builtin_lasx_xvshuf4i_b: 4529 case LoongArch::BI__builtin_lasx_xvshuf4i_h: 4530 case LoongArch::BI__builtin_lasx_xvshuf4i_w: 4531 case LoongArch::BI__builtin_lasx_xvxori_b: 4532 case LoongArch::BI__builtin_lasx_xvpermi_d: 4533 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 255); 4534 case LoongArch::BI__builtin_lasx_xvbitseli_b: 4535 case LoongArch::BI__builtin_lasx_xvshuf4i_d: 4536 case LoongArch::BI__builtin_lasx_xvextrins_b: 4537 case LoongArch::BI__builtin_lasx_xvextrins_h: 4538 case LoongArch::BI__builtin_lasx_xvextrins_w: 4539 case LoongArch::BI__builtin_lasx_xvextrins_d: 4540 case LoongArch::BI__builtin_lasx_xvpermi_q: 4541 case LoongArch::BI__builtin_lasx_xvpermi_w: 4542 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 255); 4543 case LoongArch::BI__builtin_lasx_xvrepl128vei_b: 4544 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 4545 case LoongArch::BI__builtin_lasx_xvrepl128vei_h: 4546 case LoongArch::BI__builtin_lasx_xvpickve2gr_w: 4547 case LoongArch::BI__builtin_lasx_xvpickve2gr_wu: 4548 case LoongArch::BI__builtin_lasx_xvpickve_w_f: 4549 case LoongArch::BI__builtin_lasx_xvpickve_w: 4550 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7); 4551 case LoongArch::BI__builtin_lasx_xvinsgr2vr_w: 4552 case LoongArch::BI__builtin_lasx_xvinsve0_w: 4553 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4554 case LoongArch::BI__builtin_lasx_xvrepl128vei_w: 4555 case LoongArch::BI__builtin_lasx_xvpickve2gr_d: 4556 case LoongArch::BI__builtin_lasx_xvpickve2gr_du: 4557 case LoongArch::BI__builtin_lasx_xvpickve_d_f: 4558 case LoongArch::BI__builtin_lasx_xvpickve_d: 4559 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 4560 case LoongArch::BI__builtin_lasx_xvinsve0_d: 4561 case LoongArch::BI__builtin_lasx_xvinsgr2vr_d: 4562 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4563 case LoongArch::BI__builtin_lasx_xvstelm_b: 4564 return 
// NOTE(review): this chunk begins inside CheckLoongArchBuiltinFunctionCall's
// switch; the first line below continues a `return` started above.
SemaBuiltinConstantArgRange(TheCall, 2, -128, 127) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 31);
  // xvstelm_{h,w,d}: signed store offset (arg 2; the range narrows with the
  // element size) plus an unsigned lane index (arg 3).
  case LoongArch::BI__builtin_lasx_xvstelm_h:
    return SemaBuiltinConstantArgRange(TheCall, 2, -256, 254) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 15);
  case LoongArch::BI__builtin_lasx_xvstelm_w:
    return SemaBuiltinConstantArgRange(TheCall, 2, -512, 508) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
  case LoongArch::BI__builtin_lasx_xvstelm_d:
    return SemaBuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
  case LoongArch::BI__builtin_lasx_xvrepl128vei_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  // Vector loads/stores: signed 12-bit byte offset (-2048..2047); for the
  // wider ldrepl variants the upper bound shrinks by the access size.
  case LoongArch::BI__builtin_lasx_xvldrepl_b:
  case LoongArch::BI__builtin_lasx_xvld:
    return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2047);
  case LoongArch::BI__builtin_lasx_xvldrepl_h:
    return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2046);
  case LoongArch::BI__builtin_lasx_xvldrepl_w:
    return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2044);
  case LoongArch::BI__builtin_lasx_xvldrepl_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2040);
  case LoongArch::BI__builtin_lasx_xvst:
    return SemaBuiltinConstantArgRange(TheCall, 2, -2048, 2047);
  // Immediate loads: xvldi takes a signed 13-bit immediate, xvrepli_* a
  // signed 10-bit immediate.
  case LoongArch::BI__builtin_lasx_xvldi:
    return SemaBuiltinConstantArgRange(TheCall, 0, -4096, 4095);
  case LoongArch::BI__builtin_lasx_xvrepli_b:
  case LoongArch::BI__builtin_lasx_xvrepli_h:
  case LoongArch::BI__builtin_lasx_xvrepli_w:
  case LoongArch::BI__builtin_lasx_xvrepli_d:
    return SemaBuiltinConstantArgRange(TheCall, 0, -512, 511);
  }
  // Builtins not listed above impose no immediate-operand constraint.
  return false;
}

/// Validate a call to a MIPS builtin: first verify the required target
/// feature is enabled, then range-check any constant immediate operands.
/// Returns true when a diagnostic was emitted.
bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
         CheckMipsBuiltinArgument(BuiltinID, TheCall);
}

/// Diagnose use of MIPS DSP/DSPr2/MSA builtins when the corresponding target
/// feature is not enabled. Relies on the builtin IDs of each feature group
/// being numerically contiguous (hence the <= range comparisons below).
/// Returns true (and emits a diagnostic) when a required feature is missing.
bool
Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
                          CallExpr *TheCall) {

  // DSP builtins require the "dsp" feature.
  if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_lwx) {
    if (!TI.hasFeature("dsp"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
  }

  // DSP revision 2 builtins require the "dspr2" feature.
  if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
    if (!TI.hasFeature("dspr2"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_mips_builtin_requires_dspr2);
  }

  // MSA (SIMD) builtins require the "msa" feature.
  if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_msa_xori_b) {
    if (!TI.hasFeature("msa"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
  }

  return false;
}

// CheckMipsBuiltinArgument - Checks the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction i.e., df/m, df/n and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
/// Range-check the constant operand of a MIPS DSP/MSA builtin call.
/// The switch selects, per builtin:
///   i   - index of the argument that must be an integer constant,
///   l/u - inclusive lower/upper bounds for that constant,
///   m   - when nonzero, the constant must additionally be a multiple of m
///         (memory offsets scaled by the access size).
/// Returns true (after emitting a diagnostic) on a violation.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually have an unsigned 5 bit field,
  // not a df/n field.
  case Mips::BI__builtin_msa_cfcmsa:
  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_clei_u_b:
  case Mips::BI__builtin_msa_clei_u_h:
  case Mips::BI__builtin_msa_clei_u_w:
  case Mips::BI__builtin_msa_clei_u_d:
  case Mips::BI__builtin_msa_clti_u_b:
  case Mips::BI__builtin_msa_clti_u_h:
  case Mips::BI__builtin_msa_clti_u_w:
  case Mips::BI__builtin_msa_clti_u_d:
  case Mips::BI__builtin_msa_maxi_u_b:
  case Mips::BI__builtin_msa_maxi_u_h:
  case Mips::BI__builtin_msa_maxi_u_w:
  case Mips::BI__builtin_msa_maxi_u_d:
  case Mips::BI__builtin_msa_mini_u_b:
  case Mips::BI__builtin_msa_mini_u_h:
  case Mips::BI__builtin_msa_mini_u_w:
  case Mips::BI__builtin_msa_mini_u_d:
  case Mips::BI__builtin_msa_addvi_b:
  case Mips::BI__builtin_msa_addvi_h:
  case Mips::BI__builtin_msa_addvi_w:
  case Mips::BI__builtin_msa_addvi_d:
  case Mips::BI__builtin_msa_bclri_w:
  case Mips::BI__builtin_msa_bnegi_w:
  case Mips::BI__builtin_msa_bseti_w:
  case Mips::BI__builtin_msa_sat_s_w:
  case Mips::BI__builtin_msa_sat_u_w:
  case Mips::BI__builtin_msa_slli_w:
  case Mips::BI__builtin_msa_srai_w:
  case Mips::BI__builtin_msa_srari_w:
  case Mips::BI__builtin_msa_srli_w:
  case Mips::BI__builtin_msa_srlri_w:
  case Mips::BI__builtin_msa_subvi_b:
  case Mips::BI__builtin_msa_subvi_h:
  case Mips::BI__builtin_msa_subvi_w:
  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_binsli_w:
  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
  // These intrinsics take an unsigned 6 bit immediate.
  case Mips::BI__builtin_msa_bclri_d:
  case Mips::BI__builtin_msa_bnegi_d:
  case Mips::BI__builtin_msa_bseti_d:
  case Mips::BI__builtin_msa_sat_s_d:
  case Mips::BI__builtin_msa_sat_u_d:
  case Mips::BI__builtin_msa_slli_d:
  case Mips::BI__builtin_msa_srai_d:
  case Mips::BI__builtin_msa_srari_d:
  case Mips::BI__builtin_msa_srli_d:
  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_msa_binsli_d:
  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
  // These intrinsics take a signed 5 bit immediate.
  case Mips::BI__builtin_msa_ceqi_b:
  case Mips::BI__builtin_msa_ceqi_h:
  case Mips::BI__builtin_msa_ceqi_w:
  case Mips::BI__builtin_msa_ceqi_d:
  case Mips::BI__builtin_msa_clti_s_b:
  case Mips::BI__builtin_msa_clti_s_h:
  case Mips::BI__builtin_msa_clti_s_w:
  case Mips::BI__builtin_msa_clti_s_d:
  case Mips::BI__builtin_msa_clei_s_b:
  case Mips::BI__builtin_msa_clei_s_h:
  case Mips::BI__builtin_msa_clei_s_w:
  case Mips::BI__builtin_msa_clei_s_d:
  case Mips::BI__builtin_msa_maxi_s_b:
  case Mips::BI__builtin_msa_maxi_s_h:
  case Mips::BI__builtin_msa_maxi_s_w:
  case Mips::BI__builtin_msa_maxi_s_d:
  case Mips::BI__builtin_msa_mini_s_b:
  case Mips::BI__builtin_msa_mini_s_h:
  case Mips::BI__builtin_msa_mini_s_w:
  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
  // These intrinsics take an unsigned 8 bit immediate.
  case Mips::BI__builtin_msa_andi_b:
  case Mips::BI__builtin_msa_nori_b:
  case Mips::BI__builtin_msa_ori_b:
  case Mips::BI__builtin_msa_shf_b:
  case Mips::BI__builtin_msa_shf_h:
  case Mips::BI__builtin_msa_shf_w:
  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
  case Mips::BI__builtin_msa_bseli_b:
  case Mips::BI__builtin_msa_bmnzi_b:
  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
  // df/n format
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_copy_s_b:
  case Mips::BI__builtin_msa_copy_u_b:
  case Mips::BI__builtin_msa_insve_b:
  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_copy_s_h:
  case Mips::BI__builtin_msa_copy_u_h:
  case Mips::BI__builtin_msa_insve_h:
  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 2 bit immediate.
  case Mips::BI__builtin_msa_copy_s_w:
  case Mips::BI__builtin_msa_copy_u_w:
  case Mips::BI__builtin_msa_insve_w:
  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
  // These intrinsics take an unsigned 1 bit immediate.
  case Mips::BI__builtin_msa_copy_s_d:
  case Mips::BI__builtin_msa_copy_u_d:
  case Mips::BI__builtin_msa_insve_d:
  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
  // Memory offsets and immediate loads.
  // These intrinsics take a signed 10 bit immediate.
  // NOTE(review): ldi_b accepts -128..255 rather than a pure signed range,
  // presumably so both signed and unsigned byte literals are usable --
  // confirm against GCC's behavior (see signedness FIXME above).
  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
  case Mips::BI__builtin_msa_ldi_h:
  case Mips::BI__builtin_msa_ldi_w:
  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
  // Load/store offsets must be multiples of the access size (m).
  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
  }

  // Plain range check, plus a multiple-of check for scaled memory offsets.
  if (!m)
    return SemaBuiltinConstantArgRange(TheCall, i, l, u);

  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
         SemaBuiltinConstantArgMultiple(TheCall, i, m);
}

/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
/// advancing the pointer over the consumed characters. The decoded type is
/// returned. If the decoded type represents a constant integer with a
/// constraint on its value then Mask is set to that value. The type descriptors
/// used in Str are specific to PPC MMA builtins and are documented in the file
/// defining the PPC builtins.
static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
                                        unsigned &Mask) {
  bool RequireICE = false;
  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  switch (*Str++) {
  // 'V': an AltiVec `vector unsigned char` (16 lanes).
  case 'V':
    return Context.getVectorType(Context.UnsignedCharTy, 16,
                                 VectorKind::AltiVecVector);
  // 'i<N>': a constant int whose value constraint N is returned via Mask.
  case 'i': {
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing constant parameter constraint");
    Str = End;
    Mask = size;
    return Context.IntTy;
  }
  // 'W<N>': a PPC MMA vector type of bit-width N (from PPCTypes.def),
  // optionally followed by '*' (pointer) and/or 'C' (const) modifiers.
  case 'W': {
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing PowerPC MMA type size");
    Str = End;
    QualType Type;
    switch (size) {
#define PPC_VECTOR_TYPE(typeName, Id, size) \
  case size: Type = Context.Id##Ty; break;
#include "clang/Basic/PPCTypes.def"
    default: llvm_unreachable("Invalid PowerPC MMA vector type");
    }
    // Consume trailing '*'/'C' modifiers; any other character ends the
    // descriptor (and is pushed back for the caller).
    bool CheckVectorArgs = false;
    while (!CheckVectorArgs) {
      switch (*Str++) {
      case '*':
        Type = Context.getPointerType(Type);
        break;
      case 'C':
        Type = Type.withConst();
        break;
      default:
        CheckVectorArgs = true;
        --Str;
        break;
      }
    }
    return Type;
  }
  // Anything else: defer to the generic builtin type-string decoder.
  default:
    return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
  }
}

static bool isPPC_64Builtin(unsigned BuiltinID) {
  // These builtins only work on PPC 64bit targets.
  switch (BuiltinID) {
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
  case PPC::BI__builtin_bpermd:
  case PPC::BI__builtin_pdepd:
  case PPC::BI__builtin_pextd:
  case PPC::BI__builtin_ppc_ldarx:
  case PPC::BI__builtin_ppc_stdcx:
  case PPC::BI__builtin_ppc_tdw:
  case PPC::BI__builtin_ppc_trapd:
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_mulhd:
  case PPC::BI__builtin_ppc_mulhdu:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
  case PPC::BI__builtin_ppc_insert_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_addex:
  case PPC::BI__builtin_darn:
  case PPC::BI__builtin_darn_raw:
  case PPC::BI__builtin_ppc_compare_and_swaplp:
  case PPC::BI__builtin_ppc_fetch_and_addlp:
  case PPC::BI__builtin_ppc_fetch_and_andlp:
  case PPC::BI__builtin_ppc_fetch_and_orlp:
  case PPC::BI__builtin_ppc_fetch_and_swaplp:
    return true;
  }
  return false;
}

/// Returns true if the argument consists of one contiguous run of 1s with any
/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
/// since all 1s are not contiguous.
bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;
  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
  // The complement test accepts masks whose run of 1s wraps from LSB to MSB:
  // such values are the bitwise-NOT of a non-wrapping shifted mask.
  if (Result.isShiftedMask() || (~Result).isShiftedMask())
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_contiguous_bit_field)
         << ArgNum << Arg->getSourceRange();
}

bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // NOTE(review): i/l/u are not used in the visible portion of this function;
  // presumably consumed by later cases of this switch -- confirm downstream.
  unsigned i = 0, l = 0, u = 0;
  bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
  llvm::APSInt Result;

  // Reject 64-bit-only builtins on 32-bit targets up front.
  if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
    return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
           << TheCall->getSourceRange();

  switch (BuiltinID) {
  default: return false;
  case PPC::BI__builtin_altivec_crypto_vshasigmaw:
  case PPC::BI__builtin_altivec_crypto_vshasigmad:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case PPC::BI__builtin_altivec_dss:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
  case PPC::BI__builtin_tbegin:
  case PPC::BI__builtin_tend:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
  case PPC::BI__builtin_tsr:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7);
  case PPC::BI__builtin_tabortwc:
  case PPC::BI__builtin_tabortdc:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
  case PPC::BI__builtin_tabortwci:
  case PPC::BI__builtin_tabortdci:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
  // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
  // __builtin_(un)pack_longdouble are available only if long double uses IBM
  // extended double representation.
4965 case PPC::BI__builtin_unpack_longdouble: 4966 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1)) 4967 return true; 4968 [[fallthrough]]; 4969 case PPC::BI__builtin_pack_longdouble: 4970 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble()) 4971 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi) 4972 << "ibmlongdouble"; 4973 return false; 4974 case PPC::BI__builtin_altivec_dst: 4975 case PPC::BI__builtin_altivec_dstt: 4976 case PPC::BI__builtin_altivec_dstst: 4977 case PPC::BI__builtin_altivec_dststt: 4978 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4979 case PPC::BI__builtin_vsx_xxpermdi: 4980 case PPC::BI__builtin_vsx_xxsldwi: 4981 return SemaBuiltinVSX(TheCall); 4982 case PPC::BI__builtin_unpack_vector_int128: 4983 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4984 case PPC::BI__builtin_altivec_vgnb: 4985 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 4986 case PPC::BI__builtin_vsx_xxeval: 4987 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 4988 case PPC::BI__builtin_altivec_vsldbi: 4989 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4990 case PPC::BI__builtin_altivec_vsrdbi: 4991 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4992 case PPC::BI__builtin_vsx_xxpermx: 4993 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 4994 case PPC::BI__builtin_ppc_tw: 4995 case PPC::BI__builtin_ppc_tdw: 4996 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 4997 case PPC::BI__builtin_ppc_cmprb: 4998 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 4999 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 5000 // be a constant that represents a contiguous bit field. 
5001 case PPC::BI__builtin_ppc_rlwnm: 5002 return SemaValueIsRunOfOnes(TheCall, 2); 5003 case PPC::BI__builtin_ppc_rlwimi: 5004 case PPC::BI__builtin_ppc_rldimi: 5005 return SemaBuiltinConstantArg(TheCall, 2, Result) || 5006 SemaValueIsRunOfOnes(TheCall, 3); 5007 case PPC::BI__builtin_ppc_addex: { 5008 if (SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 5009 return true; 5010 // Output warning for reserved values 1 to 3. 5011 int ArgValue = 5012 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 5013 if (ArgValue != 0) 5014 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 5015 << ArgValue; 5016 return false; 5017 } 5018 case PPC::BI__builtin_ppc_mtfsb0: 5019 case PPC::BI__builtin_ppc_mtfsb1: 5020 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 5021 case PPC::BI__builtin_ppc_mtfsf: 5022 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 5023 case PPC::BI__builtin_ppc_mtfsfi: 5024 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 5025 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 5026 case PPC::BI__builtin_ppc_alignx: 5027 return SemaBuiltinConstantArgPower2(TheCall, 0); 5028 case PPC::BI__builtin_ppc_rdlam: 5029 return SemaValueIsRunOfOnes(TheCall, 2); 5030 case PPC::BI__builtin_vsx_ldrmb: 5031 case PPC::BI__builtin_vsx_strmb: 5032 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 5033 case PPC::BI__builtin_altivec_vcntmbb: 5034 case PPC::BI__builtin_altivec_vcntmbh: 5035 case PPC::BI__builtin_altivec_vcntmbw: 5036 case PPC::BI__builtin_altivec_vcntmbd: 5037 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 5038 case PPC::BI__builtin_vsx_xxgenpcvbm: 5039 case PPC::BI__builtin_vsx_xxgenpcvhm: 5040 case PPC::BI__builtin_vsx_xxgenpcvwm: 5041 case PPC::BI__builtin_vsx_xxgenpcvdm: 5042 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 5043 case PPC::BI__builtin_ppc_test_data_class: { 5044 // Check if the first argument of the __builtin_ppc_test_data_class call is 5045 // valid. 
The argument must be 'float' or 'double' or '__float128'. 5046 QualType ArgType = TheCall->getArg(0)->getType(); 5047 if (ArgType != QualType(Context.FloatTy) && 5048 ArgType != QualType(Context.DoubleTy) && 5049 ArgType != QualType(Context.Float128Ty)) 5050 return Diag(TheCall->getBeginLoc(), 5051 diag::err_ppc_invalid_test_data_class_type); 5052 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 127); 5053 } 5054 case PPC::BI__builtin_ppc_maxfe: 5055 case PPC::BI__builtin_ppc_minfe: 5056 case PPC::BI__builtin_ppc_maxfl: 5057 case PPC::BI__builtin_ppc_minfl: 5058 case PPC::BI__builtin_ppc_maxfs: 5059 case PPC::BI__builtin_ppc_minfs: { 5060 if (Context.getTargetInfo().getTriple().isOSAIX() && 5061 (BuiltinID == PPC::BI__builtin_ppc_maxfe || 5062 BuiltinID == PPC::BI__builtin_ppc_minfe)) 5063 return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type) 5064 << "builtin" << true << 128 << QualType(Context.LongDoubleTy) 5065 << false << Context.getTargetInfo().getTriple().str(); 5066 // Argument type should be exact. 
5067 QualType ArgType = QualType(Context.LongDoubleTy); 5068 if (BuiltinID == PPC::BI__builtin_ppc_maxfl || 5069 BuiltinID == PPC::BI__builtin_ppc_minfl) 5070 ArgType = QualType(Context.DoubleTy); 5071 else if (BuiltinID == PPC::BI__builtin_ppc_maxfs || 5072 BuiltinID == PPC::BI__builtin_ppc_minfs) 5073 ArgType = QualType(Context.FloatTy); 5074 for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I) 5075 if (TheCall->getArg(I)->getType() != ArgType) 5076 return Diag(TheCall->getBeginLoc(), 5077 diag::err_typecheck_convert_incompatible) 5078 << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0; 5079 return false; 5080 } 5081 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \ 5082 case PPC::BI__builtin_##Name: \ 5083 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types); 5084 #include "clang/Basic/BuiltinsPPC.def" 5085 } 5086 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 5087 } 5088 5089 // Check if the given type is a non-pointer PPC MMA type. This function is used 5090 // in Sema to prevent invalid uses of restricted PPC MMA types. 
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  // Pointers to and arrays of MMA types are not themselves MMA types, so
  // they are permitted.
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  // Compare against each PPC vector/MMA type from PPCTypes.def; the macro
  // expands to "|| CoreType == Context.<Id>Ty" terms appended to 'false'.
  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}

/// Perform semantic checks on AMDGCN atomic/fence builtins: validates the
/// memory-order and sync-scope arguments. Returns true on error.
bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // position of memory order and scope arguments in the builtin
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    // Not one of the builtins this function checks.
    return false;
  }

  // The memory-order argument must be an integer constant we can evaluate.
  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of memory ordering as per C11 / C++11's memory model.
  // Only fence needs check. Atomic dec/inc allow all memory orders.
  if (!llvm::isValidAtomicOrderingCABI(Ord))
    return Diag(ArgExpr->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << 0 << ArgExpr->getSourceRange();
  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
  case llvm::AtomicOrderingCABI::relaxed:
  case llvm::AtomicOrderingCABI::consume:
    // relaxed/consume are meaningless for a fence; reject them there only.
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
      return Diag(ArgExpr->getBeginLoc(),
                  diag::warn_atomic_op_has_invalid_memory_order)
             << 0 << ArgExpr->getSourceRange();
    break;
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::release:
  case llvm::AtomicOrderingCABI::acq_rel:
  case llvm::AtomicOrderingCABI::seq_cst:
    break;
  }

  Arg = TheCall->getArg(ScopeIndex);
  ArgExpr = Arg.get();
  Expr::EvalResult ArgResult1;
  // Check that sync scope is a constant literal
  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
           << ArgExpr->getType();

  return false;
}

/// Validate an RVV LMUL operand: it must be a constant whose value is in
/// [0, 3] or [5, 7] (4 is the single reserved encoding — NOTE(review):
/// presumably per the vtype LMUL field encoding; confirm against the RVV
/// spec). Returns true on error.
bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument; defer until
  // instantiation.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first; diagnoses and returns true on failure.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  int64_t Val = Result.getSExtValue();
  if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
         << Arg->getSourceRange();
}

/// Check that the target's VLEN is large enough for the given vector operand
/// type of an RVV crypto builtin, i.e. that LMUL * VLEN >= EGW (element group
/// width). Diagnoses (and returns true) when the required zvl<N>b feature is
/// missing.
static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
                                    Sema &S, QualType Type, int EGW) {
  assert((EGW == 128 || EGW == 256) && "EGW can only be 128 or 256 bits");

  // LMUL * VLEN >= EGW
  ASTContext::BuiltinVectorTypeInfo Info =
      S.Context.getBuiltinVectorTypeInfo(Type->castAs<BuiltinType>());
  unsigned ElemSize = S.Context.getTypeSize(Info.ElementType);
  unsigned MinElemCount = Info.EC.getKnownMinValue();

  unsigned EGS = EGW / ElemSize;
  // If EGS is less than or equal to the minimum number of elements, then the
  // type is valid.
  if (EGS <= MinElemCount)
    return false;

  // Otherwise, we need vscale to be at least EGS / MinElemCont.
  assert(EGS % MinElemCount == 0);
  unsigned VScaleFactor = EGS / MinElemCount;
  // Vscale is VLEN/RVVBitsPerBlock.
  unsigned MinRequiredVLEN = VScaleFactor * llvm::RISCV::RVVBitsPerBlock;
  std::string RequiredExt = "zvl" + std::to_string(MinRequiredVLEN) + "b";
  if (!TI.hasFeature(RequiredExt))
    return S.Diag(TheCall->getBeginLoc(),
                  diag::err_riscv_type_requires_extension) << Type << RequiredExt;

  return false;
}

bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
                                         unsigned BuiltinID,
                                         CallExpr *TheCall) {
  // CodeGenFunction can also detect this, but this gives a better error
  // message.
5221 bool FeatureMissing = false; 5222 SmallVector<StringRef> ReqFeatures; 5223 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 5224 Features.split(ReqFeatures, ',', -1, false); 5225 5226 // Check if each required feature is included 5227 for (StringRef F : ReqFeatures) { 5228 SmallVector<StringRef> ReqOpFeatures; 5229 F.split(ReqOpFeatures, '|'); 5230 5231 if (llvm::none_of(ReqOpFeatures, 5232 [&TI](StringRef OF) { return TI.hasFeature(OF); })) { 5233 std::string FeatureStrs; 5234 bool IsExtension = true; 5235 for (StringRef OF : ReqOpFeatures) { 5236 // If the feature is 64bit, alter the string so it will print better in 5237 // the diagnostic. 5238 if (OF == "64bit") { 5239 assert(ReqOpFeatures.size() == 1 && "Expected '64bit' to be alone"); 5240 OF = "RV64"; 5241 IsExtension = false; 5242 } 5243 if (OF == "32bit") { 5244 assert(ReqOpFeatures.size() == 1 && "Expected '32bit' to be alone"); 5245 OF = "RV32"; 5246 IsExtension = false; 5247 } 5248 5249 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 5250 OF.consume_front("experimental-"); 5251 std::string FeatureStr = OF.str(); 5252 FeatureStr[0] = std::toupper(FeatureStr[0]); 5253 // Combine strings. 5254 FeatureStrs += FeatureStrs.empty() ? "" : ", "; 5255 FeatureStrs += "'"; 5256 FeatureStrs += FeatureStr; 5257 FeatureStrs += "'"; 5258 } 5259 // Error message 5260 FeatureMissing = true; 5261 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 5262 << IsExtension 5263 << TheCall->getSourceRange() << StringRef(FeatureStrs); 5264 } 5265 } 5266 5267 if (FeatureMissing) 5268 return true; 5269 5270 // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx, 5271 // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*. 
5272 switch (BuiltinID) { 5273 default: 5274 break; 5275 case RISCVVector::BI__builtin_rvv_vmulhsu_vv: 5276 case RISCVVector::BI__builtin_rvv_vmulhsu_vx: 5277 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu: 5278 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu: 5279 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m: 5280 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m: 5281 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu: 5282 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu: 5283 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum: 5284 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum: 5285 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu: 5286 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu: 5287 case RISCVVector::BI__builtin_rvv_vmulhu_vv: 5288 case RISCVVector::BI__builtin_rvv_vmulhu_vx: 5289 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu: 5290 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu: 5291 case RISCVVector::BI__builtin_rvv_vmulhu_vv_m: 5292 case RISCVVector::BI__builtin_rvv_vmulhu_vx_m: 5293 case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu: 5294 case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu: 5295 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum: 5296 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum: 5297 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu: 5298 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu: 5299 case RISCVVector::BI__builtin_rvv_vmulh_vv: 5300 case RISCVVector::BI__builtin_rvv_vmulh_vx: 5301 case RISCVVector::BI__builtin_rvv_vmulh_vv_tu: 5302 case RISCVVector::BI__builtin_rvv_vmulh_vx_tu: 5303 case RISCVVector::BI__builtin_rvv_vmulh_vv_m: 5304 case RISCVVector::BI__builtin_rvv_vmulh_vx_m: 5305 case RISCVVector::BI__builtin_rvv_vmulh_vv_mu: 5306 case RISCVVector::BI__builtin_rvv_vmulh_vx_mu: 5307 case RISCVVector::BI__builtin_rvv_vmulh_vv_tum: 5308 case RISCVVector::BI__builtin_rvv_vmulh_vx_tum: 5309 case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu: 5310 case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu: 5311 case 
RISCVVector::BI__builtin_rvv_vsmul_vv: 5312 case RISCVVector::BI__builtin_rvv_vsmul_vx: 5313 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu: 5314 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu: 5315 case RISCVVector::BI__builtin_rvv_vsmul_vv_m: 5316 case RISCVVector::BI__builtin_rvv_vsmul_vx_m: 5317 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu: 5318 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu: 5319 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum: 5320 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum: 5321 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu: 5322 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: { 5323 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo( 5324 TheCall->getType()->castAs<BuiltinType>()); 5325 5326 if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v")) 5327 return Diag(TheCall->getBeginLoc(), 5328 diag::err_riscv_builtin_requires_extension) 5329 << /* IsExtension */ true << TheCall->getSourceRange() << "v"; 5330 5331 break; 5332 } 5333 } 5334 5335 switch (BuiltinID) { 5336 case RISCVVector::BI__builtin_rvv_vsetvli: 5337 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 5338 CheckRISCVLMUL(TheCall, 2); 5339 case RISCVVector::BI__builtin_rvv_vsetvlimax: 5340 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 5341 CheckRISCVLMUL(TheCall, 1); 5342 case RISCVVector::BI__builtin_rvv_vget_v: { 5343 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 5344 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 5345 TheCall->getType().getCanonicalType().getTypePtr())); 5346 ASTContext::BuiltinVectorTypeInfo VecInfo = 5347 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 5348 TheCall->getArg(0)->getType().getCanonicalType().getTypePtr())); 5349 unsigned MaxIndex; 5350 if (VecInfo.NumVectors != 1) // vget for tuple type 5351 MaxIndex = VecInfo.NumVectors; 5352 else // vget for non-tuple type 5353 MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) / 5354 (ResVecInfo.EC.getKnownMinValue() * 
ResVecInfo.NumVectors); 5355 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 5356 } 5357 case RISCVVector::BI__builtin_rvv_vset_v: { 5358 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 5359 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 5360 TheCall->getType().getCanonicalType().getTypePtr())); 5361 ASTContext::BuiltinVectorTypeInfo VecInfo = 5362 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 5363 TheCall->getArg(2)->getType().getCanonicalType().getTypePtr())); 5364 unsigned MaxIndex; 5365 if (ResVecInfo.NumVectors != 1) // vset for tuple type 5366 MaxIndex = ResVecInfo.NumVectors; 5367 else // vset fo non-tuple type 5368 MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) / 5369 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors); 5370 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 5371 } 5372 // Vector Crypto 5373 case RISCVVector::BI__builtin_rvv_vaeskf1_vi_tu: 5374 case RISCVVector::BI__builtin_rvv_vaeskf2_vi_tu: 5375 case RISCVVector::BI__builtin_rvv_vaeskf2_vi: 5376 case RISCVVector::BI__builtin_rvv_vsm4k_vi_tu: { 5377 QualType Op1Type = TheCall->getArg(0)->getType(); 5378 QualType Op2Type = TheCall->getArg(1)->getType(); 5379 return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) || 5380 CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128) || 5381 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 5382 } 5383 case RISCVVector::BI__builtin_rvv_vsm3c_vi_tu: 5384 case RISCVVector::BI__builtin_rvv_vsm3c_vi: { 5385 QualType Op1Type = TheCall->getArg(0)->getType(); 5386 return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 256) || 5387 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 5388 } 5389 case RISCVVector::BI__builtin_rvv_vaeskf1_vi: 5390 case RISCVVector::BI__builtin_rvv_vsm4k_vi: { 5391 QualType Op1Type = TheCall->getArg(0)->getType(); 5392 return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) || 5393 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 5394 } 5395 
case RISCVVector::BI__builtin_rvv_vaesdf_vv: 5396 case RISCVVector::BI__builtin_rvv_vaesdf_vs: 5397 case RISCVVector::BI__builtin_rvv_vaesdm_vv: 5398 case RISCVVector::BI__builtin_rvv_vaesdm_vs: 5399 case RISCVVector::BI__builtin_rvv_vaesef_vv: 5400 case RISCVVector::BI__builtin_rvv_vaesef_vs: 5401 case RISCVVector::BI__builtin_rvv_vaesem_vv: 5402 case RISCVVector::BI__builtin_rvv_vaesem_vs: 5403 case RISCVVector::BI__builtin_rvv_vaesz_vs: 5404 case RISCVVector::BI__builtin_rvv_vsm4r_vv: 5405 case RISCVVector::BI__builtin_rvv_vsm4r_vs: 5406 case RISCVVector::BI__builtin_rvv_vaesdf_vv_tu: 5407 case RISCVVector::BI__builtin_rvv_vaesdf_vs_tu: 5408 case RISCVVector::BI__builtin_rvv_vaesdm_vv_tu: 5409 case RISCVVector::BI__builtin_rvv_vaesdm_vs_tu: 5410 case RISCVVector::BI__builtin_rvv_vaesef_vv_tu: 5411 case RISCVVector::BI__builtin_rvv_vaesef_vs_tu: 5412 case RISCVVector::BI__builtin_rvv_vaesem_vv_tu: 5413 case RISCVVector::BI__builtin_rvv_vaesem_vs_tu: 5414 case RISCVVector::BI__builtin_rvv_vaesz_vs_tu: 5415 case RISCVVector::BI__builtin_rvv_vsm4r_vv_tu: 5416 case RISCVVector::BI__builtin_rvv_vsm4r_vs_tu: { 5417 QualType Op1Type = TheCall->getArg(0)->getType(); 5418 QualType Op2Type = TheCall->getArg(1)->getType(); 5419 return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) || 5420 CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128); 5421 } 5422 case RISCVVector::BI__builtin_rvv_vsha2ch_vv: 5423 case RISCVVector::BI__builtin_rvv_vsha2cl_vv: 5424 case RISCVVector::BI__builtin_rvv_vsha2ms_vv: 5425 case RISCVVector::BI__builtin_rvv_vsha2ch_vv_tu: 5426 case RISCVVector::BI__builtin_rvv_vsha2cl_vv_tu: 5427 case RISCVVector::BI__builtin_rvv_vsha2ms_vv_tu: { 5428 QualType Op1Type = TheCall->getArg(0)->getType(); 5429 QualType Op2Type = TheCall->getArg(1)->getType(); 5430 QualType Op3Type = TheCall->getArg(2)->getType(); 5431 ASTContext::BuiltinVectorTypeInfo Info = 5432 Context.getBuiltinVectorTypeInfo(Op1Type->castAs<BuiltinType>()); 5433 uint64_t ElemSize 
= Context.getTypeSize(Info.ElementType); 5434 if (ElemSize == 64 && !TI.hasFeature("zvknhb")) 5435 return Diag(TheCall->getBeginLoc(), 5436 diag::err_riscv_builtin_requires_extension) 5437 << /* IsExtension */ true << TheCall->getSourceRange() << "zvknb"; 5438 5439 return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, ElemSize * 4) || 5440 CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, ElemSize * 4) || 5441 CheckInvalidVLENandLMUL(TI, TheCall, *this, Op3Type, ElemSize * 4); 5442 } 5443 5444 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf8: 5445 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf4: 5446 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf2: 5447 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m1: 5448 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m2: 5449 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m4: 5450 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m8: 5451 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf4: 5452 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf2: 5453 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m1: 5454 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m2: 5455 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m4: 5456 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m8: 5457 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32mf2: 5458 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m1: 5459 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m2: 5460 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m4: 5461 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m8: 5462 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m1: 5463 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m2: 5464 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m4: 5465 case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m8: 5466 // bit_27_26, bit_24_20, bit_11_7, simm5 5467 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 5468 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) || 5469 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) || 5470 
SemaBuiltinConstantArgRange(TheCall, 3, -16, 15); 5471 case RISCVVector::BI__builtin_rvv_sf_vc_iv_se: 5472 // bit_27_26, bit_11_7, vs2, simm5 5473 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 5474 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) || 5475 SemaBuiltinConstantArgRange(TheCall, 3, -16, 15); 5476 case RISCVVector::BI__builtin_rvv_sf_vc_v_i: 5477 case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se: 5478 // bit_27_26, bit_24_20, simm5 5479 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 5480 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) || 5481 SemaBuiltinConstantArgRange(TheCall, 2, -16, 15); 5482 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv: 5483 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se: 5484 // bit_27_26, vs2, simm5 5485 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 5486 SemaBuiltinConstantArgRange(TheCall, 2, -16, 15); 5487 case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se: 5488 case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se: 5489 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv: 5490 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw: 5491 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se: 5492 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se: 5493 // bit_27_26, vd, vs2, simm5 5494 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 5495 SemaBuiltinConstantArgRange(TheCall, 3, -16, 15); 5496 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf8: 5497 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf4: 5498 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf2: 5499 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m1: 5500 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m2: 5501 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m4: 5502 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m8: 5503 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf4: 5504 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf2: 5505 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m1: 5506 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m2: 5507 case 
RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m4: 5508 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m8: 5509 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32mf2: 5510 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m1: 5511 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m2: 5512 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m4: 5513 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m8: 5514 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m1: 5515 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m2: 5516 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m4: 5517 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m8: 5518 // bit_27_26, bit_24_20, bit_11_7, xs1 5519 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 5520 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) || 5521 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 5522 case RISCVVector::BI__builtin_rvv_sf_vc_xv_se: 5523 case RISCVVector::BI__builtin_rvv_sf_vc_vv_se: 5524 // bit_27_26, bit_11_7, vs2, xs1/vs1 5525 case RISCVVector::BI__builtin_rvv_sf_vc_v_x: 5526 case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se: 5527 // bit_27_26, bit_24-20, xs1 5528 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 5529 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 5530 case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se: 5531 case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se: 5532 case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se: 5533 case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se: 5534 // bit_27_26, vd, vs2, xs1 5535 case RISCVVector::BI__builtin_rvv_sf_vc_v_xv: 5536 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv: 5537 case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se: 5538 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se: 5539 // bit_27_26, vs2, xs1/vs1 5540 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv: 5541 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv: 5542 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw: 5543 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw: 5544 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se: 5545 case 
RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se: 5546 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se: 5547 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se: 5548 // bit_27_26, vd, vs2, xs1/vs1 5549 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 5550 case RISCVVector::BI__builtin_rvv_sf_vc_fv_se: 5551 // bit_26, bit_11_7, vs2, fs1 5552 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) || 5553 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 5554 case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se: 5555 case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se: 5556 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv: 5557 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw: 5558 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se: 5559 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se: 5560 // bit_26, vd, vs2, fs1 5561 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv: 5562 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se: 5563 // bit_26, vs2, fs1 5564 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 5565 // Check if byteselect is in [0, 3] 5566 case RISCV::BI__builtin_riscv_aes32dsi: 5567 case RISCV::BI__builtin_riscv_aes32dsmi: 5568 case RISCV::BI__builtin_riscv_aes32esi: 5569 case RISCV::BI__builtin_riscv_aes32esmi: 5570 case RISCV::BI__builtin_riscv_sm4ks: 5571 case RISCV::BI__builtin_riscv_sm4ed: 5572 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 5573 // Check if rnum is in [0, 10] 5574 case RISCV::BI__builtin_riscv_aes64ks1i: 5575 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10); 5576 // Check if value range for vxrm is in [0, 3] 5577 case RISCVVector::BI__builtin_rvv_vaaddu_vv: 5578 case RISCVVector::BI__builtin_rvv_vaaddu_vx: 5579 case RISCVVector::BI__builtin_rvv_vaadd_vv: 5580 case RISCVVector::BI__builtin_rvv_vaadd_vx: 5581 case RISCVVector::BI__builtin_rvv_vasubu_vv: 5582 case RISCVVector::BI__builtin_rvv_vasubu_vx: 5583 case RISCVVector::BI__builtin_rvv_vasub_vv: 5584 case RISCVVector::BI__builtin_rvv_vasub_vx: 5585 case RISCVVector::BI__builtin_rvv_vsmul_vv: 5586 
case RISCVVector::BI__builtin_rvv_vsmul_vx: 5587 case RISCVVector::BI__builtin_rvv_vssra_vv: 5588 case RISCVVector::BI__builtin_rvv_vssra_vx: 5589 case RISCVVector::BI__builtin_rvv_vssrl_vv: 5590 case RISCVVector::BI__builtin_rvv_vssrl_vx: 5591 case RISCVVector::BI__builtin_rvv_vnclip_wv: 5592 case RISCVVector::BI__builtin_rvv_vnclip_wx: 5593 case RISCVVector::BI__builtin_rvv_vnclipu_wv: 5594 case RISCVVector::BI__builtin_rvv_vnclipu_wx: 5595 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 5596 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu: 5597 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu: 5598 case RISCVVector::BI__builtin_rvv_vaadd_vv_tu: 5599 case RISCVVector::BI__builtin_rvv_vaadd_vx_tu: 5600 case RISCVVector::BI__builtin_rvv_vasubu_vv_tu: 5601 case RISCVVector::BI__builtin_rvv_vasubu_vx_tu: 5602 case RISCVVector::BI__builtin_rvv_vasub_vv_tu: 5603 case RISCVVector::BI__builtin_rvv_vasub_vx_tu: 5604 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu: 5605 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu: 5606 case RISCVVector::BI__builtin_rvv_vssra_vv_tu: 5607 case RISCVVector::BI__builtin_rvv_vssra_vx_tu: 5608 case RISCVVector::BI__builtin_rvv_vssrl_vv_tu: 5609 case RISCVVector::BI__builtin_rvv_vssrl_vx_tu: 5610 case RISCVVector::BI__builtin_rvv_vnclip_wv_tu: 5611 case RISCVVector::BI__builtin_rvv_vnclip_wx_tu: 5612 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu: 5613 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu: 5614 case RISCVVector::BI__builtin_rvv_vaaddu_vv_m: 5615 case RISCVVector::BI__builtin_rvv_vaaddu_vx_m: 5616 case RISCVVector::BI__builtin_rvv_vaadd_vv_m: 5617 case RISCVVector::BI__builtin_rvv_vaadd_vx_m: 5618 case RISCVVector::BI__builtin_rvv_vasubu_vv_m: 5619 case RISCVVector::BI__builtin_rvv_vasubu_vx_m: 5620 case RISCVVector::BI__builtin_rvv_vasub_vv_m: 5621 case RISCVVector::BI__builtin_rvv_vasub_vx_m: 5622 case RISCVVector::BI__builtin_rvv_vsmul_vv_m: 5623 case RISCVVector::BI__builtin_rvv_vsmul_vx_m: 5624 case 
RISCVVector::BI__builtin_rvv_vssra_vv_m: 5625 case RISCVVector::BI__builtin_rvv_vssra_vx_m: 5626 case RISCVVector::BI__builtin_rvv_vssrl_vv_m: 5627 case RISCVVector::BI__builtin_rvv_vssrl_vx_m: 5628 case RISCVVector::BI__builtin_rvv_vnclip_wv_m: 5629 case RISCVVector::BI__builtin_rvv_vnclip_wx_m: 5630 case RISCVVector::BI__builtin_rvv_vnclipu_wv_m: 5631 case RISCVVector::BI__builtin_rvv_vnclipu_wx_m: 5632 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 3); 5633 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum: 5634 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu: 5635 case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu: 5636 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum: 5637 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu: 5638 case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu: 5639 case RISCVVector::BI__builtin_rvv_vaadd_vv_tum: 5640 case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu: 5641 case RISCVVector::BI__builtin_rvv_vaadd_vv_mu: 5642 case RISCVVector::BI__builtin_rvv_vaadd_vx_tum: 5643 case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu: 5644 case RISCVVector::BI__builtin_rvv_vaadd_vx_mu: 5645 case RISCVVector::BI__builtin_rvv_vasubu_vv_tum: 5646 case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu: 5647 case RISCVVector::BI__builtin_rvv_vasubu_vv_mu: 5648 case RISCVVector::BI__builtin_rvv_vasubu_vx_tum: 5649 case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu: 5650 case RISCVVector::BI__builtin_rvv_vasubu_vx_mu: 5651 case RISCVVector::BI__builtin_rvv_vasub_vv_tum: 5652 case RISCVVector::BI__builtin_rvv_vasub_vv_tumu: 5653 case RISCVVector::BI__builtin_rvv_vasub_vv_mu: 5654 case RISCVVector::BI__builtin_rvv_vasub_vx_tum: 5655 case RISCVVector::BI__builtin_rvv_vasub_vx_tumu: 5656 case RISCVVector::BI__builtin_rvv_vasub_vx_mu: 5657 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu: 5658 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu: 5659 case RISCVVector::BI__builtin_rvv_vssra_vv_mu: 5660 case RISCVVector::BI__builtin_rvv_vssra_vx_mu: 5661 case 
RISCVVector::BI__builtin_rvv_vssrl_vv_mu: 5662 case RISCVVector::BI__builtin_rvv_vssrl_vx_mu: 5663 case RISCVVector::BI__builtin_rvv_vnclip_wv_mu: 5664 case RISCVVector::BI__builtin_rvv_vnclip_wx_mu: 5665 case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu: 5666 case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu: 5667 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum: 5668 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum: 5669 case RISCVVector::BI__builtin_rvv_vssra_vv_tum: 5670 case RISCVVector::BI__builtin_rvv_vssra_vx_tum: 5671 case RISCVVector::BI__builtin_rvv_vssrl_vv_tum: 5672 case RISCVVector::BI__builtin_rvv_vssrl_vx_tum: 5673 case RISCVVector::BI__builtin_rvv_vnclip_wv_tum: 5674 case RISCVVector::BI__builtin_rvv_vnclip_wx_tum: 5675 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum: 5676 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum: 5677 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu: 5678 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: 5679 case RISCVVector::BI__builtin_rvv_vssra_vv_tumu: 5680 case RISCVVector::BI__builtin_rvv_vssra_vx_tumu: 5681 case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu: 5682 case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu: 5683 case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu: 5684 case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu: 5685 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu: 5686 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu: 5687 return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3); 5688 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm: 5689 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm: 5690 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm: 5691 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm: 5692 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm: 5693 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm: 5694 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm: 5695 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm: 5696 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm: 5697 case 
RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm: 5698 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm: 5699 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm: 5700 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm: 5701 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 4); 5702 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm: 5703 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm: 5704 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm: 5705 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm: 5706 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm: 5707 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm: 5708 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm: 5709 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm: 5710 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm: 5711 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm: 5712 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm: 5713 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm: 5714 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm: 5715 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm: 5716 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm: 5717 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm: 5718 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm: 5719 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm: 5720 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm: 5721 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm: 5722 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm: 5723 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm: 5724 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm: 5725 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm: 5726 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu: 5727 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu: 5728 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu: 5729 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu: 5730 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu: 5731 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu: 5732 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu: 5733 case 
RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu: 5734 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu: 5735 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu: 5736 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu: 5737 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu: 5738 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu: 5739 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m: 5740 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m: 5741 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m: 5742 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m: 5743 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m: 5744 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m: 5745 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m: 5746 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m: 5747 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m: 5748 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m: 5749 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m: 5750 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m: 5751 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m: 5752 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4); 5753 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu: 5754 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu: 5755 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu: 5756 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu: 5757 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu: 5758 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu: 5759 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu: 5760 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu: 5761 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu: 5762 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu: 5763 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu: 5764 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu: 5765 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu: 5766 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu: 5767 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu: 5768 case 
RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu: 5769 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu: 5770 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu: 5771 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu: 5772 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu: 5773 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu: 5774 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu: 5775 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu: 5776 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu: 5777 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm: 5778 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm: 5779 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm: 5780 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm: 5781 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm: 5782 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm: 5783 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm: 5784 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm: 5785 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm: 5786 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm: 5787 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm: 5788 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm: 5789 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm: 5790 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm: 5791 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm: 5792 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm: 5793 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm: 5794 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm: 5795 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm: 5796 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm: 5797 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm: 5798 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm: 5799 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm: 5800 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm: 5801 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu: 5802 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu: 5803 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu: 5804 case 
RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu: 5805 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu: 5806 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu: 5807 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu: 5808 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu: 5809 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu: 5810 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu: 5811 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu: 5812 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu: 5813 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu: 5814 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu: 5815 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu: 5816 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu: 5817 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu: 5818 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu: 5819 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu: 5820 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu: 5821 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu: 5822 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu: 5823 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu: 5824 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu: 5825 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m: 5826 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m: 5827 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m: 5828 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m: 5829 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m: 5830 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m: 5831 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m: 5832 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m: 5833 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m: 5834 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m: 5835 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m: 5836 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m: 5837 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m: 5838 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m: 5839 case 
RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m: 5840 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m: 5841 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m: 5842 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m: 5843 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m: 5844 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m: 5845 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m: 5846 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m: 5847 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m: 5848 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m: 5849 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum: 5850 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum: 5851 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum: 5852 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum: 5853 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum: 5854 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum: 5855 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum: 5856 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum: 5857 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum: 5858 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum: 5859 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum: 5860 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum: 5861 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum: 5862 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu: 5863 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu: 5864 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu: 5865 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu: 5866 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu: 5867 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu: 5868 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu: 5869 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu: 5870 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu: 5871 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu: 5872 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu: 5873 case 
RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu: 5874 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu: 5875 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu: 5876 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu: 5877 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu: 5878 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu: 5879 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu: 5880 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu: 5881 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu: 5882 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu: 5883 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu: 5884 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu: 5885 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu: 5886 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu: 5887 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu: 5888 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4); 5889 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m: 5890 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m: 5891 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m: 5892 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m: 5893 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m: 5894 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m: 5895 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m: 5896 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m: 5897 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m: 5898 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m: 5899 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m: 5900 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m: 5901 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m: 5902 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m: 5903 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m: 5904 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m: 5905 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m: 5906 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m: 5907 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m: 5908 case 
RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m: 5909 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m: 5910 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m: 5911 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m: 5912 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m: 5913 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum: 5914 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum: 5915 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum: 5916 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum: 5917 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum: 5918 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum: 5919 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum: 5920 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum: 5921 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum: 5922 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum: 5923 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum: 5924 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum: 5925 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum: 5926 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum: 5927 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum: 5928 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum: 5929 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum: 5930 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum: 5931 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum: 5932 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum: 5933 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum: 5934 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum: 5935 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum: 5936 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum: 5937 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum: 5938 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum: 5939 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum: 5940 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum: 5941 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum: 5942 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum: 5943 case 
RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum: 5944 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum: 5945 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum: 5946 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum: 5947 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum: 5948 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum: 5949 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum: 5950 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum: 5951 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum: 5952 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum: 5953 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum: 5954 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum: 5955 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum: 5956 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum: 5957 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum: 5958 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum: 5959 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum: 5960 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum: 5961 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu: 5962 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu: 5963 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu: 5964 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu: 5965 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu: 5966 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu: 5967 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu: 5968 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu: 5969 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu: 5970 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu: 5971 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu: 5972 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu: 5973 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu: 5974 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu: 5975 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu: 5976 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu: 5977 case 
RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu: 5978 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu: 5979 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu: 5980 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu: 5981 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu: 5982 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu: 5983 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu: 5984 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu: 5985 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu: 5986 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu: 5987 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu: 5988 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu: 5989 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu: 5990 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu: 5991 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu: 5992 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu: 5993 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu: 5994 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu: 5995 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu: 5996 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu: 5997 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu: 5998 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu: 5999 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu: 6000 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu: 6001 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu: 6002 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu: 6003 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu: 6004 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu: 6005 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu: 6006 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu: 6007 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu: 6008 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu: 6009 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu: 6010 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu: 6011 case 
RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu: 6012 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu: 6013 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu: 6014 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu: 6015 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu: 6016 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu: 6017 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu: 6018 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu: 6019 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu: 6020 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu: 6021 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu: 6022 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu: 6023 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu: 6024 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu: 6025 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu: 6026 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu: 6027 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu: 6028 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu: 6029 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu: 6030 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu: 6031 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu: 6032 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu: 6033 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu: 6034 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu: 6035 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu: 6036 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu: 6037 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu: 6038 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu: 6039 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu: 6040 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu: 6041 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu: 6042 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu: 6043 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu: 6044 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu: 6045 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu: 6046 case 
RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu: 6047 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu: 6048 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu: 6049 return SemaBuiltinConstantArgRange(TheCall, 4, 0, 4); 6050 case RISCV::BI__builtin_riscv_ntl_load: 6051 case RISCV::BI__builtin_riscv_ntl_store: 6052 DeclRefExpr *DRE = 6053 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6054 assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store || 6055 BuiltinID == RISCV::BI__builtin_riscv_ntl_load) && 6056 "Unexpected RISC-V nontemporal load/store builtin!"); 6057 bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store; 6058 unsigned NumArgs = IsStore ? 3 : 2; 6059 6060 if (checkArgCountAtLeast(*this, TheCall, NumArgs - 1)) 6061 return true; 6062 6063 if (checkArgCountAtMost(*this, TheCall, NumArgs)) 6064 return true; 6065 6066 // Domain value should be compile-time constant. 6067 // 2 <= domain <= 5 6068 if (TheCall->getNumArgs() == NumArgs && 6069 SemaBuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5)) 6070 return true; 6071 6072 Expr *PointerArg = TheCall->getArg(0); 6073 ExprResult PointerArgResult = 6074 DefaultFunctionArrayLvalueConversion(PointerArg); 6075 6076 if (PointerArgResult.isInvalid()) 6077 return true; 6078 PointerArg = PointerArgResult.get(); 6079 6080 const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>(); 6081 if (!PtrType) { 6082 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 6083 << PointerArg->getType() << PointerArg->getSourceRange(); 6084 return true; 6085 } 6086 6087 QualType ValType = PtrType->getPointeeType(); 6088 ValType = ValType.getUnqualifiedType(); 6089 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6090 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 6091 !ValType->isVectorType() && !ValType->isRVVSizelessBuiltinType()) { 6092 Diag(DRE->getBeginLoc(), 6093 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 6094 << 
PointerArg->getType() << PointerArg->getSourceRange();
      return true;
    }

    // A nontemporal load produces a value of the pointee type; stores fall
    // through to convert their value operand below.
    if (!IsStore) {
      TheCall->setType(ValType);
      return false;
    }

    // Coerce the stored value to the pointee type, as if it were being passed
    // to a parameter of that type.
    ExprResult ValArg = TheCall->getArg(1);
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, ValType, /*consume*/ false);
    ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
    if (ValArg.isInvalid())
      return true;

    TheCall->setArg(1, ValArg.get());
    TheCall->setType(Context.VoidTy);
    return false;
  }

  return false;
}

/// Perform semantic checking of a SystemZ builtin call.  __builtin_tabort's
/// constant abort code is rejected when it falls in [0, 255] (that range is
/// diagnosed via err_systemz_invalid_tabort_code); builtins that encode an
/// immediate operand in the instruction are range-checked below.
/// Returns true if an error was diagnosed.
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == SystemZ::BI__builtin_tabort) {
    Expr *Arg = TheCall->getArg(0);
    // Only compile-time-constant abort codes can be validated here.
    if (std::optional<llvm::APSInt> AbortCode =
            Arg->getIntegerConstantExpr(Context))
      if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
        return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
               << Arg->getSourceRange();
  }

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // i = index of the immediate argument; [l, u] = its allowed inclusive range.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_verimb:
  case SystemZ::BI__builtin_s390_verimh:
  case SystemZ::BI__builtin_s390_verimf:
  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
  case SystemZ::BI__builtin_s390_vfaeb:
  case SystemZ::BI__builtin_s390_vfaeh:
  case SystemZ::BI__builtin_s390_vfaef:
  case SystemZ::BI__builtin_s390_vfaebs:
  case SystemZ::BI__builtin_s390_vfaehs:
  case SystemZ::BI__builtin_s390_vfaefs:
  case SystemZ::BI__builtin_s390_vfaezb:
  case SystemZ::BI__builtin_s390_vfaezh:
  case SystemZ::BI__builtin_s390_vfaezf:
  case SystemZ::BI__builtin_s390_vfaezbs:
  case SystemZ::BI__builtin_s390_vfaezhs:
  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfisb:
  case SystemZ::BI__builtin_s390_vfidb:
    // These take two independent immediates, so check both directly.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case SystemZ::BI__builtin_s390_vftcisb:
  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vstrcb:
  case SystemZ::BI__builtin_s390_vstrch:
  case SystemZ::BI__builtin_s390_vstrcf:
  case SystemZ::BI__builtin_s390_vstrczb:
  case SystemZ::BI__builtin_s390_vstrczh:
  case SystemZ::BI__builtin_s390_vstrczf:
  case SystemZ::BI__builtin_s390_vstrcbs:
  case SystemZ::BI__builtin_s390_vstrchs:
  case SystemZ::BI__builtin_s390_vstrcfs:
  case SystemZ::BI__builtin_s390_vstrczbs:
  case SystemZ::BI__builtin_s390_vstrczhs:
  case SystemZ::BI__builtin_s390_vstrczfs:
    i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfminsb:
  case SystemZ::BI__builtin_s390_vfmaxsb:
  case SystemZ::BI__builtin_s390_vfmindb:
  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vclfnhs:
  case SystemZ::BI__builtin_s390_vclfnls:
  case SystemZ::BI__builtin_s390_vcfn:
  case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

/// Perform semantic checking of a WebAssembly builtin call by dispatching to
/// a dedicated checker per builtin.  Builtins not listed here need no extra
/// checking.  Returns true if an error was diagnosed.
bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
                                               unsigned BuiltinID,
                                               CallExpr *TheCall) {
  switch (BuiltinID) {
  case WebAssembly::BI__builtin_wasm_ref_null_extern:
    return BuiltinWasmRefNullExtern(TheCall);
  case WebAssembly::BI__builtin_wasm_ref_null_func:
    return BuiltinWasmRefNullFunc(TheCall);
  case WebAssembly::BI__builtin_wasm_table_get:
    return BuiltinWasmTableGet(TheCall);
  case WebAssembly::BI__builtin_wasm_table_set:
    return BuiltinWasmTableSet(TheCall);
  case WebAssembly::BI__builtin_wasm_table_size:
    return BuiltinWasmTableSize(TheCall);
  case WebAssembly::BI__builtin_wasm_table_grow:
    return BuiltinWasmTableGrow(TheCall);
  case WebAssembly::BI__builtin_wasm_table_fill:
    return BuiltinWasmTableFill(TheCall);
  case WebAssembly::BI__builtin_wasm_table_copy:
    return BuiltinWasmTableCopy(TheCall);
  }

  return false;
}

/// Diagnose at \p Loc an RVV scalable vector type \p Ty whose element type
/// requires a target feature ("V" sub-extension) that is not enabled.
void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D) {
  const TargetInfo &TI = Context.getTargetInfo();

  ASTContext::BuiltinVectorTypeInfo Info =
Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>()); 6218 unsigned EltSize = Context.getTypeSize(Info.ElementType); 6219 unsigned MinElts = Info.EC.getKnownMinValue(); 6220 6221 // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at 6222 // least zve64x 6223 if (((EltSize == 64 && Info.ElementType->isIntegerType()) || MinElts == 1) && 6224 !TI.hasFeature("zve64x")) 6225 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x"; 6226 else if (Info.ElementType->isFloat16Type() && !TI.hasFeature("zvfh") && 6227 !TI.hasFeature("zvfhmin")) 6228 Diag(Loc, diag::err_riscv_type_requires_extension, D) 6229 << Ty << "zvfh or zvfhmin"; 6230 else if (Info.ElementType->isBFloat16Type() && 6231 !TI.hasFeature("experimental-zvfbfmin")) 6232 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin"; 6233 else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) && 6234 !TI.hasFeature("zve32f")) 6235 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f"; 6236 else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) && 6237 !TI.hasFeature("zve64d")) 6238 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d"; 6239 // Given that caller already checked isRVVType() before calling this function, 6240 // if we don't have at least zve32x supported, then we need to emit error. 
6241 else if (!TI.hasFeature("zve32x")) 6242 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x"; 6243 } 6244 6245 bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI, 6246 unsigned BuiltinID, 6247 CallExpr *TheCall) { 6248 switch (BuiltinID) { 6249 case NVPTX::BI__nvvm_cp_async_ca_shared_global_4: 6250 case NVPTX::BI__nvvm_cp_async_ca_shared_global_8: 6251 case NVPTX::BI__nvvm_cp_async_ca_shared_global_16: 6252 case NVPTX::BI__nvvm_cp_async_cg_shared_global_16: 6253 return checkArgCountAtMost(*this, TheCall, 3); 6254 } 6255 6256 return false; 6257 } 6258 6259 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 6260 /// This checks that the target supports __builtin_cpu_supports and 6261 /// that the string argument is constant and valid. 6262 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 6263 CallExpr *TheCall) { 6264 Expr *Arg = TheCall->getArg(0); 6265 6266 // Check if the argument is a string literal. 6267 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 6268 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 6269 << Arg->getSourceRange(); 6270 6271 // Check the contents of the string. 6272 StringRef Feature = 6273 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 6274 if (!TI.validateCpuSupports(Feature)) 6275 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 6276 << Arg->getSourceRange(); 6277 return false; 6278 } 6279 6280 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 6281 /// This checks that the target supports __builtin_cpu_is and 6282 /// that the string argument is constant and valid. 6283 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 6284 Expr *Arg = TheCall->getArg(0); 6285 6286 // Check if the argument is a string literal. 
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string.
  StringRef Feature =
      cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  if (!TI.validateCpuIs(Feature))
    return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
           << Arg->getSourceRange();
  return false;
}

// Check if the rounding mode is legal.
// For builtins with an embedded-rounding/SAE immediate, look up which
// argument carries the immediate (ArgNum) and whether the instruction has
// full rounding control (HasRC) or SAE only, then validate the constant.
// Returns true if an error was diagnosed.
bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
  // Indicates if this instruction has rounding control or just SAE.
  bool HasRC = false;

  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    return false;
  // SAE-only builtins; the immediate is argument 1.
  case X86::BI__builtin_ia32_vcvttsd2si32:
  case X86::BI__builtin_ia32_vcvttsd2si64:
  case X86::BI__builtin_ia32_vcvttsd2usi32:
  case X86::BI__builtin_ia32_vcvttsd2usi64:
  case X86::BI__builtin_ia32_vcvttss2si32:
  case X86::BI__builtin_ia32_vcvttss2si64:
  case X86::BI__builtin_ia32_vcvttss2usi32:
  case X86::BI__builtin_ia32_vcvttss2usi64:
  case X86::BI__builtin_ia32_vcvttsh2si32:
  case X86::BI__builtin_ia32_vcvttsh2si64:
  case X86::BI__builtin_ia32_vcvttsh2usi32:
  case X86::BI__builtin_ia32_vcvttsh2usi64:
    ArgNum = 1;
    break;
  // SAE-only builtins; the immediate is argument 2.
  case X86::BI__builtin_ia32_maxpd512:
  case X86::BI__builtin_ia32_maxps512:
  case X86::BI__builtin_ia32_minpd512:
  case X86::BI__builtin_ia32_minps512:
  case X86::BI__builtin_ia32_maxph512:
  case X86::BI__builtin_ia32_minph512:
    ArgNum = 2;
    break;
  // SAE-only builtins; the immediate is argument 3.
  case X86::BI__builtin_ia32_vcvtph2pd512_mask:
  case X86::BI__builtin_ia32_vcvtph2psx512_mask:
  case X86::BI__builtin_ia32_cvtps2pd512_mask:
  case X86::BI__builtin_ia32_cvttpd2dq512_mask:
  case X86::BI__builtin_ia32_cvttpd2qq512_mask:
  case X86::BI__builtin_ia32_cvttpd2udq512_mask:
  case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
  case X86::BI__builtin_ia32_cvttps2dq512_mask:
  case X86::BI__builtin_ia32_cvttps2qq512_mask:
  case X86::BI__builtin_ia32_cvttps2udq512_mask:
  case X86::BI__builtin_ia32_cvttps2uqq512_mask:
  case X86::BI__builtin_ia32_vcvttph2w512_mask:
  case X86::BI__builtin_ia32_vcvttph2uw512_mask:
  case X86::BI__builtin_ia32_vcvttph2dq512_mask:
  case X86::BI__builtin_ia32_vcvttph2udq512_mask:
  case X86::BI__builtin_ia32_vcvttph2qq512_mask:
  case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
  case X86::BI__builtin_ia32_exp2pd_mask:
  case X86::BI__builtin_ia32_exp2ps_mask:
  case X86::BI__builtin_ia32_getexppd512_mask:
  case X86::BI__builtin_ia32_getexpps512_mask:
  case X86::BI__builtin_ia32_getexpph512_mask:
  case X86::BI__builtin_ia32_rcp28pd_mask:
  case X86::BI__builtin_ia32_rcp28ps_mask:
  case X86::BI__builtin_ia32_rsqrt28pd_mask:
  case X86::BI__builtin_ia32_rsqrt28ps_mask:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_vcomish:
  case X86::BI__builtin_ia32_vcvtph2ps512_mask:
    ArgNum = 3;
    break;
  // SAE-only builtins; the immediate is argument 4.
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_cmpsh_mask:
  case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
  case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
  case X86::BI__builtin_ia32_cvtss2sd_round_mask:
  case X86::BI__builtin_ia32_getexpsd128_round_mask:
  case X86::BI__builtin_ia32_getexpss128_round_mask:
  case X86::BI__builtin_ia32_getexpsh128_round_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_getmantph512_mask:
  case X86::BI__builtin_ia32_maxsd_round_mask:
  case X86::BI__builtin_ia32_maxss_round_mask:
  case X86::BI__builtin_ia32_maxsh_round_mask:
  case X86::BI__builtin_ia32_minsd_round_mask:
  case X86::BI__builtin_ia32_minss_round_mask:
  case X86::BI__builtin_ia32_minsh_round_mask:
  case X86::BI__builtin_ia32_rcp28sd_round_mask:
  case X86::BI__builtin_ia32_rcp28ss_round_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_reduceph512_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscaleph_mask:
  case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
  case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
    ArgNum = 4;
    break;
  // SAE-only builtins; the immediate is argument 5.
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_getmantsh_round_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_rangesd128_round_mask:
  case X86::BI__builtin_ia32_rangess128_round_mask:
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
    ArgNum = 5;
    break;
  // Builtins with full rounding control; the immediate is argument 1.
  case X86::BI__builtin_ia32_vcvtsd2si64:
  case X86::BI__builtin_ia32_vcvtsd2si32:
  case X86::BI__builtin_ia32_vcvtsd2usi32:
  case X86::BI__builtin_ia32_vcvtsd2usi64:
  case X86::BI__builtin_ia32_vcvtss2si32:
  case X86::BI__builtin_ia32_vcvtss2si64:
  case X86::BI__builtin_ia32_vcvtss2usi32:
  case X86::BI__builtin_ia32_vcvtss2usi64:
  case X86::BI__builtin_ia32_vcvtsh2si32:
  case X86::BI__builtin_ia32_vcvtsh2si64:
  case X86::BI__builtin_ia32_vcvtsh2usi32:
  case X86::BI__builtin_ia32_vcvtsh2usi64:
  case X86::BI__builtin_ia32_sqrtpd512:
  case X86::BI__builtin_ia32_sqrtps512:
  case X86::BI__builtin_ia32_sqrtph512:
    ArgNum = 1;
    HasRC = true;
    break;
  // Builtins with full rounding control; the immediate is argument 2.
  case X86::BI__builtin_ia32_addph512:
  case X86::BI__builtin_ia32_divph512:
  case X86::BI__builtin_ia32_mulph512:
  case X86::BI__builtin_ia32_subph512:
  case X86::BI__builtin_ia32_addpd512:
  case X86::BI__builtin_ia32_addps512:
  case X86::BI__builtin_ia32_divpd512:
  case X86::BI__builtin_ia32_divps512:
  case X86::BI__builtin_ia32_mulpd512:
  case X86::BI__builtin_ia32_mulps512:
  case X86::BI__builtin_ia32_subpd512:
  case X86::BI__builtin_ia32_subps512:
  case X86::BI__builtin_ia32_cvtsi2sd64:
  case X86::BI__builtin_ia32_cvtsi2ss32:
  case X86::BI__builtin_ia32_cvtsi2ss64:
  case X86::BI__builtin_ia32_cvtusi2sd64:
  case X86::BI__builtin_ia32_cvtusi2ss32:
  case X86::BI__builtin_ia32_cvtusi2ss64:
  case X86::BI__builtin_ia32_vcvtusi2sh:
  case X86::BI__builtin_ia32_vcvtusi642sh:
  case X86::BI__builtin_ia32_vcvtsi2sh:
  case X86::BI__builtin_ia32_vcvtsi642sh:
    ArgNum = 2;
    HasRC = true;
    break;
  // Builtins with full rounding control; the immediate is argument 3.
  case X86::BI__builtin_ia32_cvtdq2ps512_mask:
  case X86::BI__builtin_ia32_cvtudq2ps512_mask:
  case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
  case X86::BI__builtin_ia32_vcvtps2phx512_mask:
  case X86::BI__builtin_ia32_cvtpd2ps512_mask:
  case X86::BI__builtin_ia32_cvtpd2dq512_mask:
  case X86::BI__builtin_ia32_cvtpd2qq512_mask:
  case X86::BI__builtin_ia32_cvtpd2udq512_mask:
  case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
  case X86::BI__builtin_ia32_cvtps2dq512_mask:
  case X86::BI__builtin_ia32_cvtps2qq512_mask:
  case X86::BI__builtin_ia32_cvtps2udq512_mask:
  case X86::BI__builtin_ia32_cvtps2uqq512_mask:
  case X86::BI__builtin_ia32_cvtqq2pd512_mask:
  case X86::BI__builtin_ia32_cvtqq2ps512_mask:
  case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
  case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
  case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtw2ph512_mask:
  case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
  case X86::BI__builtin_ia32_vcvtph2w512_mask:
  case X86::BI__builtin_ia32_vcvtph2uw512_mask:
  case X86::BI__builtin_ia32_vcvtph2dq512_mask:
  case X86::BI__builtin_ia32_vcvtph2udq512_mask:
  case X86::BI__builtin_ia32_vcvtph2qq512_mask:
  case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
  case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
    ArgNum = 3;
    HasRC = true;
    break;
  // Builtins with full rounding control; the immediate is argument 4.
  case X86::BI__builtin_ia32_addsh_round_mask:
  case X86::BI__builtin_ia32_addss_round_mask:
  case X86::BI__builtin_ia32_addsd_round_mask:
  case X86::BI__builtin_ia32_divsh_round_mask:
  case X86::BI__builtin_ia32_divss_round_mask:
  case X86::BI__builtin_ia32_divsd_round_mask:
  case X86::BI__builtin_ia32_mulsh_round_mask:
  case X86::BI__builtin_ia32_mulss_round_mask:
  case X86::BI__builtin_ia32_mulsd_round_mask:
  case X86::BI__builtin_ia32_subsh_round_mask:
  case X86::BI__builtin_ia32_subss_round_mask:
  case X86::BI__builtin_ia32_subsd_round_mask:
  case X86::BI__builtin_ia32_scalefph512_mask:
  case X86::BI__builtin_ia32_scalefpd512_mask:
  case X86::BI__builtin_ia32_scalefps512_mask:
  case X86::BI__builtin_ia32_scalefsd_round_mask:
  case X86::BI__builtin_ia32_scalefss_round_mask:
  case X86::BI__builtin_ia32_scalefsh_round_mask:
  case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
  case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
  case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
  case X86::BI__builtin_ia32_sqrtsd_round_mask:
  case X86::BI__builtin_ia32_sqrtss_round_mask:
  case X86::BI__builtin_ia32_sqrtsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
  case X86::BI__builtin_ia32_vfmaddss3_mask:
  case X86::BI__builtin_ia32_vfmaddss3_maskz:
  case X86::BI__builtin_ia32_vfmaddss3_mask3:
  case X86::BI__builtin_ia32_vfmaddsh3_mask:
  case X86::BI__builtin_ia32_vfmaddsh3_maskz:
  case X86::BI__builtin_ia32_vfmaddsh3_mask3:
  case X86::BI__builtin_ia32_vfmaddpd512_mask:
  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddps512_mask:
  case X86::BI__builtin_ia32_vfmaddps512_maskz:
  case X86::BI__builtin_ia32_vfmaddps512_mask3:
  case X86::BI__builtin_ia32_vfmsubps512_mask3:
  case X86::BI__builtin_ia32_vfmaddph512_mask:
  case X86::BI__builtin_ia32_vfmaddph512_maskz:
  case X86::BI__builtin_ia32_vfmaddph512_mask3:
  case X86::BI__builtin_ia32_vfmsubph512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask:
  case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
  case X86::BI__builtin_ia32_vfmaddcsh_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfmaddcph512_mask:
  case X86::BI__builtin_ia32_vfmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfcmaddcsh_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask:
  case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfmulcsh_mask:
  case X86::BI__builtin_ia32_vfmulcph512_mask:
  case X86::BI__builtin_ia32_vfcmulcsh_mask:
  case X86::BI__builtin_ia32_vfcmulcph512_mask:
    ArgNum = 4;
    HasRC = true;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
  // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
  // combined with ROUND_NO_EXC. If the intrinsic does not have rounding
  // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
  // (Values 8-11 are ROUND_NO_EXC plus a 2-bit rounding-control field.)
  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
      Result == 8/*ROUND_NO_EXC*/ ||
      (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
// Locates the scale immediate for the given builtin, then verifies it is a
// constant equal to 1, 2, 4, or 8. Returns true if an error was diagnosed.
bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                             CallExpr *TheCall) {
  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    return false;
  // Prefetch gather/scatter: the scale is argument 3.
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    ArgNum = 3;
    break;
  // Data gather/scatter: the scale is argument 4.
  case X86::BI__builtin_ia32_gatherd_pd:
  case X86::BI__builtin_ia32_gatherd_pd256:
  case X86::BI__builtin_ia32_gatherq_pd:
  case X86::BI__builtin_ia32_gatherq_pd256:
  case X86::BI__builtin_ia32_gatherd_ps:
  case X86::BI__builtin_ia32_gatherd_ps256:
  case X86::BI__builtin_ia32_gatherq_ps:
  case X86::BI__builtin_ia32_gatherq_ps256:
  case X86::BI__builtin_ia32_gatherd_q:
  case X86::BI__builtin_ia32_gatherd_q256:
  case X86::BI__builtin_ia32_gatherq_q:
  case X86::BI__builtin_ia32_gatherq_q256:
  case X86::BI__builtin_ia32_gatherd_d:
  case X86::BI__builtin_ia32_gatherd_d256:
  case X86::BI__builtin_ia32_gatherq_d:
  case X86::BI__builtin_ia32_gatherq_d256:
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case
      X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Only byte scales of 1, 2, 4 and 8 are encodable.
  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

// AMX tile registers are tmm0..tmm7.
enum { TileRegLow = 0, TileRegHigh = 7 };

/// Verify that each listed argument is a constant in [TileRegLow,
/// TileRegHigh]. Returns true if any argument is out of range.
bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

/// Verify that no tile register number appears twice among the listed
/// arguments. Dependent arguments are skipped; the values are assumed to be
/// already range-checked. Returns true if a duplicate was diagnosed.
bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // Because the max number of tile register is TileRegHigh + 1, so here we use
  // each bit to represent the usage of them in bitset.
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

/// Combined range + duplicate check for tile-register arguments.
bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

/// Dispatch tile-argument validation for the AMX builtins.
bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  // Single tile operand (argument 0): range check only.
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  // Three tile operands: range check and require distinct registers.
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
  case X86::BI__builtin_ia32_tdpfp16ps:
  case X86::BI__builtin_ia32_tcmmimfp16ps:
  case X86::BI__builtin_ia32_tcmmrlfp16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}
static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

/// Top-level semantic check for a call to an x86 builtin: validates
/// __builtin_cpu_supports/__builtin_cpu_is strings, rejects 32-bit-only
/// builtins on non-x86-32 targets, then checks rounding/SAE, gather/scatter
/// scale, and AMX tile immediates before falling through to the generic
/// immediate range table. Returns true if an error was diagnosed.
bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE make sure its valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate make sure its valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // i = argument index of the immediate; [l, u] = allowed inclusive range.
  int i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
    i = 1; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
    i = 2; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v4di:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask:
    i = 1; l = 0; u = 3;
    break;
  case X86::BI_mm_prefetch:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
    i = 1; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_sha1rnds4:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v4di:
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti32x4:
    i = 2; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_vpermil2pd:
  case X86::BI__builtin_ia32_vpermil2pd256:
  case X86::BI__builtin_ia32_vpermil2ps:
  case X86::BI__builtin_ia32_vpermil2ps256:
    i = 3; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_cmpb128_mask:
  case X86::BI__builtin_ia32_cmpw128_mask:
  case X86::BI__builtin_ia32_cmpd128_mask:
  case X86::BI__builtin_ia32_cmpq128_mask:
  case X86::BI__builtin_ia32_cmpb256_mask:
  case X86::BI__builtin_ia32_cmpw256_mask:
  case X86::BI__builtin_ia32_cmpd256_mask:
  case X86::BI__builtin_ia32_cmpq256_mask:
  case X86::BI__builtin_ia32_cmpb512_mask:
  case X86::BI__builtin_ia32_cmpw512_mask:
  case X86::BI__builtin_ia32_cmpd512_mask:
  case X86::BI__builtin_ia32_cmpq512_mask:
  case X86::BI__builtin_ia32_ucmpb128_mask:
  case X86::BI__builtin_ia32_ucmpw128_mask:
  case X86::BI__builtin_ia32_ucmpd128_mask:
  case X86::BI__builtin_ia32_ucmpq128_mask:
  case X86::BI__builtin_ia32_ucmpb256_mask:
  case X86::BI__builtin_ia32_ucmpw256_mask:
  case X86::BI__builtin_ia32_ucmpd256_mask:
  case X86::BI__builtin_ia32_ucmpq256_mask:
  case X86::BI__builtin_ia32_ucmpb512_mask:
  case X86::BI__builtin_ia32_ucmpw512_mask:
  case X86::BI__builtin_ia32_ucmpd512_mask:
  case X86::BI__builtin_ia32_ucmpq512_mask:
  case X86::BI__builtin_ia32_vpcomub:
  case X86::BI__builtin_ia32_vpcomuw:
  case X86::BI__builtin_ia32_vpcomud:
  case X86::BI__builtin_ia32_vpcomuq:
  case X86::BI__builtin_ia32_vpcomb:
  case X86::BI__builtin_ia32_vpcomw:
  case X86::BI__builtin_ia32_vpcomd:
  case X86::BI__builtin_ia32_vpcomq:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
    i = 2; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_vpermilpd256:
  case X86::BI__builtin_ia32_roundps:
  case X86::BI__builtin_ia32_roundpd:
  case X86::BI__builtin_ia32_roundps256:
  case X86::BI__builtin_ia32_roundpd256:
  case X86::BI__builtin_ia32_getmantpd128_mask:
  case X86::BI__builtin_ia32_getmantpd256_mask:
  case X86::BI__builtin_ia32_getmantps128_mask:
  case X86::BI__builtin_ia32_getmantps256_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_getmantph128_mask:
  case X86::BI__builtin_ia32_getmantph256_mask:
  case X86::BI__builtin_ia32_getmantph512_mask:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
    i = 1; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_pblendd128:
  case X86::BI__builtin_ia32_blendps:
  case X86::BI__builtin_ia32_blendpd256:
  case X86::BI__builtin_ia32_shufpd256:
  case X86::BI__builtin_ia32_roundss:
  case X86::BI__builtin_ia32_roundsd:
  case X86::BI__builtin_ia32_rangepd128_mask:
  case X86::BI__builtin_ia32_rangepd256_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps128_mask:
  case X86::BI__builtin_ia32_rangeps256_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_getmantsh_round_mask:
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
    i = 2; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_vec_ext_v32qi:
    i = 1; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_cmpps:
  case X86::BI__builtin_ia32_cmpss:
  case X86::BI__builtin_ia32_cmppd:
  case X86::BI__builtin_ia32_cmpsd:
  case X86::BI__builtin_ia32_cmpps256:
  case X86::BI__builtin_ia32_cmppd256:
  case X86::BI__builtin_ia32_cmpps128_mask:
  case X86::BI__builtin_ia32_cmppd128_mask:
  case X86::BI__builtin_ia32_cmpps256_mask:
  case X86::BI__builtin_ia32_cmppd256_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_vec_set_v32qi:
    i = 2; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_permdf256:
  case X86::BI__builtin_ia32_permdi256:
  case X86::BI__builtin_ia32_permdf512:
  case X86::BI__builtin_ia32_permdi512:
  case X86::BI__builtin_ia32_vpermilps:
  case X86::BI__builtin_ia32_vpermilps256:
  case X86::BI__builtin_ia32_vpermilpd512:
  case X86::BI__builtin_ia32_vpermilps512:
  case X86::BI__builtin_ia32_pshufd:
  case X86::BI__builtin_ia32_pshufd256:
  case X86::BI__builtin_ia32_pshufd512:
  case X86::BI__builtin_ia32_pshufhw:
  case X86::BI__builtin_ia32_pshufhw256:
  case X86::BI__builtin_ia32_pshufhw512:
  case X86::BI__builtin_ia32_pshuflw:
  case X86::BI__builtin_ia32_pshuflw256:
  case X86::BI__builtin_ia32_pshuflw512:
  case X86::BI__builtin_ia32_vcvtps2ph:
  case X86::BI__builtin_ia32_vcvtps2ph_mask:
  case X86::BI__builtin_ia32_vcvtps2ph256:
  case X86::BI__builtin_ia32_vcvtps2ph256_mask:
  case X86::BI__builtin_ia32_vcvtps2ph512_mask:
  case X86::BI__builtin_ia32_rndscaleps_128_mask:
  case X86::BI__builtin_ia32_rndscalepd_128_mask:
  case X86::BI__builtin_ia32_rndscaleps_256_mask:
  case X86::BI__builtin_ia32_rndscalepd_256_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_rndscaleph_mask:
  case X86::BI__builtin_ia32_reducepd128_mask:
  case X86::BI__builtin_ia32_reducepd256_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps128_mask:
  case X86::BI__builtin_ia32_reduceps256_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_reduceph128_mask:
  case X86::BI__builtin_ia32_reduceph256_mask:
  case X86::BI__builtin_ia32_reduceph512_mask:
  case X86::BI__builtin_ia32_prold512:
  case X86::BI__builtin_ia32_prolq512:
  case X86::BI__builtin_ia32_prold128:
  case X86::BI__builtin_ia32_prold256:
  case X86::BI__builtin_ia32_prolq128:
  case X86::BI__builtin_ia32_prolq256:
  case X86::BI__builtin_ia32_prord512:
  case X86::BI__builtin_ia32_prorq512:
  case X86::BI__builtin_ia32_prord128:
  case X86::BI__builtin_ia32_prord256:
  case X86::BI__builtin_ia32_prorq128:
  case X86::BI__builtin_ia32_prorq256:
  case X86::BI__builtin_ia32_fpclasspd128_mask:
  case X86::BI__builtin_ia32_fpclasspd256_mask:
  case X86::BI__builtin_ia32_fpclassps128_mask:
  case X86::BI__builtin_ia32_fpclassps256_mask:
  case X86::BI__builtin_ia32_fpclassps512_mask:
  case X86::BI__builtin_ia32_fpclasspd512_mask:
  case X86::BI__builtin_ia32_fpclassph128_mask:
  case X86::BI__builtin_ia32_fpclassph256_mask:
  case X86::BI__builtin_ia32_fpclassph512_mask:
  case X86::BI__builtin_ia32_fpclasssd_mask:
  case X86::BI__builtin_ia32_fpclassss_mask:
  case X86::BI__builtin_ia32_fpclasssh_mask:
  case X86::BI__builtin_ia32_pslldqi128_byteshift:
  case X86::BI__builtin_ia32_pslldqi256_byteshift:
  case X86::BI__builtin_ia32_pslldqi512_byteshift:
  case X86::BI__builtin_ia32_psrldqi128_byteshift:
  case X86::BI__builtin_ia32_psrldqi256_byteshift:
  case X86::BI__builtin_ia32_psrldqi512_byteshift:
  case X86::BI__builtin_ia32_kshiftliqi:
  case X86::BI__builtin_ia32_kshiftlihi:
  case X86::BI__builtin_ia32_kshiftlisi:
  case X86::BI__builtin_ia32_kshiftlidi:
  case X86::BI__builtin_ia32_kshiftriqi:
  case X86::BI__builtin_ia32_kshiftrihi:
  case X86::BI__builtin_ia32_kshiftrisi:
  case X86::BI__builtin_ia32_kshiftridi:
    i = 1; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_vperm2f128_pd256:
  case X86::BI__builtin_ia32_vperm2f128_ps256:
  case X86::BI__builtin_ia32_vperm2f128_si256:
  case X86::BI__builtin_ia32_permti256:
  case X86::BI__builtin_ia32_pblendw128:
  case X86::BI__builtin_ia32_pblendw256:
  case X86::BI__builtin_ia32_blendps256:
  case X86::BI__builtin_ia32_pblendd256:
  case X86::BI__builtin_ia32_palignr128:
  case X86::BI__builtin_ia32_palignr256:
  case X86::BI__builtin_ia32_palignr512:
  case X86::BI__builtin_ia32_alignq512:
  case X86::BI__builtin_ia32_alignd512:
  case X86::BI__builtin_ia32_alignd128:
  case X86::BI__builtin_ia32_alignd256:
  case X86::BI__builtin_ia32_alignq128:
  case X86::BI__builtin_ia32_alignq256:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_shuf_f32x4:
  case X86::BI__builtin_ia32_shuf_f64x2:
  case X86::BI__builtin_ia32_shuf_i32x4:
  case X86::BI__builtin_ia32_shuf_i64x2:
  case X86::BI__builtin_ia32_shufpd512:
  case X86::BI__builtin_ia32_shufps:
  case X86::BI__builtin_ia32_shufps256:
  case X86::BI__builtin_ia32_shufps512:
  case X86::BI__builtin_ia32_dbpsadbw128:
  case X86::BI__builtin_ia32_dbpsadbw256:
  case X86::BI__builtin_ia32_dbpsadbw512:
  case X86::BI__builtin_ia32_vpshldd128:
  case X86::BI__builtin_ia32_vpshldd256:
  case X86::BI__builtin_ia32_vpshldd512:
  case X86::BI__builtin_ia32_vpshldq128:
  case X86::BI__builtin_ia32_vpshldq256:
  case X86::BI__builtin_ia32_vpshldq512:
  case X86::BI__builtin_ia32_vpshldw128:
  case X86::BI__builtin_ia32_vpshldw256:
  case X86::BI__builtin_ia32_vpshldw512:
  case X86::BI__builtin_ia32_vpshrdd128:
  case X86::BI__builtin_ia32_vpshrdd256:
  case X86::BI__builtin_ia32_vpshrdd512:
  case X86::BI__builtin_ia32_vpshrdq128:
  case X86::BI__builtin_ia32_vpshrdq256:
  case X86::BI__builtin_ia32_vpshrdq512:
  case X86::BI__builtin_ia32_vpshrdw128:
  case X86::BI__builtin_ia32_vpshrdw256:
  case X86::BI__builtin_ia32_vpshrdw512:
    i = 2; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_fixupimmpd128_mask:
  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
  case X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
  case X86::BI__builtin_ia32_vsm3rnds2:
    i = 3; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
    i = 4; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_cmpccxadd32:
  case X86::BI__builtin_ia32_cmpccxadd64:
    i = 3; l = 0; u = 15;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't need to necessarily
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               bool IsVariadic, FormatStringInfo *FSI) {
  // A first-arg of 0 in the attribute marks a va_list-style callee (e.g.
  // vprintf); otherwise classify by whether the callee itself is variadic.
  if (Format->getFirstArg() == 0)
    FSI->ArgPassingKind = FAPK_VAList;
  else if (IsVariadic)
    FSI->ArgPassingKind = FAPK_Variadic;
  else
    FSI->ArgPassingKind = FAPK_Fixed;
  // Attribute argument indices are 1-based; convert to our 0-based indices.
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg =
      FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    // The format string can never be the implicit 'this' argument.
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
          dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
            dyn_cast<InitListExpr>(CLE->getInitializer()))
          // Check the union's first member in place of the union itself.
          Expr = ILE->getInit(0);
  }

  // Non-dependent and constant-folds to false => a null value.
  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

/// Emit a runtime-behavior warning when \p ArgExpr evaluates to null in a
/// position that requires a nonnull argument.
static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                            << ArgExpr->getSourceRange());
}

/// If \p Format is an NSString format attribute, populate \p Idx with its
/// 0-based format-string index and return true; otherwise return false.
bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, true, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    // CFString formatting functions take the format string as the third
    // argument (index 2).
    Idx = 2;
    Format = true;
  }
  else
    // Otherwise look for an NSString format attribute on the declaration.
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  if (!Format || NumArgs <= Idx)
    return;
  const Expr *FormatExpr = Args[Idx];
  // Look through a C-style cast wrapping the format argument.
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  // Accept either an ObjC string literal (@"...") or a plain string literal.
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
      dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
      << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
      << FDecl->getDeclName();
  }
}

/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(QualType type) {
  if (auto nullability = type->getNullability())
    return *nullability == NullabilityKind::NonNull;

  return false;
}

/// Diagnose null values passed in argument positions that are required to be
/// nonnull, gathering that requirement from nonnull attributes on \p FDecl,
/// nonnull attributes or nullability on its parameters, or parameter-type
/// nullability from \p Proto.
static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by constant evaluator.
  if (S.isConstantEvaluatedContext())
    return;
  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      // Record the explicitly-listed nonnull argument indices, lazily
      // allocating the bit vector on first use.
      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc());
  }
}

// 16 byte ByVal alignment not due to a vector member is not honoured by XL
// on AIX. Emit a warning here that users are generating binary incompatible
// code to be safe.
// Here we try to get information about the alignment of the struct member
// from the struct passed to the caller function. We only warn when the struct
// is passed byval, hence the series of checks and early returns if we are not
// passing a struct byval.
void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
  // The argument must be an implicit cast of a direct reference to a
  // record-typed parameter; otherwise this is not the byval-struct pattern
  // we warn about.
  const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens());
  if (!ICE)
    return;

  const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
  if (!DR)
    return;

  const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl());
  if (!PD || !PD->getType()->isRecordType())
    return;

  // Warn on any field whose explicit aligned attribute requests exactly
  // 16-byte alignment, which XL on AIX does not honour for byval structs.
  QualType ArgType = Arg->getType();
  for (const FieldDecl *FD :
       ArgType->castAs<RecordType>()->getDecl()->fields()) {
    if (const auto *AA = FD->getAttr<AlignedAttr>()) {
      CharUnits Alignment =
          Context.toCharUnitsFromBits(AA->getAlignment(Context));
      if (Alignment.getQuantity() == 16) {
        Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
        Diag(Loc, diag::note_misaligned_member_used_here) << PD;
      }
    }
  }
}

/// Warn if a pointer or reference argument passed to a function points to an
/// object that is less aligned than the parameter. This can happen when
/// creating a typedef with a lower alignment than the original type and then
/// calling functions defined in terms of the original type.
void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
                             StringRef ParamName, QualType ArgTy,
                             QualType ParamTy) {

  // If a function accepts a pointer or reference type
  if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
    return;

  // If the parameter is a pointer type, get the pointee type for the
  // argument too. If the parameter is a reference type, don't try to get
  // the pointee type for the argument.
  if (ParamTy->isPointerType())
    ArgTy = ArgTy->getPointeeType();

  // Remove reference or pointer
  ParamTy = ParamTy->getPointeeType();

  // Find expected alignment, and the actual alignment of the passed object.
  // getTypeAlignInChars requires complete types
  if (ArgTy.isNull() || ParamTy->isDependentType() ||
      ParamTy->isIncompleteType() || ArgTy->isIncompleteType() ||
      ParamTy->isUndeducedType() || ArgTy->isUndeducedType())
    return;

  CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
  CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);

  // If the argument is less aligned than the parameter, there is a
  // potential alignment issue.
  if (ArgAlign < ParamAlign)
    Diag(Loc, diag::warn_param_mismatched_alignment)
        << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
        << ParamName << (FDecl != nullptr) << FDecl;
}

/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, diagnose_if
/// attributes and AArch64 SME attributes.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Determine how many declared parameters precede the variadic arguments.
    unsigned NumParams = Proto ? Proto->getNumParams()
                         : FDecl && isa<FunctionDecl>(FDecl)
                             ? cast<FunctionDecl>(FDecl)->getNumParams()
                         : FDecl && isa<ObjCMethodDecl>(FDecl)
                             ? cast<ObjCMethodDecl>(FDecl)->param_size()
                             : 0;

    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        // Skip arguments that format-string checking already handled.
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  // Check that passed arguments match the alignment of original arguments.
  // Try to get the missing prototype from the declaration.
  if (!Proto && FDecl) {
    const auto *FT = FDecl->getFunctionType();
    if (isa_and_nonnull<FunctionProtoType>(FT))
      Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
  }
  if (Proto) {
    // For variadic functions, we may have more args than parameters.
    // For some K&R functions, we may have less args than parameters.
    const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
    for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (Arg->containsErrors())
          continue;

        // AIX-specific: warn about 16-byte-aligned members in byval structs
        // passed to externally-visible, non-variadic callees.
        if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg &&
            FDecl->hasLinkage() &&
            FDecl->getFormalLinkage() != Linkage::Internal &&
            CallType == VariadicDoesNotApply)
          checkAIXMemberAlignment((Arg->getExprLoc()), Arg);

        QualType ParamTy = Proto->getParamType(ArgIdx);
        QualType ArgTy = Arg->getType();
        CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
                          ArgTy, ParamTy);
      }
    }

    // If the callee has an AArch64 SME attribute to indicate that it is an
    // __arm_streaming function, then the caller requires SME to be available.
    FunctionProtoType::ExtProtoInfo ExtInfo = Proto->getExtProtoInfo();
    if (ExtInfo.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask) {
      if (auto *CallerFD = dyn_cast<FunctionDecl>(CurContext)) {
        llvm::StringMap<bool> CallerFeatureMap;
        Context.getFunctionFeatureMap(CallerFeatureMap, CallerFD);
        if (!CallerFeatureMap.contains("sme"))
          Diag(Loc, diag::err_sme_call_in_non_sme_target);
      } else if (!Context.getTargetInfo().hasFeature("sme")) {
        Diag(Loc, diag::err_sme_call_in_non_sme_target);
      }
    }

    // Callees that use ZA or ZT0 state require the caller to have that state
    // available, either via __arm_new or via its own function type.
    FunctionType::ArmStateValue CalleeArmZAState =
        FunctionType::getArmZAState(ExtInfo.AArch64SMEAttributes);
    FunctionType::ArmStateValue CalleeArmZT0State =
        FunctionType::getArmZT0State(ExtInfo.AArch64SMEAttributes);
    if (CalleeArmZAState != FunctionType::ARM_None ||
        CalleeArmZT0State != FunctionType::ARM_None) {
      bool CallerHasZAState = false;
      bool CallerHasZT0State = false;
      if (const auto *CallerFD = dyn_cast<FunctionDecl>(CurContext)) {
        auto *Attr = CallerFD->getAttr<ArmNewAttr>();
        if (Attr && Attr->isNewZA())
          CallerHasZAState = true;
        if (Attr && Attr->isNewZT0())
          CallerHasZT0State = true;
        if (const auto *FPT = CallerFD->getType()->getAs<FunctionProtoType>()) {
          CallerHasZAState |=
              FunctionType::getArmZAState(
                  FPT->getExtProtoInfo().AArch64SMEAttributes) !=
              FunctionType::ARM_None;
          CallerHasZT0State |=
              FunctionType::getArmZT0State(
                  FPT->getExtProtoInfo().AArch64SMEAttributes) !=
              FunctionType::ARM_None;
        }
      }

      if (CalleeArmZAState != FunctionType::ARM_None && !CallerHasZAState)
        Diag(Loc, diag::err_sme_za_call_no_za_state);

      if (CalleeArmZT0State != FunctionType::ARM_None && !CallerHasZT0State)
        Diag(Loc, diag::err_sme_zt0_call_no_zt0_state);

      if (CallerHasZAState && CalleeArmZAState == FunctionType::ARM_None &&
          CalleeArmZT0State != FunctionType::ARM_None) {
        Diag(Loc, diag::err_sme_unimplemented_za_save_restore);
        Diag(Loc, diag::note_sme_use_preserves_za);
      }
    }
  }

  // alloc_align: the designated argument must be a power of two no greater
  // than the maximum supported alignment.
  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
    auto *AA = FDecl->getAttr<AllocAlignAttr>();
    const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
    if (!Arg->isValueDependent()) {
      Expr::EvalResult Align;
      if (Arg->EvaluateAsInt(Align, Context)) {
        const llvm::APSInt &I = Align.Val.getInt();
        if (!I.isPowerOf2())
          Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
              << Arg->getSourceRange();

        if (I > Sema::MaximumAlignment)
          Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
              << Arg->getSourceRange() << Sema::MaximumAlignment;
      }
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}

/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
                                ArrayRef<const Expr *> Args,
                                const FunctionProtoType *Proto,
                                SourceLocation Loc) {
  VariadicCallType CallType =
      Proto->isVariadic() ?
VariadicConstructor : VariadicDoesNotApply;

  // Check that the implicit object argument's alignment matches the
  // constructor's object parameter.
  auto *Ctor = cast<CXXConstructorDecl>(FDecl);
  CheckArgAlignment(
      Loc, FDecl, "'this'", Context.getPointerType(ThisType),
      Context.getPointerType(Ctor->getFunctionObjectParameterType()));

  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
}

/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                             const FunctionProtoType *Proto) {
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr** Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall && !FDecl->hasCXXExplicitFunctionObjectParameter()) {
    // If this is a call to a member operator, hide the first
    // argument from checkCall.
    // FIXME: Our choice of AST representation here is less than ideal.
    ImplicitThis = Args[0];
    ++Args;
    --NumArgs;
  } else if (IsMemberFunction && !FDecl->isStatic() &&
             !FDecl->hasCXXExplicitFunctionObjectParameter())
    ImplicitThis =
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  if (ImplicitThis) {
    // ImplicitThis may or may not be a pointer, depending on whether . or -> is
    // used.
    QualType ThisType = ImplicitThis->getType();
    if (!ThisType->isPointerType()) {
      assert(!ThisType->isReferenceType());
      ThisType = Context.getPointerType(ThisType);
    }

    QualType ThisTypeFromDecl = Context.getPointerType(
        cast<CXXMethodDecl>(FDecl)->getFunctionObjectParameterType());

    CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
                      ThisTypeFromDecl);
  }

  checkCall(FDecl, Proto, ImplicitThis, llvm::ArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).
  if (!FnInfo)
    return false;

  // Enforce TCB except for builtin calls, which are always allowed.
  if (FDecl->getBuiltinID() == 0)
    CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);
  CheckInfNaNFunction(TheCall, FDecl);

  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  unsigned CMId = FDecl->getMemoryFunctionKind();

  // Handle memory setting and copying functions.
  switch (CMId) {
  case 0:
    return false;
  case Builtin::BIstrlcpy: // fallthrough
  case Builtin::BIstrlcat:
    CheckStrlcpycatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIstrncat:
    CheckStrncatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIfree:
    CheckFreeArguments(TheCall);
    break;
  default:
    CheckMemaccessArguments(TheCall, CMId, FnInfo);
  }

  return false;
}

/// Check an Objective-C method call for the properties handled by checkCall,
/// plus TCB enforcement.
bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
                               ArrayRef<const Expr *> Args) {
  VariadicCallType CallType =
      Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;

  checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
            /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
            CallType);

  CheckTCBEnforcement(lbrac, Method);

  return false;
}

/// Check a call through a variable or field of block-pointer, function-pointer
/// or function-prototype type.
bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                            const FunctionProtoType *Proto) {
  QualType Ty;
  if (const auto *V = dyn_cast<VarDecl>(NDecl))
    Ty = V->getType().getNonReferenceType();
  else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
    Ty = F->getType().getNonReferenceType();
  else
    return false;

  if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
      !Ty->isFunctionProtoType())
    return false;

  VariadicCallType CallType;
  if (!Proto || !Proto->isVariadic()) {
    CallType = VariadicDoesNotApply;
  } else if (Ty->isBlockPointerType()) {
    CallType = VariadicBlock;
  } else { // Ty->isFunctionPointerType()
    CallType = VariadicFunction;
  }

  checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
            llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Checks function calls when a FunctionDecl or a
NamedDecl is not available,
/// such as function pointers returned from functions.
bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
  VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
                                                  TheCall->getCallee());
  checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
            llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Returns true if \p Ordering is a valid C ABI atomic ordering for the
/// atomic operation \p Op (loads reject release orderings; stores reject
/// acquire orderings).
static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
  if (!llvm::isValidAtomicOrderingCABI(Ordering))
    return false;

  auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("There is no ordering argument for an init");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load:
    // Loads cannot have a release component.
    return OrderingCABI != llvm::AtomicOrderingCABI::release &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:
    // Stores cannot have an acquire (or consume) component.
    return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
           OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  default:
    return true;
  }
}

ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult
TheCallResult, 7809 AtomicExpr::AtomicOp Op) { 7810 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 7811 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 7812 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 7813 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 7814 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 7815 Op); 7816 } 7817 7818 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 7819 SourceLocation RParenLoc, MultiExprArg Args, 7820 AtomicExpr::AtomicOp Op, 7821 AtomicArgumentOrder ArgOrder) { 7822 // All the non-OpenCL operations take one of the following forms. 7823 // The OpenCL operations take the __c11 forms with one extra argument for 7824 // synchronization scope. 7825 enum { 7826 // C __c11_atomic_init(A *, C) 7827 Init, 7828 7829 // C __c11_atomic_load(A *, int) 7830 Load, 7831 7832 // void __atomic_load(A *, CP, int) 7833 LoadCopy, 7834 7835 // void __atomic_store(A *, CP, int) 7836 Copy, 7837 7838 // C __c11_atomic_add(A *, M, int) 7839 Arithmetic, 7840 7841 // C __atomic_exchange_n(A *, CP, int) 7842 Xchg, 7843 7844 // void __atomic_exchange(A *, C *, CP, int) 7845 GNUXchg, 7846 7847 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 7848 C11CmpXchg, 7849 7850 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 7851 GNUCmpXchg 7852 } Form = Init; 7853 7854 const unsigned NumForm = GNUCmpXchg + 1; 7855 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 7856 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 7857 // where: 7858 // C is an appropriate type, 7859 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 7860 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 7861 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 7862 // the int parameters are for orderings. 
7863 7864 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 7865 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 7866 "need to update code for modified forms"); 7867 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 7868 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 7869 AtomicExpr::AO__atomic_load, 7870 "need to update code for modified C11 atomics"); 7871 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 7872 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 7873 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 7874 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 7875 bool IsScoped = Op >= AtomicExpr::AO__scoped_atomic_load && 7876 Op <= AtomicExpr::AO__scoped_atomic_fetch_max; 7877 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 7878 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 7879 IsOpenCL; 7880 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 7881 Op == AtomicExpr::AO__atomic_store_n || 7882 Op == AtomicExpr::AO__atomic_exchange_n || 7883 Op == AtomicExpr::AO__atomic_compare_exchange_n || 7884 Op == AtomicExpr::AO__scoped_atomic_load_n || 7885 Op == AtomicExpr::AO__scoped_atomic_store_n || 7886 Op == AtomicExpr::AO__scoped_atomic_exchange_n || 7887 Op == AtomicExpr::AO__scoped_atomic_compare_exchange_n; 7888 // Bit mask for extra allowed value types other than integers for atomic 7889 // arithmetic operations. Add/sub allow pointer and floating point. Min/max 7890 // allow floating point. 
7891 enum ArithOpExtraValueType { 7892 AOEVT_None = 0, 7893 AOEVT_Pointer = 1, 7894 AOEVT_FP = 2, 7895 }; 7896 unsigned ArithAllows = AOEVT_None; 7897 7898 switch (Op) { 7899 case AtomicExpr::AO__c11_atomic_init: 7900 case AtomicExpr::AO__opencl_atomic_init: 7901 Form = Init; 7902 break; 7903 7904 case AtomicExpr::AO__c11_atomic_load: 7905 case AtomicExpr::AO__opencl_atomic_load: 7906 case AtomicExpr::AO__hip_atomic_load: 7907 case AtomicExpr::AO__atomic_load_n: 7908 case AtomicExpr::AO__scoped_atomic_load_n: 7909 Form = Load; 7910 break; 7911 7912 case AtomicExpr::AO__atomic_load: 7913 case AtomicExpr::AO__scoped_atomic_load: 7914 Form = LoadCopy; 7915 break; 7916 7917 case AtomicExpr::AO__c11_atomic_store: 7918 case AtomicExpr::AO__opencl_atomic_store: 7919 case AtomicExpr::AO__hip_atomic_store: 7920 case AtomicExpr::AO__atomic_store: 7921 case AtomicExpr::AO__atomic_store_n: 7922 case AtomicExpr::AO__scoped_atomic_store: 7923 case AtomicExpr::AO__scoped_atomic_store_n: 7924 Form = Copy; 7925 break; 7926 case AtomicExpr::AO__atomic_fetch_add: 7927 case AtomicExpr::AO__atomic_fetch_sub: 7928 case AtomicExpr::AO__atomic_add_fetch: 7929 case AtomicExpr::AO__atomic_sub_fetch: 7930 case AtomicExpr::AO__scoped_atomic_fetch_add: 7931 case AtomicExpr::AO__scoped_atomic_fetch_sub: 7932 case AtomicExpr::AO__scoped_atomic_add_fetch: 7933 case AtomicExpr::AO__scoped_atomic_sub_fetch: 7934 case AtomicExpr::AO__c11_atomic_fetch_add: 7935 case AtomicExpr::AO__c11_atomic_fetch_sub: 7936 case AtomicExpr::AO__opencl_atomic_fetch_add: 7937 case AtomicExpr::AO__opencl_atomic_fetch_sub: 7938 case AtomicExpr::AO__hip_atomic_fetch_add: 7939 case AtomicExpr::AO__hip_atomic_fetch_sub: 7940 ArithAllows = AOEVT_Pointer | AOEVT_FP; 7941 Form = Arithmetic; 7942 break; 7943 case AtomicExpr::AO__atomic_fetch_max: 7944 case AtomicExpr::AO__atomic_fetch_min: 7945 case AtomicExpr::AO__atomic_max_fetch: 7946 case AtomicExpr::AO__atomic_min_fetch: 7947 case AtomicExpr::AO__scoped_atomic_fetch_max: 
7948 case AtomicExpr::AO__scoped_atomic_fetch_min: 7949 case AtomicExpr::AO__scoped_atomic_max_fetch: 7950 case AtomicExpr::AO__scoped_atomic_min_fetch: 7951 case AtomicExpr::AO__c11_atomic_fetch_max: 7952 case AtomicExpr::AO__c11_atomic_fetch_min: 7953 case AtomicExpr::AO__opencl_atomic_fetch_max: 7954 case AtomicExpr::AO__opencl_atomic_fetch_min: 7955 case AtomicExpr::AO__hip_atomic_fetch_max: 7956 case AtomicExpr::AO__hip_atomic_fetch_min: 7957 ArithAllows = AOEVT_FP; 7958 Form = Arithmetic; 7959 break; 7960 case AtomicExpr::AO__c11_atomic_fetch_and: 7961 case AtomicExpr::AO__c11_atomic_fetch_or: 7962 case AtomicExpr::AO__c11_atomic_fetch_xor: 7963 case AtomicExpr::AO__hip_atomic_fetch_and: 7964 case AtomicExpr::AO__hip_atomic_fetch_or: 7965 case AtomicExpr::AO__hip_atomic_fetch_xor: 7966 case AtomicExpr::AO__c11_atomic_fetch_nand: 7967 case AtomicExpr::AO__opencl_atomic_fetch_and: 7968 case AtomicExpr::AO__opencl_atomic_fetch_or: 7969 case AtomicExpr::AO__opencl_atomic_fetch_xor: 7970 case AtomicExpr::AO__atomic_fetch_and: 7971 case AtomicExpr::AO__atomic_fetch_or: 7972 case AtomicExpr::AO__atomic_fetch_xor: 7973 case AtomicExpr::AO__atomic_fetch_nand: 7974 case AtomicExpr::AO__atomic_and_fetch: 7975 case AtomicExpr::AO__atomic_or_fetch: 7976 case AtomicExpr::AO__atomic_xor_fetch: 7977 case AtomicExpr::AO__atomic_nand_fetch: 7978 case AtomicExpr::AO__scoped_atomic_fetch_and: 7979 case AtomicExpr::AO__scoped_atomic_fetch_or: 7980 case AtomicExpr::AO__scoped_atomic_fetch_xor: 7981 case AtomicExpr::AO__scoped_atomic_fetch_nand: 7982 case AtomicExpr::AO__scoped_atomic_and_fetch: 7983 case AtomicExpr::AO__scoped_atomic_or_fetch: 7984 case AtomicExpr::AO__scoped_atomic_xor_fetch: 7985 case AtomicExpr::AO__scoped_atomic_nand_fetch: 7986 Form = Arithmetic; 7987 break; 7988 7989 case AtomicExpr::AO__c11_atomic_exchange: 7990 case AtomicExpr::AO__hip_atomic_exchange: 7991 case AtomicExpr::AO__opencl_atomic_exchange: 7992 case AtomicExpr::AO__atomic_exchange_n: 7993 case 
AtomicExpr::AO__scoped_atomic_exchange_n: 7994 Form = Xchg; 7995 break; 7996 7997 case AtomicExpr::AO__atomic_exchange: 7998 case AtomicExpr::AO__scoped_atomic_exchange: 7999 Form = GNUXchg; 8000 break; 8001 8002 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 8003 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 8004 case AtomicExpr::AO__hip_atomic_compare_exchange_strong: 8005 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 8006 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 8007 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 8008 Form = C11CmpXchg; 8009 break; 8010 8011 case AtomicExpr::AO__atomic_compare_exchange: 8012 case AtomicExpr::AO__atomic_compare_exchange_n: 8013 case AtomicExpr::AO__scoped_atomic_compare_exchange: 8014 case AtomicExpr::AO__scoped_atomic_compare_exchange_n: 8015 Form = GNUCmpXchg; 8016 break; 8017 } 8018 8019 unsigned AdjustedNumArgs = NumArgs[Form]; 8020 if ((IsOpenCL || IsHIP || IsScoped) && 8021 Op != AtomicExpr::AO__opencl_atomic_init) 8022 ++AdjustedNumArgs; 8023 // Check we have the right number of arguments. 8024 if (Args.size() < AdjustedNumArgs) { 8025 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 8026 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 8027 << /*is non object*/ 0 << ExprRange; 8028 return ExprError(); 8029 } else if (Args.size() > AdjustedNumArgs) { 8030 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 8031 diag::err_typecheck_call_too_many_args) 8032 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 8033 << /*is non object*/ 0 << ExprRange; 8034 return ExprError(); 8035 } 8036 8037 // Inspect the first argument of the atomic operation. 
8038 Expr *Ptr = Args[0]; 8039 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 8040 if (ConvertedPtr.isInvalid()) 8041 return ExprError(); 8042 8043 Ptr = ConvertedPtr.get(); 8044 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 8045 if (!pointerType) { 8046 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 8047 << Ptr->getType() << Ptr->getSourceRange(); 8048 return ExprError(); 8049 } 8050 8051 // For a __c11 builtin, this should be a pointer to an _Atomic type. 8052 QualType AtomTy = pointerType->getPointeeType(); // 'A' 8053 QualType ValType = AtomTy; // 'C' 8054 if (IsC11) { 8055 if (!AtomTy->isAtomicType()) { 8056 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 8057 << Ptr->getType() << Ptr->getSourceRange(); 8058 return ExprError(); 8059 } 8060 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 8061 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 8062 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 8063 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 8064 << Ptr->getSourceRange(); 8065 return ExprError(); 8066 } 8067 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 8068 } else if (Form != Load && Form != LoadCopy) { 8069 if (ValType.isConstQualified()) { 8070 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 8071 << Ptr->getType() << Ptr->getSourceRange(); 8072 return ExprError(); 8073 } 8074 } 8075 8076 // For an arithmetic operation, the implied arithmetic must be well-formed. 8077 if (Form == Arithmetic) { 8078 // GCC does not enforce these rules for GNU atomics, but we do to help catch 8079 // trivial type errors. 
8080 auto IsAllowedValueType = [&](QualType ValType, 8081 unsigned AllowedType) -> bool { 8082 if (ValType->isIntegerType()) 8083 return true; 8084 if (ValType->isPointerType()) 8085 return AllowedType & AOEVT_Pointer; 8086 if (!(ValType->isFloatingType() && (AllowedType & AOEVT_FP))) 8087 return false; 8088 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 8089 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 8090 &Context.getTargetInfo().getLongDoubleFormat() == 8091 &llvm::APFloat::x87DoubleExtended()) 8092 return false; 8093 return true; 8094 }; 8095 if (!IsAllowedValueType(ValType, ArithAllows)) { 8096 auto DID = ArithAllows & AOEVT_FP 8097 ? (ArithAllows & AOEVT_Pointer 8098 ? diag::err_atomic_op_needs_atomic_int_ptr_or_fp 8099 : diag::err_atomic_op_needs_atomic_int_or_fp) 8100 : diag::err_atomic_op_needs_atomic_int; 8101 Diag(ExprRange.getBegin(), DID) 8102 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 8103 return ExprError(); 8104 } 8105 if (IsC11 && ValType->isPointerType() && 8106 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 8107 diag::err_incomplete_type)) { 8108 return ExprError(); 8109 } 8110 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 8111 // For __atomic_*_n operations, the value type must be a scalar integral or 8112 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 8113 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 8114 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 8115 return ExprError(); 8116 } 8117 8118 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 8119 !AtomTy->isScalarType()) { 8120 // For GNU atomics, require a trivially-copyable type. This is not part of 8121 // the GNU atomics specification but we enforce it for consistency with 8122 // other atomics which generally all require a trivially-copyable type. This 8123 // is because atomics just copy bits. 
8124 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 8125 << Ptr->getType() << Ptr->getSourceRange(); 8126 return ExprError(); 8127 } 8128 8129 switch (ValType.getObjCLifetime()) { 8130 case Qualifiers::OCL_None: 8131 case Qualifiers::OCL_ExplicitNone: 8132 // okay 8133 break; 8134 8135 case Qualifiers::OCL_Weak: 8136 case Qualifiers::OCL_Strong: 8137 case Qualifiers::OCL_Autoreleasing: 8138 // FIXME: Can this happen? By this point, ValType should be known 8139 // to be trivially copyable. 8140 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 8141 << ValType << Ptr->getSourceRange(); 8142 return ExprError(); 8143 } 8144 8145 // All atomic operations have an overload which takes a pointer to a volatile 8146 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 8147 // into the result or the other operands. Similarly atomic_load takes a 8148 // pointer to a const 'A'. 8149 ValType.removeLocalVolatile(); 8150 ValType.removeLocalConst(); 8151 QualType ResultType = ValType; 8152 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 8153 Form == Init) 8154 ResultType = Context.VoidTy; 8155 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 8156 ResultType = Context.BoolTy; 8157 8158 // The type of a parameter passed 'by value'. In the GNU atomics, such 8159 // arguments are actually passed as pointers. 
8160 QualType ByValType = ValType; // 'CP' 8161 bool IsPassedByAddress = false; 8162 if (!IsC11 && !IsHIP && !IsN) { 8163 ByValType = Ptr->getType(); 8164 IsPassedByAddress = true; 8165 } 8166 8167 SmallVector<Expr *, 5> APIOrderedArgs; 8168 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 8169 APIOrderedArgs.push_back(Args[0]); 8170 switch (Form) { 8171 case Init: 8172 case Load: 8173 APIOrderedArgs.push_back(Args[1]); // Val1/Order 8174 break; 8175 case LoadCopy: 8176 case Copy: 8177 case Arithmetic: 8178 case Xchg: 8179 APIOrderedArgs.push_back(Args[2]); // Val1 8180 APIOrderedArgs.push_back(Args[1]); // Order 8181 break; 8182 case GNUXchg: 8183 APIOrderedArgs.push_back(Args[2]); // Val1 8184 APIOrderedArgs.push_back(Args[3]); // Val2 8185 APIOrderedArgs.push_back(Args[1]); // Order 8186 break; 8187 case C11CmpXchg: 8188 APIOrderedArgs.push_back(Args[2]); // Val1 8189 APIOrderedArgs.push_back(Args[4]); // Val2 8190 APIOrderedArgs.push_back(Args[1]); // Order 8191 APIOrderedArgs.push_back(Args[3]); // OrderFail 8192 break; 8193 case GNUCmpXchg: 8194 APIOrderedArgs.push_back(Args[2]); // Val1 8195 APIOrderedArgs.push_back(Args[4]); // Val2 8196 APIOrderedArgs.push_back(Args[5]); // Weak 8197 APIOrderedArgs.push_back(Args[1]); // Order 8198 APIOrderedArgs.push_back(Args[3]); // OrderFail 8199 break; 8200 } 8201 } else 8202 APIOrderedArgs.append(Args.begin(), Args.end()); 8203 8204 // The first argument's non-CV pointer type is used to deduce the type of 8205 // subsequent arguments, except for: 8206 // - weak flag (always converted to bool) 8207 // - memory order (always converted to int) 8208 // - scope (always converted to int) 8209 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 8210 QualType Ty; 8211 if (i < NumVals[Form] + 1) { 8212 switch (i) { 8213 case 0: 8214 // The first argument is always a pointer. It has a fixed type. 8215 // It is always dereferenced, a nullptr is undefined. 
8216 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 8217 // Nothing else to do: we already know all we want about this pointer. 8218 continue; 8219 case 1: 8220 // The second argument is the non-atomic operand. For arithmetic, this 8221 // is always passed by value, and for a compare_exchange it is always 8222 // passed by address. For the rest, GNU uses by-address and C11 uses 8223 // by-value. 8224 assert(Form != Load); 8225 if (Form == Arithmetic && ValType->isPointerType()) 8226 Ty = Context.getPointerDiffType(); 8227 else if (Form == Init || Form == Arithmetic) 8228 Ty = ValType; 8229 else if (Form == Copy || Form == Xchg) { 8230 if (IsPassedByAddress) { 8231 // The value pointer is always dereferenced, a nullptr is undefined. 8232 CheckNonNullArgument(*this, APIOrderedArgs[i], 8233 ExprRange.getBegin()); 8234 } 8235 Ty = ByValType; 8236 } else { 8237 Expr *ValArg = APIOrderedArgs[i]; 8238 // The value pointer is always dereferenced, a nullptr is undefined. 8239 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 8240 LangAS AS = LangAS::Default; 8241 // Keep address space of non-atomic pointer type. 8242 if (const PointerType *PtrTy = 8243 ValArg->getType()->getAs<PointerType>()) { 8244 AS = PtrTy->getPointeeType().getAddressSpace(); 8245 } 8246 Ty = Context.getPointerType( 8247 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 8248 } 8249 break; 8250 case 2: 8251 // The third argument to compare_exchange / GNU exchange is the desired 8252 // value, either by-value (for the C11 and *_n variant) or as a pointer. 8253 if (IsPassedByAddress) 8254 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 8255 Ty = ByValType; 8256 break; 8257 case 3: 8258 // The fourth argument to GNU compare_exchange is a 'weak' flag. 8259 Ty = Context.BoolTy; 8260 break; 8261 } 8262 } else { 8263 // The order(s) and scope are always converted to int. 
8264 Ty = Context.IntTy; 8265 } 8266 8267 InitializedEntity Entity = 8268 InitializedEntity::InitializeParameter(Context, Ty, false); 8269 ExprResult Arg = APIOrderedArgs[i]; 8270 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 8271 if (Arg.isInvalid()) 8272 return true; 8273 APIOrderedArgs[i] = Arg.get(); 8274 } 8275 8276 // Permute the arguments into a 'consistent' order. 8277 SmallVector<Expr*, 5> SubExprs; 8278 SubExprs.push_back(Ptr); 8279 switch (Form) { 8280 case Init: 8281 // Note, AtomicExpr::getVal1() has a special case for this atomic. 8282 SubExprs.push_back(APIOrderedArgs[1]); // Val1 8283 break; 8284 case Load: 8285 SubExprs.push_back(APIOrderedArgs[1]); // Order 8286 break; 8287 case LoadCopy: 8288 case Copy: 8289 case Arithmetic: 8290 case Xchg: 8291 SubExprs.push_back(APIOrderedArgs[2]); // Order 8292 SubExprs.push_back(APIOrderedArgs[1]); // Val1 8293 break; 8294 case GNUXchg: 8295 // Note, AtomicExpr::getVal2() has a special case for this atomic. 8296 SubExprs.push_back(APIOrderedArgs[3]); // Order 8297 SubExprs.push_back(APIOrderedArgs[1]); // Val1 8298 SubExprs.push_back(APIOrderedArgs[2]); // Val2 8299 break; 8300 case C11CmpXchg: 8301 SubExprs.push_back(APIOrderedArgs[3]); // Order 8302 SubExprs.push_back(APIOrderedArgs[1]); // Val1 8303 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 8304 SubExprs.push_back(APIOrderedArgs[2]); // Val2 8305 break; 8306 case GNUCmpXchg: 8307 SubExprs.push_back(APIOrderedArgs[4]); // Order 8308 SubExprs.push_back(APIOrderedArgs[1]); // Val1 8309 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 8310 SubExprs.push_back(APIOrderedArgs[2]); // Val2 8311 SubExprs.push_back(APIOrderedArgs[3]); // Weak 8312 break; 8313 } 8314 8315 // If the memory orders are constants, check they are valid. 
8316 if (SubExprs.size() >= 2 && Form != Init) { 8317 std::optional<llvm::APSInt> Success = 8318 SubExprs[1]->getIntegerConstantExpr(Context); 8319 if (Success && !isValidOrderingForOp(Success->getSExtValue(), Op)) { 8320 Diag(SubExprs[1]->getBeginLoc(), 8321 diag::warn_atomic_op_has_invalid_memory_order) 8322 << /*success=*/(Form == C11CmpXchg || Form == GNUCmpXchg) 8323 << SubExprs[1]->getSourceRange(); 8324 } 8325 if (SubExprs.size() >= 5) { 8326 if (std::optional<llvm::APSInt> Failure = 8327 SubExprs[3]->getIntegerConstantExpr(Context)) { 8328 if (!llvm::is_contained( 8329 {llvm::AtomicOrderingCABI::relaxed, 8330 llvm::AtomicOrderingCABI::consume, 8331 llvm::AtomicOrderingCABI::acquire, 8332 llvm::AtomicOrderingCABI::seq_cst}, 8333 (llvm::AtomicOrderingCABI)Failure->getSExtValue())) { 8334 Diag(SubExprs[3]->getBeginLoc(), 8335 diag::warn_atomic_op_has_invalid_memory_order) 8336 << /*failure=*/2 << SubExprs[3]->getSourceRange(); 8337 } 8338 } 8339 } 8340 } 8341 8342 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 8343 auto *Scope = Args[Args.size() - 1]; 8344 if (std::optional<llvm::APSInt> Result = 8345 Scope->getIntegerConstantExpr(Context)) { 8346 if (!ScopeModel->isValid(Result->getZExtValue())) 8347 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 8348 << Scope->getSourceRange(); 8349 } 8350 SubExprs.push_back(Scope); 8351 } 8352 8353 AtomicExpr *AE = new (Context) 8354 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 8355 8356 if ((Op == AtomicExpr::AO__c11_atomic_load || 8357 Op == AtomicExpr::AO__c11_atomic_store || 8358 Op == AtomicExpr::AO__opencl_atomic_load || 8359 Op == AtomicExpr::AO__hip_atomic_load || 8360 Op == AtomicExpr::AO__opencl_atomic_store || 8361 Op == AtomicExpr::AO__hip_atomic_store) && 8362 Context.AtomicUsesUnsupportedLibcall(AE)) 8363 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 8364 << ((Op == AtomicExpr::AO__c11_atomic_load || 8365 Op == 
AtomicExpr::AO__opencl_atomic_load || 8366 Op == AtomicExpr::AO__hip_atomic_load) 8367 ? 0 8368 : 1); 8369 8370 if (ValType->isBitIntType()) { 8371 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit); 8372 return ExprError(); 8373 } 8374 8375 return AE; 8376 } 8377 8378 /// checkBuiltinArgument - Given a call to a builtin function, perform 8379 /// normal type-checking on the given argument, updating the call in 8380 /// place. This is useful when a builtin function requires custom 8381 /// type-checking for some of its arguments but not necessarily all of 8382 /// them. 8383 /// 8384 /// Returns true on error. 8385 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 8386 FunctionDecl *Fn = E->getDirectCallee(); 8387 assert(Fn && "builtin call without direct callee!"); 8388 8389 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 8390 InitializedEntity Entity = 8391 InitializedEntity::InitializeParameter(S.Context, Param); 8392 8393 ExprResult Arg = E->getArg(ArgIndex); 8394 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 8395 if (Arg.isInvalid()) 8396 return true; 8397 8398 E->setArg(ArgIndex, Arg.get()); 8399 return false; 8400 } 8401 8402 bool Sema::BuiltinWasmRefNullExtern(CallExpr *TheCall) { 8403 if (TheCall->getNumArgs() != 0) 8404 return true; 8405 8406 TheCall->setType(Context.getWebAssemblyExternrefType()); 8407 8408 return false; 8409 } 8410 8411 bool Sema::BuiltinWasmRefNullFunc(CallExpr *TheCall) { 8412 if (TheCall->getNumArgs() != 0) { 8413 Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args) 8414 << 0 /*function call*/ << /*expected*/ 0 << TheCall->getNumArgs() 8415 << /*is non object*/ 0; 8416 return true; 8417 } 8418 8419 // This custom type checking code ensures that the nodes are as expected 8420 // in order to later on generate the necessary builtin. 
QualType Pointee = Context.getFunctionType(Context.VoidTy, {}, {});
  QualType Type = Context.getPointerType(Pointee);
  Pointee = Context.getAddrSpaceQualType(Pointee, LangAS::wasm_funcref);
  Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
                                   Context.getPointerType(Pointee));
  TheCall->setType(Type);

  return false;
}

/// We have a call to a function like __sync_fetch_and_add, which is an
/// overloaded function based on the pointer type of its first argument.
/// The main BuildCallExpr routines have already promoted the types of
/// arguments because all of these calls are prototyped as void(...).
///
/// This function goes through and does final semantic checking for these
/// builtins, as well as generating any warnings.
ExprResult
Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
  Expr *Callee = TheCall->getCallee();
  DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Ensure that we have at least one argument to do type inference from.
  if (TheCall->getNumArgs() < 1) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
        << Callee->getSourceRange();
    return ExprError();
  }

  // Inspect the first argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // FIXME: We don't allow floating point scalars as input.
  Expr *FirstArg = TheCall->getArg(0);
  ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
  if (FirstArgResult.isInvalid())
    return ExprError();
  FirstArg = FirstArgResult.get();
  TheCall->setArg(0, FirstArg);

  const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // The pointee determines the concrete builtin; only integer, pointer, and
  // block-pointer pointees are accepted here.
  QualType ValType = pointerType->getPointeeType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  if (ValType.isConstQualified()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // ARC ownership-qualified pointees cannot be updated with raw atomic ops.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << FirstArg->getSourceRange();
    return ExprError();
  }

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();

  // The majority of builtins return a value, but a few have special return
  // types, so allow them to override appropriately below.
  QualType ResultType = ValType;

  // We need to figure out which concrete builtin this maps onto. For example,
  // __sync_fetch_and_add with a 2 byte object turns into
  // __sync_fetch_and_add_2.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }

  // One row per overloaded __sync builtin; the five columns are the
  // size-specific variants for 1/2/4/8/16-byte operands.
  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),
    BUILTIN_ROW(__sync_fetch_and_nand),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),
    BUILTIN_ROW(__sync_nand_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release),
    BUILTIN_ROW(__sync_swap)
  };
#undef BUILTIN_ROW

  // Determine the index of the size.
  unsigned SizeIndex;
  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Each of these builtins has one pointer argument, followed by some number of
  // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
  // that we ignore. Find out which row of BuiltinIndices to read from as well
  // as the number of fixed args.
  unsigned BuiltinID = FDecl->getBuiltinID();
  unsigned BuiltinIndex, NumFixed = 1;
  bool WarnAboutSemanticsChange = false;
  // Note: the generic builtin and its _1/_2/_4/_8/_16 variants all select
  // the same BuiltinIndices row.
  switch (BuiltinID) {
  default: llvm_unreachable("Unknown overloaded atomic builtin!");
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    BuiltinIndex = 0;
    break;

  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    BuiltinIndex = 1;
    break;

  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    BuiltinIndex = 2;
    break;

  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    BuiltinIndex = 3;
    break;

  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    BuiltinIndex = 4;
    break;

  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    BuiltinIndex = 5;
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    BuiltinIndex = 6;
    break;

  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    BuiltinIndex = 7;
    break;

  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    BuiltinIndex = 8;
    break;

  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    BuiltinIndex = 9;
    break;

  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    BuiltinIndex = 10;
    break;

  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    BuiltinIndex = 11;
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
    BuiltinIndex = 12;
    NumFixed = 2;
    break;

  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
    BuiltinIndex = 13;
    NumFixed = 2;
    ResultType = Context.BoolTy;
    break;

  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    BuiltinIndex = 14;
    break;

  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
    BuiltinIndex = 15;
    NumFixed = 0;
    ResultType = Context.VoidTy;
    break;

  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    BuiltinIndex = 16;
    break;
  }

  // Now that we know how many fixed arguments we expect, first check that we
  // have at least that many.
  if (TheCall->getNumArgs() < 1+NumFixed) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 + NumFixed << TheCall->getNumArgs() << /*is non object*/ 0
        << Callee->getSourceRange();
    return ExprError();
  }

  // The __sync_* builtins always imply sequentially-consistent ordering.
  Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
      << Callee->getSourceRange();

  if (WarnAboutSemanticsChange) {
    Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
        << Callee->getSourceRange();
  }

  // Get the decl for the concrete builtin from this, we can tell what the
  // concrete integer type we should convert to is.
  unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
  StringRef NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
  FunctionDecl *NewBuiltinDecl;
  if (NewBuiltinID == BuiltinID)
    NewBuiltinDecl = FDecl;
  else {
    // Perform builtin lookup to avoid redeclaring it.
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly. Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type. Check
    // to see if there is a potentially weird extension going on here. This can
    // happen when you do an atomic operation on something like a char* and
    // pass in 42. The 42 gets converted to char. This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit problematic uses of bit-precise integer types with atomic
  // builtins. The arguments would have already been converted to the first
  // argument's type, so only need to check the first argument.
  const auto *BitIntValType = ValType->getAs<BitIntType>();
  if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
  unsigned BuiltinID = FDecl->getBuiltinID();
  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
          BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
         "Unexpected nontemporal load/store builtin!");
  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
  // Stores take (value, pointer); loads take just (pointer).
  unsigned numArgs = isStore ? 2 : 1;

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, numArgs))
    return ExprError();

  // Inspect the last argument of the nontemporal builtin. This should always
  // be a pointer type, from which we imply the type of the memory access.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(numArgs - 1);
  ExprResult PointerArgResult =
      DefaultFunctionArrayLvalueConversion(PointerArg);

  if (PointerArgResult.isInvalid())
    return ExprError();
  PointerArg = PointerArgResult.get();
  TheCall->setArg(numArgs - 1, PointerArg);

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  // The access type is the pointee type of the pointer argument.
  QualType ValType = pointerType->getPointeeType();

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
      !ValType->isVectorType()) {
    Diag(DRE->getBeginLoc(),
         diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  // Loads produce the loaded value type; nothing left to check.
  if (!isStore) {
    TheCall->setType(ValType);
    return TheCallResult;
  }

  // For stores, convert the value operand to the pointee type, as if it were
  // being passed to a parameter of that type.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return ExprError();

  TheCall->setArg(0, ValArg.get());
  TheCall->setType(Context.VoidTy);
  return TheCallResult;
}

/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct
/// Note: It might also make sense to do the UTF-16 conversion here (would
/// simplify the backend).
8886 bool Sema::CheckObjCString(Expr *Arg) { 8887 Arg = Arg->IgnoreParenCasts(); 8888 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 8889 8890 if (!Literal || !Literal->isOrdinary()) { 8891 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 8892 << Arg->getSourceRange(); 8893 return true; 8894 } 8895 8896 if (Literal->containsNonAsciiOrNull()) { 8897 StringRef String = Literal->getString(); 8898 unsigned NumBytes = String.size(); 8899 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 8900 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 8901 llvm::UTF16 *ToPtr = &ToBuf[0]; 8902 8903 llvm::ConversionResult Result = 8904 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 8905 ToPtr + NumBytes, llvm::strictConversion); 8906 // Check for conversion failure. 8907 if (Result != llvm::conversionOK) 8908 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 8909 << Arg->getSourceRange(); 8910 } 8911 return false; 8912 } 8913 8914 /// CheckObjCString - Checks that the format string argument to the os_log() 8915 /// and os_trace() functions is correct, and converts it to const char *. 
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    // Accept an Objective-C string literal by looking through to its
    // underlying StringLiteral.
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  // Convert the literal to 'const char *' as if it were passed to a
  // parameter of that type.
  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}

/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
  bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
                    TT.getArch() == llvm::Triple::aarch64_32);
  bool IsWindows = TT.isOSWindows();
  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
  if (IsX64 || IsAArch64) {
    // The calling convention of the enclosing function decides which
    // va_start flavor is legal here.
    CallingConv CC = CC_C;
    if (const FunctionDecl *FD = S.getCurFunctionDecl())
      CC = FD->getType()->castAs<FunctionType>()->getCallConv();
    if (IsMSVAStart) {
      // Don't allow this in System V ABI functions.
      if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_ms_va_start_used_in_sysv_function);
    } else {
      // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
      // On x64 Windows, don't allow this in System V ABI functions.
      // (Yes, that means there's no corresponding way to support variadic
      // System V ABI functions on Windows.)
      if ((IsWindows && CC == CC_X86_64SysV) ||
          (!IsWindows && CC == CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_va_start_used_in_wrong_abi_function)
               << !IsWindows;
    }
    return false;
  }

  // __builtin_ms_va_start is only supported on x86-64 and AArch64 targets.
  if (IsMSVAStart)
    return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
  return false;
}

/// Check that va_start appears inside a variadic function, block, or
/// Objective-C method; on success optionally report the last declared
/// parameter through \p LastParam. Returns true after diagnosing on failure.
static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
                                             ParmVarDecl **LastParam = nullptr) {
  // Determine whether the current function, block, or obj-c method is variadic
  // and get its parameter list.
  bool IsVariadic = false;
  ArrayRef<ParmVarDecl *> Params;
  DeclContext *Caller = S.CurContext;
  if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
    IsVariadic = Block->isVariadic();
    Params = Block->parameters();
  } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
    IsVariadic = FD->isVariadic();
    Params = FD->parameters();
  } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
    IsVariadic = MD->isVariadic();
    // FIXME: This isn't correct for methods (results in bogus warning).
    Params = MD->parameters();
  } else if (isa<CapturedDecl>(Caller)) {
    // We don't support va_start in a CapturedDecl.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
    return true;
  } else {
    // This must be some other declcontext that parses exprs.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
    return true;
  }

  if (!IsVariadic) {
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
    return true;
  }

  if (LastParam)
    *LastParam = Params.empty() ? nullptr : Params.back();

  return false;
}

/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity. Emit an error and return true on failure; return false
/// on success.
bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
  Expr *Fn = TheCall->getCallee();

  if (checkVAStartABI(*this, BuiltinID, Fn))
    return true;

  // In C23 mode, va_start only needs one argument. However, the builtin still
  // requires two arguments (which matches the behavior of the GCC builtin),
  // <stdarg.h> passes `0` as the second argument in C23 mode.
  if (checkArgCount(*this, TheCall, 2))
    return true;

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, TheCall, 0))
    return true;

  // Check that the current function is variadic, and get its last parameter.
  ParmVarDecl *LastParam;
  if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
    return true;

  // Verify that the second argument to the builtin is the last argument of the
  // current function or method. In C23 mode, if the second argument is an
  // integer constant expression with value 0, then we don't bother with this
  // check.
  bool SecondArgIsLastNamedArgument = false;
  const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
  if (std::optional<llvm::APSInt> Val =
          TheCall->getArg(1)->getIntegerConstantExpr(Context);
      Val && LangOpts.C23 && *Val == 0)
    return false;

  // These are valid if SecondArgIsLastNamedArgument is false after the next
  // block.
  QualType Type;
  SourceLocation ParamLoc;
  bool IsCRegister = false;

  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
    if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
      SecondArgIsLastNamedArgument = PV == LastParam;

      Type = PV->getType();
      ParamLoc = PV->getLocation();
      IsCRegister =
          PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
    }
  }

  if (!SecondArgIsLastNamedArgument)
    Diag(TheCall->getArg(1)->getBeginLoc(),
         diag::warn_second_arg_of_va_start_not_last_named_param);
  else if (IsCRegister || Type->isReferenceType() ||
           Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
             // Promotable integers are UB, but enumerations need a bit of
             // extra checking to see what their promotable type actually is.
             if (!Context.isPromotableIntegerType(Type))
               return false;
             if (!Type->isEnumeralType())
               return true;
             const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
             return !(ED &&
                      Context.typesAreCompatible(ED->getPromotionType(), Type));
           }()) {
    unsigned Reason = 0;
    if (Type->isReferenceType()) Reason = 1;
    else if (IsCRegister) Reason = 2;
    Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
    Diag(ParamLoc, diag::note_parameter_type) << Type;
  }

  return false;
}

/// Check a call to the Microsoft ARM '__va_start' builtin, which receives the
/// va_list address plus address/size information about the last named
/// argument.
bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
  auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
    const LangOptions &LO = getLangOpts();

    if (LO.CPlusPlus)
      return Arg->getType()
                 .getCanonicalType()
                 .getTypePtr()
                 ->getPointeeType()
                 .withoutLocalFastQualifiers() == Context.CharTy;

    // In C, allow aliasing through `char *`, this is required for AArch64 at
    // least.
    return true;
  };

  // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
  //                 const char *named_addr);

  Expr *Func = Call->getCallee();

  if (Call->getNumArgs() < 3)
    return Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_few_args_at_least)
           << 0 /*function call*/ << 3 << Call->getNumArgs()
           << /*is non object*/ 0;

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, Call, 0))
    return true;

  // Check that the current function is variadic.
  if (checkVAStartIsInVariadicFunction(*this, Func))
    return true;

  // __va_start on Windows does not validate the parameter qualifiers

  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();

  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();

  // The second argument must be (convertible to) 'const char *'.
  const QualType &ConstCharPtrTy =
      Context.getPointerType(Context.CharTy.withConst());
  if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
    Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 2 << Arg1->getType() << ConstCharPtrTy;

  // The third argument must be exactly 'size_t'.
  const QualType SizeTy = Context.getSizeType();
  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
    Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg2->getType() << SizeTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 3 << Arg2->getType() << SizeTy;

  return false;
}

/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends. This is declared to take (...), so we have to check everything.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  // Warn when NaNs are disabled by the FP environment: the comparison's NaN
  // handling is then meaningless.
  if (BuiltinID == Builtin::BI__builtin_isunordered &&
      TheCall->getFPFeaturesInEffect(getLangOpts()).getNoHonorNaNs())
    Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
        << 1 << 0 << TheCall->getSourceRange();

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends. This is declared to take (...), so we have
/// to check everything.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
                                       unsigned BuiltinID) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  // Warn when the FP environment disables the very values these builtins
  // classify (infinities / NaNs).
  FPOptions FPO = TheCall->getFPFeaturesInEffect(getLangOpts());
  if (FPO.getNoHonorInfs() && (BuiltinID == Builtin::BI__builtin_isfinite ||
                               BuiltinID == Builtin::BI__builtin_isinf ||
                               BuiltinID == Builtin::BI__builtin_isinf_sign))
    Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
        << 0 << 0 << TheCall->getSourceRange();

  if (FPO.getNoHonorNaNs() && (BuiltinID == Builtin::BI__builtin_isnan ||
                               BuiltinID == Builtin::BI__builtin_isunordered))
    Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
        << 1 << 0 << TheCall->getSourceRange();

  // __builtin_isfpclass is the only two-argument classification builtin
  // handled here; its FP operand comes first, followed by the test mask.
  bool IsFPClass = NumArgs == 2;

  // Find out position of floating-point argument.
  unsigned FPArgNo = IsFPClass ? 0 : NumArgs - 1;

  // We can count on all parameters preceding the floating-point just being int.
  // Try all of those.
  for (unsigned i = 0; i < FPArgNo; ++i) {
    Expr *Arg = TheCall->getArg(i);

    if (Arg->isTypeDependent())
      return false;

    ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);

    if (Res.isInvalid())
      return true;
    TheCall->setArg(i, Res.get());
  }

  Expr *OrigArg = TheCall->getArg(FPArgNo);

  if (OrigArg->isTypeDependent())
    return false;

  // Usual Unary Conversions will convert half to float, which we want for
  // machines that use fp16 conversion intrinsics. Else, we want to leave the
  // type how it is, but do normal L->Rvalue conversions.
  if (Context.getTargetInfo().useFP16ConversionIntrinsics())
    OrigArg = UsualUnaryConversions(OrigArg).get();
  else
    OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
  TheCall->setArg(FPArgNo, OrigArg);

  QualType VectorResultTy;
  QualType ElementTy = OrigArg->getType();
  // TODO: When all classification function are implemented with is_fpclass,
  // vector argument can be supported in all of them.
  if (ElementTy->isVectorType() && IsFPClass) {
    VectorResultTy = GetSignedVectorType(ElementTy);
    ElementTy = ElementTy->getAs<VectorType>()->getElementType();
  }

  // This operation requires a non-_Complex floating-point number.
  if (!ElementTy->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  // __builtin_isfpclass has integer parameter that specify test mask. It is
  // passed in (...), so it should be analyzed completely here.
  if (IsFPClass)
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, llvm::fcAllFlags))
      return true;

  // TODO: enable this code to all classification functions.
  if (IsFPClass) {
    QualType ResultTy;
    if (!VectorResultTy.isNull())
      ResultTy = VectorResultTy;
    else
      ResultTy = Context.IntTy;
    TheCall->setType(ResultTy);
  }

  return false;
}

/// Perform semantic analysis for a call to __builtin_complex.
bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  bool Dependent = false;
  for (unsigned I = 0; I != 2; ++I) {
    Expr *Arg = TheCall->getArg(I);
    QualType T = Arg->getType();
    if (T->isDependentType()) {
      Dependent = true;
      continue;
    }

    // Despite supporting _Complex int, GCC requires a real floating point type
    // for the operands of __builtin_complex.
    if (!T->isRealFloatingType()) {
      return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
             << Arg->getType() << Arg->getSourceRange();
    }

    ExprResult Converted = DefaultLvalueConversion(Arg);
    if (Converted.isInvalid())
      return true;
    TheCall->setArg(I, Converted.get());
  }

  if (Dependent) {
    TheCall->setType(Context.DependentTy);
    return false;
  }

  // Both operands must have the same real floating-point type; the result is
  // the corresponding _Complex type.
  Expr *Real = TheCall->getArg(0);
  Expr *Imag = TheCall->getArg(1);
  if (!Context.hasSameType(Real->getType(), Imag->getType())) {
    return Diag(Real->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << Real->getType() << Imag->getType()
           << Real->getSourceRange() << Imag->getSourceRange();
  }

  // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
  // don't allow this builtin to form those types either.
  // FIXME: Should we allow these types?
  if (Real->getType()->isFloat16Type())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
        << "_Float16";
  if (Real->getType()->isHalfType())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
        << "half";

  TheCall->setType(Context.getComplexType(Real->getType()));
  return false;
}

// Customized Sema Checking for VSX builtins that have the following signature:
// vector [...] builtinName(vector [...], vector [...], const int);
// Which takes the same type of vectors (any legal vector type) for the first
// two arguments and takes compile time constant for the third argument.
9343 // Example builtins are : 9344 // vector double vec_xxpermdi(vector double, vector double, int); 9345 // vector short vec_xxsldwi(vector short, vector short, int); 9346 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 9347 unsigned ExpectedNumArgs = 3; 9348 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 9349 return true; 9350 9351 // Check the third argument is a compile time constant 9352 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 9353 return Diag(TheCall->getBeginLoc(), 9354 diag::err_vsx_builtin_nonconstant_argument) 9355 << 3 /* argument index */ << TheCall->getDirectCallee() 9356 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 9357 TheCall->getArg(2)->getEndLoc()); 9358 9359 QualType Arg1Ty = TheCall->getArg(0)->getType(); 9360 QualType Arg2Ty = TheCall->getArg(1)->getType(); 9361 9362 // Check the type of argument 1 and argument 2 are vectors. 9363 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 9364 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 9365 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 9366 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 9367 << TheCall->getDirectCallee() 9368 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 9369 TheCall->getArg(1)->getEndLoc()); 9370 } 9371 9372 // Check the first two arguments are the same type. 9373 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 9374 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 9375 << TheCall->getDirectCallee() 9376 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 9377 TheCall->getArg(1)->getEndLoc()); 9378 } 9379 9380 // When default clang type checking is turned off and the customized type 9381 // checking is used, the returning type of the function must be explicitly 9382 // set. Otherwise it is _Bool by default. 9383 TheCall->setType(Arg1Ty); 9384 9385 return false; 9386 } 9387 9388 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 
// This is declared to take (...), so we have to check everything.
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
  if (TheCall->getNumArgs() < 2)
    return ExprError(Diag(TheCall->getEndLoc(),
                          diag::err_typecheck_call_too_few_args_at_least)
                     << 0 /*function call*/ << 2 << TheCall->getNumArgs()
                     << /*is non object*/ 0 << TheCall->getSourceRange());

  // Determine which of the following types of shufflevector we're checking:
  // 1) unary, vector mask: (lhs, mask)
  // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
  QualType resType = TheCall->getArg(0)->getType();
  unsigned numElements = 0;

  if (!TheCall->getArg(0)->isTypeDependent() &&
      !TheCall->getArg(1)->isTypeDependent()) {
    QualType LHSType = TheCall->getArg(0)->getType();
    QualType RHSType = TheCall->getArg(1)->getType();

    if (!LHSType->isVectorType() || !RHSType->isVectorType())
      return ExprError(
          Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
          << TheCall->getDirectCallee()
          << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                         TheCall->getArg(1)->getEndLoc()));

    numElements = LHSType->castAs<VectorType>()->getNumElements();
    unsigned numResElements = TheCall->getNumArgs() - 2;

    // Check to see if we have a call with 2 vector arguments, the unary shuffle
    // with mask. If so, verify that RHS is an integer vector type with the
    // same number of elts as lhs.
    if (TheCall->getNumArgs() == 2) {
      if (!RHSType->hasIntegerRepresentation() ||
          RHSType->castAs<VectorType>()->getNumElements() != numElements)
        return ExprError(Diag(TheCall->getBeginLoc(),
                              diag::err_vec_builtin_incompatible_vector)
                         << TheCall->getDirectCallee()
                         << SourceRange(TheCall->getArg(1)->getBeginLoc(),
                                        TheCall->getArg(1)->getEndLoc()));
    } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_vec_builtin_incompatible_vector)
                       << TheCall->getDirectCallee()
                       << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                                      TheCall->getArg(1)->getEndLoc()));
    } else if (numElements != numResElements) {
      // The result has one element per index argument, so its type may differ
      // in length from the input vectors.
      QualType eltType = LHSType->castAs<VectorType>()->getElementType();
      resType =
          Context.getVectorType(eltType, numResElements, VectorKind::Generic);
    }
  }

  // Each index argument must be a constant in [0, 2*numElements), or -1.
  for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
    if (TheCall->getArg(i)->isTypeDependent() ||
        TheCall->getArg(i)->isValueDependent())
      continue;

    std::optional<llvm::APSInt> Result;
    if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_nonconstant_argument)
                       << TheCall->getArg(i)->getSourceRange());

    // Allow -1 which will be translated to undef in the IR.
    if (Result->isSigned() && Result->isAllOnes())
      continue;

    if (Result->getActiveBits() > 64 ||
        Result->getZExtValue() >= numElements * 2)
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_argument_too_large)
                       << TheCall->getArg(i)->getSourceRange());
  }

  // Transfer ownership of all arguments into the new ShuffleVectorExpr.
  SmallVector<Expr*, 32> exprs;

  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
    exprs.push_back(TheCall->getArg(i));
    TheCall->setArg(i, nullptr);
  }

  return new (Context) ShuffleVectorExpr(Context, exprs, resType,
                                         TheCall->getCallee()->getBeginLoc(),
                                         TheCall->getRParenLoc());
}

/// SemaConvertVectorExpr - Handle __builtin_convertvector
ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                       SourceLocation BuiltinLoc,
                                       SourceLocation RParenLoc) {
  ExprValueKind VK = VK_PRValue;
  ExprObjectKind OK = OK_Ordinary;
  QualType DstTy = TInfo->getType();
  QualType SrcTy = E->getType();

  // Both source and destination must be vector types (or dependent).
  if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
    return ExprError(Diag(BuiltinLoc,
                          diag::err_convertvector_non_vector)
                     << E->getSourceRange());
  if (!DstTy->isVectorType() && !DstTy->isDependentType())
    return ExprError(Diag(BuiltinLoc, diag::err_builtin_non_vector_type)
                     << "second"
                     << "__builtin_convertvector");

  // The two vector types must have the same number of elements.
  if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
    unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
    unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements();
    if (SrcElts != DstElts)
      return ExprError(Diag(BuiltinLoc,
                            diag::err_convertvector_incompatible_vector)
                       << E->getSourceRange());
  }

  return new (Context)
      ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
}

/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
// This is declared to take (const void*, ...) and can take two
// optional constant int args.
bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs > 3)
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /*function call*/ << 3 << NumArgs << /*is non object*/ 0
           << TheCall->getSourceRange();

  // Argument 0 is checked for us and the remaining arguments must be
  // constant integers. Argument 1 (rw) must be 0 or 1; argument 2
  // (locality) must be in [0, 3].
  for (unsigned i = 1; i != NumArgs; ++i)
    if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
      return true;

  return false;
}

/// SemaBuiltinArithmeticFence - Handle __arithmetic_fence.
bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) {
  // Not all targets support this builtin.
  if (!Context.getTargetInfo().checkArithmeticFenceSupported())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
  if (checkArgCount(*this, TheCall, 1))
    return true;
  Expr *Arg = TheCall->getArg(0);
  if (Arg->isInstantiationDependent())
    return false;

  // The operand must be a floating-point scalar or vector.
  QualType ArgTy = Arg->getType();
  if (!ArgTy->hasFloatingRepresentation())
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector)
           << ArgTy;
  if (Arg->isLValue()) {
    ExprResult FirstArg = DefaultLvalueConversion(Arg);
    TheCall->setArg(0, FirstArg.get());
  }
  // The fence returns its (possibly converted) operand's type.
  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// SemaBuiltinAssume - Handle __assume (MS Extension).
// __assume does not evaluate its arguments, and should warn if its argument
// has side effects.
9554 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 9555 Expr *Arg = TheCall->getArg(0); 9556 if (Arg->isInstantiationDependent()) return false; 9557 9558 if (Arg->HasSideEffects(Context)) 9559 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 9560 << Arg->getSourceRange() 9561 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 9562 9563 return false; 9564 } 9565 9566 /// Handle __builtin_alloca_with_align. This is declared 9567 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 9568 /// than 8. 9569 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 9570 // The alignment must be a constant integer. 9571 Expr *Arg = TheCall->getArg(1); 9572 9573 // We can't check the value of a dependent argument. 9574 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 9575 if (const auto *UE = 9576 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 9577 if (UE->getKind() == UETT_AlignOf || 9578 UE->getKind() == UETT_PreferredAlignOf) 9579 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 9580 << Arg->getSourceRange(); 9581 9582 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 9583 9584 if (!Result.isPowerOf2()) 9585 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 9586 << Arg->getSourceRange(); 9587 9588 if (Result < Context.getCharWidth()) 9589 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 9590 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 9591 9592 if (Result > std::numeric_limits<int32_t>::max()) 9593 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 9594 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 9595 } 9596 9597 return false; 9598 } 9599 9600 /// Handle __builtin_assume_aligned. This is declared 9601 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
  if (checkArgCountRange(*this, TheCall, 2, 3))
    return true;

  unsigned NumArgs = TheCall->getNumArgs();
  Expr *FirstArg = TheCall->getArg(0);

  {
    ExprResult FirstArgResult =
        DefaultFunctionArrayLvalueConversion(FirstArg);
    if (checkBuiltinArgument(*this, TheCall, 0))
      return true;
    // checkBuiltinArgument also rewrites the argument in place; that update
    // is deliberately overwritten here with FirstArgResult's conversion.
    TheCall->setArg(0, FirstArgResult.get());
  }

  // The alignment must be a constant integer.
  Expr *SecondArg = TheCall->getArg(1);

  // We can't check the value of a dependent argument.
  if (!SecondArg->isValueDependent()) {
    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, 1, Result))
      return true;

    if (!Result.isPowerOf2())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
             << SecondArg->getSourceRange();

    // Alignments beyond the implementation maximum only warn; the call is
    // still accepted.
    if (Result > Sema::MaximumAlignment)
      Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
          << SecondArg->getSourceRange() << Sema::MaximumAlignment;
  }

  // Optional third argument (the misalignment offset) is converted to size_t.
  if (NumArgs > 2) {
    Expr *ThirdArg = TheCall->getArg(2);
    if (convertArgumentToType(*this, ThirdArg, Context.getSizeType()))
      return true;
    TheCall->setArg(2, ThirdArg);
  }

  return false;
}

/// Check a call to __builtin_os_log_format or
/// __builtin_os_log_format_buffer_size: validates the buffer argument (format
/// call only), the format string, and the size of each variadic argument,
/// then sets the call's result type.
bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
  unsigned BuiltinID =
      cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
  bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;

  // The size variant takes (format, ...); the format variant takes
  // (buffer, format, ...).
  unsigned NumArgs = TheCall->getNumArgs();
  unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
  if (NumArgs < NumRequiredArgs) {
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /* function call */ << NumRequiredArgs << NumArgs
           << /*is non object*/ 0 << TheCall->getSourceRange();
  }
  // At most 255 (0xff) data arguments are allowed.
  if (NumArgs >= NumRequiredArgs + 0x100) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
           << /*is non object*/ 0 << TheCall->getSourceRange();
  }
  unsigned i = 0;

  // For formatting call, check buffer arg.
  if (!IsSizeCall) {
    ExprResult Arg(TheCall->getArg(i));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, Context.VoidPtrTy, false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check string literal arg.
  unsigned FormatIdx = i;
  {
    ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Make sure variadic args are scalar.
  unsigned FirstDataArg = i;
  while (i < NumArgs) {
    ExprResult Arg = DefaultVariadicArgumentPromotion(
        TheCall->getArg(i), VariadicFunction, nullptr);
    if (Arg.isInvalid())
      return true;
    // Each encoded argument's size must fit in one byte (< 0x100 chars).
    CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
    if (ArgSize.getQuantity() >= 0x100) {
      return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
             << i << (int)ArgSize.getQuantity() << 0xff
             << TheCall->getSourceRange();
    }
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check formatting specifiers. NOTE: We're only doing this for the non-size
  // call to avoid duplicate diagnostics.
  if (!IsSizeCall) {
    llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
    ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
    bool Success = CheckFormatArguments(
        Args, FAPK_Variadic, FormatIdx, FirstDataArg, FST_OSLog,
        VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
        CheckedVarArgs);
    if (!Success)
      return true;
  }

  // The size variant returns size_t; the format variant returns the buffer.
  if (IsSizeCall) {
    TheCall->setType(Context.getSizeType());
  } else {
    TheCall->setType(Context.VoidPtrTy);
  }
  return false;
}

/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression. On success the evaluated value is
/// returned in \p Result.
bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                                  llvm::APSInt &Result) {
  Expr *Arg = TheCall->getArg(ArgNum);
  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // We can't check the value of a dependent argument.
  if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;

  std::optional<llvm::APSInt> R;
  if (!(R = Arg->getIntegerConstantExpr(Context)))
    return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
           << FDecl->getDeclName() << Arg->getSourceRange();
  Result = *R;
  return false;
}

/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression in the range [Low, High]. Out-of-range
/// values are an error when \p RangeIsError, otherwise a deferred warning.
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                       int Low, int High, bool RangeIsError) {
  if (isConstantEvaluatedContext())
    return false;
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
    if (RangeIsError)
      return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
             << toString(Result, 10) << Low << High << Arg->getSourceRange();
    else
      // Defer the warning until we know if the code will be emitted so that
      // dead code can ignore this.
      DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                          PDiag(diag::warn_argument_invalid_range)
                              << toString(Result, 10) << Low << High
                              << Arg->getSourceRange());
  }

  return false;
}

/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of
/// CallExpr TheCall is a constant expression that is a multiple of Num.
bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                          unsigned Num) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() % Num != 0)
    return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
           << Num << Arg->getSourceRange();

  return false;
}

/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
/// constant expression representing a power of 2.
bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
  // and only if x is a power of 2.
  if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
         << Arg->getSourceRange();
}

/// Returns true if \p Value is a non-negative byte value (0..0xFF) shifted
/// left by some multiple of 8 bits.
static bool IsShiftedByte(llvm::APSInt Value) {
  if (Value.isNegative())
    return false;

  // Check if it's a shifted byte, by shifting it down
  while (true) {
    // If the value fits in the bottom byte, the check passes.
    if (Value < 0x100)
      return true;

    // Otherwise, if the value has _any_ bits in the bottom byte, the check
    // fails.
    if ((Value & 0xFF) != 0)
      return false;

    // If the bottom 8 bits are all 0, but something above that is nonzero,
    // then shifting the value right by 8 bits won't affect whether it's a
    // shifted byte or not. So do that, and go round again.
    Value >>= 8;
  }
}

/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
/// a constant expression representing an arbitrary byte value shifted left by
/// a multiple of 8 bits.
bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
                                             unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  if (IsShiftedByte(Result))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
         << Arg->getSourceRange();
}

/// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of
/// TheCall is a constant expression representing either a shifted byte value,
/// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
/// Arm MVE intrinsics.
bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
                                                   int ArgNum,
                                                   unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  // Check to see if it's in either of the required forms.
  if (IsShiftedByte(Result) ||
      (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_shifted_byte_or_xxff)
         << Arg->getSourceRange();
}

/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
  // __builtin_arm_irg(pointer, mask): insert random tag.
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    ExprResult SecArg = DefaultLvalueConversion(Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_addg(pointer, offset): add to tag.
  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;

    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be a constant in range [0,15]
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  // __builtin_arm_gmi(pointer, mask): tag mask insert; returns int.
  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();

    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  // __builtin_arm_ldg/stg(pointer): load/store allocation tag.
  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (checkArgCount(*this, TheCall, 1))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_subp(a, b): pointer subtraction; returns long long.
  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    Expr *ArgA = TheCall->getArg(0);
    Expr *ArgB = TheCall->getArg(1);

    ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
    ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&] (Expr *E) -> bool {
      return E->isNullPointerConstant(
          Context, Expr::NPC_ValueDependentIsNotNull); };

    // argument should be either a pointer or null
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure Pointee types are compatible
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      if (!Context.typesAreCompatible(
              Context.getCanonicalType(pointeeA).getUnqualifiedType(),
              Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
        return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
               << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
               << ArgB->getSourceRange();
      }
    }

    // at least one argument should be pointer type
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
             << ArgTypeA << ArgTypeB << ArgA->getSourceRange();

    if (isNull(ArgA)) // adopt type of the other pointer
      ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);

    if (isNull(ArgB))
      ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);

    TheCall->setArg(0, ArgExprA.get());
    TheCall->setArg(1, ArgExprB.get());
    TheCall->setType(Context.LongLongTy);
    return false;
  }
  assert(false && "Unhandled ARM MTE intrinsic");
  return true;
}

/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // 32-bit Arm: fields are of the form "cp<n>:<op1>:c<CRn>[:c<CRm>:<op2>]";
      // strip the "cp"/"p" and "c" prefixes before parsing the numbers.
      ValidString &= Fields[0].starts_with_insensitive("cp") ||
                     Fields[0].starts_with_insensitive("p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            Fields[0].starts_with_insensitive("cp") ? 2 : 1);

      ValidString &= Fields[2].starts_with_insensitive("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].starts_with_insensitive("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    // Upper bounds for each numeric field (inclusive).
    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    for (unsigned i=0; i<Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // This code validates writes to PSTATE registers.

    // Not a write.
    if (TheCall->getNumArgs() != 2)
      return false;

    // The 128-bit system register accesses do not touch PSTATE.
    if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == AArch64::BI__builtin_arm_wsr128)
      return false;

    // These are the named PSTATE accesses using "MSR (immediate)" instructions,
    // along with the upper limit on the immediates allowed.
    auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
      .CaseLower("spsel", 15)
      .CaseLower("daifclr", 15)
      .CaseLower("daifset", 15)
      .CaseLower("pan", 15)
      .CaseLower("uao", 15)
      .CaseLower("dit", 15)
      .CaseLower("ssbs", 15)
      .CaseLower("tco", 15)
      .CaseLower("allint", 1)
      .CaseLower("pm", 1)
      .Default(std::nullopt);

    // If this is not a named PSTATE, just continue without validating, as this
    // will be lowered to an "MSR (register)" instruction directly
    if (!MaxLimit)
      return false;

    // Here we only allow constants in the range for that pstate, as required by
    // the ACLE.
    //
    // While clang also accepts the names of system registers in its ACLE
    // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
    // as the value written via a register is different to the value used as an
    // immediate to have the same effect. e.g., for the instruction `msr tco,
    // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
    // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
    //
    // If a programmer wants to codegen the MSR (register) form of `msr tco,
    // xN`, they can still do so by specifying the register using five
    // colon-separated numbers in a string.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
  }

  return false;
}

/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
/// Emit an error and return true on failure; return false on success.
/// TypeStr is a string containing the type descriptor of the value returned by
/// the builtin and the descriptors of the expected type of the arguments.
bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
                                 const char *TypeStr) {

  assert((TypeStr[0] != '\0') &&
         "Invalid types in PPC MMA builtin declaration");

  unsigned Mask = 0;
  unsigned ArgNum = 0;

  // The first type in TypeStr is the type of the value returned by the
  // builtin. So we first read that type and change the type of TheCall.
  QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
  TheCall->setType(type);

  while (*TypeStr != '\0') {
    Mask = 0;
    QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    // More expected types than actual arguments: stop here; the count
    // mismatch is diagnosed after the loop via checkArgCount.
    if (ArgNum >= TheCall->getNumArgs()) {
      ArgNum++;
      break;
    }

    Expr *Arg = TheCall->getArg(ArgNum);
    QualType PassedType = Arg->getType();
    QualType StrippedRVType = PassedType.getCanonicalType();

    // Strip Restrict/Volatile qualifiers.
    if (StrippedRVType.isRestrictQualified() ||
        StrippedRVType.isVolatileQualified())
      StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();

    // The only case where the argument type and expected type are allowed to
    // mismatch is if the argument type is a non-void pointer (or array) and
    // expected type is a void pointer.
    if (StrippedRVType != ExpectedType)
      if (!(ExpectedType->isVoidPointerType() &&
            (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
        return Diag(Arg->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << PassedType << ExpectedType << 1 << 0 << 0;

    // If the value of the Mask is not 0, we have a constraint in the size of
    // the integer argument so here we ensure the argument is a constant that
    // is in the valid range.
    if (Mask != 0 &&
        SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
      return true;

    ArgNum++;
  }

  // In case we exited early from the previous loop, there are other types to
  // read from TypeStr. So we need to read them all to ensure we have the right
  // number of arguments in TheCall and if it is not the case, to display a
  // better error message.
  while (*TypeStr != '\0') {
    (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    ArgNum++;
  }
  if (checkArgCount(*this, TheCall, ArgNum))
    return true;

  return false;
}

/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
/// This checks that the target supports __builtin_longjmp and
/// that val is a constant 1.
bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());

  Expr *Arg = TheCall->getArg(1);
  llvm::APSInt Result;

  // TODO: This is less than ideal. Overload this to take a value.
  if (SemaBuiltinConstantArg(TheCall, 1, Result))
    return true;

  if (Result != 1)
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
           << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());

  return false;
}

/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
/// This checks that the target supports __builtin_setjmp.
bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
  return false;
}

namespace {

/// Tracks, across several candidate format strings, the highest-indexed
/// argument not covered by any format specifier, together with the format
/// strings that exhibit that gap, so a single diagnostic can be emitted.
class UncoveredArgHandler {
  enum { Unknown = -1, AllCovered = -2 };

  // Sentinel-encoded state: Unknown (no string seen yet), AllCovered (some
  // string covered every argument), or >= 0 (first uncovered arg index).
  signed FirstUncoveredArg = Unknown;
  SmallVector<const Expr *, 4> DiagnosticExprs;

public:
  UncoveredArgHandler() = default;

  bool hasUncoveredArg() const {
    return (FirstUncoveredArg >= 0);
  }

  unsigned getUncoveredArg() const {
    assert(hasUncoveredArg() && "no uncovered argument");
    return FirstUncoveredArg;
  }

  void setAllCovered() {
    // A string has been found with all arguments covered, so clear out
    // the diagnostics.
    DiagnosticExprs.clear();
    FirstUncoveredArg = AllCovered;
  }

  void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
    assert(NewFirstUncoveredArg >= 0 && "Outside range");

    // Don't update if a previous string covers all arguments.
    if (FirstUncoveredArg == AllCovered)
      return;

    // UncoveredArgHandler tracks the highest uncovered argument index
    // and with it all the strings that match this index.
    if (NewFirstUncoveredArg == FirstUncoveredArg)
      DiagnosticExprs.push_back(StrExpr);
    else if (NewFirstUncoveredArg > FirstUncoveredArg) {
      DiagnosticExprs.clear();
      DiagnosticExprs.push_back(StrExpr);
      FirstUncoveredArg = NewFirstUncoveredArg;
    }
  }

  void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
};

// Result of classifying a format-string argument; ordered so that a "less
// checked" result compares lower (used when merging conditional branches).
enum StringLiteralCheckType {
  SLCT_NotALiteral,
  SLCT_UncheckedLiteral,
  SLCT_CheckedLiteral
};

} // namespace

/// Accumulate \p Addend into \p Offset (signed), widening both operands as
/// needed so the addition/subtraction cannot silently overflow; on overflow
/// the offset is sign-extended to double width and the operation retried.
static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
                       BinaryOperatorKind BinOpKind,
                       bool AddendIsRight) {
  unsigned BitWidth = Offset.getBitWidth();
  unsigned AddendBitWidth = Addend.getBitWidth();
  // There might be negative interim results.
  if (Addend.isUnsigned()) {
    Addend = Addend.zext(++AddendBitWidth);
    Addend.setIsSigned(true);
  }
  // Adjust the bit width of the APSInts.
  if (AddendBitWidth > BitWidth) {
    Offset = Offset.sext(AddendBitWidth);
    BitWidth = AddendBitWidth;
  } else if (BitWidth > AddendBitWidth) {
    Addend = Addend.sext(BitWidth);
  }

  bool Ov = false;
  llvm::APSInt ResOffset = Offset;
  if (BinOpKind == BO_Add)
    ResOffset = Offset.sadd_ov(Addend, Ov);
  else {
    assert(AddendIsRight && BinOpKind == BO_Sub &&
           "operator must be add or sub with addend on the right");
    ResOffset = Offset.ssub_ov(Addend, Ov);
  }

  // We add an offset to a pointer here so we should support an offset as big as
  // possible.
  if (Ov) {
    assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
           "index (intermediate) result too big");
    Offset = Offset.sext(2 * BitWidth);
    sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
    return;
  }

  Offset = ResOffset;
}

namespace {

// This is a wrapper class around StringLiteral to support offsetted string
// literals as format strings. It takes the offset into account when returning
// the string and its length or the source locations to display notes correctly.
class FormatStringLiteral {
  const StringLiteral *FExpr;
  int64_t Offset;

 public:
  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
      : FExpr(fexpr), Offset(Offset) {}

  StringRef getString() const {
    return FExpr->getString().drop_front(Offset);
  }

  unsigned getByteLength() const {
    return FExpr->getByteLength() - getCharByteWidth() * Offset;
  }

  unsigned getLength() const { return FExpr->getLength() - Offset; }
  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }

  StringLiteralKind getKind() const { return FExpr->getKind(); }

  QualType getType() const { return FExpr->getType(); }

  bool isAscii() const { return FExpr->isOrdinary(); }
  bool isWide() const { return FExpr->isWide(); }
  bool isUTF8() const { return FExpr->isUTF8(); }
  bool isUTF16() const { return FExpr->isUTF16(); }
  bool isUTF32() const { return FExpr->isUTF32(); }
  bool isPascal() const { return FExpr->isPascal(); }

  SourceLocation getLocationOfByte(
      unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
      const TargetInfo &Target, unsigned *StartToken = nullptr,
      unsigned *StartTokenByteOffset = nullptr) const {
    return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
                                    StartToken, StartTokenByteOffset);
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return FExpr->getBeginLoc().getLocWithOffset(Offset);
  }

  SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
};

} // namespace

static void CheckFormatString(
    Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
    ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
    unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
    bool inFunctionCall, Sema::VariadicCallType CallType,
    llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
    bool IgnoreStringsWithoutSpecifiers);

static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
                                               const Expr *E);

// Determine if an expression is a string literal or constant string.
// If this function returns false on the arguments to a function expecting a
// format string, we will usually need to emit a warning.
// True string literals are then checked by CheckFormatString.
// Recursively classify the format-string expression E as:
//   SLCT_NotALiteral       - cannot be proven to be a literal (usually warns),
//   SLCT_UncheckedLiteral  - safe but not checked (e.g. null, __func__),
//   SLCT_CheckedLiteral    - an actual literal that CheckFormatString checked.
// `Offset` accumulates a byte offset into the eventual string literal as we
// peel off constructs like `fmt + 2` or `&fmt[2]`.
static StringLiteralCheckType
checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
                      Sema::FormatArgumentPassingKind APK, unsigned format_idx,
                      unsigned firstDataArg, Sema::FormatStringType Type,
                      Sema::VariadicCallType CallType, bool InFunctionCall,
                      llvm::SmallBitVector &CheckedVarArgs,
                      UncoveredArgHandler &UncoveredArg, llvm::APSInt Offset,
                      bool IgnoreStringsWithoutSpecifiers = false) {
  // No checking inside constant evaluation; just report "not a literal".
  if (S.isConstantEvaluatedContext())
    return SLCT_NotALiteral;
// Re-entry point: simple wrappers (casts, opaque values, additive offsets)
// rewrite E (and possibly Offset) and jump back here instead of recursing.
tryAgain:
  assert(Offset.isSigned() && "invalid offset");

  if (E->isTypeDependent() || E->isValueDependent())
    return SLCT_NotALiteral;

  E = E->IgnoreParenCasts();

  if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
    // Technically -Wformat-nonliteral does not warn about this case.
    // The behavior of printf and friends in this case is implementation
    // dependent.  Ideally if the format string cannot be null then
    // it should have a 'nonnull' attribute in the function prototype.
    return SLCT_UncheckedLiteral;

  switch (E->getStmtClass()) {
  case Stmt::InitListExprClass:
    // Handle expressions like {"foobar"}.
    if (const clang::Expr *SLE = maybeConstEvalStringLiteral(S.Context, E)) {
      return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
                                   Type, CallType, /*InFunctionCall*/ false,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
    }
    return SLCT_NotALiteral;
  case Stmt::BinaryConditionalOperatorClass:
  case Stmt::ConditionalOperatorClass: {
    // The expression is a literal if both sub-expressions were, and it was
    // completely checked only if both sub-expressions were checked.
    const AbstractConditionalOperator *C =
        cast<AbstractConditionalOperator>(E);

    // Determine whether it is necessary to check both sub-expressions, for
    // example, because the condition expression is a constant that can be
    // evaluated at compile time.
    bool CheckLeft = true, CheckRight = true;

    bool Cond;
    if (C->getCond()->EvaluateAsBooleanCondition(
            Cond, S.getASTContext(), S.isConstantEvaluatedContext())) {
      // Condition folds to a constant: only the selected arm matters.
      if (Cond)
        CheckRight = false;
      else
        CheckLeft = false;
    }

    // We need to maintain the offsets for the right and the left hand side
    // separately to check if every possible indexed expression is a valid
    // string literal. They might have different offsets for different string
    // literals in the end.
    StringLiteralCheckType Left;
    if (!CheckLeft)
      Left = SLCT_UncheckedLiteral;
    else {
      Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, APK, format_idx,
                                   firstDataArg, Type, CallType, InFunctionCall,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
      if (Left == SLCT_NotALiteral || !CheckRight) {
        return Left;
      }
    }

    StringLiteralCheckType Right = checkFormatStringExpr(
        S, C->getFalseExpr(), Args, APK, format_idx, firstDataArg, Type,
        CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
        IgnoreStringsWithoutSpecifiers);

    // Combine the two arms: report the weaker of the two classifications
    // (enum order: NotALiteral < Unchecked < Checked).
    return (CheckLeft && Left < Right) ? Left : Right;
        isConstant = T.isConstant(S.Context);
      }

      // A const variable with a visible initializer: classify the
      // initializer in its place.
      if (isConstant) {
        if (const Expr *Init = VD->getAnyInitializer()) {
          // Look through initializers like const char c[] = { "foo" }
          if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
            if (InitList->isStringLiteralInit())
              Init = InitList->getInit(0)->IgnoreParenImpCasts();
          }
          return checkFormatStringExpr(
              S, Init, Args, APK, format_idx, firstDataArg, Type, CallType,
              /*InFunctionCall*/ false, CheckedVarArgs, UncoveredArg, Offset);
        }
      }

      // When the format argument is an argument of this function, and this
      // function also has the format attribute, there are several interactions
      // for which there shouldn't be a warning. For instance, when calling
      // v*printf from a function that has the printf format attribute, we
      // should not emit a warning about using `fmt`, even though it's not
      // constant, because the arguments have already been checked for the
      // caller of `logmessage`:
      //
      //  __attribute__((format(printf, 1, 2)))
      //  void logmessage(char const *fmt, ...) {
      //    va_list ap;
      //    va_start(ap, fmt);
      //    vprintf(fmt, ap);  /* do not emit a warning about "fmt" */
      //    ...
      //  }
      //
      // Another interaction that we need to support is calling a variadic
      // format function from a format function that has fixed arguments. For
      // instance:
      //
      //  __attribute__((format(printf, 1, 2)))
      //  void logstring(char const *fmt, char const *str) {
      //    printf(fmt, str);  /* do not emit a warning about "fmt" */
      //  }
      //
      // Same (and perhaps more relatably) for the variadic template case:
      //
      //  template<typename... Args>
      //  __attribute__((format(printf, 1, 2)))
      //  void log(const char *fmt, Args&&... args) {
      //    printf(fmt, forward<Args>(args)...);
      //    /* do not emit a warning about "fmt" */
      //  }
      //
      // Due to implementation difficulty, we only check the format, not the
      // format arguments, in all cases.
      //
      if (const auto *PV = dyn_cast<ParmVarDecl>(VD)) {
        if (const auto *D = dyn_cast<Decl>(PV->getDeclContext())) {
          for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) {
            bool IsCXXMember = false;
            if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
              IsCXXMember = MD->isInstance();

            // The enclosing "function" may be a function, block, or ObjC
            // method; determine variadicity accordingly.
            bool IsVariadic = false;
            if (const FunctionType *FnTy = D->getFunctionType())
              IsVariadic = cast<FunctionProtoType>(FnTy)->isVariadic();
            else if (const auto *BD = dyn_cast<BlockDecl>(D))
              IsVariadic = BD->isVariadic();
            else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D))
              IsVariadic = OMD->isVariadic();

            Sema::FormatStringInfo CallerFSI;
            if (Sema::getFormatStringInfo(PVFormat, IsCXXMember, IsVariadic,
                                          &CallerFSI)) {
              // We also check if the formats are compatible.
              // We can't pass a 'scanf' string to a 'printf' function.
              if (PV->getFunctionScopeIndex() == CallerFSI.FormatIdx &&
                  Type == S.GetFormatStringType(PVFormat)) {
                // Lastly, check that argument passing kinds transition in a
                // way that makes sense:
                // from a caller with FAPK_VAList, allow FAPK_VAList
                // from a caller with FAPK_Fixed, allow FAPK_Fixed
                // from a caller with FAPK_Fixed, allow FAPK_Variadic
                // from a caller with FAPK_Variadic, allow FAPK_VAList
                switch (combineFAPK(CallerFSI.ArgPassingKind, APK)) {
                case combineFAPK(Sema::FAPK_VAList, Sema::FAPK_VAList):
                case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Fixed):
                case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Variadic):
                case combineFAPK(Sema::FAPK_Variadic, Sema::FAPK_VAList):
                  return SLCT_UncheckedLiteral;
                }
              }
            }
          }
        }
      }
    }

    return SLCT_NotALiteral;
  }

  case Stmt::CallExprClass:
  case Stmt::CXXMemberCallExprClass: {
    const CallExpr *CE = cast<CallExpr>(E);
    if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
      // A callee with format_arg attributes returns (a transformation of)
      // one of its arguments as the format string; classify each such
      // argument. NOTE(review): only the first attribute's result is kept
      // as CommonResult here.
      bool IsFirst = true;
      StringLiteralCheckType CommonResult;
      for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
        const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
        StringLiteralCheckType Result = checkFormatStringExpr(
            S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
            InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
        if (IsFirst) {
          CommonResult = Result;
          IsFirst = false;
        }
      }
      if (!IsFirst)
        return CommonResult;

      // The constant-string builtins simply wrap their first argument.
      if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
        unsigned BuiltinID = FD->getBuiltinID();
        if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
            BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
          const Expr *Arg = CE->getArg(0);
          return
checkFormatStringExpr(
              S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
              InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
              IgnoreStringsWithoutSpecifiers);
        }
      }
    }
    // Fall back to constant evaluation: the call might fold to a literal.
    if (const Expr *SLE = maybeConstEvalStringLiteral(S.Context, E))
      return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
                                   Type, CallType, /*InFunctionCall*/ false,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
    return SLCT_NotALiteral;
  }
  case Stmt::ObjCMessageExprClass: {
    const auto *ME = cast<ObjCMessageExpr>(E);
    if (const auto *MD = ME->getMethodDecl()) {
      if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
        // As a special case heuristic, if we're using the method -[NSBundle
        // localizedStringForKey:value:table:], ignore any key strings that lack
        // format specifiers. The idea is that if the key doesn't have any
        // format specifiers then its probably just a key to map to the
        // localized strings. If it does have format specifiers though, then its
        // likely that the text of the key is the format string in the
        // programmer's language, and should be checked.
        const ObjCInterfaceDecl *IFace;
        if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
            IFace->getIdentifier()->isStr("NSBundle") &&
            MD->getSelector().isKeywordSelector(
                {"localizedStringForKey", "value", "table"})) {
          IgnoreStringsWithoutSpecifiers = true;
        }

        // format_arg on the method: the format string is (derived from) the
        // indicated message argument.
        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
            InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    // The actual literal: run the full format-string check on it.
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      // Reject offsets that point outside the literal.
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        // bounds literals.
        return SLCT_NotALiteral;
      }
      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, APK, format_idx, firstDataArg, Type,
                        InFunctionCall, CallType, CheckedVarArgs, UncoveredArg,
                        IgnoreStringsWithoutSpecifiers);
      return SLCT_CheckedLiteral;
    }

    return SLCT_NotALiteral;
  }
  case Stmt::BinaryOperatorClass: {
    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
    if (BinOp->isAdditiveOp()) {
      Expr::EvalResult LResult, RResult;

      bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
          LResult, S.Context, Expr::SE_NoSideEffects,
          S.isConstantEvaluatedContext());
      bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
          RResult, S.Context, Expr::SE_NoSideEffects,
          S.isConstantEvaluatedContext());

      // Exactly one side is a constant integer: fold it into Offset and
      // keep classifying the other side.
      if (LIsInt != RIsInt) {
        BinaryOperatorKind BinOpKind = BinOp->getOpcode();

        if (LIsInt) {
          // `N + fmt` only makes sense for addition, not `N - fmt`.
          if (BinOpKind == BO_Add) {
            sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
            E = BinOp->getRHS();
            goto tryAgain;
          }
        } else {
          sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
          E = BinOp->getLHS();
          goto tryAgain;
        }
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::UnaryOperatorClass: {
    // Handle `&fmt[i]` with a constant index: equivalent to `fmt + i`.
    const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
    auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
    if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
      Expr::EvalResult IndexResult;
      if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context,
                                       Expr::SE_NoSideEffects,
                                       S.isConstantEvaluatedContext())) {
        sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
                   /*RHS is int*/ true);
        E = ASE->getBase();
        goto tryAgain;
      }
    }

    return SLCT_NotALiteral;
  }

  default:
    return SLCT_NotALiteral;
  }
}

// If this expression can be evaluated at compile-time,
// check if the result is a StringLiteral and return it
// otherwise return nullptr
static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
                                               const Expr *E) {
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, Context) && Result.Val.isLValue()) {
    // The evaluated lvalue must be based on an actual StringLiteral expr.
    const auto *LVE = Result.Val.getLValueBase().dyn_cast<const Expr *>();
    if (isa_and_nonnull<StringLiteral>(LVE))
      return LVE;
  }
  return nullptr;
}

// Map the spelling in a `format` attribute (e.g. "printf", "scanf") to the
// corresponding FormatStringType; FST_Unknown for unrecognized spellings.
Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
  return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
      .Case("scanf", FST_Scanf)
      .Cases("printf", "printf0", FST_Printf)
      .Cases("NSString", "CFString", FST_NSString)
      .Case("strftime", FST_Strftime)
      .Case("strfmon", FST_Strfmon)
      .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
      .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
      .Case("os_trace", FST_OSLog)
      .Case("os_log", FST_OSLog)
      .Default(FST_Unknown);
}

/// CheckFormatArguments - Check calls to printf and scanf (and similar
/// functions) for correct use of format strings.
/// Returns true if a format string has been fully checked.
bool Sema::CheckFormatArguments(const FormatAttr *Format,
                                ArrayRef<const Expr *> Args, bool IsCXXMember,
                                VariadicCallType CallType, SourceLocation Loc,
                                SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // Decode the attribute into index/kind info, then defer to the worker
  // overload below.
  FormatStringInfo FSI;
  if (getFormatStringInfo(Format, IsCXXMember, CallType != VariadicDoesNotApply,
                          &FSI))
    return CheckFormatArguments(Args, FSI.ArgPassingKind, FSI.FormatIdx,
                                FSI.FirstDataArg, GetFormatStringType(Format),
                                CallType, Loc, Range, CheckedVarArgs);
  return false;
}

/// Worker for format checking: Args are the call arguments, format_idx the
/// index of the format string among them, firstDataArg the index of the
/// first data argument. Returns true only if a literal was found and fully
/// checked; otherwise emits the appropriate nonliteral/security diagnostics
/// and returns false.
bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
                                Sema::FormatArgumentPassingKind APK,
                                unsigned format_idx, unsigned firstDataArg,
                                FormatStringType Type,
                                VariadicCallType CallType, SourceLocation Loc,
                                SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // CHECK: printf/scanf-like function is called with no format string.
  if (format_idx >= Args.size()) {
    Diag(Loc, diag::warn_missing_format_string) << Range;
    return false;
  }

  const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();

  // CHECK: format string is not a string literal.
  //
  // Dynamically generated format strings are difficult to
  // automatically vet at compile time.  Requiring that format strings
  // are string literals: (1) permits the checking of format strings by
  // the compiler and thereby (2) can practically remove the source of
  // many format string exploits.

  // Format string can be either ObjC string (e.g. @"%d") or
  // C string (e.g. "%d")
  // ObjC string uses the same format specifiers as C string, so we can use
  // the same format string checking logic for both ObjC and C strings.
  UncoveredArgHandler UncoveredArg;
  StringLiteralCheckType CT = checkFormatStringExpr(
      *this, OrigFormatExpr, Args, APK, format_idx, firstDataArg, Type,
      CallType,
      /*IsFunctionCall*/ true, CheckedVarArgs, UncoveredArg,
      /*no string offset*/ llvm::APSInt(64, false) = 0);

  // Generate a diagnostic where an uncovered argument is detected.
  if (UncoveredArg.hasUncoveredArg()) {
    unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
    assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
    UncoveredArg.Diagnose(*this, /*IsFunctionCall*/ true, Args[ArgIdx]);
  }

  if (CT != SLCT_NotALiteral)
    // Literal format string found, check done!
    return CT == SLCT_CheckedLiteral;

  // Strftime is particular as it always uses a single 'time' argument,
  // so it is safe to pass a non-literal string.
  if (Type == FST_Strftime)
    return false;

  // Do not emit diag when the string param is a macro expansion and the
  // format is either NSString or CFString. This is a hack to prevent
  // diag when using the NSLocalizedString and CFCopyLocalizedString macros
  // which are usually used in place of NS and CF string literals.
  SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
  if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))
    return false;

  // If there are no arguments specified, warn with -Wformat-security,
  // otherwise warn only with -Wformat-nonliteral.
  if (Args.size() == firstDataArg) {
    Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
        << OrigFormatExpr->getSourceRange();
    // Offer a fix-it that turns the nonliteral into a data argument of a
    // trivial "%s"-style format where the dialect supports one.
    switch (Type) {
    default:
      break;
    case FST_Kprintf:
    case FST_FreeBSDKPrintf:
    case FST_Printf:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
      break;
    case FST_NSString:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
      break;
    }
  } else {
    Diag(FormatLoc, diag::warn_format_nonliteral)
        << OrigFormatExpr->getSourceRange();
  }
  return false;
}

namespace {

/// Base handler that receives callbacks from the format-string parser and
/// turns them into diagnostics; also tracks which data arguments were
/// consumed by specifiers (CoveredArgs).
class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
protected:
  Sema &S;
  const FormatStringLiteral *FExpr;
  const Expr *OrigFormatExpr;
  const Sema::FormatStringType FSType;
  const unsigned FirstDataArg;
  const unsigned NumDataArgs;
  const char *Beg; // Start of format string.
  const Sema::FormatArgumentPassingKind ArgPassingKind;
  ArrayRef<const Expr *> Args;
  unsigned FormatIdx;
  // Bit i set => data argument i was consumed by some specifier.
  llvm::SmallBitVector CoveredArgs;
  bool usesPositionalArgs = false;
  bool atFirstArg = true;
  bool inFunctionCall;
  Sema::VariadicCallType CallType;
  llvm::SmallBitVector &CheckedVarArgs;
  UncoveredArgHandler &UncoveredArg;

public:
  CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, const char *beg,
                     Sema::FormatArgumentPassingKind APK,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType callType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
        FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
        ArgPassingKind(APK), Args(Args), FormatIdx(formatIdx),
        inFunctionCall(inFunctionCall), CallType(callType),
        CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
    // Start with no data arguments covered.
    CoveredArgs.resize(numDataArgs);
    CoveredArgs.reset();
  }

  void DoneProcessing();

  void HandleIncompleteSpecifier(const char *startSpecifier,
                                 unsigned specifierLen) override;

  void HandleInvalidLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen,
      unsigned DiagID);

  void HandleNonStandardLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const char *startSpecifier, unsigned specifierLen);

  void HandleNonStandardConversionSpecifier(
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen);

  void HandlePosition(const char *startPos, unsigned posLen) override;

  void HandleInvalidPosition(const char *startSpecifier,
                             unsigned specifierLen,
                             analyze_format_string::PositionContext p) override;

  void HandleZeroPosition(const char *startPos, unsigned posLen) override;

  void HandleNullChar(const char *nullCharacter) override;

  // Static so UncoveredArgHandler::Diagnose can emit through the same path.
  template <typename Range>
  static void
  EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
                       const PartialDiagnostic &PDiag, SourceLocation StringLoc,
                       bool IsStringLocation, Range StringRange,
                       ArrayRef<FixItHint> Fixit = std::nullopt);

protected:
  bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
                                        const char *startSpec,
                                        unsigned specifierLen,
                                        const char *csStart, unsigned csLen);

  void HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                         const char *startSpec,
                                         unsigned specifierLen);

  SourceRange getFormatStringRange();
  CharSourceRange getSpecifierRange(const char *startSpecifier,
                                    unsigned specifierLen);
  SourceLocation getLocationOfByte(const char *x);

  const Expr *getDataArg(unsigned i) const;

  bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
                    const analyze_format_string::ConversionSpecifier &CS,
                    const char *startSpecifier, unsigned specifierLen,
                    unsigned argIndex);

  template <typename Range>
  void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
                            bool IsStringLocation, Range StringRange,
                            ArrayRef<FixItHint> Fixit = std::nullopt);
};

} // namespace

SourceRange CheckFormatHandler::getFormatStringRange() {
  return OrigFormatExpr->getSourceRange();
}

// Character range covering a specifier within the format string, mapped back
// to source locations.
CharSourceRange CheckFormatHandler::
getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
  SourceLocation Start = getLocationOfByte(startSpecifier);
  SourceLocation End   = getLocationOfByte(startSpecifier + specifierLen - 1);

  // Advance the end SourceLocation by one due to half-open ranges.
  End = End.getLocWithOffset(1);

  return CharSourceRange::getCharRange(Start, End);
}

// Translate a pointer into the format buffer (relative to Beg) into a
// source location inside the literal.
SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
  return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
                                  S.getLangOpts(), S.Context.getTargetInfo());
}

void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
                                                   unsigned specifierLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
                       getLocationOfByte(startSpecifier),
                       /*IsStringLocation*/ true,
                       getSpecifierRange(startSpecifier, specifierLen));
}

void CheckFormatHandler::HandleInvalidLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    // Known correction: warn, then attach a note with a replacement fix-it.
    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/ true,
                         getSpecifierRange(startSpecifier, specifierLen));

    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
        << FixedLM->toString()
        << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

  } else {
    // No correction known; for a nonsensical modifier, suggest removing it.
    FixItHint Hint;
    if (DiagID == diag::warn_format_nonsensical_length)
      Hint = FixItHint::CreateRemoval(LMRange);

    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/ true,
                         getSpecifierRange(startSpecifier, specifierLen),
                         Hint);
  }
}

void CheckFormatHandler::HandleNonStandardLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                             << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/ true,
                         getSpecifierRange(startSpecifier, specifierLen));

    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
        << FixedLM->toString()
        << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                             << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/ true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

void CheckFormatHandler::HandleNonStandardConversionSpecifier(
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  // See if we know how to fix this conversion specifier.
  std::optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
  if (FixedCS) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                             << CS.toString() << /*conversion specifier*/ 1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ true,
                         getSpecifierRange(startSpecifier, specifierLen));

    CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
    S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
        << FixedCS->toString()
        << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                             << CS.toString() << /*conversion specifier*/ 1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

void CheckFormatHandler::HandlePosition(const char *startPos,
                                        unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
                       getLocationOfByte(startPos),
                       /*IsStringLocation*/ true,
                       getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleInvalidPosition(
    const char *startSpecifier, unsigned specifierLen,
    analyze_format_string::PositionContext p) {
  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_positional_specifier) << (unsigned)p,
      getLocationOfByte(startSpecifier), /*IsStringLocation*/ true,
      getSpecifierRange(startSpecifier, specifierLen));
}

void CheckFormatHandler::HandleZeroPosition(const char *startPos,
                                            unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
                       getLocationOfByte(startPos),
                       /*IsStringLocation*/ true,
                       getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
  if
      (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
    // The presence of a null character is likely an error.
    EmitFormatDiagnostic(
        S.PDiag(diag::warn_printf_format_string_contains_null_char),
        getLocationOfByte(nullCharacter), /*IsStringLocation*/ true,
        getFormatStringRange());
  }
}

// Note that this may return NULL if there was an error parsing or building
// one of the argument expressions.
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
  return Args[FirstDataArg + i];
}

void CheckFormatHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (ArgPassingKind != Sema::FAPK_VAList) {
    // Find any arguments that weren't covered.
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      // Record it; diagnosis is deferred to UncoveredArgHandler::Diagnose.
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else {
      UncoveredArg.setAllCovered();
    }
  }
}

// Emit the deferred -Wformat "data argument not used" diagnostic, listing
// the format string(s) that failed to consume ArgExpr.
void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && !DiagnosticExprs.empty() &&
         "Invalid state");

  if (!ArgExpr)
    return;

  SourceLocation Loc = ArgExpr->getBeginLoc();

  // Stay quiet inside system macros.
  if (S.getSourceManager().isInSystemMacro(Loc))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
      S, IsFunctionCall, DiagnosticExprs[0],
      PDiag, Loc, /*IsStringLocation*/ false,
      DiagnosticExprs[0]->getSourceRange());
}

// Diagnose an invalid conversion specifier; returns whether parsing of the
// rest of the format string should continue.
bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  }
  else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue processing
    // the format string after this point, however, as we will likely just get
    // gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
  std::string CodePointStr;
  if (!llvm::sys::locale::isPrint(*csStart)) {
    llvm::UTF32 CodePoint;
    const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
    const llvm::UTF8 *E =
        reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
    llvm::ConversionResult Result =
        llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);

    if (Result != llvm::conversionOK) {
      // Not valid UTF-8: fall back to showing the raw first byte.
      unsigned char FirstChar = *csStart;
      CodePoint = (llvm::UTF32)FirstChar;
    }

    // Render as \xNN, \uNNNN or \UNNNNNNNN depending on magnitude.
    llvm::raw_string_ostream OS(CodePointStr);
    if (CodePoint < 256)
      OS << "\\x" << llvm::format("%02x", CodePoint);
    else if (CodePoint <= 0xFFFF)
      OS << "\\u" << llvm::format("%04x", CodePoint);
    else
      OS << "\\U" << llvm::format("%08x", CodePoint);
    OS.flush();
    Specifier = CodePointStr;
  }

  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
      /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));

  return
keepGoing; 11307 } 11308 11309 void 11310 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 11311 const char *startSpec, 11312 unsigned specifierLen) { 11313 EmitFormatDiagnostic( 11314 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 11315 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 11316 } 11317 11318 bool 11319 CheckFormatHandler::CheckNumArgs( 11320 const analyze_format_string::FormatSpecifier &FS, 11321 const analyze_format_string::ConversionSpecifier &CS, 11322 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 11323 11324 if (argIndex >= NumDataArgs) { 11325 PartialDiagnostic PDiag = FS.usesPositionalArg() 11326 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 11327 << (argIndex+1) << NumDataArgs) 11328 : S.PDiag(diag::warn_printf_insufficient_data_args); 11329 EmitFormatDiagnostic( 11330 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 11331 getSpecifierRange(startSpecifier, specifierLen)); 11332 11333 // Since more arguments than conversion tokens are given, by extension 11334 // all arguments are covered, so mark this as so. 11335 UncoveredArg.setAllCovered(); 11336 return false; 11337 } 11338 return true; 11339 } 11340 11341 template<typename Range> 11342 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 11343 SourceLocation Loc, 11344 bool IsStringLocation, 11345 Range StringRange, 11346 ArrayRef<FixItHint> FixIt) { 11347 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 11348 Loc, IsStringLocation, StringRange, FixIt); 11349 } 11350 11351 /// If the format string is not within the function call, emit a note 11352 /// so that the function call and string are in diagnostic messages. 11353 /// 11354 /// \param InFunctionCall if true, the format string is within the function 11355 /// call and only one diagnostic message will be produced. 
/// Otherwise, an
/// extra note will be emitted pointing to location of the format string.
///
/// \param ArgumentExpr the expression that is passed as the format string
/// argument in the function call. Used for getting locations when two
/// diagnostics are emitted.
///
/// \param PDiag the callee should already have provided any strings for the
/// diagnostic message. This function only adds locations and fixits
/// to diagnostics.
///
/// \param Loc primary location for diagnostic. If two diagnostics are
/// required, one will be at Loc and a new SourceLocation will be created for
/// the other one.
///
/// \param IsStringLocation if true, Loc points to the format string should be
/// used for the note. Otherwise, Loc points to the argument list and will
/// be used with PDiag.
///
/// \param StringRange some or all of the string to highlight. This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
/// \param FixIt optional fix it hint for the format string.
template <typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(
    Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
    const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
    Range StringRange, ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    // Single diagnostic: attach the highlight range and fix-its directly.
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    D << StringRange;
    D << FixIt;
  } else {
    // Two diagnostics: the warning points at the call site, and a note
    // points into the (separately defined) format string.
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
        << ArgumentExpr->getSourceRange();

    const Sema::SemaDiagnosticBuilder &Note =
        S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
               diag::note_format_string_defined);

    Note << StringRange;
    Note << FixIt;
  }
}

//===--- CHECK: Printf format string checking ------------------------------===//

namespace {

/// Callback handler for the printf-family format-string parser: receives one
/// callback per parsed specifier/flag and emits -Wformat diagnostics.
class CheckPrintfHandler : public CheckFormatHandler {
public:
  // NOTE(review): 'isObjC' is accepted but not forwarded to the base class
  // initializer below -- confirm it is intentionally unused.
  CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, bool isObjC, const char *beg,
                     Sema::FormatArgumentPassingKind APK,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType CallType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, APK, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  /// True when the format string is an NSString literal.
  bool isObjCContext() const { return FSType == Sema::FST_NSString; }

  /// Returns true if '%@' specifiers are allowed in the format string.
  bool allowsObjCArg() const {
    return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
           FSType == Sema::FST_OSTrace;
  }

  bool HandleInvalidPrintfConversionSpecifier(
      const analyze_printf::PrintfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void handleInvalidMaskType(StringRef MaskType) override;

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *startSpecifier, unsigned specifierLen,
                             const TargetInfo &Target) override;
  bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                       const char *StartSpecifier,
                       unsigned SpecifierLen,
                       const Expr *E);

  bool HandleAmount(const analyze_format_string::OptionalAmount &Amt,
                    unsigned k,
                    const char *startSpecifier, unsigned specifierLen);
  void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
                           const analyze_printf::OptionalAmount &Amt,
                           unsigned type,
                           const char *startSpecifier, unsigned specifierLen);
  void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                  const analyze_printf::OptionalFlag &flag,
                  const char *startSpecifier, unsigned specifierLen);
  void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
                         const analyze_printf::OptionalFlag &ignoredFlag,
                         const analyze_printf::OptionalFlag &flag,
                         const char *startSpecifier, unsigned specifierLen);
  bool checkForCStrMembers(const analyze_printf::ArgType &AT,
                           const Expr *E);

  void HandleEmptyObjCModifierFlag(const char *startFlag,
                                   unsigned flagLen) override;

  void HandleInvalidObjCModifierFlag(const char *startFlag,
                                     unsigned flagLen) override;

  void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
                                            const char *flagsEnd,
                                            const char *conversionPosition)
                                            override;
};

} // namespace

/// Forward an unrecognized printf conversion to the shared handler, which
/// decides whether processing of the format string should continue.
bool
CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
    const analyze_printf::PrintfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
      FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
  S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
}

/// Check a '*' field width or precision (\p k selects which, for the
/// diagnostic text): verify the corresponding data argument exists and has
/// the expected integer type. Returns false when checking should stop.
bool CheckPrintfHandler::HandleAmount(
    const analyze_format_string::OptionalAmount &Amt, unsigned k,
    const char *startSpecifier, unsigned specifierLen) {
  if (Amt.hasDataArgument()) {
    if (ArgPassingKind != Sema::FAPK_VAList) {
      unsigned argIndex = Amt.getArgIndex();
      if (argIndex >= NumDataArgs) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
                                 << k,
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/ true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }

      // Type check the data argument.  It should be an 'int'.
      // Although not in conformance with C99, we also allow the argument to be
      // an 'unsigned int' as that is a reasonably safe case.  GCC also
      // doesn't emit a warning for that case.
11511 CoveredArgs.set(argIndex); 11512 const Expr *Arg = getDataArg(argIndex); 11513 if (!Arg) 11514 return false; 11515 11516 QualType T = Arg->getType(); 11517 11518 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 11519 assert(AT.isValid()); 11520 11521 if (!AT.matchesType(S.Context, T)) { 11522 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 11523 << k << AT.getRepresentativeTypeName(S.Context) 11524 << T << Arg->getSourceRange(), 11525 getLocationOfByte(Amt.getStart()), 11526 /*IsStringLocation*/true, 11527 getSpecifierRange(startSpecifier, specifierLen)); 11528 // Don't do any more checking. We will just emit 11529 // spurious errors. 11530 return false; 11531 } 11532 } 11533 } 11534 return true; 11535 } 11536 11537 void CheckPrintfHandler::HandleInvalidAmount( 11538 const analyze_printf::PrintfSpecifier &FS, 11539 const analyze_printf::OptionalAmount &Amt, 11540 unsigned type, 11541 const char *startSpecifier, 11542 unsigned specifierLen) { 11543 const analyze_printf::PrintfConversionSpecifier &CS = 11544 FS.getConversionSpecifier(); 11545 11546 FixItHint fixit = 11547 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 11548 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 11549 Amt.getConstantLength())) 11550 : FixItHint(); 11551 11552 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 11553 << type << CS.toString(), 11554 getLocationOfByte(Amt.getStart()), 11555 /*IsStringLocation*/true, 11556 getSpecifierRange(startSpecifier, specifierLen), 11557 fixit); 11558 } 11559 11560 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 11561 const analyze_printf::OptionalFlag &flag, 11562 const char *startSpecifier, 11563 unsigned specifierLen) { 11564 // Warn about pointless flag with a fixit removal. 
11565 const analyze_printf::PrintfConversionSpecifier &CS = 11566 FS.getConversionSpecifier(); 11567 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 11568 << flag.toString() << CS.toString(), 11569 getLocationOfByte(flag.getPosition()), 11570 /*IsStringLocation*/true, 11571 getSpecifierRange(startSpecifier, specifierLen), 11572 FixItHint::CreateRemoval( 11573 getSpecifierRange(flag.getPosition(), 1))); 11574 } 11575 11576 void CheckPrintfHandler::HandleIgnoredFlag( 11577 const analyze_printf::PrintfSpecifier &FS, 11578 const analyze_printf::OptionalFlag &ignoredFlag, 11579 const analyze_printf::OptionalFlag &flag, 11580 const char *startSpecifier, 11581 unsigned specifierLen) { 11582 // Warn about ignored flag with a fixit removal. 11583 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 11584 << ignoredFlag.toString() << flag.toString(), 11585 getLocationOfByte(ignoredFlag.getPosition()), 11586 /*IsStringLocation*/true, 11587 getSpecifierRange(startSpecifier, specifierLen), 11588 FixItHint::CreateRemoval( 11589 getSpecifierRange(ignoredFlag.getPosition(), 1))); 11590 } 11591 11592 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 11593 unsigned flagLen) { 11594 // Warn about an empty flag. 11595 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 11596 getLocationOfByte(startFlag), 11597 /*IsStringLocation*/true, 11598 getSpecifierRange(startFlag, flagLen)); 11599 } 11600 11601 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 11602 unsigned flagLen) { 11603 // Warn about an invalid flag. 
11604 auto Range = getSpecifierRange(startFlag, flagLen); 11605 StringRef flag(startFlag, flagLen); 11606 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 11607 getLocationOfByte(startFlag), 11608 /*IsStringLocation*/true, 11609 Range, FixItHint::CreateRemoval(Range)); 11610 } 11611 11612 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 11613 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 11614 // Warn about using '[...]' without a '@' conversion. 11615 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 11616 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 11617 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 11618 getLocationOfByte(conversionPosition), 11619 /*IsStringLocation*/true, 11620 Range, FixItHint::CreateRemoval(Range)); 11621 } 11622 11623 // Determines if the specified is a C++ class or struct containing 11624 // a member with the specified name and kind (e.g. a CXXMethodDecl named 11625 // "c_str()"). 11626 template<typename MemberKind> 11627 static llvm::SmallPtrSet<MemberKind*, 1> 11628 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 11629 const RecordType *RT = Ty->getAs<RecordType>(); 11630 llvm::SmallPtrSet<MemberKind*, 1> Results; 11631 11632 if (!RT) 11633 return Results; 11634 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 11635 if (!RD || !RD->getDefinition()) 11636 return Results; 11637 11638 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 11639 Sema::LookupMemberName); 11640 R.suppressDiagnostics(); 11641 11642 // We just need to include all members of the right kind turned up by the 11643 // filter, at this point. 
11644 if (S.LookupQualifiedName(R, RT->getDecl())) 11645 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 11646 NamedDecl *decl = (*I)->getUnderlyingDecl(); 11647 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 11648 Results.insert(FK); 11649 } 11650 return Results; 11651 } 11652 11653 /// Check if we could call '.c_str()' on an object. 11654 /// 11655 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 11656 /// allow the call, or if it would be ambiguous). 11657 bool Sema::hasCStrMethod(const Expr *E) { 11658 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 11659 11660 MethodSet Results = 11661 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 11662 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 11663 MI != ME; ++MI) 11664 if ((*MI)->getMinRequiredArguments() == 0) 11665 return true; 11666 return false; 11667 } 11668 11669 // Check if a (w)string was passed when a (w)char* was needed, and offer a 11670 // better diagnostic if so. AT is assumed to be valid. 11671 // Returns true when a c_str() conversion method is found. 11672 bool CheckPrintfHandler::checkForCStrMembers( 11673 const analyze_printf::ArgType &AT, const Expr *E) { 11674 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 11675 11676 MethodSet Results = 11677 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 11678 11679 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 11680 MI != ME; ++MI) { 11681 const CXXMethodDecl *Method = *MI; 11682 if (Method->getMinRequiredArguments() == 0 && 11683 AT.matchesType(S.Context, Method->getReturnType())) { 11684 // FIXME: Suggest parens if the expression needs them. 
      SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
      S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
          << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
      return true;
    }
  }

  return false;
}

/// Validate a single parsed printf conversion specifier: positional-argument
/// consistency, '*' width/precision arguments, dialect restrictions (ObjC,
/// os_log/os_trace, FreeBSD kernel extensions), flags, length modifier, and
/// finally the type of the matching data argument. Returns false when
/// processing of the rest of the format string should stop.
bool CheckPrintfHandler::HandlePrintfSpecifier(
    const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
    unsigned specifierLen, const TargetInfo &Target) {
  using namespace analyze_format_string;
  using namespace analyze_printf;

  const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();

  if (FS.consumesDataArgument()) {
    // The first argument-consuming specifier fixes whether this format
    // string is positional; any later mismatch is diagnosed.
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // First check if the field width, precision, and conversion specifier
  // have matching data arguments.
  if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!CS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense.  Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // FreeBSD kernel extensions.
  if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
      CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
    // We need at least two arguments.
    if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
      return false;

    // Claim the second argument.
    CoveredArgs.set(argIndex + 1);

    // Type check the first argument (int for %b, pointer for %D)
    const Expr *Ex = getDataArg(argIndex);
    const analyze_printf::ArgType &AT =
        (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
            ArgType(S.Context.IntTy) : ArgType::CPointerTy;
    if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    // Type check the second argument (char * for both %b and %D)
    Ex = getDataArg(argIndex + 1);
    const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
    if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Check for using an Objective-C specific conversion specifier
  // in a non-ObjC literal.
  if (!allowsObjCArg() && CS.isObjCArg()) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %P can only be used with os_log.
  if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %n is not allowed with os_log.
  if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Only scalars are allowed for os_trace.
  if (FSType == Sema::FST_OSTrace &&
      (CS.getKind() == ConversionSpecifier::PArg ||
       CS.getKind() == ConversionSpecifier::sArg ||
       CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // Check for use of public/private annotation outside of os_log().
  if (FSType != Sema::FST_OSLog) {
    if (FS.isPublic().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "public",
                           getLocationOfByte(FS.isPublic().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
    if (FS.isPrivate().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "private",
                           getLocationOfByte(FS.isPrivate().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
  }

  // %n is rejected outright by the C libraries of these platforms.
  const llvm::Triple &Triple = Target.getTriple();
  if (CS.getKind() == ConversionSpecifier::nArg &&
      (Triple.isAndroid() || Triple.isOSFuchsia())) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check for invalid use of field width
  if (!FS.hasValidFieldWidth()) {
    HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
                        startSpecifier, specifierLen);
  }

  // Check for invalid use of precision
  if (!FS.hasValidPrecision()) {
    HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
                        startSpecifier, specifierLen);
  }

  // Precision is mandatory for %P specifier.
  if (CS.getKind() == ConversionSpecifier::PArg &&
      FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
                         getLocationOfByte(startSpecifier),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check each flag does not conflict with any other component.
  if (!FS.hasValidThousandsGroupingPrefix())
    HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
  if (!FS.hasValidLeadingZeros())
    HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
  if (!FS.hasValidPlusPrefix())
    HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidSpacePrefix())
    HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidAlternativeForm())
    HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
  if (!FS.hasValidLeftJustified())
    HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);

  // Check that flags are not ignored by another flag
  if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
    HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
                      startSpecifier, specifierLen);
  if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
    HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
                      startSpecifier, specifierLen);

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (ArgPassingKind == Sema::FAPK_VAList)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  const Expr *Arg = getDataArg(argIndex);
  if (!Arg)
    return true;

  return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
}

static bool requiresParensToAddCast(const Expr *E) {
  // FIXME: We should have a general way to reason about operator
  // precedence and whether parens are actually needed here.
  // Take care of a few common cases where they aren't.
11913 const Expr *Inside = E->IgnoreImpCasts(); 11914 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 11915 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 11916 11917 switch (Inside->getStmtClass()) { 11918 case Stmt::ArraySubscriptExprClass: 11919 case Stmt::CallExprClass: 11920 case Stmt::CharacterLiteralClass: 11921 case Stmt::CXXBoolLiteralExprClass: 11922 case Stmt::DeclRefExprClass: 11923 case Stmt::FloatingLiteralClass: 11924 case Stmt::IntegerLiteralClass: 11925 case Stmt::MemberExprClass: 11926 case Stmt::ObjCArrayLiteralClass: 11927 case Stmt::ObjCBoolLiteralExprClass: 11928 case Stmt::ObjCBoxedExprClass: 11929 case Stmt::ObjCDictionaryLiteralClass: 11930 case Stmt::ObjCEncodeExprClass: 11931 case Stmt::ObjCIvarRefExprClass: 11932 case Stmt::ObjCMessageExprClass: 11933 case Stmt::ObjCPropertyRefExprClass: 11934 case Stmt::ObjCStringLiteralClass: 11935 case Stmt::ObjCSubscriptRefExprClass: 11936 case Stmt::ParenExprClass: 11937 case Stmt::StringLiteralClass: 11938 case Stmt::UnaryOperatorClass: 11939 return false; 11940 default: 11941 return true; 11942 } 11943 } 11944 11945 static std::pair<QualType, StringRef> 11946 shouldNotPrintDirectly(const ASTContext &Context, 11947 QualType IntendedTy, 11948 const Expr *E) { 11949 // Use a 'while' to peel off layers of typedefs. 11950 QualType TyTy = IntendedTy; 11951 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 11952 StringRef Name = UserTy->getDecl()->getName(); 11953 QualType CastTy = llvm::StringSwitch<QualType>(Name) 11954 .Case("CFIndex", Context.getNSIntegerType()) 11955 .Case("NSInteger", Context.getNSIntegerType()) 11956 .Case("NSUInteger", Context.getNSUIntegerType()) 11957 .Case("SInt32", Context.IntTy) 11958 .Case("UInt32", Context.UnsignedIntTy) 11959 .Default(QualType()); 11960 11961 if (!CastTy.isNull()) 11962 return std::make_pair(CastTy, Name); 11963 11964 TyTy = UserTy->desugar(); 11965 } 11966 11967 // Strip parens if necessary. 
11968 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 11969 return shouldNotPrintDirectly(Context, 11970 PE->getSubExpr()->getType(), 11971 PE->getSubExpr()); 11972 11973 // If this is a conditional expression, then its result type is constructed 11974 // via usual arithmetic conversions and thus there might be no necessary 11975 // typedef sugar there. Recurse to operands to check for NSInteger & 11976 // Co. usage condition. 11977 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 11978 QualType TrueTy, FalseTy; 11979 StringRef TrueName, FalseName; 11980 11981 std::tie(TrueTy, TrueName) = 11982 shouldNotPrintDirectly(Context, 11983 CO->getTrueExpr()->getType(), 11984 CO->getTrueExpr()); 11985 std::tie(FalseTy, FalseName) = 11986 shouldNotPrintDirectly(Context, 11987 CO->getFalseExpr()->getType(), 11988 CO->getFalseExpr()); 11989 11990 if (TrueTy == FalseTy) 11991 return std::make_pair(TrueTy, TrueName); 11992 else if (TrueTy.isNull()) 11993 return std::make_pair(FalseTy, FalseName); 11994 else if (FalseTy.isNull()) 11995 return std::make_pair(TrueTy, TrueName); 11996 } 11997 11998 return std::make_pair(QualType(), StringRef()); 11999 } 12000 12001 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 12002 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 12003 /// type do not count. 12004 static bool 12005 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 12006 QualType From = ICE->getSubExpr()->getType(); 12007 QualType To = ICE->getType(); 12008 // It's an integer promotion if the destination type is the promoted 12009 // source type. 12010 if (ICE->getCastKind() == CK_IntegralCast && 12011 S.Context.isPromotableIntegerType(From) && 12012 S.Context.getPromotedIntegerType(From) == To) 12013 return true; 12014 // Look through vector types, since we do default argument promotion for 12015 // those in OpenCL. 
12016 if (const auto *VecTy = From->getAs<ExtVectorType>()) 12017 From = VecTy->getElementType(); 12018 if (const auto *VecTy = To->getAs<ExtVectorType>()) 12019 To = VecTy->getElementType(); 12020 // It's a floating promotion if the source type is a lower rank. 12021 return ICE->getCastKind() == CK_FloatingCast && 12022 S.Context.getFloatingTypeOrder(From, To) < 0; 12023 } 12024 12025 bool 12026 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 12027 const char *StartSpecifier, 12028 unsigned SpecifierLen, 12029 const Expr *E) { 12030 using namespace analyze_format_string; 12031 using namespace analyze_printf; 12032 12033 // Now type check the data expression that matches the 12034 // format specifier. 12035 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 12036 if (!AT.isValid()) 12037 return true; 12038 12039 QualType ExprTy = E->getType(); 12040 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 12041 ExprTy = TET->getUnderlyingExpr()->getType(); 12042 } 12043 12044 // When using the format attribute in C++, you can receive a function or an 12045 // array that will necessarily decay to a pointer when passed to the final 12046 // format consumer. Apply decay before type comparison. 12047 if (ExprTy->canDecayToPointerType()) 12048 ExprTy = S.Context.getDecayedType(ExprTy); 12049 12050 // Diagnose attempts to print a boolean value as a character. Unlike other 12051 // -Wformat diagnostics, this is fine from a type perspective, but it still 12052 // doesn't make sense. 
  if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg &&
      E->isKnownToHaveBooleanValue()) {
    const CharSourceRange &CSR =
        getSpecifierRange(StartSpecifier, SpecifierLen);
    SmallString<4> FSString;
    llvm::raw_svector_ostream os(FSString);
    FS.toString(os);
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character)
                             << FSString,
                         E->getExprLoc(), false, CSR);
    return true;
  }

  ArgType::MatchKind ImplicitMatch = ArgType::NoMatch;
  ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy);
  if (Match == ArgType::Match)
    return true;

  // NoMatchPromotionTypeConfusion should be only returned in ImplicitCastExpr
  assert(Match != ArgType::NoMatchPromotionTypeConfusion);

  // Look through argument promotions for our error message's reported type.
  // This includes the integral and floating promotions, but excludes array
  // and function pointer decay (seeing that an argument intended to be a
  // string has type 'char [6]' is probably more confusing than 'char *') and
  // certain bitfield promotions (bitfields can be 'demoted' to a lesser type).
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (isArithmeticArgumentPromotion(S, ICE)) {
      E = ICE->getSubExpr();
      ExprTy = E->getType();

      // Check if we didn't match because of an implicit cast from a 'char'
      // or 'short' to an 'int'. This is done because printf is a varargs
      // function.
      if (ICE->getType() == S.Context.IntTy ||
          ICE->getType() == S.Context.UnsignedIntTy) {
        // All further checking is done on the subexpression
        ImplicitMatch = AT.matchesType(S.Context, ExprTy);
        if (ImplicitMatch == ArgType::Match)
          return true;
      }
    }
  } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
    // Special case for 'a', which has type 'int' in C.
    // Note, however, that we do /not/ want to treat multibyte constants like
    // 'MooV' as characters! This form is deprecated but still exists. In
    // addition, don't treat expressions as of type 'char' if one byte length
    // modifier is provided.
    if (ExprTy == S.Context.IntTy &&
        FS.getLengthModifier().getKind() != LengthModifier::AsChar)
      if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) {
        ExprTy = S.Context.CharTy;
        // To improve check results, we consider a character literal in C
        // to be a 'char' rather than an 'int'. 'printf("%hd", 'a');' is
        // more likely a type confusion situation, so we will suggest to
        // use '%hhd' instead by discarding the MatchPromotion.
        if (Match == ArgType::MatchPromotion)
          Match = ArgType::NoMatch;
      }
  }
  if (Match == ArgType::MatchPromotion) {
    // WG14 N2562 only clarified promotions in *printf
    // For NSLog in ObjC, just preserve -Wformat behavior
    if (!S.getLangOpts().ObjC &&
        ImplicitMatch != ArgType::NoMatchPromotionTypeConfusion &&
        ImplicitMatch != ArgType::NoMatchTypeConfusion)
      return true;
    Match = ArgType::NoMatch;
  }
  // Prefer the (more specific) verdict on the promoted-from subexpression
  // when it is pedantic or a type-confusion case.
  if (ImplicitMatch == ArgType::NoMatchPedantic ||
      ImplicitMatch == ArgType::NoMatchTypeConfusion)
    Match = ImplicitMatch;
  assert(Match != ArgType::MatchPromotion);

  // Look through unscoped enums to their underlying type.
  bool IsEnum = false;
  bool IsScopedEnum = false;
  QualType IntendedTy = ExprTy;
  if (auto EnumTy = ExprTy->getAs<EnumType>()) {
    IntendedTy = EnumTy->getDecl()->getIntegerType();
    if (EnumTy->isUnscopedEnumerationType()) {
      ExprTy = IntendedTy;
      // This controls whether we're talking about the underlying type or not,
      // which we only want to do when it's an unscoped enum.
      IsEnum = true;
    } else {
      IsScopedEnum = true;
    }
  }

  // %C in an Objective-C context prints a unichar, not a wchar_t.
  // If the argument is an integer of some kind, believe the %C and suggest
  // a cast instead of changing the conversion specifier.
  if (isObjCContext() &&
      FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
    if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
        !ExprTy->isCharType()) {
      // 'unichar' is defined as a typedef of unsigned short, but we should
      // prefer using the typedef if it is visible.
      IntendedTy = S.Context.UnsignedShortTy;

      // While we are here, check if the value is an IntegerLiteral that happens
      // to be within the valid range.
      if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
        const llvm::APInt &V = IL->getValue();
        if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
          return true;
      }

      // Look for a visible 'unichar' typedef so the suggested cast names it
      // rather than the raw 'unsigned short'.
      LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(),
                          Sema::LookupOrdinaryName);
      if (S.LookupName(Result, S.getCurScope())) {
        NamedDecl *ND = Result.getFoundDecl();
        if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
          if (TD->getUnderlyingType() == IntendedTy)
            IntendedTy = S.Context.getTypedefType(TD);
      }
    }
  }

  // Special-case some of Darwin's platform-independence types by suggesting
  // casts to primitive types that are known to be large enough.
  bool ShouldNotPrintDirectly = false;
  StringRef CastTyName;
  if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
    QualType CastTy;
    std::tie(CastTy, CastTyName) =
        shouldNotPrintDirectly(S.Context, IntendedTy, E);
    if (!CastTy.isNull()) {
      // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
      // (long in ASTContext). Only complain to pedants or when they're the
      // underlying type of a scoped enum (which always needs a cast).
      if (!IsScopedEnum &&
          (CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
          (AT.isSizeT() || AT.isPtrdiffT()) &&
          AT.matchesType(S.Context, CastTy))
        Match = ArgType::NoMatchPedantic;
      IntendedTy = CastTy;
      ShouldNotPrintDirectly = true;
    }
  }

  // We may be able to offer a FixItHint if it is a supported type.
  PrintfSpecifier fixedFS = FS;
  bool Success =
      fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());

  if (Success) {
    // Get the fix string from the fixed format specifier
    SmallString<16> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);

    if (IntendedTy == ExprTy && !ShouldNotPrintDirectly && !IsScopedEnum) {
      unsigned Diag;
      switch (Match) {
      case ArgType::Match:
      case ArgType::MatchPromotion:
      case ArgType::NoMatchPromotionTypeConfusion:
        llvm_unreachable("expected non-matching");
      case ArgType::NoMatchPedantic:
        Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
        break;
      case ArgType::NoMatchTypeConfusion:
        Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
        break;
      case ArgType::NoMatch:
        Diag = diag::warn_format_conversion_argument_type_mismatch;
        break;
      }

      // In this case, the specifier is wrong and should be changed to match
      // the argument.
      EmitFormatDiagnostic(S.PDiag(Diag)
                               << AT.getRepresentativeTypeName(S.Context)
                               << IntendedTy << IsEnum << E->getSourceRange(),
                           E->getBeginLoc(),
                           /*IsStringLocation*/ false, SpecRange,
                           FixItHint::CreateReplacement(SpecRange, os.str()));
    } else {
      // The canonical type for formatting this value is different from the
      // actual type of the expression. (This occurs, for example, with Darwin's
      // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
      // should be printed as 'long' for 64-bit compatibility.)
      // Rather than emitting a normal format/argument mismatch, we want to
      // add a cast to the recommended type (and correct the format string
      // if necessary). We should also do so for scoped enumerations.
      SmallString<16> CastBuf;
      llvm::raw_svector_ostream CastFix(CastBuf);
      CastFix << (S.LangOpts.CPlusPlus ? "static_cast<" : "(");
      IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
      CastFix << (S.LangOpts.CPlusPlus ? ">" : ")");

      SmallVector<FixItHint,4> Hints;
      // Only rewrite the specifier itself if the cast alone wouldn't make the
      // argument match it.
      if (AT.matchesType(S.Context, IntendedTy) != ArgType::Match ||
          ShouldNotPrintDirectly)
        Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));

      if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
        // If there's already a cast present, just replace it.
        SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
        Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));

      } else if (!requiresParensToAddCast(E) && !S.LangOpts.CPlusPlus) {
        // If the expression has high enough precedence,
        // just write the C-style cast.
        Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
      } else {
        // Otherwise, add parens around the expression as well as the cast.
        CastFix << "(";
        Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));

        // We don't use getLocForEndOfToken because it returns invalid source
        // locations for macro expansions (by design).
        SourceLocation EndLoc = S.SourceMgr.getSpellingLoc(E->getEndLoc());
        SourceLocation After = EndLoc.getLocWithOffset(
            Lexer::MeasureTokenLength(EndLoc, S.SourceMgr, S.LangOpts));
        Hints.push_back(FixItHint::CreateInsertion(After, ")"));
      }

      if (ShouldNotPrintDirectly && !IsScopedEnum) {
        // The expression has a type that should not be printed directly.
        // We extract the name from the typedef because we don't want to show
        // the underlying type in the diagnostic.
        StringRef Name;
        if (const auto *TypedefTy = ExprTy->getAs<TypedefType>())
          Name = TypedefTy->getDecl()->getName();
        else
          Name = CastTyName;
        unsigned Diag = Match == ArgType::NoMatchPedantic
                            ? diag::warn_format_argument_needs_cast_pedantic
                            : diag::warn_format_argument_needs_cast;
        EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
                                           << E->getSourceRange(),
                             E->getBeginLoc(), /*IsStringLocation=*/false,
                             SpecRange, Hints);
      } else {
        // In this case, the expression could be printed using a different
        // specifier, but we've decided that the specifier is probably correct
        // and we should cast instead. Just use the normal warning message.
        EmitFormatDiagnostic(
            S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
                << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
      }
    }
  } else {
    // No fix-it could be produced for this specifier/type pair.
    const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
                                                   SpecifierLen);
    // Since the warning for passing non-POD types to variadic functions
    // was deferred until now, we emit a warning for non-POD
    // arguments here.
    bool EmitTypeMismatch = false;
    switch (S.isValidVarArgType(ExprTy)) {
    case Sema::VAK_Valid:
    case Sema::VAK_ValidInCXX11: {
      unsigned Diag;
      switch (Match) {
      case ArgType::Match:
      case ArgType::MatchPromotion:
      case ArgType::NoMatchPromotionTypeConfusion:
        llvm_unreachable("expected non-matching");
      case ArgType::NoMatchPedantic:
        Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
        break;
      case ArgType::NoMatchTypeConfusion:
        Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
        break;
      case ArgType::NoMatch:
        Diag = diag::warn_format_conversion_argument_type_mismatch;
        break;
      }

      EmitFormatDiagnostic(
          S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
                        << IsEnum << CSR << E->getSourceRange(),
          E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      break;
    }
    case Sema::VAK_Undefined:
    case Sema::VAK_MSVCUndefined:
      if (CallType == Sema::VariadicDoesNotApply) {
        EmitTypeMismatch = true;
      } else {
        EmitFormatDiagnostic(
            S.PDiag(diag::warn_non_pod_vararg_with_format_string)
                << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
                << AT.getRepresentativeTypeName(S.Context) << CSR
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
        checkForCStrMembers(AT, E);
      }
      break;

    case Sema::VAK_Invalid:
      if (CallType == Sema::VariadicDoesNotApply)
        EmitTypeMismatch = true;
      else if (ExprTy->isObjCObjectType())
        EmitFormatDiagnostic(
            S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
                << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
                << AT.getRepresentativeTypeName(S.Context) << CSR
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      else
        // FIXME: If this is an initializer list, suggest removing the braces
        // or inserting a cast to the target type.
        S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
            << isa<InitListExpr>(E) << ExprTy << CallType
            << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
      break;
    }

    if (EmitTypeMismatch) {
      // The function is not variadic, so we do not generate warnings about
      // being allowed to pass that object as a variadic argument. Instead,
      // since there are inherently no printf specifiers for types which cannot
      // be passed as variadic arguments, emit a plain old specifier mismatch
      // argument.
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT.getRepresentativeTypeName(S.Context) << ExprTy << false
              << E->getSourceRange(),
          E->getBeginLoc(), false, CSR);
    }

    assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
           "format string specifier index out of range");
    CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
  }

  return true;
}

//===--- CHECK: Scanf format string checking ------------------------------===//

namespace {

/// Callback handler for the scanf format-string parser; mirrors
/// CheckPrintfHandler but for scanf-family conversion specifiers.
class CheckScanfHandler : public CheckFormatHandler {
public:
  CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
                    const Expr *origFormatExpr, Sema::FormatStringType type,
                    unsigned firstDataArg, unsigned numDataArgs,
                    const char *beg, Sema::FormatArgumentPassingKind APK,
                    ArrayRef<const Expr *> Args, unsigned formatIdx,
                    bool inFunctionCall, Sema::VariadicCallType CallType,
                    llvm::SmallBitVector &CheckedVarArgs,
                    UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, APK, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *startSpecifier,
                            unsigned specifierLen) override;

  bool HandleInvalidScanfConversionSpecifier(
      const analyze_scanf::ScanfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void HandleIncompleteScanList(const char *start, const char *end) override;
};

} // namespace

// Diagnose a '[' scanlist that is never closed by ']'.
void CheckScanfHandler::HandleIncompleteScanList(const char *start,
                                                 const char *end) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
                       getLocationOfByte(end), /*IsStringLocation*/true,
                       getSpecifierRange(start, end - start));
}

// Forward an unrecognized scanf conversion specifier to the shared
// invalid-specifier handler in CheckFormatHandler.
bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_scanf::ScanfConversionSpecifier &CS =
      FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

/// Check one scanf conversion specifier: positional-argument consistency,
/// zero field width, length modifier validity, and (below) the matching
/// data argument's type. Returns false to abort parsing the format string.
bool CheckScanfHandler::HandleScanfSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument. These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // Check if the field width is non-zero.
  const OptionalAmount &Amt = FS.getFieldWidth();
  if (Amt.getHowSpecified() == OptionalAmount::Constant) {
    if (Amt.getConstantAmount() == 0) {
      const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength());
      EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
                           getLocationOfByte(Amt.getStart()),
                           /*IsStringLocation*/true, R,
                           FixItHint::CreateRemoval(R));
    }
  }

  if (!FS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense. Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  // With a va_list there are no expressions to check against the specifier.
  if (ArgPassingKind == Sema::FAPK_VAList)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  // Check that the argument type matches the format specifier.
  const Expr *Ex = getDataArg(argIndex);
  if (!Ex)
    return true;

  const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);

  if (!AT.isValid()) {
    return true;
  }

  analyze_format_string::ArgType::MatchKind Match =
      AT.matchesType(S.Context, Ex->getType());
  bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
  if (Match == analyze_format_string::ArgType::Match)
    return true;

  // Try to fix the specifier to match the argument's type; on success the
  // mismatch diagnostic carries a replacement fix-it.
  ScanfSpecifier fixedFS = FS;
  bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
                                 S.getLangOpts(), S.Context);

  unsigned Diag =
      Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
               : diag::warn_format_conversion_argument_type_mismatch;

  if (Success) {
    // Get the fix string from the fixed format specifier.
    SmallString<128> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    EmitFormatDiagnostic(
        S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
                      << Ex->getType() << false << Ex->getSourceRange(),
        Ex->getBeginLoc(),
        /*IsStringLocation*/ false,
        getSpecifierRange(startSpecifier, specifierLen),
        FixItHint::CreateReplacement(
            getSpecifierRange(startSpecifier, specifierLen), os.str()));
  } else {
    EmitFormatDiagnostic(S.PDiag(Diag)
                             << AT.getRepresentativeTypeName(S.Context)
                             << Ex->getType() << false << Ex->getSourceRange(),
                         Ex->getBeginLoc(),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  return true;
}

/// Drive format-string checking for one literal: validates the literal
/// itself (wide/truncated/empty) and then dispatches to the printf- or
/// scanf-family handler as indicated by \p Type.
static void CheckFormatString(
    Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
    ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
    unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
    bool inFunctionCall, Sema::VariadicCallType CallType,
    llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
    bool IgnoreStringsWithoutSpecifiers) {
  // CHECK: is the format string a wide literal?
  if (!FExpr->isAscii() && !FExpr->isUTF8()) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T =
      S.Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  // Clamp the checked length to the declared array size (minus the NUL slot),
  // so a truncating declaration doesn't read past the stored characters.
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  const unsigned numDataArgs = Args.size() - firstDataArg;

  if (IgnoreStringsWithoutSpecifiers &&
      !analyze_format_string::parseFormatStringHasFormattingSpecifiers(
          Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
    return;

  // Emit a warning if the string literal is truncated and does not contain an
  // embedded null character.
  if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_printf_format_string_not_null_terminated),
        FExpr->getBeginLoc(),
        /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
    return;
  }

  // CHECK: empty format string?
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, APK,
        Args, format_idx, inFunctionCall, CallType, CheckedVarArgs,
        UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(
            H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo(),
            Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, APK, Args, format_idx, inFunctionCall,
                        CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(
            H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
}

/// Return true if the format-string literal contains a '%s'-style specifier.
bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  // Clamp to the declared array size minus the NUL slot, as above.
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(
      Str, Str + StrLen, getLangOpts(), Context.getTargetInfo());
}

//===--- CHECK: Warn on use of wrong absolute value function. -------------===//

// Returns the related absolute value function that is larger, or 0 if one
// does not exist.
static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
  switch (AbsFunction) {
  default:
    return 0;

  case Builtin::BI__builtin_abs:
    return Builtin::BI__builtin_labs;
  case Builtin::BI__builtin_labs:
    return Builtin::BI__builtin_llabs;
  case Builtin::BI__builtin_llabs:
    return 0;

  case Builtin::BI__builtin_fabsf:
    return Builtin::BI__builtin_fabs;
  case Builtin::BI__builtin_fabs:
    return Builtin::BI__builtin_fabsl;
  case Builtin::BI__builtin_fabsl:
    return 0;

  case Builtin::BI__builtin_cabsf:
    return Builtin::BI__builtin_cabs;
  case Builtin::BI__builtin_cabs:
    return Builtin::BI__builtin_cabsl;
  case Builtin::BI__builtin_cabsl:
    return 0;

  case Builtin::BIabs:
    return Builtin::BIlabs;
  case Builtin::BIlabs:
    return Builtin::BIllabs;
  case Builtin::BIllabs:
    return 0;

  case Builtin::BIfabsf:
    return Builtin::BIfabs;
  case Builtin::BIfabs:
    return Builtin::BIfabsl;
  case Builtin::BIfabsl:
    return 0;

  case Builtin::BIcabsf:
    return Builtin::BIcabs;
  case Builtin::BIcabs:
    return Builtin::BIcabsl;
  case Builtin::BIcabsl:
    return 0;
  }
}

// Returns the argument type of the absolute value function, or a null
// QualType if the builtin's signature cannot be resolved or is not unary.
static QualType getAbsoluteValueArgumentType(ASTContext &Context,
                                             unsigned AbsType) {
  if (AbsType == 0)
    return QualType();

  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
  if (Error != ASTContext::GE_None)
    return QualType();

  const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
  if (!FT)
    return QualType();

  if (FT->getNumParams() != 1)
    return QualType();

  return FT->getParamType(0);
}

// Returns the best absolute value function, or zero, based on type and
// current absolute value function.
static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
                                   unsigned AbsFunctionKind) {
  unsigned BestKind = 0;
  uint64_t ArgSize = Context.getTypeSize(ArgType);
  // Walk up the "larger" chain, taking the first function whose parameter is
  // wide enough, and preferring an exact type match when one exists.
  for (unsigned Kind = AbsFunctionKind; Kind != 0;
       Kind = getLargerAbsoluteValueFunction(Kind)) {
    QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
    if (Context.getTypeSize(ParamType) >= ArgSize) {
      if (BestKind == 0)
        BestKind = Kind;
      else if (Context.hasSameType(ParamType, ArgType)) {
        BestKind = Kind;
        break;
      }
    }
  }
  return BestKind;
}

// Coarse classification of the argument type passed to an abs-family call.
enum AbsoluteValueKind {
  AVK_Integer,
  AVK_Floating,
  AVK_Complex
};

static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
  if (T->isIntegralOrEnumerationType())
    return AVK_Integer;
  if (T->isRealFloatingType())
    return AVK_Floating;
  if (T->isAnyComplexType())
    return AVK_Complex;

  llvm_unreachable("Type not integer, floating, or complex");
}

// Changes the absolute value function to a different type. Preserves whether
// the function is a builtin.
static unsigned changeAbsFunction(unsigned AbsKind,
                                  AbsoluteValueKind ValueKind) {
  // Map to the smallest function of the target kind; callers can then widen
  // via getBestAbsFunction. '__builtin_*' inputs map to '__builtin_*' outputs,
  // library-name inputs map to library-name outputs.
  switch (ValueKind) {
  case AVK_Integer:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_abs;
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIabs;
    }
  case AVK_Floating:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_cabsf:
    case Builtin::BI__builtin_cabs:
    case Builtin::BI__builtin_cabsl:
      return Builtin::BI__builtin_fabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIcabsf:
    case Builtin::BIcabs:
    case Builtin::BIcabsl:
      return Builtin::BIfabsf;
    }
  case AVK_Complex:
    switch (AbsKind) {
    default:
      return 0;
    case Builtin::BI__builtin_abs:
    case Builtin::BI__builtin_labs:
    case Builtin::BI__builtin_llabs:
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsl:
      return Builtin::BI__builtin_cabsf;
    case Builtin::BIabs:
    case Builtin::BIlabs:
    case Builtin::BIllabs:
    case Builtin::BIfabsf:
    case Builtin::BIfabs:
    case Builtin::BIfabsl:
      return Builtin::BIcabsf;
    }
  }
  llvm_unreachable("Unable to convert function");
}

// Returns the abs-family builtin ID of \p FDecl, or 0 if it is not one of
// the recognized absolute value functions (or has no identifier).
static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
  const IdentifierInfo *FnInfo = FDecl->getIdentifier();
  if (!FnInfo)
    return 0;

  switch (FDecl->getBuiltinID()) {
  default:
    return 0;
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs:
  case Builtin::BI__builtin_cabs:
  case Builtin::BI__builtin_cabsf:
  case Builtin::BI__builtin_cabsl:
  case Builtin::BIabs:
  case Builtin::BIlabs:
  case Builtin::BIllabs:
  case Builtin::BIfabs:
  case Builtin::BIfabsf:
  case Builtin::BIfabsl:
  case Builtin::BIcabs:
  case Builtin::BIcabsf:
  case Builtin::BIcabsl:
    return FDecl->getBuiltinID();
  }
  llvm_unreachable("Unknown Builtin type");
}

// If the replacement is valid, emit a note with replacement function.
// Additionally, suggest including the proper header if not already included.
static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
                            unsigned AbsKind, QualType ArgType) {
  bool EmitHeaderHint = true;
  const char *HeaderName = nullptr;
  StringRef FunctionName;
  if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
    // In C++ (non-complex argument), suggest the std::abs overload set.
    FunctionName = "std::abs";
    if (ArgType->isIntegralOrEnumerationType()) {
      HeaderName = "cstdlib";
    } else if (ArgType->isRealFloatingType()) {
      HeaderName = "cmath";
    } else {
      llvm_unreachable("Invalid Type");
    }

    // Lookup all std::abs
    if (NamespaceDecl *Std = S.getStdNamespace()) {
      LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupQualifiedName(R, Std);

      for (const auto *I : R) {
        const FunctionDecl *FDecl = nullptr;
        if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
          FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
        } else {
          FDecl = dyn_cast<FunctionDecl>(I);
        }
        if (!FDecl)
          continue;

        // Found std::abs(), check that they are the right ones.
        if (FDecl->getNumParams() != 1)
          continue;

        // Check that the parameter type can handle the argument.
        QualType ParamType = FDecl->getParamDecl(0)->getType();
        if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
            S.Context.getTypeSize(ArgType) <=
                S.Context.getTypeSize(ParamType)) {
          // Found a function, don't need the header hint.
          EmitHeaderHint = false;
          break;
        }
      }
    }
  } else {
    FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
    HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);

    if (HeaderName) {
      DeclarationName DN(&S.Context.Idents.get(FunctionName));
      LookupResult R(S, DN, Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupName(R, S.getCurScope());

      if (R.isSingleResult()) {
        // A single declaration with this name is visible; only suggest the
        // replacement when it is the expected builtin, otherwise stay silent.
        FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
        if (FD && FD->getBuiltinID() == AbsKind) {
          EmitHeaderHint = false;
        } else {
          return;
        }
      } else if (!R.empty()) {
        // Multiple declarations found; don't suggest anything.
        return;
      }
    }
  }

  S.Diag(Loc, diag::note_replace_abs_function)
      << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);

  if (!HeaderName)
    return;

  if (!EmitHeaderHint)
    return;

  S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
                                                    << FunctionName;
}

/// Returns true if \p FDecl is a function named \p Str that lives in
/// namespace std.
template <std::size_t StrLen>
static bool IsStdFunction(const FunctionDecl *FDecl,
                          const char (&Str)[StrLen]) {
  if (!FDecl)
    return false;
  if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
    return false;
  if (!FDecl->isInStdNamespace())
    return false;

  return true;
}

/// Warn about calls to NaN/infinity classification functions (std::isnan,
/// std::isunordered, std::isinf, std::isfinite, __builtin_nanf, "infinity")
/// when the current FP options disable honoring NaNs or infinities, making
/// the call meaningless.
void Sema::CheckInfNaNFunction(const CallExpr *Call,
                               const FunctionDecl *FDecl) {
  FPOptions FPO = Call->getFPFeaturesInEffect(getLangOpts());
  if ((IsStdFunction(FDecl, "isnan") || IsStdFunction(FDecl, "isunordered") ||
       (Call->getBuiltinCallee() == Builtin::BI__builtin_nanf)) &&
      FPO.getNoHonorNaNs())
    Diag(Call->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
        << 1 << 0 << Call->getSourceRange();
  else if ((IsStdFunction(FDecl, "isinf") ||
            (IsStdFunction(FDecl, "isfinite") ||
             (FDecl->getIdentifier() && FDecl->getName() == "infinity"))) &&
           FPO.getNoHonorInfs())
    Diag(Call->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
        << 0 << 0 << Call->getSourceRange();
}

// Warn when using the wrong abs() function.
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
                                      const FunctionDecl *FDecl) {
  if (Call->getNumArgs() != 1)
    return;

  unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
  bool IsStdAbs = IsStdFunction(FDecl, "abs");
  if (AbsKind == 0 && !IsStdAbs)
    return;

  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  QualType ParamType = Call->getArg(0)->getType();

  // Unsigned types cannot be negative. Suggest removing the absolute value
  // function call.
  if (ArgType->isUnsignedIntegerType()) {
    StringRef FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
    Diag(Call->getExprLoc(), diag::note_remove_abs)
        << FunctionName
        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
    return;
  }

  // Taking the absolute value of a pointer is very suspicious, they probably
  // wanted to index into an array, dereference a pointer, call a function, etc.
  if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
    unsigned DiagType = 0;
    if (ArgType->isFunctionType())
      DiagType = 1;
    else if (ArgType->isArrayType())
      DiagType = 2;

    Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
    return;
  }

  // std::abs has overloads which prevent most of the absolute value problems
  // from occurring.
  if (IsStdAbs)
    return;

  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);

  // The argument and parameter are the same kind. Check if they are the right
  // size.
  if (ArgValueKind == ParamValueKind) {
    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
      return;

    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
        << FDecl << ArgType << ParamType;

    if (NewAbsKind == 0)
      return;

    emitReplacement(*this, Call->getExprLoc(),
                    Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
    return;
  }

  // ArgValueKind != ParamValueKind
  // The wrong type of absolute value function was used. Attempt to find the
  // proper one.
  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
  if (NewAbsKind == 0)
    return;

  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
      << FDecl << ParamValueKind << ArgValueKind;

  emitReplacement(*this, Call->getExprLoc(),
                  Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
}

//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
                                const FunctionDecl *FDecl) {
  if (!Call || !FDecl) return;

  // Ignore template specializations and macros.
  if (inTemplateInstantiation()) return;
  if (Call->getExprLoc().isMacroID()) return;

  // Only care about the one template argument, two function parameter std::max
  if (Call->getNumArgs() != 2) return;
  if (!IsStdFunction(FDecl, "max")) return;
  const auto *ArgList = FDecl->getTemplateSpecializationArgs();
  if (!ArgList) return;
  if (ArgList->size() != 1) return;

  // Check that template type argument is unsigned integer.
  const auto &TA = ArgList->get(0);
  if (TA.getKind() != TemplateArgument::Type) return;
  QualType ArgType = TA.getAsType();
  if (!ArgType->isUnsignedIntegerType()) return;

  // See if either argument is a literal zero.
  auto IsLiteralZeroArg = [](const Expr *E) -> bool {
    const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
    if (!MTE) return false;
    const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr());
    if (!Num) return false;
    if (Num->getValue() != 0) return false;
    return true;
  };

  const Expr *FirstArg = Call->getArg(0);
  const Expr *SecondArg = Call->getArg(1);
  const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
  const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);

  // Only warn when exactly one argument is zero.
  if (IsFirstArgZero == IsSecondArgZero) return;

  SourceRange FirstRange = FirstArg->getSourceRange();
  SourceRange SecondRange = SecondArg->getSourceRange();

  SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;

  Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
      << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;

  // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
  SourceRange RemovalRange;
  if (IsFirstArgZero) {
    RemovalRange = SourceRange(FirstRange.getBegin(),
                               SecondRange.getBegin().getLocWithOffset(-1));
  } else {
    RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
                               SecondRange.getEnd());
  }

  Diag(Call->getExprLoc(), diag::note_remove_max_call)
      << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
      << FixItHint::CreateRemoval(RemovalRange);
}

//===--- CHECK: Standard memory functions ---------------------------------===//

/// Takes the expression passed to the size_t parameter of functions
/// such as memcmp, strncat, etc and warns if it's a comparison.
///
/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
                                           IdentifierInfo *FnName,
                                           SourceLocation FnLoc,
                                           SourceLocation RParenLoc) {
  const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
  if (!Size)
    return false;

  // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
  if (!Size->isComparisonOp() && !Size->isLogicalOp())
    return false;

  SourceRange SizeRange = Size->getSourceRange();
  S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
      << SizeRange << FnName;
  // Suggest closing the call after the LHS of the comparison (the intended
  // size argument) and dropping the original closing paren.
  S.Diag(FnLoc, diag::note_memsize_comparison_paren)
      << FnName
      << FixItHint::CreateInsertion(
             S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
      << FixItHint::CreateRemoval(RParenLoc);
  // Alternatively, an explicit (size_t) cast silences the warning if the
  // comparison really was intended.
  S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
      << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
      << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
                                    ")");

  return true;
}

/// Determine whether the given type is or contains a dynamic class type
/// (e.g., whether it
has a vtable). 13157 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 13158 bool &IsContained) { 13159 // Look through array types while ignoring qualifiers. 13160 const Type *Ty = T->getBaseElementTypeUnsafe(); 13161 IsContained = false; 13162 13163 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 13164 RD = RD ? RD->getDefinition() : nullptr; 13165 if (!RD || RD->isInvalidDecl()) 13166 return nullptr; 13167 13168 if (RD->isDynamicClass()) 13169 return RD; 13170 13171 // Check all the fields. If any bases were dynamic, the class is dynamic. 13172 // It's impossible for a class to transitively contain itself by value, so 13173 // infinite recursion is impossible. 13174 for (auto *FD : RD->fields()) { 13175 bool SubContained; 13176 if (const CXXRecordDecl *ContainedRD = 13177 getContainedDynamicClass(FD->getType(), SubContained)) { 13178 IsContained = true; 13179 return ContainedRD; 13180 } 13181 } 13182 13183 return nullptr; 13184 } 13185 13186 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 13187 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 13188 if (Unary->getKind() == UETT_SizeOf) 13189 return Unary; 13190 return nullptr; 13191 } 13192 13193 /// If E is a sizeof expression, returns its argument expression, 13194 /// otherwise returns NULL. 13195 static const Expr *getSizeOfExprArg(const Expr *E) { 13196 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 13197 if (!SizeOf->isArgumentType()) 13198 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 13199 return nullptr; 13200 } 13201 13202 /// If E is a sizeof expression, returns its argument type. 
static QualType getSizeOfArgType(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    return SizeOf->getTypeOfArgument();
  return QualType();
}

namespace {

/// Walks a record type and emits note_nontrivial_field (with selector 1) at
/// each field whose ARC strong/weak lifetime makes the type non-trivial to
/// default-initialize. Arrays are unwrapped to their base element type.
struct SearchNonTrivialToInitializeField
    : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
  using Super =
      DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;

  SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}

  // Intercept array types before generic dispatch so they are visited via
  // their base element type.
  void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PDIK, AT, SL);
      return;
    }

    Super::visitWithKind(PDIK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
                  const ArrayType *AT, SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void visitTrivial(QualType FT, SourceLocation SL) {}

  // Entry point: note every problematic field of record type RT.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E; // Expression anchoring the runtime-behavior diagnostics.
  Sema &S;
};

/// Counterpart of SearchNonTrivialToInitializeField for types that are
/// non-trivial to copy (note_nontrivial_field with selector 0).
struct SearchNonTrivialToCopyField
    : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
  using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;

  SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}

  // Intercept array types before generic dispatch so they are visited via
  // their base element type.
  void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PCK, AT, SL);
      return;
    }

    Super::visitWithKind(PCK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
                  SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
                SourceLocation SL) {}
  void visitTrivial(QualType FT, SourceLocation SL) {}
  void visitVolatileTrivial(QualType FT, SourceLocation SL) {}

  // Entry point: note every problematic field of record type RT.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E; // Expression anchoring the runtime-behavior diagnostics.
  Sema &S;
};

} // namespace

/// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
13302 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 13303 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 13304 13305 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 13306 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 13307 return false; 13308 13309 return doesExprLikelyComputeSize(BO->getLHS()) || 13310 doesExprLikelyComputeSize(BO->getRHS()); 13311 } 13312 13313 return getAsSizeOfExpr(SizeofExpr) != nullptr; 13314 } 13315 13316 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 13317 /// 13318 /// \code 13319 /// #define MACRO 0 13320 /// foo(MACRO); 13321 /// foo(0); 13322 /// \endcode 13323 /// 13324 /// This should return true for the first call to foo, but not for the second 13325 /// (regardless of whether foo is a macro or function). 13326 static bool isArgumentExpandedFromMacro(SourceManager &SM, 13327 SourceLocation CallLoc, 13328 SourceLocation ArgLoc) { 13329 if (!CallLoc.isMacroID()) 13330 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 13331 13332 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 13333 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 13334 } 13335 13336 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 13337 /// last two arguments transposed. 13338 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 13339 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 13340 return; 13341 13342 const Expr *SizeArg = 13343 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 13344 13345 auto isLiteralZero = [](const Expr *E) { 13346 return (isa<IntegerLiteral>(E) && 13347 cast<IntegerLiteral>(E)->getValue() == 0) || 13348 (isa<CharacterLiteral>(E) && 13349 cast<CharacterLiteral>(E)->getValue() == 0); 13350 }; 13351 13352 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 
13353 SourceLocation CallLoc = Call->getRParenLoc(); 13354 SourceManager &SM = S.getSourceManager(); 13355 if (isLiteralZero(SizeArg) && 13356 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 13357 13358 SourceLocation DiagLoc = SizeArg->getExprLoc(); 13359 13360 // Some platforms #define bzero to __builtin_memset. See if this is the 13361 // case, and if so, emit a better diagnostic. 13362 if (BId == Builtin::BIbzero || 13363 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 13364 CallLoc, SM, S.getLangOpts()) == "bzero")) { 13365 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 13366 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 13367 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 13368 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 13369 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 13370 } 13371 return; 13372 } 13373 13374 // If the second argument to a memset is a sizeof expression and the third 13375 // isn't, this is also likely an error. This should catch 13376 // 'memset(buf, sizeof(buf), 0xff)'. 13377 if (BId == Builtin::BImemset && 13378 doesExprLikelyComputeSize(Call->getArg(1)) && 13379 !doesExprLikelyComputeSize(Call->getArg(2))) { 13380 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 13381 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 13382 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 13383 return; 13384 } 13385 } 13386 13387 /// Check for dangerous or invalid arguments to memset(). 13388 /// 13389 /// This issues warnings on known problematic, dangerous or unspecified 13390 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 13391 /// function calls. 13392 /// 13393 /// \param Call The call expression to diagnose. 
void Sema::CheckMemaccessArguments(const CallExpr *Call,
                                   unsigned BId,
                                   IdentifierInfo *FnName) {
  assert(BId != 0);

  // It is possible to have a non-standard definition of memset. Validate
  // we have enough arguments, and if not, abort further checking.
  unsigned ExpectedNumArgs =
      (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
  if (Call->getNumArgs() < ExpectedNumArgs)
    return;

  // LastArg: how many leading pointer arguments to inspect (memset, bzero and
  // strndup only have a single pointer; the others have source + destination).
  unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
                      BId == Builtin::BIstrndup ? 1 : 2);
  // LenArg: index of the size/length argument for this builtin.
  unsigned LenArg =
      (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
  const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();

  if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Catch cases like 'memset(buf, sizeof(buf), 0)'.
  CheckMemaccessSize(*this, BId, Call);

  // We have special checking when the length is a sizeof expression.
  QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
  const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
  llvm::FoldingSetNodeID SizeOfArgID;

  // Although widely used, 'bzero' is not a standard function. Be more strict
  // with the argument types before allowing diagnostics and only allow the
  // form bzero(ptr, sizeof(...)).
  QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
    return;

  for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
    const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
    SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();

    QualType DestTy = Dest->getType();
    QualType PointeeTy;
    if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
      PointeeTy = DestPtrTy->getPointeeType();

      // Never warn about void type pointers. This can be used to suppress
      // false positives.
      if (PointeeTy->isVoidType())
        continue;

      // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
      // actually comparing the expressions for equality. Because computing the
      // expression IDs can be expensive, we only do this if the diagnostic is
      // enabled.
      if (SizeOfArg &&
          !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
                           SizeOfArg->getExprLoc())) {
        // We only compute IDs for expressions if the warning is enabled, and
        // cache the sizeof arg's ID.
        if (SizeOfArgID == llvm::FoldingSetNodeID())
          SizeOfArg->Profile(SizeOfArgID, Context, true);
        llvm::FoldingSetNodeID DestID;
        Dest->Profile(DestID, Context, true);
        if (DestID == SizeOfArgID) {
          // TODO: For strncpy() and friends, this could suggest sizeof(dst)
          //       over sizeof(src) as well.
          unsigned ActionIdx = 0; // Default is to suggest dereferencing.
          StringRef ReadableName = FnName->getName();

          if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
            if (UnaryOp->getOpcode() == UO_AddrOf)
              ActionIdx = 1; // If its an address-of operator, just remove it.

          if (!PointeeTy->isIncompleteType() &&
              (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
            ActionIdx = 2; // If the pointee's size is sizeof(char),
                           // suggest an explicit length.

          // If the function is defined as a builtin macro, do not show macro
          // expansion.
          SourceLocation SL = SizeOfArg->getExprLoc();
          SourceRange DSR = Dest->getSourceRange();
          SourceRange SSR = SizeOfArg->getSourceRange();
          SourceManager &SM = getSourceManager();

          if (SM.isMacroArgExpansion(SL)) {
            ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
            SL = SM.getSpellingLoc(SL);
            DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
                              SM.getSpellingLoc(DSR.getEnd()));
            SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
                              SM.getSpellingLoc(SSR.getEnd()));
          }

          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess)
                                  << ReadableName
                                  << PointeeTy
                                  << DestTy
                                  << DSR
                                  << SSR);
          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
                                  << ActionIdx
                                  << SSR);

          break;
        }
      }

      // Also check for cases where the sizeof argument is the exact same
      // type as the memory argument, and where it points to a user-defined
      // record type.
      if (SizeOfArgTy != QualType()) {
        if (PointeeTy->isRecordType() &&
            Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
          DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
                              PDiag(diag::warn_sizeof_pointer_type_memaccess)
                                  << FnName << SizeOfArgTy << ArgIdx
                                  << PointeeTy << Dest->getSourceRange()
                                  << LenExpr->getSourceRange());
          break;
        }
      }
    } else if (DestTy->isArrayType()) {
      PointeeTy = DestTy;
    }

    if (PointeeTy == QualType())
      continue;

    // Always complain about dynamic classes.
    bool IsContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(PointeeTy, IsContained)) {

      unsigned OperationType = 0;
      const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
      // "overwritten" if we're warning about the destination for any call
      // but memcmp; otherwise a verb appropriate to the call.
      if (ArgIdx != 0 || IsCmp) {
        if (BId == Builtin::BImemcpy)
          OperationType = 1;
        else if(BId == Builtin::BImemmove)
          OperationType = 2;
        else if (IsCmp)
          OperationType = 3;
      }

      DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                          PDiag(diag::warn_dyn_class_memaccess)
                              << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
                              << IsContained << ContainedRD << OperationType
                              << Call->getCallee()->getSourceRange());
    } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
               BId != Builtin::BImemset)
      DiagRuntimeBehavior(
          Dest->getExprLoc(), Dest,
          PDiag(diag::warn_arc_object_memaccess)
              << ArgIdx << FnName << PointeeTy
              << Call->getCallee()->getSourceRange());
    else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
      // C structs that are non-trivial to zero-initialize or copy also get
      // a warning, plus notes pointing at the offending fields.
      if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
          RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 0);
        SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
      } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
                 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 1);
        SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
      } else {
        continue;
      }
    } else
      continue;

    // Only reached when one of the branches above diagnosed: emit the
    // common "cast to void* to silence" note and stop.
    DiagRuntimeBehavior(
        Dest->getExprLoc(), Dest,
        PDiag(diag::note_bad_memaccess_silence)
            << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
    break;
  }
}

// A little helper routine: ignore addition and subtraction of integer literals.
// This intentionally does not ignore all integer constant expressions because
// we don't want to remove sizeof().
static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
  Ex = Ex->IgnoreParenCasts();

  while (true) {
    const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex);
    if (!BO || !BO->isAdditiveOp())
      break;

    const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
    const Expr *LHS = BO->getLHS()->IgnoreParenCasts();

    if (isa<IntegerLiteral>(RHS))
      Ex = LHS;
    else if (isa<IntegerLiteral>(LHS))
      Ex = RHS;
    else
      break;
  }

  return Ex;
}

static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
                                                      ASTContext &Context) {
  // Only handle constant-sized or VLAs, but not flexible members.
  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
    // Only issue the FIXIT for arrays of size > 1.
    if (CAT->getSize().getSExtValue() <= 1)
      return false;
  } else if (!Ty->isVariableArrayType()) {
    return false;
  }
  return true;
}

// Warn if the user has made the 'size' argument to strlcpy or strlcat
// be the size of the source, instead of the destination.
void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
                                    IdentifierInfo *FnName) {

  // Don't crash if the user has the wrong number of arguments
  unsigned NumArgs = Call->getNumArgs();
  if ((NumArgs != 3) && (NumArgs != 4))
    return;

  const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
  const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
  const Expr *CompareWithSrc = nullptr;

  if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Look for 'strlcpy(dst, x, sizeof(x))'
  if (const Expr *Ex = getSizeOfExprArg(SizeArg))
    CompareWithSrc = Ex;
  else {
    // Look for 'strlcpy(dst, x, strlen(x))'
    if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
      if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
          SizeCall->getNumArgs() == 1)
        CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
    }
  }

  if (!CompareWithSrc)
    return;

  // Determine if the argument to sizeof/strlen is equal to the source
  // argument. In principle there's all kinds of things you could do
  // here, for instance creating an == expression and evaluating it with
  // EvaluateAsBooleanCondition, but this uses a more direct technique:
  // both expressions must be plain references to the same declaration.
  const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg);
  if (!SrcArgDRE)
    return;

  const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc);
  if (!CompareWithSrcDRE ||
      SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
    return;

  const Expr *OriginalSizeArg = Call->getArg(2);
  Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size)
      << OriginalSizeArg->getSourceRange() << FnName;

  // Output a FIXIT hint if the destination is an array (rather than a
  // pointer to an array). This could be enhanced to handle some
  // pointers if we know the actual size, like if DstArg is 'array+2'
  // we could say 'sizeof(array)-2'.
  const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
  if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
    return;

  // Build the replacement text "sizeof(<dst>)" for the size argument.
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ")";

  Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
      << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
                                      OS.str());
}

/// Check if two expressions refer to the same declaration.
static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
  if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
    if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
      return D1->getDecl() == D2->getDecl();
  return false;
}

/// If E is a call classified as strlen (per getMemoryFunctionKind), returns
/// its argument with paren casts stripped; otherwise returns null.
static const Expr *getStrlenExprArg(const Expr *E) {
  if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
    const FunctionDecl *FD = CE->getDirectCallee();
    if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
      return nullptr;
    return CE->getArg(0)->IgnoreParenCasts();
  }
  return nullptr;
}

// Warn on anti-patterns as the 'size' argument to strncat.
// The correct size argument should look like following:
//   strncat(dst, src, sizeof(dst) - strlen(dest) - 1);
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)
    return;
  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
                                     CE->getRParenLoc()))
    return;

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  // PatternType 1 = size of the destination; 2 = size of the source.
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
      PatternType = 1;
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
      PatternType = 2;
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();
      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))
        PatternType = 1;
      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
        PatternType = 2;
    }
  }

  if (PatternType == 0)
    return;

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getBeginLoc();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));
  }

  // Check if the destination is an array (rather than a pointer to an array).
  QualType DstTy = DstArg->getType();
  bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
                                                                    Context);
  if (!isKnownSizeArray) {
    if (PatternType == 1)
      Diag(SL, diag::warn_strncat_wrong_size) << SR;
    else
      Diag(SL, diag::warn_strncat_src_size) << SR;
    return;
  }

  if (PatternType == 1)
    Diag(SL, diag::warn_strncat_large_size) << SR;
  else
    Diag(SL, diag::warn_strncat_src_size) << SR;

  // Build the suggested replacement "sizeof(<dst>) - strlen(<dst>) - 1".
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - ";
  OS << "strlen(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - 1";

  Diag(SL, diag::note_strncat_wrong_size)
      << FixItHint::CreateReplacement(SR, OS.str());
}

namespace {
// Warn (warn_free_nonheap_object) when the freed lvalue is a field, function
// or variable; other declaration kinds are deliberately ignored.
void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
                                const UnaryOperator *UnaryExpr, const Decl *D) {
  if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) {
    S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
        << CalleeName << 0 /*object: */ << cast<NamedDecl>(D);
    return;
  }
}

// Handle freeing an address-of expression: &var or &obj.member.
void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
                                 const UnaryOperator *UnaryExpr) {
  if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) {
    const Decl *D = Lvalue->getDecl();
    // Reference-typed declarations are skipped: &ref may legitimately point
    // at heap memory.
    // NOTE(review): isa<> above already proves the type, so cast<> would be
    // the idiomatic choice over dyn_cast here.
    if (isa<DeclaratorDecl>(D))
      if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType())
        return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D);
  }

  if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr()))
    return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
                                      Lvalue->getMemberDecl());
}

void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName,
                            const UnaryOperator
*UnaryExpr) { 13818 const auto *Lambda = dyn_cast<LambdaExpr>( 13819 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 13820 if (!Lambda) 13821 return; 13822 13823 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 13824 << CalleeName << 2 /*object: lambda expression*/; 13825 } 13826 13827 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 13828 const DeclRefExpr *Lvalue) { 13829 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 13830 if (Var == nullptr) 13831 return; 13832 13833 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 13834 << CalleeName << 0 /*object: */ << Var; 13835 } 13836 13837 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 13838 const CastExpr *Cast) { 13839 SmallString<128> SizeString; 13840 llvm::raw_svector_ostream OS(SizeString); 13841 13842 clang::CastKind Kind = Cast->getCastKind(); 13843 if (Kind == clang::CK_BitCast && 13844 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 13845 return; 13846 if (Kind == clang::CK_IntegralToPointer && 13847 !isa<IntegerLiteral>( 13848 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 13849 return; 13850 13851 switch (Cast->getCastKind()) { 13852 case clang::CK_BitCast: 13853 case clang::CK_IntegralToPointer: 13854 case clang::CK_FunctionToPointerDecay: 13855 OS << '\''; 13856 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 13857 OS << '\''; 13858 break; 13859 default: 13860 return; 13861 } 13862 13863 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 13864 << CalleeName << 0 /*object: */ << OS.str(); 13865 } 13866 } // namespace 13867 13868 /// Alerts the user that they are attempting to free a non-malloc'd object. 13869 void Sema::CheckFreeArguments(const CallExpr *E) { 13870 const std::string CalleeName = 13871 cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 13872 13873 { // Prefer something that doesn't involve a cast to make things simpler. 
13874 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 13875 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 13876 switch (UnaryExpr->getOpcode()) { 13877 case UnaryOperator::Opcode::UO_AddrOf: 13878 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 13879 case UnaryOperator::Opcode::UO_Plus: 13880 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 13881 default: 13882 break; 13883 } 13884 13885 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 13886 if (Lvalue->getType()->isArrayType()) 13887 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 13888 13889 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 13890 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 13891 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 13892 return; 13893 } 13894 13895 if (isa<BlockExpr>(Arg)) { 13896 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 13897 << CalleeName << 1 /*object: block*/; 13898 return; 13899 } 13900 } 13901 // Maybe the cast was important, check after the other cases. 13902 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 13903 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 13904 } 13905 13906 void 13907 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 13908 SourceLocation ReturnLoc, 13909 bool isObjCMethod, 13910 const AttrVec *Attrs, 13911 const FunctionDecl *FD) { 13912 // Check if the return value is null but should not be. 13913 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 13914 (!isObjCMethod && isNonNullType(lhsType))) && 13915 CheckNonNullExpr(*this, RetValExp)) 13916 Diag(ReturnLoc, diag::warn_null_ret) 13917 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 13918 13919 // C++11 [basic.stc.dynamic.allocation]p4: 13920 // If an allocation function declared with a non-throwing 13921 // exception-specification fails to allocate storage, it shall return 13922 // a null pointer. 
Any other allocation function that fails to allocate 13923 // storage shall indicate failure only by throwing an exception [...] 13924 if (FD) { 13925 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 13926 if (Op == OO_New || Op == OO_Array_New) { 13927 const FunctionProtoType *Proto 13928 = FD->getType()->castAs<FunctionProtoType>(); 13929 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 13930 CheckNonNullExpr(*this, RetValExp)) 13931 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 13932 << FD << getLangOpts().CPlusPlus11; 13933 } 13934 } 13935 13936 if (RetValExp && RetValExp->getType()->isWebAssemblyTableType()) { 13937 Diag(ReturnLoc, diag::err_wasm_table_art) << 1; 13938 } 13939 13940 // PPC MMA non-pointer types are not allowed as return type. Checking the type 13941 // here prevent the user from using a PPC MMA type as trailing return type. 13942 if (Context.getTargetInfo().getTriple().isPPC64()) 13943 CheckPPCMMAType(RetValExp->getType(), ReturnLoc); 13944 } 13945 13946 /// Check for comparisons of floating-point values using == and !=. Issue a 13947 /// warning if the comparison is not likely to do what the programmer intended. 13948 void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS, 13949 BinaryOperatorKind Opcode) { 13950 if (!BinaryOperator::isEqualityOp(Opcode)) 13951 return; 13952 13953 // Match and capture subexpressions such as "(float) X == 0.1". 
13954 FloatingLiteral *FPLiteral; 13955 CastExpr *FPCast; 13956 auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) { 13957 FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens()); 13958 FPCast = dyn_cast<CastExpr>(R->IgnoreParens()); 13959 return FPLiteral && FPCast; 13960 }; 13961 13962 if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) { 13963 auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>(); 13964 auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>(); 13965 if (SourceTy && TargetTy && SourceTy->isFloatingPoint() && 13966 TargetTy->isFloatingPoint()) { 13967 bool Lossy; 13968 llvm::APFloat TargetC = FPLiteral->getValue(); 13969 TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)), 13970 llvm::APFloat::rmNearestTiesToEven, &Lossy); 13971 if (Lossy) { 13972 // If the literal cannot be represented in the source type, then a 13973 // check for == is always false and check for != is always true. 13974 Diag(Loc, diag::warn_float_compare_literal) 13975 << (Opcode == BO_EQ) << QualType(SourceTy, 0) 13976 << LHS->getSourceRange() << RHS->getSourceRange(); 13977 return; 13978 } 13979 } 13980 } 13981 13982 // Match a more general floating-point equality comparison (-Wfloat-equal). 13983 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 13984 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 13985 13986 // Special case: check for x == x (which is OK). 13987 // Do not emit warnings for such cases. 13988 if (auto *DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 13989 if (auto *DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 13990 if (DRL->getDecl() == DRR->getDecl()) 13991 return; 13992 13993 // Special case: check for comparisons against literals that can be exactly 13994 // represented by APFloat. In such cases, do not emit a warning. This 13995 // is a heuristic: often comparison against such literals are used to 13996 // detect if a value in a variable has not changed. 
This clearly can 13997 // lead to false negatives. 13998 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 13999 if (FLL->isExact()) 14000 return; 14001 } else 14002 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 14003 if (FLR->isExact()) 14004 return; 14005 14006 // Check for comparisons with builtin types. 14007 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 14008 if (CL->getBuiltinCallee()) 14009 return; 14010 14011 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 14012 if (CR->getBuiltinCallee()) 14013 return; 14014 14015 // Emit the diagnostic. 14016 Diag(Loc, diag::warn_floatingpoint_eq) 14017 << LHS->getSourceRange() << RHS->getSourceRange(); 14018 } 14019 14020 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 14021 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 14022 14023 namespace { 14024 14025 /// Structure recording the 'active' range of an integer-valued 14026 /// expression. 14027 struct IntRange { 14028 /// The number of bits active in the int. Note that this includes exactly one 14029 /// sign bit if !NonNegative. 14030 unsigned Width; 14031 14032 /// True if the int is known not to have negative values. If so, all leading 14033 /// bits before Width are known zero, otherwise they are known to be the 14034 /// same as the MSB within Width. 14035 bool NonNegative; 14036 14037 IntRange(unsigned Width, bool NonNegative) 14038 : Width(Width), NonNegative(NonNegative) {} 14039 14040 /// Number of bits excluding the sign bit. 14041 unsigned valueBits() const { 14042 return NonNegative ? Width : Width - 1; 14043 } 14044 14045 /// Returns the range of the bool type. 14046 static IntRange forBoolType() { 14047 return IntRange(1, true); 14048 } 14049 14050 /// Returns the range of an opaque value of the given integral type. 
14051 static IntRange forValueOfType(ASTContext &C, QualType T) { 14052 return forValueOfCanonicalType(C, 14053 T->getCanonicalTypeInternal().getTypePtr()); 14054 } 14055 14056 /// Returns the range of an opaque value of a canonical integral type. 14057 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 14058 assert(T->isCanonicalUnqualified()); 14059 14060 if (const VectorType *VT = dyn_cast<VectorType>(T)) 14061 T = VT->getElementType().getTypePtr(); 14062 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 14063 T = CT->getElementType().getTypePtr(); 14064 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 14065 T = AT->getValueType().getTypePtr(); 14066 14067 if (!C.getLangOpts().CPlusPlus) { 14068 // For enum types in C code, use the underlying datatype. 14069 if (const EnumType *ET = dyn_cast<EnumType>(T)) 14070 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 14071 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 14072 // For enum types in C++, use the known bit width of the enumerators. 14073 EnumDecl *Enum = ET->getDecl(); 14074 // In C++11, enums can have a fixed underlying type. Use this type to 14075 // compute the range. 
14076 if (Enum->isFixed()) { 14077 return IntRange(C.getIntWidth(QualType(T, 0)), 14078 !ET->isSignedIntegerOrEnumerationType()); 14079 } 14080 14081 unsigned NumPositive = Enum->getNumPositiveBits(); 14082 unsigned NumNegative = Enum->getNumNegativeBits(); 14083 14084 if (NumNegative == 0) 14085 return IntRange(NumPositive, true/*NonNegative*/); 14086 else 14087 return IntRange(std::max(NumPositive + 1, NumNegative), 14088 false/*NonNegative*/); 14089 } 14090 14091 if (const auto *EIT = dyn_cast<BitIntType>(T)) 14092 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 14093 14094 const BuiltinType *BT = cast<BuiltinType>(T); 14095 assert(BT->isInteger()); 14096 14097 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 14098 } 14099 14100 /// Returns the "target" range of a canonical integral type, i.e. 14101 /// the range of values expressible in the type. 14102 /// 14103 /// This matches forValueOfCanonicalType except that enums have the 14104 /// full range of their type, not the range of their enumerators. 14105 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 14106 assert(T->isCanonicalUnqualified()); 14107 14108 if (const VectorType *VT = dyn_cast<VectorType>(T)) 14109 T = VT->getElementType().getTypePtr(); 14110 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 14111 T = CT->getElementType().getTypePtr(); 14112 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 14113 T = AT->getValueType().getTypePtr(); 14114 if (const EnumType *ET = dyn_cast<EnumType>(T)) 14115 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 14116 14117 if (const auto *EIT = dyn_cast<BitIntType>(T)) 14118 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 14119 14120 const BuiltinType *BT = cast<BuiltinType>(T); 14121 assert(BT->isInteger()); 14122 14123 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 14124 } 14125 14126 /// Returns the supremum of two ranges: i.e. 
their conservative merge. 14127 static IntRange join(IntRange L, IntRange R) { 14128 bool Unsigned = L.NonNegative && R.NonNegative; 14129 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 14130 L.NonNegative && R.NonNegative); 14131 } 14132 14133 /// Return the range of a bitwise-AND of the two ranges. 14134 static IntRange bit_and(IntRange L, IntRange R) { 14135 unsigned Bits = std::max(L.Width, R.Width); 14136 bool NonNegative = false; 14137 if (L.NonNegative) { 14138 Bits = std::min(Bits, L.Width); 14139 NonNegative = true; 14140 } 14141 if (R.NonNegative) { 14142 Bits = std::min(Bits, R.Width); 14143 NonNegative = true; 14144 } 14145 return IntRange(Bits, NonNegative); 14146 } 14147 14148 /// Return the range of a sum of the two ranges. 14149 static IntRange sum(IntRange L, IntRange R) { 14150 bool Unsigned = L.NonNegative && R.NonNegative; 14151 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 14152 Unsigned); 14153 } 14154 14155 /// Return the range of a difference of the two ranges. 14156 static IntRange difference(IntRange L, IntRange R) { 14157 // We need a 1-bit-wider range if: 14158 // 1) LHS can be negative: least value can be reduced. 14159 // 2) RHS can be negative: greatest value can be increased. 14160 bool CanWiden = !L.NonNegative || !R.NonNegative; 14161 bool Unsigned = L.NonNegative && R.Width == 0; 14162 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 14163 !Unsigned, 14164 Unsigned); 14165 } 14166 14167 /// Return the range of a product of the two ranges. 14168 static IntRange product(IntRange L, IntRange R) { 14169 // If both LHS and RHS can be negative, we can form 14170 // -2^L * -2^R = 2^(L + R) 14171 // which requires L + R + 1 value bits to represent. 
14172 bool CanWiden = !L.NonNegative && !R.NonNegative; 14173 bool Unsigned = L.NonNegative && R.NonNegative; 14174 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 14175 Unsigned); 14176 } 14177 14178 /// Return the range of a remainder operation between the two ranges. 14179 static IntRange rem(IntRange L, IntRange R) { 14180 // The result of a remainder can't be larger than the result of 14181 // either side. The sign of the result is the sign of the LHS. 14182 bool Unsigned = L.NonNegative; 14183 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 14184 Unsigned); 14185 } 14186 }; 14187 14188 } // namespace 14189 14190 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 14191 unsigned MaxWidth) { 14192 if (value.isSigned() && value.isNegative()) 14193 return IntRange(value.getSignificantBits(), false); 14194 14195 if (value.getBitWidth() > MaxWidth) 14196 value = value.trunc(MaxWidth); 14197 14198 // isNonNegative() just checks the sign bit without considering 14199 // signedness. 14200 return IntRange(value.getActiveBits(), true); 14201 } 14202 14203 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 14204 unsigned MaxWidth) { 14205 if (result.isInt()) 14206 return GetValueRange(C, result.getInt(), MaxWidth); 14207 14208 if (result.isVector()) { 14209 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 14210 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 14211 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 14212 R = IntRange::join(R, El); 14213 } 14214 return R; 14215 } 14216 14217 if (result.isComplexInt()) { 14218 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 14219 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 14220 return IntRange::join(R, I); 14221 } 14222 14223 // This can happen with lossless casts to intptr_t of "based" lvalues. 14224 // Assume it might use arbitrary bits. 
14225 // FIXME: The only reason we need to pass the type in here is to get 14226 // the sign right on this one case. It would be nice if APValue 14227 // preserved this. 14228 assert(result.isLValue() || result.isAddrLabelDiff()); 14229 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 14230 } 14231 14232 static QualType GetExprType(const Expr *E) { 14233 QualType Ty = E->getType(); 14234 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 14235 Ty = AtomicRHS->getValueType(); 14236 return Ty; 14237 } 14238 14239 /// Pseudo-evaluate the given integer expression, estimating the 14240 /// range of values it might take. 14241 /// 14242 /// \param MaxWidth The width to which the value will be truncated. 14243 /// \param Approximate If \c true, return a likely range for the result: in 14244 /// particular, assume that arithmetic on narrower types doesn't leave 14245 /// those types. If \c false, return a range including all possible 14246 /// result values. 14247 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 14248 bool InConstantContext, bool Approximate) { 14249 E = E->IgnoreParens(); 14250 14251 // Try a full evaluation first. 14252 Expr::EvalResult result; 14253 if (E->EvaluateAsRValue(result, C, InConstantContext)) 14254 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 14255 14256 // I think we only want to look through implicit casts here; if the 14257 // user has an explicit widening cast, we should treat the value as 14258 // being of the new, wider type. 
14259 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 14260 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 14261 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext, 14262 Approximate); 14263 14264 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 14265 14266 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 14267 CE->getCastKind() == CK_BooleanToSignedIntegral; 14268 14269 // Assume that non-integer casts can span the full range of the type. 14270 if (!isIntegerCast) 14271 return OutputTypeRange; 14272 14273 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 14274 std::min(MaxWidth, OutputTypeRange.Width), 14275 InConstantContext, Approximate); 14276 14277 // Bail out if the subexpr's range is as wide as the cast type. 14278 if (SubRange.Width >= OutputTypeRange.Width) 14279 return OutputTypeRange; 14280 14281 // Otherwise, we take the smaller width, and we're non-negative if 14282 // either the output type or the subexpr is. 14283 return IntRange(SubRange.Width, 14284 SubRange.NonNegative || OutputTypeRange.NonNegative); 14285 } 14286 14287 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 14288 // If we can fold the condition, just take that operand. 14289 bool CondResult; 14290 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 14291 return GetExprRange(C, 14292 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 14293 MaxWidth, InConstantContext, Approximate); 14294 14295 // Otherwise, conservatively merge. 14296 // GetExprRange requires an integer expression, but a throw expression 14297 // results in a void type. 14298 Expr *E = CO->getTrueExpr(); 14299 IntRange L = E->getType()->isVoidType() 14300 ? IntRange{0, true} 14301 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 14302 E = CO->getFalseExpr(); 14303 IntRange R = E->getType()->isVoidType() 14304 ? 
IntRange{0, true} 14305 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 14306 return IntRange::join(L, R); 14307 } 14308 14309 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 14310 IntRange (*Combine)(IntRange, IntRange) = IntRange::join; 14311 14312 switch (BO->getOpcode()) { 14313 case BO_Cmp: 14314 llvm_unreachable("builtin <=> should have class type"); 14315 14316 // Boolean-valued operations are single-bit and positive. 14317 case BO_LAnd: 14318 case BO_LOr: 14319 case BO_LT: 14320 case BO_GT: 14321 case BO_LE: 14322 case BO_GE: 14323 case BO_EQ: 14324 case BO_NE: 14325 return IntRange::forBoolType(); 14326 14327 // The type of the assignments is the type of the LHS, so the RHS 14328 // is not necessarily the same type. 14329 case BO_MulAssign: 14330 case BO_DivAssign: 14331 case BO_RemAssign: 14332 case BO_AddAssign: 14333 case BO_SubAssign: 14334 case BO_XorAssign: 14335 case BO_OrAssign: 14336 // TODO: bitfields? 14337 return IntRange::forValueOfType(C, GetExprType(E)); 14338 14339 // Simple assignments just pass through the RHS, which will have 14340 // been coerced to the LHS type. 14341 case BO_Assign: 14342 // TODO: bitfields? 14343 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 14344 Approximate); 14345 14346 // Operations with opaque sources are black-listed. 14347 case BO_PtrMemD: 14348 case BO_PtrMemI: 14349 return IntRange::forValueOfType(C, GetExprType(E)); 14350 14351 // Bitwise-and uses the *infinum* of the two source ranges. 14352 case BO_And: 14353 case BO_AndAssign: 14354 Combine = IntRange::bit_and; 14355 break; 14356 14357 // Left shift gets black-listed based on a judgement call. 14358 case BO_Shl: 14359 // ...except that we want to treat '1 << (blah)' as logically 14360 // positive. It's an important idiom. 
14361 if (IntegerLiteral *I 14362 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 14363 if (I->getValue() == 1) { 14364 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 14365 return IntRange(R.Width, /*NonNegative*/ true); 14366 } 14367 } 14368 [[fallthrough]]; 14369 14370 case BO_ShlAssign: 14371 return IntRange::forValueOfType(C, GetExprType(E)); 14372 14373 // Right shift by a constant can narrow its left argument. 14374 case BO_Shr: 14375 case BO_ShrAssign: { 14376 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 14377 Approximate); 14378 14379 // If the shift amount is a positive constant, drop the width by 14380 // that much. 14381 if (std::optional<llvm::APSInt> shift = 14382 BO->getRHS()->getIntegerConstantExpr(C)) { 14383 if (shift->isNonNegative()) { 14384 if (shift->uge(L.Width)) 14385 L.Width = (L.NonNegative ? 0 : 1); 14386 else 14387 L.Width -= shift->getZExtValue(); 14388 } 14389 } 14390 14391 return L; 14392 } 14393 14394 // Comma acts as its right operand. 14395 case BO_Comma: 14396 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 14397 Approximate); 14398 14399 case BO_Add: 14400 if (!Approximate) 14401 Combine = IntRange::sum; 14402 break; 14403 14404 case BO_Sub: 14405 if (BO->getLHS()->getType()->isPointerType()) 14406 return IntRange::forValueOfType(C, GetExprType(E)); 14407 if (!Approximate) 14408 Combine = IntRange::difference; 14409 break; 14410 14411 case BO_Mul: 14412 if (!Approximate) 14413 Combine = IntRange::product; 14414 break; 14415 14416 // The width of a division result is mostly determined by the size 14417 // of the LHS. 14418 case BO_Div: { 14419 // Don't 'pre-truncate' the operands. 14420 unsigned opWidth = C.getIntWidth(GetExprType(E)); 14421 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 14422 Approximate); 14423 14424 // If the divisor is constant, use that. 
14425 if (std::optional<llvm::APSInt> divisor = 14426 BO->getRHS()->getIntegerConstantExpr(C)) { 14427 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 14428 if (log2 >= L.Width) 14429 L.Width = (L.NonNegative ? 0 : 1); 14430 else 14431 L.Width = std::min(L.Width - log2, MaxWidth); 14432 return L; 14433 } 14434 14435 // Otherwise, just use the LHS's width. 14436 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 14437 // could be -1. 14438 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 14439 Approximate); 14440 return IntRange(L.Width, L.NonNegative && R.NonNegative); 14441 } 14442 14443 case BO_Rem: 14444 Combine = IntRange::rem; 14445 break; 14446 14447 // The default behavior is okay for these. 14448 case BO_Xor: 14449 case BO_Or: 14450 break; 14451 } 14452 14453 // Combine the two ranges, but limit the result to the type in which we 14454 // performed the computation. 14455 QualType T = GetExprType(E); 14456 unsigned opWidth = C.getIntWidth(T); 14457 IntRange L = 14458 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 14459 IntRange R = 14460 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 14461 IntRange C = Combine(L, R); 14462 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 14463 C.Width = std::min(C.Width, MaxWidth); 14464 return C; 14465 } 14466 14467 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 14468 switch (UO->getOpcode()) { 14469 // Boolean-valued operations are white-listed. 14470 case UO_LNot: 14471 return IntRange::forBoolType(); 14472 14473 // Operations with opaque sources are black-listed. 
14474 case UO_Deref: 14475 case UO_AddrOf: // should be impossible 14476 return IntRange::forValueOfType(C, GetExprType(E)); 14477 14478 default: 14479 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 14480 Approximate); 14481 } 14482 } 14483 14484 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 14485 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 14486 Approximate); 14487 14488 if (const auto *BitField = E->getSourceBitField()) 14489 return IntRange(BitField->getBitWidthValue(C), 14490 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 14491 14492 return IntRange::forValueOfType(C, GetExprType(E)); 14493 } 14494 14495 static IntRange GetExprRange(ASTContext &C, const Expr *E, 14496 bool InConstantContext, bool Approximate) { 14497 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 14498 Approximate); 14499 } 14500 14501 /// Checks whether the given value, which currently has the given 14502 /// source semantics, has the same value when coerced through the 14503 /// target semantics. 14504 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 14505 const llvm::fltSemantics &Src, 14506 const llvm::fltSemantics &Tgt) { 14507 llvm::APFloat truncated = value; 14508 14509 bool ignored; 14510 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 14511 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 14512 14513 return truncated.bitwiseIsEqual(value); 14514 } 14515 14516 /// Checks whether the given value, which currently has the given 14517 /// source semantics, has the same value when coerced through the 14518 /// target semantics. 14519 /// 14520 /// The value might be a vector of floats (or a complex number). 
static bool IsSameFloatAfterCast(const APValue &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  // Scalar float: defer to the APFloat overload above.
  if (value.isFloat())
    return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);

  // Vector: every element must survive the round-trip.
  if (value.isVector()) {
    for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
      if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
        return false;
    return true;
  }

  // Complex: both the real and imaginary parts must survive.
  assert(value.isComplexFloat());
  return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
          IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
}

// Forward declaration; defined later in this file.
static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC,
                                       bool IsListInit = false);

/// Returns true if a diagnostic about comparing \p E should be suppressed:
/// either E refers to an enum constant, or E was expanded from a macro other
/// than the boolean-literal macros (YES/NO/true/false).
static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
  // Suppress cases where we are comparing against an enum constant.
  if (const DeclRefExpr *DR =
      dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
    if (isa<EnumConstantDecl>(DR->getDecl()))
      return true;

  // Suppress cases where the value is expanded from a macro, unless that macro
  // is how a language represents a boolean literal. This is the case in both C
  // and Objective-C.
  SourceLocation BeginLoc = E->getBeginLoc();
  if (BeginLoc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroName(
        BeginLoc, S.getSourceManager(), S.getLangOpts());
    return MacroName != "YES" && MacroName != "NO" &&
           MacroName != "true" && MacroName != "false";
  }

  return false;
}

/// Returns true if \p E is an integer expression whose value is known to be
/// non-negative: either its type (or the type beneath implicit casts) is
/// unsigned.
static bool isKnownToHaveUnsignedValue(Expr *E) {
  return E->getType()->isIntegerType() &&
         (!E->getType()->isSignedIntegerType() ||
          !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
}

namespace {
/// The promoted range of values of a type.
In general this has the 14571 /// following structure: 14572 /// 14573 /// |-----------| . . . |-----------| 14574 /// ^ ^ ^ ^ 14575 /// Min HoleMin HoleMax Max 14576 /// 14577 /// ... where there is only a hole if a signed type is promoted to unsigned 14578 /// (in which case Min and Max are the smallest and largest representable 14579 /// values). 14580 struct PromotedRange { 14581 // Min, or HoleMax if there is a hole. 14582 llvm::APSInt PromotedMin; 14583 // Max, or HoleMin if there is a hole. 14584 llvm::APSInt PromotedMax; 14585 14586 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 14587 if (R.Width == 0) 14588 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 14589 else if (R.Width >= BitWidth && !Unsigned) { 14590 // Promotion made the type *narrower*. This happens when promoting 14591 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 14592 // Treat all values of 'signed int' as being in range for now. 14593 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 14594 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 14595 } else { 14596 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 14597 .extOrTrunc(BitWidth); 14598 PromotedMin.setIsUnsigned(Unsigned); 14599 14600 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 14601 .extOrTrunc(BitWidth); 14602 PromotedMax.setIsUnsigned(Unsigned); 14603 } 14604 } 14605 14606 // Determine whether this range is contiguous (has no hole). 14607 bool isContiguous() const { return PromotedMin <= PromotedMax; } 14608 14609 // Where a constant value is within the range. 
14610 enum ComparisonResult { 14611 LT = 0x1, 14612 LE = 0x2, 14613 GT = 0x4, 14614 GE = 0x8, 14615 EQ = 0x10, 14616 NE = 0x20, 14617 InRangeFlag = 0x40, 14618 14619 Less = LE | LT | NE, 14620 Min = LE | InRangeFlag, 14621 InRange = InRangeFlag, 14622 Max = GE | InRangeFlag, 14623 Greater = GE | GT | NE, 14624 14625 OnlyValue = LE | GE | EQ | InRangeFlag, 14626 InHole = NE 14627 }; 14628 14629 ComparisonResult compare(const llvm::APSInt &Value) const { 14630 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 14631 Value.isUnsigned() == PromotedMin.isUnsigned()); 14632 if (!isContiguous()) { 14633 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 14634 if (Value.isMinValue()) return Min; 14635 if (Value.isMaxValue()) return Max; 14636 if (Value >= PromotedMin) return InRange; 14637 if (Value <= PromotedMax) return InRange; 14638 return InHole; 14639 } 14640 14641 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 14642 case -1: return Less; 14643 case 0: return PromotedMin == PromotedMax ? 
OnlyValue : Min; 14644 case 1: 14645 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 14646 case -1: return InRange; 14647 case 0: return Max; 14648 case 1: return Greater; 14649 } 14650 } 14651 14652 llvm_unreachable("impossible compare result"); 14653 } 14654 14655 static std::optional<StringRef> 14656 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 14657 if (Op == BO_Cmp) { 14658 ComparisonResult LTFlag = LT, GTFlag = GT; 14659 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 14660 14661 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 14662 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 14663 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 14664 return std::nullopt; 14665 } 14666 14667 ComparisonResult TrueFlag, FalseFlag; 14668 if (Op == BO_EQ) { 14669 TrueFlag = EQ; 14670 FalseFlag = NE; 14671 } else if (Op == BO_NE) { 14672 TrueFlag = NE; 14673 FalseFlag = EQ; 14674 } else { 14675 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 14676 TrueFlag = LT; 14677 FalseFlag = GE; 14678 } else { 14679 TrueFlag = GT; 14680 FalseFlag = LE; 14681 } 14682 if (Op == BO_GE || Op == BO_LE) 14683 std::swap(TrueFlag, FalseFlag); 14684 } 14685 if (R & TrueFlag) 14686 return StringRef("true"); 14687 if (R & FalseFlag) 14688 return StringRef("false"); 14689 return std::nullopt; 14690 } 14691 }; 14692 } 14693 14694 static bool HasEnumType(Expr *E) { 14695 // Strip off implicit integral promotions. 14696 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 14697 if (ICE->getCastKind() != CK_IntegralCast && 14698 ICE->getCastKind() != CK_NoOp) 14699 break; 14700 E = ICE->getSubExpr(); 14701 } 14702 14703 return E->getType()->isEnumeralType(); 14704 } 14705 14706 static int classifyConstantValue(Expr *Constant) { 14707 // The values of this enumeration are used in the diagnostics 14708 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 
14709 enum ConstantValueKind { 14710 Miscellaneous = 0, 14711 LiteralTrue, 14712 LiteralFalse 14713 }; 14714 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 14715 return BL->getValue() ? ConstantValueKind::LiteralTrue 14716 : ConstantValueKind::LiteralFalse; 14717 return ConstantValueKind::Miscellaneous; 14718 } 14719 14720 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E, 14721 Expr *Constant, Expr *Other, 14722 const llvm::APSInt &Value, 14723 bool RhsConstant) { 14724 if (S.inTemplateInstantiation()) 14725 return false; 14726 14727 Expr *OriginalOther = Other; 14728 14729 Constant = Constant->IgnoreParenImpCasts(); 14730 Other = Other->IgnoreParenImpCasts(); 14731 14732 // Suppress warnings on tautological comparisons between values of the same 14733 // enumeration type. There are only two ways we could warn on this: 14734 // - If the constant is outside the range of representable values of 14735 // the enumeration. In such a case, we should warn about the cast 14736 // to enumeration type, not about the comparison. 14737 // - If the constant is the maximum / minimum in-range value. For an 14738 // enumeratin type, such comparisons can be meaningful and useful. 14739 if (Constant->getType()->isEnumeralType() && 14740 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType())) 14741 return false; 14742 14743 IntRange OtherValueRange = GetExprRange( 14744 S.Context, Other, S.isConstantEvaluatedContext(), /*Approximate=*/false); 14745 14746 QualType OtherT = Other->getType(); 14747 if (const auto *AT = OtherT->getAs<AtomicType>()) 14748 OtherT = AT->getValueType(); 14749 IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT); 14750 14751 // Special case for ObjC BOOL on targets where its a typedef for a signed char 14752 // (Namely, macOS). FIXME: IntRange::forValueOfType should do this. 
14753 bool IsObjCSignedCharBool = S.getLangOpts().ObjC && 14754 S.NSAPIObj->isObjCBOOLType(OtherT) && 14755 OtherT->isSpecificBuiltinType(BuiltinType::SChar); 14756 14757 // Whether we're treating Other as being a bool because of the form of 14758 // expression despite it having another type (typically 'int' in C). 14759 bool OtherIsBooleanDespiteType = 14760 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue(); 14761 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool) 14762 OtherTypeRange = OtherValueRange = IntRange::forBoolType(); 14763 14764 // Check if all values in the range of possible values of this expression 14765 // lead to the same comparison outcome. 14766 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(), 14767 Value.isUnsigned()); 14768 auto Cmp = OtherPromotedValueRange.compare(Value); 14769 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant); 14770 if (!Result) 14771 return false; 14772 14773 // Also consider the range determined by the type alone. This allows us to 14774 // classify the warning under the proper diagnostic group. 14775 bool TautologicalTypeCompare = false; 14776 { 14777 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(), 14778 Value.isUnsigned()); 14779 auto TypeCmp = OtherPromotedTypeRange.compare(Value); 14780 if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp, 14781 RhsConstant)) { 14782 TautologicalTypeCompare = true; 14783 Cmp = TypeCmp; 14784 Result = TypeResult; 14785 } 14786 } 14787 14788 // Don't warn if the non-constant operand actually always evaluates to the 14789 // same value. 14790 if (!TautologicalTypeCompare && OtherValueRange.Width == 0) 14791 return false; 14792 14793 // Suppress the diagnostic for an in-range comparison if the constant comes 14794 // from a macro or enumerator. We don't want to diagnose 14795 // 14796 // some_long_value <= INT_MAX 14797 // 14798 // when sizeof(int) == sizeof(long). 
14799 bool InRange = Cmp & PromotedRange::InRangeFlag; 14800 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 14801 return false; 14802 14803 // A comparison of an unsigned bit-field against 0 is really a type problem, 14804 // even though at the type level the bit-field might promote to 'signed int'. 14805 if (Other->refersToBitField() && InRange && Value == 0 && 14806 Other->getType()->isUnsignedIntegerOrEnumerationType()) 14807 TautologicalTypeCompare = true; 14808 14809 // If this is a comparison to an enum constant, include that 14810 // constant in the diagnostic. 14811 const EnumConstantDecl *ED = nullptr; 14812 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 14813 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 14814 14815 // Should be enough for uint128 (39 decimal digits) 14816 SmallString<64> PrettySourceValue; 14817 llvm::raw_svector_ostream OS(PrettySourceValue); 14818 if (ED) { 14819 OS << '\'' << *ED << "' (" << Value << ")"; 14820 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 14821 Constant->IgnoreParenImpCasts())) { 14822 OS << (BL->getValue() ? "YES" : "NO"); 14823 } else { 14824 OS << Value; 14825 } 14826 14827 if (!TautologicalTypeCompare) { 14828 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 14829 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 14830 << E->getOpcodeStr() << OS.str() << *Result 14831 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 14832 return true; 14833 } 14834 14835 if (IsObjCSignedCharBool) { 14836 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 14837 S.PDiag(diag::warn_tautological_compare_objc_bool) 14838 << OS.str() << *Result); 14839 return true; 14840 } 14841 14842 // FIXME: We use a somewhat different formatting for the in-range cases and 14843 // cases involving boolean values for historical reasons. We should pick a 14844 // consistent way of presenting these diagnostics. 
14845 if (!InRange || Other->isKnownToHaveBooleanValue()) { 14846 14847 S.DiagRuntimeBehavior( 14848 E->getOperatorLoc(), E, 14849 S.PDiag(!InRange ? diag::warn_out_of_range_compare 14850 : diag::warn_tautological_bool_compare) 14851 << OS.str() << classifyConstantValue(Constant) << OtherT 14852 << OtherIsBooleanDespiteType << *Result 14853 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 14854 } else { 14855 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 14856 unsigned Diag = 14857 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 14858 ? (HasEnumType(OriginalOther) 14859 ? diag::warn_unsigned_enum_always_true_comparison 14860 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 14861 : diag::warn_unsigned_always_true_comparison) 14862 : diag::warn_tautological_constant_compare; 14863 14864 S.Diag(E->getOperatorLoc(), Diag) 14865 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 14866 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 14867 } 14868 14869 return true; 14870 } 14871 14872 /// Analyze the operands of the given comparison. Implements the 14873 /// fallback case from AnalyzeComparison. 14874 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 14875 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 14876 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 14877 } 14878 14879 /// Implements -Wsign-compare. 14880 /// 14881 /// \param E the binary operator to check for warnings 14882 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 14883 // The type the comparison is being performed in. 14884 QualType T = E->getLHS()->getType(); 14885 14886 // Only analyze comparison operators where both sides have been converted to 14887 // the same type. 
14888 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 14889 return AnalyzeImpConvsInComparison(S, E); 14890 14891 // Don't analyze value-dependent comparisons directly. 14892 if (E->isValueDependent()) 14893 return AnalyzeImpConvsInComparison(S, E); 14894 14895 Expr *LHS = E->getLHS(); 14896 Expr *RHS = E->getRHS(); 14897 14898 if (T->isIntegralType(S.Context)) { 14899 std::optional<llvm::APSInt> RHSValue = 14900 RHS->getIntegerConstantExpr(S.Context); 14901 std::optional<llvm::APSInt> LHSValue = 14902 LHS->getIntegerConstantExpr(S.Context); 14903 14904 // We don't care about expressions whose result is a constant. 14905 if (RHSValue && LHSValue) 14906 return AnalyzeImpConvsInComparison(S, E); 14907 14908 // We only care about expressions where just one side is literal 14909 if ((bool)RHSValue ^ (bool)LHSValue) { 14910 // Is the constant on the RHS or LHS? 14911 const bool RhsConstant = (bool)RHSValue; 14912 Expr *Const = RhsConstant ? RHS : LHS; 14913 Expr *Other = RhsConstant ? LHS : RHS; 14914 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 14915 14916 // Check whether an integer constant comparison results in a value 14917 // of 'true' or 'false'. 14918 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 14919 return AnalyzeImpConvsInComparison(S, E); 14920 } 14921 } 14922 14923 if (!T->hasUnsignedIntegerRepresentation()) { 14924 // We don't do anything special if this isn't an unsigned integral 14925 // comparison: we're only interested in integral comparisons, and 14926 // signed comparisons only happen in cases we don't care to warn about. 
14927 return AnalyzeImpConvsInComparison(S, E); 14928 } 14929 14930 LHS = LHS->IgnoreParenImpCasts(); 14931 RHS = RHS->IgnoreParenImpCasts(); 14932 14933 if (!S.getLangOpts().CPlusPlus) { 14934 // Avoid warning about comparison of integers with different signs when 14935 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 14936 // the type of `E`. 14937 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 14938 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 14939 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 14940 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 14941 } 14942 14943 // Check to see if one of the (unmodified) operands is of different 14944 // signedness. 14945 Expr *signedOperand, *unsignedOperand; 14946 if (LHS->getType()->hasSignedIntegerRepresentation()) { 14947 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 14948 "unsigned comparison between two signed integer expressions?"); 14949 signedOperand = LHS; 14950 unsignedOperand = RHS; 14951 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 14952 signedOperand = RHS; 14953 unsignedOperand = LHS; 14954 } else { 14955 return AnalyzeImpConvsInComparison(S, E); 14956 } 14957 14958 // Otherwise, calculate the effective range of the signed operand. 14959 IntRange signedRange = 14960 GetExprRange(S.Context, signedOperand, S.isConstantEvaluatedContext(), 14961 /*Approximate=*/true); 14962 14963 // Go ahead and analyze implicit conversions in the operands. Note 14964 // that we skip the implicit conversions on both sides. 14965 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 14966 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 14967 14968 // If the signed range is non-negative, -Wsign-compare won't fire. 
14969 if (signedRange.NonNegative) 14970 return; 14971 14972 // For (in)equality comparisons, if the unsigned operand is a 14973 // constant which cannot collide with a overflowed signed operand, 14974 // then reinterpreting the signed operand as unsigned will not 14975 // change the result of the comparison. 14976 if (E->isEqualityOp()) { 14977 unsigned comparisonWidth = S.Context.getIntWidth(T); 14978 IntRange unsignedRange = 14979 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluatedContext(), 14980 /*Approximate=*/true); 14981 14982 // We should never be unable to prove that the unsigned operand is 14983 // non-negative. 14984 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 14985 14986 if (unsignedRange.Width < comparisonWidth) 14987 return; 14988 } 14989 14990 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 14991 S.PDiag(diag::warn_mixed_sign_comparison) 14992 << LHS->getType() << RHS->getType() 14993 << LHS->getSourceRange() << RHS->getSourceRange()); 14994 } 14995 14996 /// Analyzes an attempt to assign the given value to a bitfield. 14997 /// 14998 /// Returns true if there was something fishy about the attempt. 14999 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 15000 SourceLocation InitLoc) { 15001 assert(Bitfield->isBitField()); 15002 if (Bitfield->isInvalidDecl()) 15003 return false; 15004 15005 // White-list bool bitfields. 15006 QualType BitfieldType = Bitfield->getType(); 15007 if (BitfieldType->isBooleanType()) 15008 return false; 15009 15010 if (BitfieldType->isEnumeralType()) { 15011 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 15012 // If the underlying enum type was not explicitly specified as an unsigned 15013 // type and the enum contain only positive values, MSVC++ will cause an 15014 // inconsistency by storing this as a signed type. 
15015 if (S.getLangOpts().CPlusPlus11 && 15016 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 15017 BitfieldEnumDecl->getNumPositiveBits() > 0 && 15018 BitfieldEnumDecl->getNumNegativeBits() == 0) { 15019 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 15020 << BitfieldEnumDecl; 15021 } 15022 } 15023 15024 // Ignore value- or type-dependent expressions. 15025 if (Bitfield->getBitWidth()->isValueDependent() || 15026 Bitfield->getBitWidth()->isTypeDependent() || 15027 Init->isValueDependent() || 15028 Init->isTypeDependent()) 15029 return false; 15030 15031 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 15032 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 15033 15034 Expr::EvalResult Result; 15035 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 15036 Expr::SE_AllowSideEffects)) { 15037 // The RHS is not constant. If the RHS has an enum type, make sure the 15038 // bitfield is wide enough to hold all the values of the enum without 15039 // truncation. 15040 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 15041 EnumDecl *ED = EnumTy->getDecl(); 15042 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 15043 15044 // Enum types are implicitly signed on Windows, so check if there are any 15045 // negative enumerators to see if the enum was intended to be signed or 15046 // not. 15047 bool SignedEnum = ED->getNumNegativeBits() > 0; 15048 15049 // Check for surprising sign changes when assigning enum values to a 15050 // bitfield of different signedness. If the bitfield is signed and we 15051 // have exactly the right number of bits to store this unsigned enum, 15052 // suggest changing the enum to an unsigned type. This typically happens 15053 // on Windows where unfixed enums always use an underlying type of 'int'. 
15054 unsigned DiagID = 0; 15055 if (SignedEnum && !SignedBitfield) { 15056 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 15057 } else if (SignedBitfield && !SignedEnum && 15058 ED->getNumPositiveBits() == FieldWidth) { 15059 DiagID = diag::warn_signed_bitfield_enum_conversion; 15060 } 15061 15062 if (DiagID) { 15063 S.Diag(InitLoc, DiagID) << Bitfield << ED; 15064 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 15065 SourceRange TypeRange = 15066 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 15067 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 15068 << SignedEnum << TypeRange; 15069 } 15070 15071 // Compute the required bitwidth. If the enum has negative values, we need 15072 // one more bit than the normal number of positive bits to represent the 15073 // sign bit. 15074 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 15075 ED->getNumNegativeBits()) 15076 : ED->getNumPositiveBits(); 15077 15078 // Check the bitwidth. 15079 if (BitsNeeded > FieldWidth) { 15080 Expr *WidthExpr = Bitfield->getBitWidth(); 15081 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 15082 << Bitfield << ED; 15083 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 15084 << BitsNeeded << ED << WidthExpr->getSourceRange(); 15085 } 15086 } 15087 15088 return false; 15089 } 15090 15091 llvm::APSInt Value = Result.Val.getInt(); 15092 15093 unsigned OriginalWidth = Value.getBitWidth(); 15094 15095 // In C, the macro 'true' from stdbool.h will evaluate to '1'; To reduce 15096 // false positives where the user is demonstrating they intend to use the 15097 // bit-field as a Boolean, check to see if the value is 1 and we're assigning 15098 // to a one-bit bit-field to see if the value came from a macro named 'true'. 
15099 bool OneAssignedToOneBitBitfield = FieldWidth == 1 && Value == 1; 15100 if (OneAssignedToOneBitBitfield && !S.LangOpts.CPlusPlus) { 15101 SourceLocation MaybeMacroLoc = OriginalInit->getBeginLoc(); 15102 if (S.SourceMgr.isInSystemMacro(MaybeMacroLoc) && 15103 S.findMacroSpelling(MaybeMacroLoc, "true")) 15104 return false; 15105 } 15106 15107 if (!Value.isSigned() || Value.isNegative()) 15108 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 15109 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 15110 OriginalWidth = Value.getSignificantBits(); 15111 15112 if (OriginalWidth <= FieldWidth) 15113 return false; 15114 15115 // Compute the value which the bitfield will contain. 15116 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 15117 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 15118 15119 // Check whether the stored value is equal to the original value. 15120 TruncatedValue = TruncatedValue.extend(OriginalWidth); 15121 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 15122 return false; 15123 15124 std::string PrettyValue = toString(Value, 10); 15125 std::string PrettyTrunc = toString(TruncatedValue, 10); 15126 15127 S.Diag(InitLoc, OneAssignedToOneBitBitfield 15128 ? diag::warn_impcast_single_bit_bitield_precision_constant 15129 : diag::warn_impcast_bitfield_precision_constant) 15130 << PrettyValue << PrettyTrunc << OriginalInit->getType() 15131 << Init->getSourceRange(); 15132 15133 return true; 15134 } 15135 15136 /// Analyze the given simple or compound assignment for warning-worthy 15137 /// operations. 15138 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 15139 // Just recurse on the LHS. 15140 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 15141 15142 // We want to recurse on the RHS as normal unless we're assigning to 15143 // a bitfield. 
15144 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 15145 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 15146 E->getOperatorLoc())) { 15147 // Recurse, ignoring any implicit conversions on the RHS. 15148 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 15149 E->getOperatorLoc()); 15150 } 15151 } 15152 15153 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 15154 15155 // Diagnose implicitly sequentially-consistent atomic assignment. 15156 if (E->getLHS()->getType()->isAtomicType()) 15157 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 15158 } 15159 15160 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 15161 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 15162 SourceLocation CContext, unsigned diag, 15163 bool pruneControlFlow = false) { 15164 if (pruneControlFlow) { 15165 S.DiagRuntimeBehavior(E->getExprLoc(), E, 15166 S.PDiag(diag) 15167 << SourceType << T << E->getSourceRange() 15168 << SourceRange(CContext)); 15169 return; 15170 } 15171 S.Diag(E->getExprLoc(), diag) 15172 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 15173 } 15174 15175 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 
15176 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 15177 SourceLocation CContext, 15178 unsigned diag, bool pruneControlFlow = false) { 15179 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 15180 } 15181 15182 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 15183 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 15184 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 15185 } 15186 15187 static void adornObjCBoolConversionDiagWithTernaryFixit( 15188 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 15189 Expr *Ignored = SourceExpr->IgnoreImplicit(); 15190 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 15191 Ignored = OVE->getSourceExpr(); 15192 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 15193 isa<BinaryOperator>(Ignored) || 15194 isa<CXXOperatorCallExpr>(Ignored); 15195 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 15196 if (NeedsParens) 15197 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 15198 << FixItHint::CreateInsertion(EndLoc, ")"); 15199 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 15200 } 15201 15202 /// Diagnose an implicit cast from a floating point value to an integer value. 
/// If the value is a compile-time constant, the diagnostic shows the value
/// that will actually be stored and distinguishes exact, rounded, zeroing,
/// saturating-looking, and out-of-range (undefined behavior) conversions.
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
                                    SourceLocation CContext) {
  const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
  // Inside template instantiations, route diagnostics through
  // DiagRuntimeBehavior so unreachable instantiation-dependent code does not
  // warn.
  const bool PruneWarnings = S.inTemplateInstantiation();

  Expr *InnerE = E->IgnoreParenImpCasts();
  // We also want to warn on, e.g., "int i = -1.234"
  if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
    if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
      InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();

  const bool IsLiteral =
      isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);

  llvm::APFloat Value(0.0);
  bool IsConstant =
      E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
  if (!IsConstant) {
    // Non-constant source: only a generic float->integer warning is possible
    // (or the ObjC BOOL variant with its '? YES : NO' fix-it).
    if (isObjCSignedCharBool(S, T)) {
      return adornObjCBoolConversionDiagWithTernaryFixit(
          S, E,
          S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
              << E->getType());
    }

    return DiagnoseImpCast(S, E, T, CContext,
                           diag::warn_impcast_float_integer, PruneWarnings);
  }

  bool isExact = false;

  llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
                            T->hasUnsignedIntegerRepresentation());
  llvm::APFloat::opStatus Result = Value.convertToInteger(
      IntegerValue, llvm::APFloat::rmTowardZero, &isExact);

  // FIXME: Force the precision of the source value down so we don't print
  // digits which are usually useless (we don't really care here if we
  // truncate a digit by accident in edge cases). Ideally, APFloat::toString
  // would automatically print the shortest representation, but it's a bit
  // tricky to implement.
  SmallString<16> PrettySourceValue;
  unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
  precision = (precision * 59 + 195) / 196;
  Value.toString(PrettySourceValue, precision);

  // Constant values other than 0/1 assigned to an ObjC BOOL get the
  // fix-it-bearing diagnostic instead of the generic paths below.
  if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) {
    return adornObjCBoolConversionDiagWithTernaryFixit(
        S, E,
        S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
            << PrettySourceValue);
  }

  if (Result == llvm::APFloat::opOK && isExact) {
    // Exact conversions of literals (e.g. 1.0 -> 1) are deliberate; stay
    // quiet.
    if (IsLiteral) return;
    return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
                           PruneWarnings);
  }

  // Conversion of a floating-point value to a non-bool integer where the
  // integral part cannot be represented by the integer type is undefined.
  if (!IsBool && Result == llvm::APFloat::opInvalidOp)
    return DiagnoseImpCast(
        S, E, T, CContext,
        IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
                  : diag::warn_impcast_float_to_integer_out_of_range,
        PruneWarnings);

  unsigned DiagID = 0;
  if (IsLiteral) {
    // Warn on floating point literal to integer.
    DiagID = diag::warn_impcast_literal_float_to_integer;
  } else if (IntegerValue == 0) {
    if (Value.isZero()) {  // Skip -0.0 to 0 conversion.
      return DiagnoseImpCast(S, E, T, CContext,
                             diag::warn_impcast_float_integer, PruneWarnings);
    }
    // Warn on non-zero to zero conversion.
    DiagID = diag::warn_impcast_float_to_integer_zero;
  } else {
    // A result pinned at the integer type's extreme suggests the true value
    // was clamped; anything else gets only the generic warning.
    if (IntegerValue.isUnsigned()) {
      if (!IntegerValue.isMaxValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    } else {  // IntegerValue.isSigned()
      if (!IntegerValue.isMaxSignedValue() &&
          !IntegerValue.isMinSignedValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    }
    // Warn on evaluatable floating point expression to integer conversion.
    DiagID = diag::warn_impcast_float_to_integer;
  }

  SmallString<16> PrettyTargetValue;
  if (IsBool)
    PrettyTargetValue = Value.isZero() ? "false" : "true";
  else
    IntegerValue.toString(PrettyTargetValue);

  if (PruneWarnings) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(DiagID)
                              << E->getType() << T.getUnqualifiedType()
                              << PrettySourceValue << PrettyTargetValue
                              << E->getSourceRange() << SourceRange(CContext));
  } else {
    S.Diag(E->getExprLoc(), DiagID)
        << E->getType() << T.getUnqualifiedType() << PrettySourceValue
        << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
  }
}

/// Analyze the given compound assignment for the possible losing of
/// floating-point precision.
static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
  assert(isa<CompoundAssignOperator>(E) &&
         "Must be compound assignment operation");
  // Recurse on the LHS and RHS in here
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // Compound assignment to an _Atomic lvalue is implicitly seq_cst.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);

  // Now check the outermost expression
  const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
  const auto *RBT = cast<CompoundAssignOperator>(E)
                        ->getComputationResultType()
                        ->getAs<BuiltinType>();

  // The below checks assume source is floating point.
  if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;

  // If source is floating point but target is an integer.
  if (ResultBT->isInteger())
    return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
                           E->getExprLoc(), diag::warn_impcast_float_integer);

  if (!ResultBT->isFloatingPoint())
    return;

  // If both source and target are floating points, warn about losing precision.
  int Order = S.getASTContext().getFloatingTypeSemanticOrder(
      QualType(ResultBT, 0), QualType(RBT, 0));
  if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
    // warn about dropping FP rank.
    DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
                    diag::warn_impcast_float_result_precision);
}

/// Render \p Value as a decimal string after coercing it to the signedness
/// and width of \p Range; a zero-width range always prints as "0".
static std::string PrettyPrintInRange(const llvm::APSInt &Value,
                                      IntRange Range) {
  if (!Range.Width) return "0";

  llvm::APSInt ValueInRange = Value;
  ValueInRange.setIsSigned(!Range.NonNegative);
  ValueInRange = ValueInRange.trunc(Range.Width);
  return toString(ValueInRange, 10);
}

/// Returns true if \p Ex is an implicit conversion between 'bool' and a
/// floating-point type: floating-point -> bool when \p ToBool is true,
/// bool -> floating-point otherwise. Dependent targets never match.
static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
  if (!isa<ImplicitCastExpr>(Ex))
    return false;

  Expr *InnerE = Ex->IgnoreParenImpCasts();
  const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
  const Type *Source =
      S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
  if (Target->isDependentType())
    return false;

  const BuiltinType *FloatCandidateBT =
      dyn_cast<BuiltinType>(ToBool ? Source : Target);
  const Type *BoolCandidateType = ToBool ? Target : Source;

  return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
          FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
}

/// Warn about a floating-point -> bool argument conversion when an adjacent
/// argument undergoes the opposite (bool -> floating-point) conversion, which
/// suggests the two arguments were passed in swapped order.
static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
                                             SourceLocation CC) {
  unsigned NumArgs = TheCall->getNumArgs();
  for (unsigned i = 0; i < NumArgs; ++i) {
    Expr *CurrA = TheCall->getArg(i);
    if (!IsImplicitBoolFloatConversion(S, CurrA, true))
      continue;

    // Look at the immediate neighbors only.
    bool IsSwapped = ((i > 0) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
    IsSwapped |= ((i < (NumArgs - 1)) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
    if (IsSwapped) {
      // Warn on this floating-point to bool conversion.
      DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
                      CurrA->getType(), CC,
                      diag::warn_impcast_floating_point_to_bool);
    }
  }
}

/// Diagnose an implicit conversion of NULL / nullptr to a non-pointer scalar
/// type \p T, attaching a fix-it that replaces the null with a zero literal.
static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
                                   SourceLocation CC) {
  if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
                        E->getExprLoc()))
    return;

  // Don't warn on functions which have return type nullptr_t.
  if (isa<CallExpr>(E))
    return;

  // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
  const Expr *NewE = E->IgnoreParenImpCasts();
  bool IsGNUNullExpr = isa<GNUNullExpr>(NewE);
  bool HasNullPtrType = NewE->getType()->isNullPtrType();
  if (!IsGNUNullExpr && !HasNullPtrType)
    return;

  // Return if target type is a safe conversion.
  if (T->isAnyPointerType() || T->isBlockPointerType() ||
      T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
    return;

  SourceLocation Loc = E->getSourceRange().getBegin();

  // Venture through the macro stacks to get to the source of macro arguments.
  // The new location is a better location than the complete location that was
  // passed in.
  Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
  CC = S.SourceMgr.getTopMacroCallerLoc(CC);

  // __null is usually wrapped in a macro. Go up a macro if that is the case.
  if (IsGNUNullExpr && Loc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
        Loc, S.SourceMgr, S.getLangOpts());
    if (MacroName == "NULL")
      Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
  }

  // Only warn if the null and context location are in the same macro expansion.
  if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
    return;

  S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
      << HasNullPtrType << T << SourceRange(CC)
      << FixItHint::CreateReplacement(Loc,
                                      S.getFixItZeroLiteralForType(T, Loc));
}

// Forward declarations for the mutually recursive collection-literal checks.
static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
                                  ObjCArrayLiteral *ArrayLiteral);

static void
checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
                           ObjCDictionaryLiteral *DictionaryLiteral);

/// Check a single element within a collection literal against the
/// target element type.
static void checkObjCCollectionLiteralElement(Sema &S,
                                              QualType TargetElementType,
                                              Expr *Element,
                                              unsigned ElementKind) {
  // Skip a bitcast to 'id' or qualified 'id'.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
    if (ICE->getCastKind() == CK_BitCast &&
        ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
      Element = ICE->getSubExpr();
  }

  QualType ElementType = Element->getType();
  ExprResult ElementResult(Element);
  if (ElementType->getAs<ObjCObjectPointerType>() &&
      S.CheckSingleAssignmentConstraints(TargetElementType,
                                         ElementResult,
                                         false, false)
        != Sema::Compatible) {
    S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
        << ElementType << ElementKind << TargetElementType
        << Element->getSourceRange();
  }

  // Recurse into nested collection literals.
  if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
    checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
  else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
    checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
}

/// Check an Objective-C array literal being converted to the given
/// target type.
15494 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 15495 ObjCArrayLiteral *ArrayLiteral) { 15496 if (!S.NSArrayDecl) 15497 return; 15498 15499 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 15500 if (!TargetObjCPtr) 15501 return; 15502 15503 if (TargetObjCPtr->isUnspecialized() || 15504 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 15505 != S.NSArrayDecl->getCanonicalDecl()) 15506 return; 15507 15508 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 15509 if (TypeArgs.size() != 1) 15510 return; 15511 15512 QualType TargetElementType = TypeArgs[0]; 15513 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 15514 checkObjCCollectionLiteralElement(S, TargetElementType, 15515 ArrayLiteral->getElement(I), 15516 0); 15517 } 15518 } 15519 15520 /// Check an Objective-C dictionary literal being converted to the given 15521 /// target type. 15522 static void 15523 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 15524 ObjCDictionaryLiteral *DictionaryLiteral) { 15525 if (!S.NSDictionaryDecl) 15526 return; 15527 15528 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 15529 if (!TargetObjCPtr) 15530 return; 15531 15532 if (TargetObjCPtr->isUnspecialized() || 15533 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 15534 != S.NSDictionaryDecl->getCanonicalDecl()) 15535 return; 15536 15537 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 15538 if (TypeArgs.size() != 2) 15539 return; 15540 15541 QualType TargetKeyType = TypeArgs[0]; 15542 QualType TargetObjectType = TypeArgs[1]; 15543 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 15544 auto Element = DictionaryLiteral->getKeyValueElement(I); 15545 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 15546 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 15547 } 15548 } 15549 15550 // Helper function to filter out cases for constant width constant conversion. 
// Don't warn on char array initialization or for non-decimal values.
static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
                                          SourceLocation CC) {
  // If initializing from a constant, and the constant starts with '0',
  // then it is a binary, octal, or hexadecimal. Allow these constants
  // to fill all the bits, even if there is a sign change.
  if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
    const char FirstLiteralCharacter =
        S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
    if (FirstLiteralCharacter == '0')
      return false;
  }

  // If the CC location points to a '{', and the type is char, then assume
  // it is an array initialization.
  if (CC.isValid() && T->isCharType()) {
    const char FirstContextCharacter =
        S.getSourceManager().getCharacterData(CC)[0];
    if (FirstContextCharacter == '{')
      return false;
  }

  return true;
}

// Returns E as an IntegerLiteral, looking through a unary minus
// (so "-5" also yields the literal 5); null otherwise.
static const IntegerLiteral *getIntegerLiteral(Expr *E) {
  const auto *IL = dyn_cast<IntegerLiteral>(E);
  if (!IL) {
    if (auto *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Minus)
        return dyn_cast<IntegerLiteral>(UO->getSubExpr());
    }
  }

  return IL;
}

// Diagnose integer expressions used in a boolean context that are always
// true/false, e.g. left shifts of constants and x ? 2 : 3.
static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
  E = E->IgnoreParenImpCasts();
  SourceLocation ExprLoc = E->getExprLoc();

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    BinaryOperator::Opcode Opc = BO->getOpcode();
    Expr::EvalResult Result;
    // Do not diagnose unsigned shifts.
    if (Opc == BO_Shl) {
      const auto *LHS = getIntegerLiteral(BO->getLHS());
      const auto *RHS = getIntegerLiteral(BO->getRHS());
      if (LHS && LHS->getValue() == 0)
        S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
      else if (!E->isValueDependent() && LHS && RHS &&
               RHS->getValue().isNonNegative() &&
               E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
        S.Diag(ExprLoc, diag::warn_left_shift_always)
            << (Result.Val.getInt() != 0);
      else if (E->getType()->isSignedIntegerType())
        S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
    }
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
    const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
    if (!LHS || !RHS)
      return;
    if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
        (RHS->getValue() == 0 || RHS->getValue() == 1))
      // Do not diagnose common idioms.
      return;
    if (LHS->getValue() != 0 && RHS->getValue() != 0)
      S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
  }
}

// Central implicit-conversion checker: diagnoses conversions from E's type
// to T (bool conversions, FP narrowing, fixed-point overflow, integer
// precision/sign loss, enum mismatches, ...). ICContext, when non-null, is
// set when a sign-conversion warning is emitted inside a conditional
// operand; IsListInit suppresses int->float precision warnings that C++11
// narrowing checks already cover.
static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
                                    SourceLocation CC,
                                    bool *ICContext = nullptr,
                                    bool IsListInit = false) {
  if (E->isTypeDependent() || E->isValueDependent()) return;

  const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
  const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
  if (Source == Target) return;
  if (Target->isDependentType()) return;

  // If the conversion context location is invalid don't complain. We also
  // don't want to emit a warning if the issue occurs from the expansion of
  // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
  // delay this check as long as possible. Once we detect we are in that
  // scenario, we just return.
  if (CC.isInvalid())
    return;

  if (Source->isAtomicType())
    S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);

  // Diagnose implicit casts to bool.
  if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
    if (isa<StringLiteral>(E))
      // Warn on string literal to bool. Checks for string literals in logical
      // and expressions, for instance, assert(0 && "error here"), are
      // prevented by a check in AnalyzeImplicitConversions().
      return DiagnoseImpCast(S, E, T, CC,
                             diag::warn_impcast_string_literal_to_bool);
    if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
        isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
      // This covers the literal expressions that evaluate to Objective-C
      // objects.
      return DiagnoseImpCast(S, E, T, CC,
                             diag::warn_impcast_objective_c_literal_to_bool);
    }
    if (Source->isPointerType() || Source->canDecayToPointerType()) {
      // Warn on pointer to bool conversion that is always true.
      S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
                                     SourceRange(CC));
    }
  }

  // If we're converting a constant to an ObjC BOOL on a platform where BOOL
  // is a typedef for signed char (macOS), then that constant value has to be 1
  // or 0.
  if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) {
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, S.getASTContext(),
                         Expr::SE_AllowSideEffects)) {
      if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) {
        adornObjCBoolConversionDiagWithTernaryFixit(
            S, E,
            S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool)
                << toString(Result.Val.getInt(), 10));
      }
      return;
    }
  }

  // Check implicit casts from Objective-C collection literals to specialized
  // collection types, e.g., NSArray<NSString *> *.
  if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
    checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral);
  else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E))
    checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);

  // Strip vector types.
  if (isa<VectorType>(Source)) {
    if (Target->isSveVLSBuiltinType() &&
        (S.Context.areCompatibleSveTypes(QualType(Target, 0),
                                         QualType(Source, 0)) ||
         S.Context.areLaxCompatibleSveTypes(QualType(Target, 0),
                                            QualType(Source, 0))))
      return;

    if (Target->isRVVVLSBuiltinType() &&
        (S.Context.areCompatibleRVVTypes(QualType(Target, 0),
                                         QualType(Source, 0)) ||
         S.Context.areLaxCompatibleRVVTypes(QualType(Target, 0),
                                            QualType(Source, 0))))
      return;

    if (!isa<VectorType>(Target)) {
      if (S.SourceMgr.isInSystemMacro(CC))
        return;
      return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
    }

    // If the vector cast is cast between two vectors of the same size, it is
    // a bitcast, not a conversion.
    if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
      return;

    Source = cast<VectorType>(Source)->getElementType().getTypePtr();
    Target = cast<VectorType>(Target)->getElementType().getTypePtr();
  }
  if (auto VecTy = dyn_cast<VectorType>(Target))
    Target = VecTy->getElementType().getTypePtr();

  // Strip complex types.
  if (isa<ComplexType>(Source)) {
    if (!isa<ComplexType>(Target)) {
      if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType())
        return;

      return DiagnoseImpCast(S, E, T, CC,
                             S.getLangOpts().CPlusPlus
                                 ? diag::err_impcast_complex_scalar
                                 : diag::warn_impcast_complex_scalar);
    }

    Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
    Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
  }

  const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
  const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);

  // Strip SVE vector types
  if (SourceBT && SourceBT->isSveVLSBuiltinType()) {
    // Need the original target type for vector type checks
    const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr();
    // Handle conversion from scalable to fixed when msve-vector-bits is
    // specified
    if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0),
                                        QualType(Source, 0)) ||
        S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0),
                                           QualType(Source, 0)))
      return;

    // If the vector cast is cast between two vectors of the same size, it is
    // a bitcast, not a conversion.
    if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
      return;

    Source = SourceBT->getSveEltType(S.Context).getTypePtr();
  }

  if (TargetBT && TargetBT->isSveVLSBuiltinType())
    Target = TargetBT->getSveEltType(S.Context).getTypePtr();

  // If the source is floating point...
  if (SourceBT && SourceBT->isFloatingPoint()) {
    // ...and the target is floating point...
    if (TargetBT && TargetBT->isFloatingPoint()) {
      // ...then warn if we're dropping FP rank.

      int Order = S.getASTContext().getFloatingTypeSemanticOrder(
          QualType(SourceBT, 0), QualType(TargetBT, 0));
      if (Order > 0) {
        // Don't warn about float constants that are precisely
        // representable in the target type.
        Expr::EvalResult result;
        if (E->EvaluateAsRValue(result, S.Context)) {
          // Value might be a float, a float vector, or a float complex.
          if (IsSameFloatAfterCast(result.Val,
                  S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
                  S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
            return;
        }

        if (S.SourceMgr.isInSystemMacro(CC))
          return;

        DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
      }
      // ... or possibly if we're increasing rank, too
      else if (Order < 0) {
        if (S.SourceMgr.isInSystemMacro(CC))
          return;

        DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
      }
      return;
    }

    // If the target is integral, always warn.
    if (TargetBT && TargetBT->isInteger()) {
      if (S.SourceMgr.isInSystemMacro(CC))
        return;

      DiagnoseFloatingImpCast(S, E, T, CC);
    }

    // Detect the case where a call result is converted from floating-point
    // to bool, and the final argument to the call is converted from bool, to
    // discover this typo:
    //
    //    bool b = fabs(x < 1.0);  // should be "bool b = fabs(x) < 1.0;"
    //
    // FIXME: This is an incredibly special case; is there some more general
    // way to detect this class of misplaced-parentheses bug?
    if (Target->isBooleanType() && isa<CallExpr>(E)) {
      // Check last argument of function call to see if it is an
      // implicit cast from a type matching the type the result
      // is being cast to.
      CallExpr *CEx = cast<CallExpr>(E);
      if (unsigned NumArgs = CEx->getNumArgs()) {
        Expr *LastA = CEx->getArg(NumArgs - 1);
        Expr *InnerE = LastA->IgnoreParenImpCasts();
        if (isa<ImplicitCastExpr>(LastA) &&
            InnerE->getType()->isBooleanType()) {
          // Warn on this floating-point to bool conversion
          DiagnoseImpCast(S, E, T, CC,
                          diag::warn_impcast_floating_point_to_bool);
        }
      }
    }
    return;
  }

  // Valid casts involving fixed point types should be accounted for here.
  if (Source->isFixedPointType()) {
    if (Target->isUnsaturatedFixedPointType()) {
      Expr::EvalResult Result;
      if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects,
                                  S.isConstantEvaluatedContext())) {
        // Warn if a constant fixed-point value is outside the target's
        // representable range.
        llvm::APFixedPoint Value = Result.Val.getFixedPoint();
        llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
        llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T);
        if (Value > MaxVal || Value < MinVal) {
          S.DiagRuntimeBehavior(E->getExprLoc(), E,
                                S.PDiag(diag::warn_impcast_fixed_point_range)
                                    << Value.toString() << T
                                    << E->getSourceRange()
                                    << clang::SourceRange(CC));
          return;
        }
      }
    } else if (Target->isIntegerType()) {
      Expr::EvalResult Result;
      if (!S.isConstantEvaluatedContext() &&
          E->EvaluateAsFixedPoint(Result, S.Context,
                                  Expr::SE_AllowSideEffects)) {
        llvm::APFixedPoint FXResult = Result.Val.getFixedPoint();

        bool Overflowed;
        llvm::APSInt IntResult = FXResult.convertToInt(
            S.Context.getIntWidth(T),
            Target->isSignedIntegerOrEnumerationType(), &Overflowed);

        if (Overflowed) {
          S.DiagRuntimeBehavior(E->getExprLoc(), E,
                                S.PDiag(diag::warn_impcast_fixed_point_range)
                                    << FXResult.toString() << T
                                    << E->getSourceRange()
                                    << clang::SourceRange(CC));
          return;
        }
      }
    }
  } else if (Target->isUnsaturatedFixedPointType()) {
    if (Source->isIntegerType()) {
      Expr::EvalResult Result;
      if (!S.isConstantEvaluatedContext() &&
          E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
        llvm::APSInt Value = Result.Val.getInt();

        bool Overflowed;
        llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue(
            Value, S.Context.getFixedPointSemantics(T), &Overflowed);

        if (Overflowed) {
          S.DiagRuntimeBehavior(E->getExprLoc(), E,
                                S.PDiag(diag::warn_impcast_fixed_point_range)
                                    << toString(Value, /*Radix=*/10) << T
                                    << E->getSourceRange()
                                    << clang::SourceRange(CC));
          return;
        }
      }
    }
  }

  // If we are casting an integer type to a floating point type without
  // initialization-list syntax, we might lose accuracy if the floating
  // point type has a narrower significand than the integer type.
  if (SourceBT && TargetBT && SourceBT->isIntegerType() &&
      TargetBT->isFloatingType() && !IsListInit) {
    // Determine the number of precision bits in the source integer type.
    IntRange SourceRange =
        GetExprRange(S.Context, E, S.isConstantEvaluatedContext(),
                     /*Approximate=*/true);
    unsigned int SourcePrecision = SourceRange.Width;

    // Determine the number of precision bits in the
    // target floating point type.
    unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision(
        S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)));

    if (SourcePrecision > 0 && TargetPrecision > 0 &&
        SourcePrecision > TargetPrecision) {

      if (std::optional<llvm::APSInt> SourceInt =
              E->getIntegerConstantExpr(S.Context)) {
        // If the source integer is a constant, convert it to the target
        // floating point type. Issue a warning if the value changes
        // during the whole conversion.
        llvm::APFloat TargetFloatValue(
            S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)));
        llvm::APFloat::opStatus ConversionStatus =
            TargetFloatValue.convertFromAPInt(
                *SourceInt, SourceBT->isSignedInteger(),
                llvm::APFloat::rmNearestTiesToEven);

        if (ConversionStatus != llvm::APFloat::opOK) {
          SmallString<32> PrettySourceValue;
          SourceInt->toString(PrettySourceValue, 10);
          SmallString<32> PrettyTargetValue;
          TargetFloatValue.toString(PrettyTargetValue, TargetPrecision);

          S.DiagRuntimeBehavior(
              E->getExprLoc(), E,
              S.PDiag(diag::warn_impcast_integer_float_precision_constant)
                  << PrettySourceValue << PrettyTargetValue << E->getType() << T
                  << E->getSourceRange() << clang::SourceRange(CC));
        }
      } else {
        // Otherwise, the implicit conversion may lose precision.
        DiagnoseImpCast(S, E, T, CC,
                        diag::warn_impcast_integer_float_precision);
      }
    }
  }

  DiagnoseNullConversion(S, E, T, CC);

  S.DiscardMisalignedMemberAddress(Target, E);

  if (Target->isBooleanType())
    DiagnoseIntInBoolContext(S, E);

  // The remaining checks are integer -> integer only.
  if (!Source->isIntegerType() || !Target->isIntegerType())
    return;

  // TODO: remove this early return once the false positives for constant->bool
  // in templates, macros, etc, are reduced or removed.
  if (Target->isSpecificBuiltinType(BuiltinType::Bool))
    return;

  if (isObjCSignedCharBool(S, T) && !Source->isCharType() &&
      !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) {
    return adornObjCBoolConversionDiagWithTernaryFixit(
        S, E,
        S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool)
            << E->getType());
  }

  IntRange SourceTypeRange =
      IntRange::forTargetOfCanonicalType(S.Context, Source);
  IntRange LikelySourceRange = GetExprRange(
      S.Context, E, S.isConstantEvaluatedContext(), /*Approximate=*/true);
  IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);

  if (LikelySourceRange.Width > TargetRange.Width) {
    // If the source is a constant, use a default-on diagnostic.
    // TODO: this should happen for bitfield stores, too.
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects,
                         S.isConstantEvaluatedContext())) {
      llvm::APSInt Value(32);
      Value = Result.Val.getInt();

      if (S.SourceMgr.isInSystemMacro(CC))
        return;

      std::string PrettySourceValue = toString(Value, 10);
      std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);

      S.DiagRuntimeBehavior(
          E->getExprLoc(), E,
          S.PDiag(diag::warn_impcast_integer_precision_constant)
              << PrettySourceValue << PrettyTargetValue << E->getType() << T
              << E->getSourceRange() << SourceRange(CC));
      return;
    }

    // People want to build with -Wshorten-64-to-32 and not -Wconversion.
    if (S.SourceMgr.isInSystemMacro(CC))
      return;

    if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
      return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
                             /* pruneControlFlow */ true);
    return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
  }

  if (TargetRange.Width > SourceTypeRange.Width) {
    // Negating an unsigned value and widening it: the high-order bits are
    // either all zero or describe a nonnegative value, depending on the
    // target's signedness.
    if (auto *UO = dyn_cast<UnaryOperator>(E))
      if (UO->getOpcode() == UO_Minus)
        if (Source->isUnsignedIntegerType()) {
          if (Target->isUnsignedIntegerType())
            return DiagnoseImpCast(S, E, T, CC,
                                   diag::warn_impcast_high_order_zero_bits);
          if (Target->isSignedIntegerType())
            return DiagnoseImpCast(S, E, T, CC,
                                   diag::warn_impcast_nonnegative_result);
        }
  }

  if (TargetRange.Width == LikelySourceRange.Width &&
      !TargetRange.NonNegative && LikelySourceRange.NonNegative &&
      Source->isSignedIntegerType()) {
    // Warn when doing a signed to signed conversion, warn if the positive
    // source value is exactly the width of the target type, which will
    // cause a negative value to be stored.

    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
        !S.SourceMgr.isInSystemMacro(CC)) {
      llvm::APSInt Value = Result.Val.getInt();
      if (isSameWidthConstantConversion(S, E, T, CC)) {
        std::string PrettySourceValue = toString(Value, 10);
        std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);

        S.DiagRuntimeBehavior(
            E->getExprLoc(), E,
            S.PDiag(diag::warn_impcast_integer_precision_constant)
                << PrettySourceValue << PrettyTargetValue << E->getType() << T
                << E->getSourceRange() << SourceRange(CC));
        return;
      }
    }

    // Fall through for non-constants to give a sign conversion warning.
  }

  if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) &&
      ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
       (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
        LikelySourceRange.Width == TargetRange.Width))) {
    if (S.SourceMgr.isInSystemMacro(CC))
      return;

    if (SourceBT && SourceBT->isInteger() && TargetBT &&
        TargetBT->isInteger() &&
        Source->isSignedIntegerType() == Target->isSignedIntegerType()) {
      return;
    }

    unsigned DiagID = diag::warn_impcast_integer_sign;

    // Traditionally, gcc has warned about this under -Wsign-compare.
    // We also want to warn about it in -Wconversion.
    // So if -Wconversion is off, use a completely identical diagnostic
    // in the sign-compare group.
    // The conditional-checking code (CheckConditionalOperator) will then
    // decide, via ICContext, whether the warning should be re-issued.
    if (ICContext) {
      DiagID = diag::warn_impcast_integer_sign_conditional;
      *ICContext = true;
    }

    return DiagnoseImpCast(S, E, T, CC, DiagID);
  }

  // Diagnose conversions between different enumeration types.
  // In C, we pretend that the type of an EnumConstantDecl is its enumeration
  // type, to give us better diagnostics.
  QualType SourceType = E->getType();
  if (!S.getLangOpts().CPlusPlus) {
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
        EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
        SourceType = S.Context.getTypeDeclType(Enum);
        Source = S.Context.getCanonicalType(SourceType).getTypePtr();
      }
  }

  if (const EnumType *SourceEnum = Source->getAs<EnumType>())
    if (const EnumType *TargetEnum = Target->getAs<EnumType>())
      if (SourceEnum->getDecl()->hasNameForLinkage() &&
          TargetEnum->getDecl()->hasNameForLinkage() &&
          SourceEnum != TargetEnum) {
        if (S.SourceMgr.isInSystemMacro(CC))
          return;

        return DiagnoseImpCast(S, E, SourceType, T, CC,
                               diag::warn_impcast_different_enum_types);
      }
}

static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
                                     SourceLocation CC, QualType T);

// Analyze one branch of a conditional operator: recurse into nested
// conditionals, then check the implicit conversion of the branch to the
// result type T, recording sign-conversion suspicion in ICContext.
static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
                                    SourceLocation CC, bool &ICContext) {
  E = E->IgnoreParenImpCasts();
  // Diagnose incomplete type for second or third operand in C.
  if (!S.getLangOpts().CPlusPlus && E->getType()->isRecordType())
    S.RequireCompleteExprType(E, diag::err_incomplete_type);

  if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
    return CheckConditionalOperator(S, CO, CC, T);

  AnalyzeImplicitConversions(S, E, CC);
  if (E->getType() != T)
    return CheckImplicitConversion(S, E, T, CC, &ICContext);
}

// Analyze a (binary) conditional operator for implicit conversions in its
// condition and branches, with special handling for sign-conversion warnings
// that would be duplicated between the conversion and sign-compare groups.
static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
                                     SourceLocation CC, QualType T) {
  AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());

  Expr *TrueExpr = E->getTrueExpr();
  if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E))
    TrueExpr = BCO->getCommon();

  bool Suspicious = false;
  CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious);
  CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);

  if (T->isBooleanType())
    DiagnoseIntInBoolContext(S, E);

  // If -Wconversion would have warned about either of the candidates
  // for a signedness conversion to the context type...
  if (!Suspicious) return;

  // ...but it's currently ignored...
  if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
    return;

  // ...then check whether it would have warned about either of the
  // candidates for a signedness conversion to the condition type.
  if (E->getType() == T) return;

  Suspicious = false;
  CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
                          E->getType(), CC, &Suspicious);
  if (!Suspicious)
    CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
                            E->getType(), CC, &Suspicious);
}

/// Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
  // If the language has a real 'bool' (LangOpts.Bool), this conversion is an
  // ordinary one and is checked elsewhere.
  if (S.getLangOpts().Bool)
    return;
  if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
    return;
  CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
}

namespace {
// One pending expression to analyze: the expression, the conversion-context
// location for diagnostics, and whether we are inside a C++ list-init.
struct AnalyzeImplicitConversionsWorkItem {
  Expr *E;
  SourceLocation CC;
  bool IsListInit;
};
}

/// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
/// that should be visited are added to WorkList.
static void AnalyzeImplicitConversions(
    Sema &S, AnalyzeImplicitConversionsWorkItem Item,
    llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
  Expr *OrigE = Item.E;
  SourceLocation CC = Item.CC;

  QualType T = OrigE->getType();
  Expr *E = OrigE->IgnoreParenImpCasts();

  // Propagate whether we are in a C++ list initialization expression.
  // If so, we do not issue warnings for implicit int-float conversion
  // precision loss, because C++11 narrowing already handles it.
  bool IsListInit = Item.IsListInit ||
                    (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);

  if (E->isTypeDependent() || E->isValueDependent())
    return;

  Expr *SourceExpr = E;
  // Examine, but don't traverse into the source expression of an
  // OpaqueValueExpr, since it may have multiple parents and we don't want to
  // emit duplicate diagnostics. It's fine to examine the form or attempt to
  // evaluate it in the context of checking the specific conversion to T though.
  if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    if (auto *Src = OVE->getSourceExpr())
      SourceExpr = Src;

  // Warn on bitwise-not of a known-boolean value; suggest logical '!'.
  if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
    if (UO->getOpcode() == UO_Not &&
        UO->getSubExpr()->isKnownToHaveBooleanValue())
      S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
          << OrigE->getSourceRange() << T->isBooleanType()
          << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");

  // Warn on '&'/'|' of two known-boolean operands with side effects; the
  // author almost certainly meant the short-circuiting '&&'/'||'.
  if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr))
    if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
        BO->getLHS()->isKnownToHaveBooleanValue() &&
        BO->getRHS()->isKnownToHaveBooleanValue() &&
        BO->getLHS()->HasSideEffects(S.Context) &&
        BO->getRHS()->HasSideEffects(S.Context)) {
      S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
          << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
          << FixItHint::CreateReplacement(
                 BO->getOperatorLoc(),
                 (BO->getOpcode() == BO_And ? "&&" : "||"));
      S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
    }

  // For conditional operators, we analyze the arguments as if they
  // were being fed directly into the output.
  if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
    CheckConditionalOperator(S, CO, CC, T);
    return;
  }

  // Check implicit argument conversions for function calls.
  if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
    CheckImplicitArgumentConversions(S, Call, CC);

  // Go ahead and check any implicit conversions we might have skipped.
  // The non-canonical typecheck is just an optimization;
  // CheckImplicitConversion will filter out dead implicit conversions.
  if (SourceExpr->getType() != T)
    CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);

  // Now continue drilling into this expression.

  if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
    // The bound subexpressions in a PseudoObjectExpr are not reachable
    // as transitive children.
    // FIXME: Use a more uniform representation for this.
    for (auto *SE : POE->semantics())
      if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
        WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
  }

  // Skip past explicit casts.
  if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
    E = CE->getSubExpr()->IgnoreParenImpCasts();
    if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
      S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
    WorkList.push_back({E, CC, IsListInit});
    return;
  }

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
    // Do a somewhat different check with comparison operators.
    if (BO->isComparisonOp())
      return AnalyzeComparison(S, BO);

    // And with simple assignments.
    if (BO->getOpcode() == BO_Assign)
      return AnalyzeAssignment(S, BO);
    // And with compound assignments.
    if (BO->isAssignmentOp())
      return AnalyzeCompoundAssignment(S, BO);
  }

  // These break the otherwise-useful invariant below. Fortunately,
  // we don't really need to recurse into them, because any internal
  // expressions should have been analyzed already when they were
  // built into statements.
  if (isa<StmtExpr>(E)) return;

  // Don't descend into unevaluated contexts.
  if (isa<UnaryExprOrTypeTraitExpr>(E)) return;

  // Now just recurse over the expression's children.
  CC = E->getExprLoc();
  BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
  bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
  for (Stmt *SubStmt : E->children()) {
    Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
    if (!ChildExpr)
      continue;

    if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E))
      if (ChildExpr == CSE->getOperand())
        // Do not recurse over a CoroutineSuspendExpr's operand.
        // The operand is also a subexpression of getCommonExpr(), and
        // recursing into it directly would produce duplicate diagnostics.
        continue;

    if (IsLogicalAndOperator &&
        isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
      // Ignore checking string literals that are in logical and operators.
      // This is a common pattern for asserts.
      continue;
    WorkList.push_back({ChildExpr, CC, IsListInit});
  }

  // Check the operands of logical operators for bool-like conversions,
  // again skipping string literals under '&&' (the assert idiom).
  if (BO && BO->isLogicalOp()) {
    Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());

    SubExpr = BO->getRHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
  }

  if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
    if (U->getOpcode() == UO_LNot) {
      ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
    } else if (U->getOpcode() != UO_AddrOf) {
      if (U->getSubExpr()->getType()->isAtomicType())
        S.Diag(U->getSubExpr()->getBeginLoc(),
               diag::warn_atomic_implicit_seq_cst);
    }
  }
}

/// AnalyzeImplicitConversions - Find and report any interesting
/// implicit conversions in the given expression. There are a couple
/// of competing diagnostics here, -Wconversion and -Wsign-compare.
/// Entry point for the worklist-driven implicit-conversion analysis: seeds
/// the worklist with \p OrigE and drains it via the data-recursive variant
/// above (avoids deep C++ recursion on large expressions).
static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
                                       bool IsListInit/*= false*/) {
  llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
  WorkList.push_back({OrigE, CC, IsListInit});
  while (!WorkList.empty())
    AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
}

/// Diagnose integer type and any valid implicit conversion to it.
/// Returns true if an error diagnostic was emitted (E is not an integer).
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
  // Taking into account implicit conversions,
  // allow any integer.
  if (!E->getType()->isIntegerType()) {
    S.Diag(E->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_invalid_local_size_type);
    return true;
  }
  // Potentially emit standard warnings for implicit conversions if enabled
  // using -Wconversion.
  CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
  return false;
}

// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
// Returns true when emitting a warning about taking the address of a
// reference (a declaration, member, or call whose type is a reference),
// after issuing the diagnostic \p PD at E's location.
static bool CheckForReference(Sema &SemaRef, const Expr *E,
                              const PartialDiagnostic &PD) {
  E = E->IgnoreParenImpCasts();

  const FunctionDecl *FD = nullptr;

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    if (!DRE->getDecl()->getType()->isReferenceType())
      return false;
  } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    if (!M->getMemberDecl()->getType()->isReferenceType())
      return false;
  } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
    if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
      return false;
    FD = Call->getDirectCallee();
  } else {
    return false;
  }

  SemaRef.Diag(E->getExprLoc(), PD);

  // If possible, point to location of function.
  if (FD) {
    SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
  }

  return true;
}

// Returns true if the SourceLocation is expanded from any macro body.
// Returns false if the SourceLocation is invalid, is not in a macro
// expansion, or is expanded from a top-level macro argument.
static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
  if (Loc.isInvalid())
    return false;

  // Walk up the chain of macro expansions; a macro-argument expansion is
  // skipped over, a macro-body expansion makes this location "in a macro".
  while (Loc.isMacroID()) {
    if (SM.isMacroBodyExpansion(Loc))
      return true;
    Loc = SM.getImmediateMacroCallerLoc(Loc);
  }

  return false;
}

/// Diagnose pointers that are always non-null.
/// \param E the expression containing the pointer
/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
/// compared to a null pointer
/// \param IsEqual True when the comparison is equal to a null pointer
/// \param Range Extra SourceRange to highlight in the diagnostic
void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
                                        Expr::NullPointerConstantKind NullKind,
                                        bool IsEqual, SourceRange Range) {
  if (!E)
    return;

  // Don't warn inside macros.
  if (E->getExprLoc().isMacroID()) {
    const SourceManager &SM = getSourceManager();
    if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
        IsInAnyMacroBody(SM, Range.getBegin()))
      return;
  }
  E = E->IgnoreImpCasts();

  const bool IsCompare = NullKind != Expr::NPCK_NotNull;

  // 'this' is never null (ignoring UB).
  if (isa<CXXThisExpr>(E)) {
    unsigned DiagID = IsCompare ? diag::warn_this_null_compare
                                : diag::warn_this_bool_conversion;
    Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
    return;
  }

  bool IsAddressOf = false;

  if (auto *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() != UO_AddrOf)
      return;
    IsAddressOf = true;
    E = UO->getSubExpr();
  }

  // "&ref" where ref has reference type can never be null; prefer the
  // reference-specific diagnostic when it applies.
  if (IsAddressOf) {
    unsigned DiagID = IsCompare
                          ? diag::warn_address_of_reference_null_compare
                          : diag::warn_address_of_reference_bool_conversion;
    PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
                                         << IsEqual;
    if (CheckForReference(*this, E, PD)) {
      return;
    }
  }

  // Shared diagnostic emission for a nonnull parameter or a returns_nonnull
  // call: prints the expression and notes the attribute's declaration site.
  auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
    bool IsParam = isa<NonNullAttr>(NonnullAttr);
    std::string Str;
    llvm::raw_string_ostream S(Str);
    E->printPretty(S, nullptr, getPrintingPolicy());
    unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
                                : diag::warn_cast_nonnull_to_bool;
    Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
                                  << E->getSourceRange() << Range << IsEqual;
    Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
  };

  // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
  if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
    if (auto *Callee = Call->getDirectCallee()) {
      if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }
    }
  }

  // Expect to find a single Decl. Skip anything more complicated.
  ValueDecl *D = nullptr;
  if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
    D = R->getDecl();
  } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    D = M->getMemberDecl();
  }

  // Weak Decls can be null.
  if (!D || D->isWeak())
    return;

  // Check for parameter decl with nonnull attribute
  if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
    // Only warn if the parameter has not been reassigned in this function;
    // a modified parameter may legitimately be null.
    if (getCurFunction() &&
        !getCurFunction()->ModifiedNonNullParams.count(PV)) {
      if (const Attr *A = PV->getAttr<NonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }

      if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
        // Skip function template not specialized yet.
        if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
          return;
        auto ParamIter = llvm::find(FD->parameters(), PV);
        assert(ParamIter != FD->param_end());
        unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);

        // A function-level nonnull attribute applies either to all pointer
        // parameters (no args) or to the listed parameter indices.
        for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
          if (!NonNull->args_size()) {
            ComplainAboutNonnullParamOrCall(NonNull);
            return;
          }

          for (const ParamIdx &ArgNo : NonNull->args()) {
            if (ArgNo.getASTIndex() == ParamNo) {
              ComplainAboutNonnullParamOrCall(NonNull);
              return;
            }
          }
        }
      }
    }
  }

  QualType T = D->getType();
  const bool IsArray = T->isArrayType();
  const bool IsFunction = T->isFunctionType();

  // Address of function is used to silence the function warning.
  if (IsAddressOf && IsFunction) {
    return;
  }

  // Found nothing.
  if (!IsAddressOf && !IsFunction && !IsArray)
    return;

  // Pretty print the expression for the diagnostic.
  std::string Str;
  llvm::raw_string_ostream S(Str);
  E->printPretty(S, nullptr, getPrintingPolicy());

  unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
                              : diag::warn_impcast_pointer_to_bool;
  enum {
    AddressOf,
    FunctionPointer,
    ArrayPointer
  } DiagType;
  if (IsAddressOf)
    DiagType = AddressOf;
  else if (IsFunction)
    DiagType = FunctionPointer;
  else if (IsArray)
    DiagType = ArrayPointer;
  else
    llvm_unreachable("Could not determine diagnostic.");
  Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
                                << Range << IsEqual;

  if (!IsFunction)
    return;

  // Suggest '&' to silence the function warning.
  Diag(E->getExprLoc(), diag::note_function_warning_silence)
      << FixItHint::CreateInsertion(E->getBeginLoc(), "&");

  // Check to see if '()' fixit should be emitted.
  QualType ReturnType;
  UnresolvedSet<4> NonTemplateOverloads;
  tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
  if (ReturnType.isNull())
    return;

  if (IsCompare) {
    // There are two cases here. If there is a null constant, only suggest
    // for a pointer return type. If the null is 0, then suggest if the return
    // type is a pointer or an integer type.
    if (!ReturnType->isPointerType()) {
      if (NullKind == Expr::NPCK_ZeroExpression ||
          NullKind == Expr::NPCK_ZeroLiteral) {
        if (!ReturnType->isIntegerType())
          return;
      } else {
        return;
      }
    }
  } else { // !IsCompare
    // For function to bool, only suggest if the function pointer has bool
    // return type.
    if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
      return;
  }
  Diag(E->getExprLoc(), diag::note_function_to_function_call)
      << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
}

/// Diagnoses "dangerous" implicit conversions within the given
/// expression (which is a full expression). Implements -Wconversion
/// and -Wsign-compare.
///
/// \param CC the "context" location of the implicit conversion, i.e.
/// the location of the syntactic entity requiring the implicit
/// conversion
void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
  // Don't diagnose in unevaluated contexts.
  if (isUnevaluatedContext())
    return;

  // Don't diagnose for value- or type-dependent expressions.
  if (E->isTypeDependent() || E->isValueDependent())
    return;

  // Check for array bounds violations in cases where the check isn't triggered
  // elsewhere for other Expr types (like BinaryOperators), e.g. when an
  // ArraySubscriptExpr is on the RHS of a variable initialization.
  CheckArrayAccess(E);

  // This is not the right CC for (e.g.) a variable initialization.
  AnalyzeImplicitConversions(*this, E, CC);
}

/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
  ::CheckBoolLikeConversion(*this, E, CC);
}

/// Diagnose when an expression is an integer constant expression and its
/// evaluation results in integer overflow.
void Sema::CheckForIntOverflow (const Expr *E) {
  // Use a work list to deal with nested struct initializers.
  SmallVector<const Expr *, 2> Exprs(1, E);

  do {
    const Expr *OriginalE = Exprs.pop_back_val();
    const Expr *E = OriginalE->IgnoreParenCasts();

    // Operators are evaluated directly for overflow; everything else below
    // merely queues interesting subexpressions for later evaluation.
    if (isa<BinaryOperator, UnaryOperator>(E)) {
      E->EvaluateForOverflow(Context);
      continue;
    }

    if (const auto *InitList = dyn_cast<InitListExpr>(OriginalE))
      Exprs.append(InitList->inits().begin(), InitList->inits().end());
    else if (isa<ObjCBoxedExpr>(OriginalE))
      E->EvaluateForOverflow(Context);
    else if (const auto *Call = dyn_cast<CallExpr>(E))
      Exprs.append(Call->arg_begin(), Call->arg_end());
    else if (const auto *Message = dyn_cast<ObjCMessageExpr>(E))
      Exprs.append(Message->arg_begin(), Message->arg_end());
    else if (const auto *Construct = dyn_cast<CXXConstructExpr>(E))
      Exprs.append(Construct->arg_begin(), Construct->arg_end());
    else if (const auto *Temporary = dyn_cast<CXXBindTemporaryExpr>(E))
      Exprs.push_back(Temporary->getSubExpr());
    else if (const auto *Array = dyn_cast<ArraySubscriptExpr>(E))
      Exprs.push_back(Array->getIdx());
    else if (const auto *Compound = dyn_cast<CompoundLiteralExpr>(E))
      Exprs.push_back(Compound->getInitializer());
    else if (const auto *New = dyn_cast<CXXNewExpr>(E);
             New && New->isArray()) {
      if (auto ArraySize = New->getArraySize())
        Exprs.push_back(*ArraySize);
    }
  } while (!Exprs.empty());
}

namespace {

/// Visitor for expressions which looks for unsequenced operations on the
/// same object.
class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
  using Base = ConstEvaluatedExprVisitor<SequenceChecker>;

  /// A tree of sequenced regions within an expression. Two regions are
  /// unsequenced if one is an ancestor or a descendent of the other.
  /// When we finish processing an expression with sequencing, such as a comma
  /// expression, we fold its tree nodes into its parent, since they are
  /// unsequenced with respect to nodes we will visit later.
  class SequenceTree {
    struct Value {
      explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
      // Bitfields: parent index plus a "folded into parent" flag.
      unsigned Parent : 31;
      unsigned Merged : 1;
    };
    SmallVector<Value, 8> Values;

  public:
    /// A region within an expression which may be sequenced with respect
    /// to some other region.
    class Seq {
      friend class SequenceTree;

      // Index into SequenceTree::Values; 0 is the root region.
      unsigned Index;

      explicit Seq(unsigned N) : Index(N) {}

    public:
      Seq() : Index(0) {}
    };

    SequenceTree() { Values.push_back(Value(0)); }
    Seq root() const { return Seq(0); }

    /// Create a new sequence of operations, which is an unsequenced
    /// subset of \p Parent. This sequence of operations is sequenced with
    /// respect to other children of \p Parent.
    Seq allocate(Seq Parent) {
      Values.push_back(Value(Parent.Index));
      return Seq(Values.size() - 1);
    }

    /// Merge a sequence of operations into its parent.
    void merge(Seq S) {
      Values[S.Index].Merged = true;
    }

    /// Determine whether two operations are unsequenced. This operation
    /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
    /// should have been merged into its parent as appropriate.
    bool isUnsequenced(Seq Cur, Seq Old) {
      // Walk from Cur towards the root; unsequenced iff Old's representative
      // is an ancestor of (or equal to) Cur's. Indices only decrease towards
      // the root, so we can stop once we pass Target.
      unsigned C = representative(Cur.Index);
      unsigned Target = representative(Old.Index);
      while (C >= Target) {
        if (C == Target)
          return true;
        C = Values[C].Parent;
      }
      return false;
    }

  private:
    /// Pick a representative for a sequence: the nearest unmerged ancestor.
    unsigned representative(unsigned K) {
      if (Values[K].Merged)
        // Perform path compression as we go.
        return Values[K].Parent = representative(Values[K].Parent);
      return K;
    }
  };

  /// An object for which we can track unsequenced uses.
  using Object = const NamedDecl *;

  /// Different flavors of object usage which we track. We only track the
  /// least-sequenced usage of each kind.
  enum UsageKind {
    /// A read of an object. Multiple unsequenced reads are OK.
    UK_Use,

    /// A modification of an object which is sequenced before the value
    /// computation of the expression, such as ++n in C++.
    UK_ModAsValue,

    /// A modification of an object which is not sequenced before the value
    /// computation of the expression, such as n++.
    UK_ModAsSideEffect,

    UK_Count = UK_ModAsSideEffect + 1
  };

  /// Bundle together a sequencing region and the expression corresponding
  /// to a specific usage. One Usage is stored for each usage kind in UsageInfo.
  struct Usage {
    const Expr *UsageExpr = nullptr;
    SequenceTree::Seq Seq;

    Usage() = default;
  };

  struct UsageInfo {
    Usage Uses[UK_Count];

    /// Have we issued a diagnostic for this object already?
    bool Diagnosed = false;

    UsageInfo();
  };
  using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;

  Sema &SemaRef;

  /// Sequenced regions within the expression.
  SequenceTree Tree;

  /// Declaration modifications and references which we have seen.
  UsageInfoMap UsageMap;

  /// The region we are currently within.
  SequenceTree::Seq Region;

  /// Filled in with declarations which were modified as a side-effect
  /// (that is, post-increment operations).
  SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;

  /// Expressions to check later. We defer checking these to reduce
  /// stack usage.
  SmallVectorImpl<const Expr *> &WorkList;

  /// RAII object wrapping the visitation of a sequenced subexpression of an
  /// expression. At the end of this process, the side-effects of the evaluation
  /// become sequenced with respect to the value computation of the result, so
  /// we downgrade any UK_ModAsSideEffect within the evaluation to
  /// UK_ModAsValue.
  struct SequencedSubexpression {
    SequencedSubexpression(SequenceChecker &Self)
        : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
      Self.ModAsSideEffect = &ModAsSideEffect;
    }

    ~SequencedSubexpression() {
      // Iterate in reverse so nested saves are restored in LIFO order.
      for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) {
        // Add a new usage with usage kind UK_ModAsValue, and then restore
        // the previous usage with UK_ModAsSideEffect (thus clearing it if
        // the previous one was empty).
        UsageInfo &UI = Self.UsageMap[M.first];
        auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
        Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue);
        SideEffectUsage = M.second;
      }
      Self.ModAsSideEffect = OldModAsSideEffect;
    }

    SequenceChecker &Self;
    SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
    SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
  };

  /// RAII object wrapping the visitation of a subexpression which we might
  /// choose to evaluate as a constant. If any subexpression is evaluated and
  /// found to be non-constant, this allows us to suppress the evaluation of
  /// the outer expression.
  class EvaluationTracker {
  public:
    EvaluationTracker(SequenceChecker &Self)
        : Self(Self), Prev(Self.EvalTracker) {
      Self.EvalTracker = this;
    }

    ~EvaluationTracker() {
      // Propagate a failed evaluation outward to the enclosing tracker.
      Self.EvalTracker = Prev;
      if (Prev)
        Prev->EvalOK &= EvalOK;
    }

    /// Try to evaluate \p E as a boolean condition; returns false (and stops
    /// evaluating from then on) if any evaluation so far was non-constant.
    bool evaluate(const Expr *E, bool &Result) {
      if (!EvalOK || E->isValueDependent())
        return false;
      EvalOK = E->EvaluateAsBooleanCondition(
          Result, Self.SemaRef.Context,
          Self.SemaRef.isConstantEvaluatedContext());
      return EvalOK;
    }

  private:
    SequenceChecker &Self;
    EvaluationTracker *Prev;
    bool EvalOK = true;
  } *EvalTracker = nullptr;

  /// Find the object which is produced by the specified expression,
  /// if any.
  Object getObject(const Expr *E, bool Mod) const {
    E = E->IgnoreParenCasts();
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      // ++x / --x produce x itself (only relevant when tracking a mod).
      if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
        return getObject(UO->getSubExpr(), Mod);
    } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
      // "a, b" produces b; an assignment produces its LHS.
      if (BO->getOpcode() == BO_Comma)
        return getObject(BO->getRHS(), Mod);
      if (Mod && BO->isAssignmentOp())
        return getObject(BO->getLHS(), Mod);
    } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
      // FIXME: Check for more interesting cases, like "x.n = ++x.n".
      if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
        return ME->getMemberDecl();
    } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      // FIXME: If this is a reference, map through to its value.
      return DRE->getDecl();
    return nullptr;
  }

  /// Note that an object \p O was modified or used by an expression
  /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
  /// the object \p O as obtained via the \p UsageMap.
  void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
    // Get the old usage for the given object and usage kind.
    Usage &U = UI.Uses[UK];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
      // If we have a modification as side effect and are in a sequenced
      // subexpression, save the old Usage so that we can restore it later
      // in SequencedSubexpression::~SequencedSubexpression.
      if (UK == UK_ModAsSideEffect && ModAsSideEffect)
        ModAsSideEffect->push_back(std::make_pair(O, U));
      // Then record the new usage with the current sequencing region.
      U.UsageExpr = UsageExpr;
      U.Seq = Region;
    }
  }

  /// Check whether a modification or use of an object \p O in an expression
  /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
  /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
  /// \p IsModMod is true when we are checking for a mod-mod unsequenced
  /// usage and false when we are checking for a mod-use unsequenced usage.
  void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
                  UsageKind OtherKind, bool IsModMod) {
    // At most one diagnostic per object keeps the output readable.
    if (UI.Diagnosed)
      return;

    const Usage &U = UI.Uses[OtherKind];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
      return;

    const Expr *Mod = U.UsageExpr;
    const Expr *ModOrUse = UsageExpr;
    if (OtherKind == UK_Use)
      std::swap(Mod, ModOrUse);

    SemaRef.DiagRuntimeBehavior(
        Mod->getExprLoc(), {Mod, ModOrUse},
        SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
                               : diag::warn_unsequenced_mod_use)
            << O << SourceRange(ModOrUse->getExprLoc()));
    UI.Diagnosed = true;
  }

  // A note on note{Pre, Post}{Use, Mod}:
  //
  // (It helps to follow the algorithm with an expression such as
  //  "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
  //  operations before C++17 and both are well-defined in C++17).
  //
  // When visiting a node which uses/modifies an object we first call notePreUse
  // or notePreMod before visiting its sub-expression(s). At this point the
  // children of the current node have not yet been visited and so the eventual
  // uses/modifications resulting from the children of the current node have not
  // been recorded yet.
  //
  // We then visit the children of the current node. After that notePostUse or
  // notePostMod is called. These will 1) detect an unsequenced modification
  // as side effect (as in "k++ + k") and 2) add a new usage with the
  // appropriate usage kind.
  //
  // We also have to be careful that some operations sequence modification as
  // side effect as well (for example: || or ,). To account for this we wrap
  // the visitation of such a sub-expression (for example: the LHS of || or ,)
  // with SequencedSubexpression. SequencedSubexpression is an RAII object
  // which records usages which are modifications as side effect, and then
  // downgrades them (or more accurately restores the previous usage which was
  // a modification as side effect) when exiting the scope of the sequenced
  // subexpression.

  void notePreUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    // Uses conflict with other modifications.
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
  }

  void notePostUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/false);
    addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
  }

  void notePreMod(Object O, const Expr *ModExpr) {
    UsageInfo &UI = UsageMap[O];
    // Modifications conflict with other modifications and with uses.
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
  }

  void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/true);
    addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
  }

public:
  SequenceChecker(Sema &S, const Expr *E,
                  SmallVectorImpl<const Expr *> &WorkList)
      : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
    Visit(E);
    // Silence a -Wunused-private-field since WorkList is now unused.
    // TODO: Evaluate if it can be used, and if not remove it.
    (void)this->WorkList;
  }

  void VisitStmt(const Stmt *S) {
    // Skip all statements which aren't expressions for now.
  }

  void VisitExpr(const Expr *E) {
    // By default, just recurse to evaluated subexpressions.
    Base::VisitStmt(E);
  }

  void VisitCoroutineSuspendExpr(const CoroutineSuspendExpr *CSE) {
    for (auto *Sub : CSE->children()) {
      const Expr *ChildExpr = dyn_cast_or_null<Expr>(Sub);
      if (!ChildExpr)
        continue;

      if (ChildExpr == CSE->getOperand())
        // Do not recurse over a CoroutineSuspendExpr's operand.
        // The operand is also a subexpression of getCommonExpr(), and
        // recursing into it directly could confuse object management
        // for the sake of sequence tracking.
        continue;

      Visit(Sub);
    }
  }

  void VisitCastExpr(const CastExpr *E) {
    // An lvalue-to-rvalue conversion is a read of the underlying object.
    Object O = Object();
    if (E->getCastKind() == CK_LValueToRValue)
      O = getObject(E->getSubExpr(), false);

    if (O)
      notePreUse(O, E);
    VisitExpr(E);
    if (O)
      notePostUse(O, E);
  }

  /// Visit two expressions where the first is sequenced before the second,
  /// giving each its own region and folding both back into the parent.
  void VisitSequencedExpressions(const Expr *SequencedBefore,
                                 const Expr *SequencedAfter) {
    SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
    SequenceTree::Seq AfterRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    {
      SequencedSubexpression SeqBefore(*this);
      Region = BeforeRegion;
      Visit(SequencedBefore);
    }

    Region = AfterRegion;
    Visit(SequencedAfter);

    Region = OldRegion;

    Tree.merge(BeforeRegion);
    Tree.merge(AfterRegion);
  }

  void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
    // C++17 [expr.sub]p1:
    //   The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
    //   expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
    else {
      Visit(ASE->getLHS());
      Visit(ASE->getRHS());
    }
  }

  void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMem(const BinaryOperator *BO) {
    // C++17 [expr.mptr.oper]p4:
    //   Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
    //   the expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShlShr(const BinaryOperator *BO) {
    // C++17 [expr.shift]p4:
    //   The expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinComma(const BinaryOperator *BO) {
    // C++11 [expr.comma]p1:
    //   Every value computation and side effect associated with the left
    //   expression is sequenced before every value computation and side
    //   effect associated with the right expression.
    VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
  }

  void VisitBinAssign(const BinaryOperator *BO) {
    SequenceTree::Seq RHSRegion;
    SequenceTree::Seq LHSRegion;
    if (SemaRef.getLangOpts().CPlusPlus17) {
      RHSRegion = Tree.allocate(Region);
      LHSRegion = Tree.allocate(Region);
    } else {
      RHSRegion = Region;
      LHSRegion = Region;
    }
    SequenceTree::Seq OldRegion = Region;

    // C++11 [expr.ass]p1:
    //   [...] the assignment is sequenced after the value computation
    //   of the right and left operands, [...]
    //
    // so check it before inspecting the operands and update the
    // map afterwards.
    Object O = getObject(BO->getLHS(), /*Mod=*/true);
    if (O)
      notePreMod(O, BO);

    if (SemaRef.getLangOpts().CPlusPlus17) {
      // C++17 [expr.ass]p1:
      //   [...] The right operand is sequenced before the left operand. [...]
      {
        SequencedSubexpression SeqBefore(*this);
        Region = RHSRegion;
        Visit(BO->getRHS());
      }

      Region = LHSRegion;
      Visit(BO->getLHS());

      // A compound assignment also reads its LHS.
      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

    } else {
      // C++11 does not specify any sequencing between the LHS and RHS.
      Region = LHSRegion;
      Visit(BO->getLHS());

      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    // C++11 [expr.ass]p1:
    //   the assignment is sequenced [...] before the value computation of the
    //   assignment expression.
    // C11 6.5.16/3 has no such rule.
    Region = OldRegion;
    if (O)
      notePostMod(O, BO,
                  SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                  : UK_ModAsSideEffect);
    if (SemaRef.getLangOpts().CPlusPlus17) {
      Tree.merge(RHSRegion);
      Tree.merge(LHSRegion);
    }
  }

  void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
    VisitBinAssign(CAO);
  }

  void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // C++11 [expr.pre.incr]p1:
    //   the expression ++x is equivalent to x+=1
    notePostMod(O, UO,
                SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                : UK_ModAsSideEffect);
  }

  void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // Post-increment's write is NOT sequenced before the value computation.
    notePostMod(O, UO, UK_ModAsSideEffect);
  }

  void VisitBinLOr(const BinaryOperator *BO) {
    // C++11 [expr.log.or]p2:
    //   If the second expression is evaluated, every value computation and
    //   side effect associated with the first expression is sequenced before
    //   every value computation and side effect associated with the
    //   second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.or]p1:
    //   [...] the second operand is not evaluated if the first operand
    //   evaluates to true.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitBinLAnd(const BinaryOperator *BO) {
    // C++11 [expr.log.and]p2:
    //   If the second expression is evaluated, every value computation and
    //   side effect associated with the first expression is sequenced before
    //   every value computation and side effect associated with the
    //   second expression.
17232 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 17233 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 17234 SequenceTree::Seq OldRegion = Region; 17235 17236 EvaluationTracker Eval(*this); 17237 { 17238 SequencedSubexpression Sequenced(*this); 17239 Region = LHSRegion; 17240 Visit(BO->getLHS()); 17241 } 17242 17243 // C++11 [expr.log.and]p1: 17244 // [...] the second operand is not evaluated if the first operand is false. 17245 bool EvalResult = false; 17246 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 17247 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult); 17248 if (ShouldVisitRHS) { 17249 Region = RHSRegion; 17250 Visit(BO->getRHS()); 17251 } 17252 17253 Region = OldRegion; 17254 Tree.merge(LHSRegion); 17255 Tree.merge(RHSRegion); 17256 } 17257 17258 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) { 17259 // C++11 [expr.cond]p1: 17260 // [...] Every value computation and side effect associated with the first 17261 // expression is sequenced before every value computation and side effect 17262 // associated with the second or third expression. 17263 SequenceTree::Seq ConditionRegion = Tree.allocate(Region); 17264 17265 // No sequencing is specified between the true and false expression. 17266 // However since exactly one of both is going to be evaluated we can 17267 // consider them to be sequenced. This is needed to avoid warning on 17268 // something like "x ? y+= 1 : y += 2;" in the case where we will visit 17269 // both the true and false expressions because we can't evaluate x. 17270 // This will still allow us to detect an expression like (pre C++17) 17271 // "(x ? y += 1 : y += 2) = y". 17272 // 17273 // We don't wrap the visitation of the true and false expression with 17274 // SequencedSubexpression because we don't want to downgrade modifications 17275 // as side effect in the true and false expressions after the visition 17276 // is done. (for example in the expression "(x ? 
y++ : y++) + y" we should 17277 // not warn between the two "y++", but we should warn between the "y++" 17278 // and the "y". 17279 SequenceTree::Seq TrueRegion = Tree.allocate(Region); 17280 SequenceTree::Seq FalseRegion = Tree.allocate(Region); 17281 SequenceTree::Seq OldRegion = Region; 17282 17283 EvaluationTracker Eval(*this); 17284 { 17285 SequencedSubexpression Sequenced(*this); 17286 Region = ConditionRegion; 17287 Visit(CO->getCond()); 17288 } 17289 17290 // C++11 [expr.cond]p1: 17291 // [...] The first expression is contextually converted to bool (Clause 4). 17292 // It is evaluated and if it is true, the result of the conditional 17293 // expression is the value of the second expression, otherwise that of the 17294 // third expression. Only one of the second and third expressions is 17295 // evaluated. [...] 17296 bool EvalResult = false; 17297 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult); 17298 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult); 17299 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult); 17300 if (ShouldVisitTrueExpr) { 17301 Region = TrueRegion; 17302 Visit(CO->getTrueExpr()); 17303 } 17304 if (ShouldVisitFalseExpr) { 17305 Region = FalseRegion; 17306 Visit(CO->getFalseExpr()); 17307 } 17308 17309 Region = OldRegion; 17310 Tree.merge(ConditionRegion); 17311 Tree.merge(TrueRegion); 17312 Tree.merge(FalseRegion); 17313 } 17314 17315 void VisitCallExpr(const CallExpr *CE) { 17316 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 

    // Unevaluated builtins (e.g. __builtin_constant_p) never evaluate their
    // operands, so there is nothing to sequence-check inside them.
    if (CE->isUnevaluatedBuiltinCall(Context))
      return;

    // C++11 [intro.execution]p15:
    //   When calling a function [...], every value computation and side effect
    //   associated with any argument expression, or with the postfix expression
    //   designating the called function, is sequenced before execution of every
    //   expression or statement in the body of the function [and thus before
    //   the value computation of its result].
    SequencedSubexpression Sequenced(*this);
    SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] {
      // C++17 [expr.call]p5
      //   The postfix-expression is sequenced before each expression in the
      //   expression-list and any default argument. [...]
      SequenceTree::Seq CalleeRegion;
      SequenceTree::Seq OtherRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        CalleeRegion = Tree.allocate(Region);
        OtherRegion = Tree.allocate(Region);
      } else {
        CalleeRegion = Region;
        OtherRegion = Region;
      }
      SequenceTree::Seq OldRegion = Region;

      // Visit the callee expression first. Only in C++17 is the callee
      // sequenced before the arguments, hence the SequencedSubexpression
      // scope in that branch only.
      Region = CalleeRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        SequencedSubexpression Sequenced(*this);
        Visit(CE->getCallee());
      } else {
        Visit(CE->getCallee());
      }

      // Then visit the argument expressions.
      Region = OtherRegion;
      for (const Expr *Argument : CE->arguments())
        Visit(Argument);

      Region = OldRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        Tree.merge(CalleeRegion);
        Tree.merge(OtherRegion);
      }
    });
  }

  void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
    // C++17 [over.match.oper]p2:
    //   [...] the operator notation is first transformed to the equivalent
    //   function-call notation as summarized in Table 12 (where @ denotes one
    //   of the operators covered in the specified subclause). However, the
    //   operands are sequenced in the order prescribed for the built-in
    //   operator (Clause 8).
    //
    // From the above only overloaded binary operators and overloaded call
    // operators have sequencing rules in C++17 that we need to handle
    // separately.
    if (!SemaRef.getLangOpts().CPlusPlus17 ||
        (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
      return VisitCallExpr(CXXOCE);

    // Classify the overloaded operator by the sequencing rule of the
    // corresponding built-in operator.
    enum {
      NoSequencing,
      LHSBeforeRHS,
      RHSBeforeLHS,
      LHSBeforeRest
    } SequencingKind;
    switch (CXXOCE->getOperator()) {
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      SequencingKind = RHSBeforeLHS;
      break;

    case OO_LessLess:
    case OO_GreaterGreater:
    case OO_AmpAmp:
    case OO_PipePipe:
    case OO_Comma:
    case OO_ArrowStar:
    case OO_Subscript:
      SequencingKind = LHSBeforeRHS;
      break;

    case OO_Call:
      SequencingKind = LHSBeforeRest;
      break;

    default:
      SequencingKind = NoSequencing;
      break;
    }

    if (SequencingKind == NoSequencing)
      return VisitCallExpr(CXXOCE);

    // This is a call, so all subexpressions are sequenced before the result.
17424 SequencedSubexpression Sequenced(*this); 17425 17426 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 17427 assert(SemaRef.getLangOpts().CPlusPlus17 && 17428 "Should only get there with C++17 and above!"); 17429 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 17430 "Should only get there with an overloaded binary operator" 17431 " or an overloaded call operator!"); 17432 17433 if (SequencingKind == LHSBeforeRest) { 17434 assert(CXXOCE->getOperator() == OO_Call && 17435 "We should only have an overloaded call operator here!"); 17436 17437 // This is very similar to VisitCallExpr, except that we only have the 17438 // C++17 case. The postfix-expression is the first argument of the 17439 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 17440 // are in the following arguments. 17441 // 17442 // Note that we intentionally do not visit the callee expression since 17443 // it is just a decayed reference to a function. 17444 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 17445 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 17446 SequenceTree::Seq OldRegion = Region; 17447 17448 assert(CXXOCE->getNumArgs() >= 1 && 17449 "An overloaded call operator must have at least one argument" 17450 " for the postfix-expression!"); 17451 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 17452 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 17453 CXXOCE->getNumArgs() - 1); 17454 17455 // Visit the postfix-expression first. 17456 { 17457 Region = PostfixExprRegion; 17458 SequencedSubexpression Sequenced(*this); 17459 Visit(PostfixExpr); 17460 } 17461 17462 // Then visit the argument expressions. 
17463 Region = ArgsRegion; 17464 for (const Expr *Arg : Args) 17465 Visit(Arg); 17466 17467 Region = OldRegion; 17468 Tree.merge(PostfixExprRegion); 17469 Tree.merge(ArgsRegion); 17470 } else { 17471 assert(CXXOCE->getNumArgs() == 2 && 17472 "Should only have two arguments here!"); 17473 assert((SequencingKind == LHSBeforeRHS || 17474 SequencingKind == RHSBeforeLHS) && 17475 "Unexpected sequencing kind!"); 17476 17477 // We do not visit the callee expression since it is just a decayed 17478 // reference to a function. 17479 const Expr *E1 = CXXOCE->getArg(0); 17480 const Expr *E2 = CXXOCE->getArg(1); 17481 if (SequencingKind == RHSBeforeLHS) 17482 std::swap(E1, E2); 17483 17484 return VisitSequencedExpressions(E1, E2); 17485 } 17486 }); 17487 } 17488 17489 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 17490 // This is a call, so all subexpressions are sequenced before the result. 17491 SequencedSubexpression Sequenced(*this); 17492 17493 if (!CCE->isListInitialization()) 17494 return VisitExpr(CCE); 17495 17496 // In C++11, list initializations are sequenced. 17497 SmallVector<SequenceTree::Seq, 32> Elts; 17498 SequenceTree::Seq Parent = Region; 17499 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 17500 E = CCE->arg_end(); 17501 I != E; ++I) { 17502 Region = Tree.allocate(Parent); 17503 Elts.push_back(Region); 17504 Visit(*I); 17505 } 17506 17507 // Forget that the initializers are sequenced. 17508 Region = Parent; 17509 for (unsigned I = 0; I < Elts.size(); ++I) 17510 Tree.merge(Elts[I]); 17511 } 17512 17513 void VisitInitListExpr(const InitListExpr *ILE) { 17514 if (!SemaRef.getLangOpts().CPlusPlus11) 17515 return VisitExpr(ILE); 17516 17517 // In C++11, list initializations are sequenced. 
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
      const Expr *E = ILE->getInit(I);
      // Init slots can be null (e.g. designated-init holes); skip them.
      if (!E)
        continue;
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(E);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }
};

SequenceChecker::UsageInfo::UsageInfo() = default;

} // namespace

void Sema::CheckUnsequencedOperations(const Expr *E) {
  // A worklist (rather than plain recursion) is used because the checker may
  // queue subexpressions whose analysis must restart with a fresh
  // SequenceChecker instance.
  SmallVector<const Expr *, 8> WorkList;
  WorkList.push_back(E);
  while (!WorkList.empty()) {
    const Expr *Item = WorkList.pop_back_val();
    SequenceChecker(*this, Item, WorkList);
  }
}

void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
                              bool IsConstexpr) {
  llvm::SaveAndRestore ConstantContext(isConstantEvaluatedOverride,
                                       IsConstexpr || isa<ConstantExpr>(E));
  CheckImplicitConversions(E, CheckLoc);
  if (!E->isInstantiationDependent())
    CheckUnsequencedOperations(E);
  if (!IsConstexpr && !E->isValueDependent())
    CheckForIntOverflow(E);
  DiagnoseMisalignedMembers();
}

void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
                                       FieldDecl *BitField,
                                       Expr *Init) {
  (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
}

// Diagnose use of the [*] array size modifier in a parameter type of a
// function *definition* (invalid per C99 6.7.5.3p12). Recurses through
// pointers, references, parens and array element types to find a starred
// variably-modified array.
static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
                                         SourceLocation Loc) {
  if (!PType->isVariablyModifiedType())
    return;
  if (const auto *PointerTy = dyn_cast<PointerType>(PType)) {
    diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) {
    diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ParenTy = dyn_cast<ParenType>(PType)) {
    diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
    return;
  }

  const ArrayType *AT = S.Context.getAsArrayType(PType);
  if (!AT)
    return;

  if (AT->getSizeModifier() != ArraySizeModifier::Star) {
    diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
    return;
  }

  S.Diag(Loc, diag::err_array_star_in_function_definition);
}

/// CheckParmsForFunctionDef - Check that the parameters of the given
/// function are appropriate for the definition of a function. This
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                    bool CheckParameterNames) {
  bool HasInvalidParm = false;
  for (ParmVarDecl *Param : Parameters) {
    assert(Param && "null in a parameter list");
    // C99 6.7.5.3p4: the parameters in a parameter type list in a
    // function declarator that is part of a function definition of
    // that function shall not have incomplete type.
    //
    // C++23 [dcl.fct.def.general]/p2
    // The type of a parameter [...] for a function definition
    // shall not be a (possibly cv-qualified) class type that is incomplete
    // or abstract within the function body unless the function is deleted.
    if (!Param->isInvalidDecl() &&
        (RequireCompleteType(Param->getLocation(), Param->getType(),
                             diag::err_typecheck_decl_incomplete_type) ||
         RequireNonAbstractType(Param->getBeginLoc(), Param->getOriginalType(),
                                diag::err_abstract_type_in_decl,
                                AbstractParamType))) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
    }

    // C99 6.9.1p5: If the declarator includes a parameter type list, the
    // declaration of each parameter shall include an identifier.
    if (CheckParameterNames && Param->getIdentifier() == nullptr &&
        !Param->isImplicit() && !getLangOpts().CPlusPlus) {
      // Diagnose this as an extension in C17 and earlier.
      if (!getLangOpts().C23)
        Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c23);
    }

    // C99 6.7.5.3p12:
    //   If the function declarator is not part of a definition of that
    //   function, parameters may have incomplete type and may use the [*]
    //   notation in their sequences of declarator specifiers to specify
    //   variable length array types.
    QualType PType = Param->getOriginalType();
    // FIXME: This diagnostic should point the '[*]' if source-location
    // information is added for it.
    diagnoseArrayStarInParamType(*this, PType, Param->getLocation());

    // If the parameter is a c++ class type and it has to be destructed in the
    // callee function, declare the destructor so that it can be called by the
    // callee function. Do not perform any direct access check on the dtor here.
    if (!Param->isInvalidDecl()) {
      if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
        if (!ClassDecl->isInvalidDecl() &&
            !ClassDecl->hasIrrelevantDestructor() &&
            !ClassDecl->isDependentContext() &&
            ClassDecl->isParamDestroyedInCallee()) {
          CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
          MarkFunctionReferenced(Param->getLocation(), Destructor);
          DiagnoseUseOfDecl(Destructor, Param->getLocation());
        }
      }
    }

    // Parameters with the pass_object_size attribute only need to be marked
    // constant at function definitions. Because we lack information about
    // whether we're on a declaration or definition when we're instantiating the
    // attribute, we need to check for constness here.
    if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
      if (!Param->getType().isConstQualified())
        Diag(Param->getLocation(), diag::err_attribute_pointers_only)
            << Attr->getSpelling() << 1;

    // Check for parameter names shadowing fields from the class.
    if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
      // The owning context for the parameter should be the function, but we
      // want to see if this function's declaration context is a record.
      DeclContext *DC = Param->getDeclContext();
      if (DC && DC->isFunctionOrMethod()) {
        if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
          CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
                                     RD, /*DeclIsField*/ false);
      }
    }

    // WebAssembly table types are not allowed as function parameters; mark
    // the parameter invalid and diagnose.
    if (!Param->isInvalidDecl() &&
        Param->getOriginalType()->isWebAssemblyTableType()) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
      Diag(Param->getLocation(), diag::err_wasm_table_as_function_parameter);
    }
  }

  return HasInvalidParm;
}

// Forward declaration: mutually recursive with
// getBaseAlignmentAndOffsetFromLValue below.
std::optional<std::pair<
    CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr
                                                                       *E,
                                                                   ASTContext
                                                                       &Ctx);

/// Compute the alignment and offset of the base class object given the
/// derived-to-base cast expression and the alignment and offset of the derived
/// class object.
static std::pair<CharUnits, CharUnits>
getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType,
                                   CharUnits BaseAlignment, CharUnits Offset,
                                   ASTContext &Ctx) {
  for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE;
       ++PathI) {
    const CXXBaseSpecifier *Base = *PathI;
    const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
    if (Base->isVirtual()) {
      // The complete object may have a lower alignment than the non-virtual
      // alignment of the base, in which case the base may be misaligned. Choose
      // the smaller of the non-virtual alignment and BaseAlignment, which is a
      // conservative lower bound of the complete object alignment.
      CharUnits NonVirtualAlignment =
          Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment();
      BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment);
      // A virtual base's offset within the complete object is unknown here,
      // so reset the running offset.
      Offset = CharUnits::Zero();
    } else {
      const ASTRecordLayout &RL =
          Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl());
      Offset += RL.getBaseClassOffset(BaseDecl);
    }
    DerivedType = Base->getType();
  }

  return std::make_pair(BaseAlignment, Offset);
}

/// Compute the alignment and offset of a binary additive operator.
static std::optional<std::pair<CharUnits, CharUnits>>
getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
                                     bool IsSub, ASTContext &Ctx) {
  QualType PointeeType = PtrE->getType()->getPointeeType();

  if (!PointeeType->isConstantSizeType())
    return std::nullopt;

  auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx);

  if (!P)
    return std::nullopt;

  CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType);
  if (std::optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) {
    CharUnits Offset = EltSize * IdxRes->getExtValue();
    if (IsSub)
      Offset = -Offset;
    return std::make_pair(P->first, P->second + Offset);
  }

  // If the integer expression isn't a constant expression, compute the lower
  // bound of the alignment using the alignment and offset of the pointer
  // expression and the element size.
  return std::make_pair(
      P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize),
      CharUnits::Zero());
}

/// This helper function takes an lvalue expression and returns the alignment of
/// a VarDecl and a constant offset from the VarDecl.
std::optional<std::pair<
    CharUnits,
    CharUnits>> static getBaseAlignmentAndOffsetFromLValue(const Expr *E,
                                                           ASTContext &Ctx) {
  E = E->IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    break;
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::ImplicitCastExprClass: {
    auto *CE = cast<CastExpr>(E);
    const Expr *From = CE->getSubExpr();
    switch (CE->getCastKind()) {
    default:
      break;
    case CK_NoOp:
      return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx);
      if (!P)
        break;
      return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first,
                                                P->second, Ctx);
    }
    }
    break;
  }
  case Stmt::ArraySubscriptExprClass: {
    auto *ASE = cast<ArraySubscriptExpr>(E);
    return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(),
                                                false, Ctx);
  }
  case Stmt::DeclRefExprClass: {
    if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) {
      // FIXME: If VD is captured by copy or is an escaping __block variable,
      // use the alignment of VD's type.
      if (!VD->getType()->isReferenceType()) {
        // Dependent alignment cannot be resolved -> bail out.
        if (VD->hasDependentAlignment())
          break;
        return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero());
      }
      // For a reference variable, fall back to the alignment of its
      // initializer, if one is visible.
      if (VD->hasInit())
        return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx);
    }
    break;
  }
  case Stmt::MemberExprClass: {
    auto *ME = cast<MemberExpr>(E);
    auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
    if (!FD || FD->getType()->isReferenceType() ||
        FD->getParent()->isInvalidDecl())
      break;
    std::optional<std::pair<CharUnits, CharUnits>> P;
    if (ME->isArrow())
      P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx);
    else
      P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx);
    if (!P)
      break;
    const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent());
    // NOTE(review): ASTRecordLayout::getFieldOffset returns a bit offset;
    // confirm whether a bits-to-CharUnits conversion is needed before the
    // fromQuantity call below.
    uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex());
    return std::make_pair(P->first,
                          P->second + CharUnits::fromQuantity(Offset));
  }
  case Stmt::UnaryOperatorClass: {
    auto *UO = cast<UnaryOperator>(E);
    switch (UO->getOpcode()) {
    default:
      break;
    case UO_Deref:
      return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx);
    }
    break;
  }
  case Stmt::BinaryOperatorClass: {
    auto *BO = cast<BinaryOperator>(E);
    auto Opcode = BO->getOpcode();
    switch (Opcode) {
    default:
      break;
    case BO_Comma:
      return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx);
    }
    break;
  }
  }
  return std::nullopt;
}

/// This helper function takes a pointer expression and returns the alignment of
/// a VarDecl and a constant offset from the VarDecl.
std::optional<std::pair<
    CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr
                                                                       *E,
                                                                   ASTContext
                                                                       &Ctx) {
  E = E->IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    break;
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::ImplicitCastExprClass: {
    auto *CE = cast<CastExpr>(E);
    const Expr *From = CE->getSubExpr();
    switch (CE->getCastKind()) {
    default:
      break;
    case CK_NoOp:
      return getBaseAlignmentAndOffsetFromPtr(From, Ctx);
    case CK_ArrayToPointerDecay:
      return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx);
      if (!P)
        break;
      return getDerivedToBaseAlignmentAndOffset(
          CE, From->getType()->getPointeeType(), P->first, P->second, Ctx);
    }
    }
    break;
  }
  case Stmt::CXXThisExprClass: {
    // 'this' points at least at the non-virtual alignment of the class.
    auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
    CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
    return std::make_pair(Alignment, CharUnits::Zero());
  }
  case Stmt::UnaryOperatorClass: {
    auto *UO = cast<UnaryOperator>(E);
    if (UO->getOpcode() == UO_AddrOf)
      return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx);
    break;
  }
  case Stmt::BinaryOperatorClass: {
    auto *BO = cast<BinaryOperator>(E);
    auto Opcode = BO->getOpcode();
    switch (Opcode) {
    default:
      break;
    case BO_Add:
    case BO_Sub: {
      // For 'int + ptr', swap so LHS is always the pointer operand.
      const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
      if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
        std::swap(LHS, RHS);
      return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub,
                                                  Ctx);
    }
    case BO_Comma:
      return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx);
    }
    break;
  }
  }
  return std::nullopt;
}

static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
  // See if we can compute the alignment of a VarDecl and an offset from it.
  std::optional<std::pair<CharUnits, CharUnits>> P =
      getBaseAlignmentAndOffsetFromPtr(E, S.Context);

  if (P)
    return P->first.alignmentAtOffset(P->second);

  // If that failed, return the type's alignment.
  return S.Context.getTypeAlignInChars(E->getType()->getPointeeType());
}

/// CheckCastAlign - Implements -Wcast-align, which warns when a
/// pointer cast increases the alignment requirements.
void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
  // This is actually a lot of work to potentially be doing on every
  // cast; don't do it if we're ignoring -Wcast_align (as is the default).
  if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
    return;

  // Ignore dependent types.
  if (T->isDependentType() || Op->getType()->isDependentType())
    return;

  // Require that the destination be a pointer type.
  const PointerType *DestPtr = T->getAs<PointerType>();
  if (!DestPtr) return;

  // If the destination has alignment 1, we're done.
  QualType DestPointee = DestPtr->getPointeeType();
  if (DestPointee->isIncompleteType()) return;
  CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
  if (DestAlign.isOne()) return;

  // Require that the source be a pointer type.
  const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
  if (!SrcPtr) return;
  QualType SrcPointee = SrcPtr->getPointeeType();

  // Explicitly allow casts from cv void*. We already implicitly
  // allowed casts to cv void*, since they have alignment 1.
  // Also allow casts involving incomplete types, which implicitly
  // includes 'void'.
  if (SrcPointee->isIncompleteType()) return;

  CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this);

  if (SrcAlign >= DestAlign) return;

  Diag(TRange.getBegin(), diag::warn_cast_align)
    << Op->getType() << T
    << static_cast<unsigned>(SrcAlign.getQuantity())
    << static_cast<unsigned>(DestAlign.getQuantity())
    << TRange << Op->getSourceRange();
}

// Diagnose constant array indexes / pointer arithmetic that are provably out
// of bounds (-Warray-bounds family of warnings).
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                            const ArraySubscriptExpr *ASE,
                            bool AllowOnePastEnd, bool IndexNegated) {
  // Already diagnosed by the constant evaluator.
  if (isConstantEvaluatedContext())
    return;

  IndexExpr = IndexExpr->IgnoreParenImpCasts();
  if (IndexExpr->isValueDependent())
    return;

  const Type *EffectiveType =
      BaseExpr->getType()->getPointeeOrArrayElementType();
  BaseExpr = BaseExpr->IgnoreParenCasts();
  const ConstantArrayType *ArrayTy =
      Context.getAsConstantArrayType(BaseExpr->getType());

  LangOptions::StrictFlexArraysLevelKind
    StrictFlexArraysLevel = getLangOpts().getStrictFlexArraysLevel();

  const Type *BaseType =
      ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr();
  // Flexible-array-member-like accesses are treated as unbounded: no upper
  // bound is known, only the address-space limit applies.
  bool IsUnboundedArray =
      BaseType == nullptr || BaseExpr->isFlexibleArrayMemberLike(
                                 Context, StrictFlexArraysLevel,
                                 /*IgnoreTemplateOrMacroSubstitution=*/true);
  if (EffectiveType->isDependentType() ||
      (!IsUnboundedArray && BaseType->isDependentType()))
    return;

  Expr::EvalResult Result;
  if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
    return;

  llvm::APSInt index = Result.Val.getInt();
  if (IndexNegated) {
    index.setIsUnsigned(false);
    index = -index;
  }

  if (IsUnboundedArray) {
    if (EffectiveType->isFunctionType())
      return;
    if (index.isUnsigned() || !index.isNegative()) {
      const auto &ASTC = getASTContext();
      unsigned AddrBits = ASTC.getTargetInfo().getPointerWidth(
          EffectiveType->getCanonicalTypeInternal().getAddressSpace());
      if (index.getBitWidth() < AddrBits)
        index = index.zext(AddrBits);
      std::optional<CharUnits> ElemCharUnits =
          ASTC.getTypeSizeInCharsIfKnown(EffectiveType);
      // PR50741 - If EffectiveType has unknown size (e.g., if it's a void
      // pointer) bounds-checking isn't meaningful.
      if (!ElemCharUnits || ElemCharUnits->isZero())
        return;
      llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
      // If index has more active bits than address space, we already know
      // we have a bounds violation to warn about. Otherwise, compute
      // address of (index + 1)th element, and warn about bounds violation
      // only if that address exceeds address space.
      if (index.getActiveBits() <= AddrBits) {
        bool Overflow;
        llvm::APInt Product(index);
        Product += 1;
        Product = Product.umul_ov(ElemBytes, Overflow);
        if (!Overflow && Product.getActiveBits() <= AddrBits)
          return;
      }

      // Need to compute max possible elements in address space, since that
      // is included in diag message.
      llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits);
      MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth()));
      MaxElems += 1;
      ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth());
      MaxElems = MaxElems.udiv(ElemBytes);

      unsigned DiagID =
          ASE ? diag::warn_array_index_exceeds_max_addressable_bounds
              : diag::warn_ptr_arith_exceeds_max_addressable_bounds;

      // Diag message shows element size in bits and in "bytes" (platform-
      // dependent CharUnits)
      DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                          PDiag(DiagID)
                              << toString(index, 10, true) << AddrBits
                              << (unsigned)ASTC.toBits(*ElemCharUnits)
                              << toString(ElemBytes, 10, false)
                              << toString(MaxElems, 10, false)
                              << (unsigned)MaxElems.getLimitedValue(~0U)
                              << IndexExpr->getSourceRange());

      const NamedDecl *ND = nullptr;
      // Try harder to find a NamedDecl to point at in the note.
18070 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 18071 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 18072 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 18073 ND = DRE->getDecl(); 18074 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 18075 ND = ME->getMemberDecl(); 18076 18077 if (ND) 18078 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 18079 PDiag(diag::note_array_declared_here) << ND); 18080 } 18081 return; 18082 } 18083 18084 if (index.isUnsigned() || !index.isNegative()) { 18085 // It is possible that the type of the base expression after 18086 // IgnoreParenCasts is incomplete, even though the type of the base 18087 // expression before IgnoreParenCasts is complete (see PR39746 for an 18088 // example). In this case we have no information about whether the array 18089 // access exceeds the array bounds. However we can still diagnose an array 18090 // access which precedes the array bounds. 18091 if (BaseType->isIncompleteType()) 18092 return; 18093 18094 llvm::APInt size = ArrayTy->getSize(); 18095 18096 if (BaseType != EffectiveType) { 18097 // Make sure we're comparing apples to apples when comparing index to 18098 // size. 18099 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 18100 uint64_t array_typesize = Context.getTypeSize(BaseType); 18101 18102 // Handle ptrarith_typesize being zero, such as when casting to void*. 18103 // Use the size in bits (what "getTypeSize()" returns) rather than bytes. 18104 if (!ptrarith_typesize) 18105 ptrarith_typesize = Context.getCharWidth(); 18106 18107 if (ptrarith_typesize != array_typesize) { 18108 // There's a cast to a different size type involved. 18109 uint64_t ratio = array_typesize / ptrarith_typesize; 18110 18111 // TODO: Be smarter about handling cases where array_typesize is not a 18112 // multiple of ptrarith_typesize. 
18113 if (ptrarith_typesize * ratio == array_typesize) 18114 size *= llvm::APInt(size.getBitWidth(), ratio); 18115 } 18116 } 18117 18118 if (size.getBitWidth() > index.getBitWidth()) 18119 index = index.zext(size.getBitWidth()); 18120 else if (size.getBitWidth() < index.getBitWidth()) 18121 size = size.zext(index.getBitWidth()); 18122 18123 // For array subscripting the index must be less than size, but for pointer 18124 // arithmetic also allow the index (offset) to be equal to size since 18125 // computing the next address after the end of the array is legal and 18126 // commonly done e.g. in C++ iterators and range-based for loops. 18127 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 18128 return; 18129 18130 // Suppress the warning if the subscript expression (as identified by the 18131 // ']' location) and the index expression are both from macro expansions 18132 // within a system header. 18133 if (ASE) { 18134 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 18135 ASE->getRBracketLoc()); 18136 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 18137 SourceLocation IndexLoc = 18138 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 18139 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 18140 return; 18141 } 18142 } 18143 18144 unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds 18145 : diag::warn_ptr_arith_exceeds_bounds; 18146 unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1; 18147 QualType CastMsgTy = ASE ? 
ASE->getLHS()->getType() : QualType(); 18148 18149 DiagRuntimeBehavior( 18150 BaseExpr->getBeginLoc(), BaseExpr, 18151 PDiag(DiagID) << toString(index, 10, true) << ArrayTy->desugar() 18152 << CastMsg << CastMsgTy << IndexExpr->getSourceRange()); 18153 } else { 18154 unsigned DiagID = diag::warn_array_index_precedes_bounds; 18155 if (!ASE) { 18156 DiagID = diag::warn_ptr_arith_precedes_bounds; 18157 if (index.isNegative()) index = -index; 18158 } 18159 18160 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 18161 PDiag(DiagID) << toString(index, 10, true) 18162 << IndexExpr->getSourceRange()); 18163 } 18164 18165 const NamedDecl *ND = nullptr; 18166 // Try harder to find a NamedDecl to point at in the note. 18167 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 18168 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 18169 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 18170 ND = DRE->getDecl(); 18171 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 18172 ND = ME->getMemberDecl(); 18173 18174 if (ND) 18175 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 18176 PDiag(diag::note_array_declared_here) << ND); 18177 } 18178 18179 void Sema::CheckArrayAccess(const Expr *expr) { 18180 int AllowOnePastEnd = 0; 18181 while (expr) { 18182 expr = expr->IgnoreParenImpCasts(); 18183 switch (expr->getStmtClass()) { 18184 case Stmt::ArraySubscriptExprClass: { 18185 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 18186 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 18187 AllowOnePastEnd > 0); 18188 expr = ASE->getBase(); 18189 break; 18190 } 18191 case Stmt::MemberExprClass: { 18192 expr = cast<MemberExpr>(expr)->getBase(); 18193 break; 18194 } 18195 case Stmt::OMPArraySectionExprClass: { 18196 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 18197 if (ASE->getLowerBound()) 18198 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 18199 /*ASE=*/nullptr, AllowOnePastEnd > 0); 18200 return; 18201 } 18202 case 
Stmt::UnaryOperatorClass: { 18203 // Only unwrap the * and & unary operators 18204 const UnaryOperator *UO = cast<UnaryOperator>(expr); 18205 expr = UO->getSubExpr(); 18206 switch (UO->getOpcode()) { 18207 case UO_AddrOf: 18208 AllowOnePastEnd++; 18209 break; 18210 case UO_Deref: 18211 AllowOnePastEnd--; 18212 break; 18213 default: 18214 return; 18215 } 18216 break; 18217 } 18218 case Stmt::ConditionalOperatorClass: { 18219 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 18220 if (const Expr *lhs = cond->getLHS()) 18221 CheckArrayAccess(lhs); 18222 if (const Expr *rhs = cond->getRHS()) 18223 CheckArrayAccess(rhs); 18224 return; 18225 } 18226 case Stmt::CXXOperatorCallExprClass: { 18227 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 18228 for (const auto *Arg : OCE->arguments()) 18229 CheckArrayAccess(Arg); 18230 return; 18231 } 18232 default: 18233 return; 18234 } 18235 } 18236 } 18237 18238 //===--- CHECK: Objective-C retain cycles ----------------------------------// 18239 18240 namespace { 18241 18242 struct RetainCycleOwner { 18243 VarDecl *Variable = nullptr; 18244 SourceRange Range; 18245 SourceLocation Loc; 18246 bool Indirect = false; 18247 18248 RetainCycleOwner() = default; 18249 18250 void setLocsFrom(Expr *e) { 18251 Loc = e->getExprLoc(); 18252 Range = e->getSourceRange(); 18253 } 18254 }; 18255 18256 } // namespace 18257 18258 /// Consider whether capturing the given variable can possibly lead to 18259 /// a retain cycle. 18260 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 18261 // In ARC, it's captured strongly iff the variable has __strong 18262 // lifetime. In MRR, it's captured strongly if the variable is 18263 // __block and has an appropriate type. 
18264 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 18265 return false; 18266 18267 owner.Variable = var; 18268 if (ref) 18269 owner.setLocsFrom(ref); 18270 return true; 18271 } 18272 18273 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 18274 while (true) { 18275 e = e->IgnoreParens(); 18276 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 18277 switch (cast->getCastKind()) { 18278 case CK_BitCast: 18279 case CK_LValueBitCast: 18280 case CK_LValueToRValue: 18281 case CK_ARCReclaimReturnedObject: 18282 e = cast->getSubExpr(); 18283 continue; 18284 18285 default: 18286 return false; 18287 } 18288 } 18289 18290 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 18291 ObjCIvarDecl *ivar = ref->getDecl(); 18292 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 18293 return false; 18294 18295 // Try to find a retain cycle in the base. 18296 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 18297 return false; 18298 18299 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 18300 owner.Indirect = true; 18301 return true; 18302 } 18303 18304 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 18305 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 18306 if (!var) return false; 18307 return considerVariable(var, ref, owner); 18308 } 18309 18310 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 18311 if (member->isArrow()) return false; 18312 18313 // Don't count this as an indirect ownership. 18314 e = member->getBase(); 18315 continue; 18316 } 18317 18318 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 18319 // Only pay attention to pseudo-objects on property references. 
18320 ObjCPropertyRefExpr *pre 18321 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 18322 ->IgnoreParens()); 18323 if (!pre) return false; 18324 if (pre->isImplicitProperty()) return false; 18325 ObjCPropertyDecl *property = pre->getExplicitProperty(); 18326 if (!property->isRetaining() && 18327 !(property->getPropertyIvarDecl() && 18328 property->getPropertyIvarDecl()->getType() 18329 .getObjCLifetime() == Qualifiers::OCL_Strong)) 18330 return false; 18331 18332 owner.Indirect = true; 18333 if (pre->isSuperReceiver()) { 18334 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 18335 if (!owner.Variable) 18336 return false; 18337 owner.Loc = pre->getLocation(); 18338 owner.Range = pre->getSourceRange(); 18339 return true; 18340 } 18341 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 18342 ->getSourceExpr()); 18343 continue; 18344 } 18345 18346 // Array ivars? 18347 18348 return false; 18349 } 18350 } 18351 18352 namespace { 18353 18354 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 18355 VarDecl *Variable; 18356 Expr *Capturer = nullptr; 18357 bool VarWillBeReased = false; 18358 18359 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 18360 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 18361 Variable(variable) {} 18362 18363 void VisitDeclRefExpr(DeclRefExpr *ref) { 18364 if (ref->getDecl() == Variable && !Capturer) 18365 Capturer = ref; 18366 } 18367 18368 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 18369 if (Capturer) return; 18370 Visit(ref->getBase()); 18371 if (Capturer && ref->isFreeIvar()) 18372 Capturer = ref; 18373 } 18374 18375 void VisitBlockExpr(BlockExpr *block) { 18376 // Look inside nested blocks 18377 if (block->getBlockDecl()->capturesVariable(Variable)) 18378 Visit(block->getBlockDecl()->getBody()); 18379 } 18380 18381 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 18382 if (Capturer) return; 18383 if (OVE->getSourceExpr()) 18384 Visit(OVE->getSourceExpr()); 18385 } 18386 
18387 void VisitBinaryOperator(BinaryOperator *BinOp) { 18388 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 18389 return; 18390 Expr *LHS = BinOp->getLHS(); 18391 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 18392 if (DRE->getDecl() != Variable) 18393 return; 18394 if (Expr *RHS = BinOp->getRHS()) { 18395 RHS = RHS->IgnoreParenCasts(); 18396 std::optional<llvm::APSInt> Value; 18397 VarWillBeReased = 18398 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 18399 *Value == 0); 18400 } 18401 } 18402 } 18403 }; 18404 18405 } // namespace 18406 18407 /// Check whether the given argument is a block which captures a 18408 /// variable. 18409 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 18410 assert(owner.Variable && owner.Loc.isValid()); 18411 18412 e = e->IgnoreParenCasts(); 18413 18414 // Look through [^{...} copy] and Block_copy(^{...}). 18415 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 18416 Selector Cmd = ME->getSelector(); 18417 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 18418 e = ME->getInstanceReceiver(); 18419 if (!e) 18420 return nullptr; 18421 e = e->IgnoreParenCasts(); 18422 } 18423 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 18424 if (CE->getNumArgs() == 1) { 18425 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 18426 if (Fn) { 18427 const IdentifierInfo *FnI = Fn->getIdentifier(); 18428 if (FnI && FnI->isStr("_Block_copy")) { 18429 e = CE->getArg(0)->IgnoreParenCasts(); 18430 } 18431 } 18432 } 18433 } 18434 18435 BlockExpr *block = dyn_cast<BlockExpr>(e); 18436 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 18437 return nullptr; 18438 18439 FindCaptureVisitor visitor(S.Context, owner.Variable); 18440 visitor.Visit(block->getBlockDecl()->getBody()); 18441 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 18442 } 18443 18444 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 18445 RetainCycleOwner &owner) { 18446 assert(capturer); 18447 assert(owner.Variable && owner.Loc.isValid()); 18448 18449 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 18450 << owner.Variable << capturer->getSourceRange(); 18451 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 18452 << owner.Indirect << owner.Range; 18453 } 18454 18455 /// Check for a keyword selector that starts with the word 'add' or 18456 /// 'set'. 18457 static bool isSetterLikeSelector(Selector sel) { 18458 if (sel.isUnarySelector()) return false; 18459 18460 StringRef str = sel.getNameForSlot(0); 18461 str = str.ltrim('_'); 18462 if (str.starts_with("set")) 18463 str = str.substr(3); 18464 else if (str.starts_with("add")) { 18465 // Specially allow 'addOperationWithBlock:'. 18466 if (sel.getNumArgs() == 1 && str.starts_with("addOperationWithBlock")) 18467 return false; 18468 str = str.substr(3); 18469 } else 18470 return false; 18471 18472 if (str.empty()) return true; 18473 return !isLowercase(str.front()); 18474 } 18475 18476 static std::optional<int> 18477 GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 18478 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 18479 Message->getReceiverInterface(), 18480 NSAPI::ClassId_NSMutableArray); 18481 if (!IsMutableArray) { 18482 return std::nullopt; 18483 } 18484 18485 Selector Sel = Message->getSelector(); 18486 18487 std::optional<NSAPI::NSArrayMethodKind> MKOpt = 18488 S.NSAPIObj->getNSArrayMethodKind(Sel); 18489 if (!MKOpt) { 18490 return std::nullopt; 18491 } 18492 18493 NSAPI::NSArrayMethodKind MK = *MKOpt; 18494 18495 switch (MK) { 18496 case NSAPI::NSMutableArr_addObject: 18497 case NSAPI::NSMutableArr_insertObjectAtIndex: 18498 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 18499 return 0; 18500 case NSAPI::NSMutableArr_replaceObjectAtIndex: 18501 return 1; 18502 18503 default: 18504 
return std::nullopt; 18505 } 18506 18507 return std::nullopt; 18508 } 18509 18510 static std::optional<int> 18511 GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 18512 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 18513 Message->getReceiverInterface(), 18514 NSAPI::ClassId_NSMutableDictionary); 18515 if (!IsMutableDictionary) { 18516 return std::nullopt; 18517 } 18518 18519 Selector Sel = Message->getSelector(); 18520 18521 std::optional<NSAPI::NSDictionaryMethodKind> MKOpt = 18522 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 18523 if (!MKOpt) { 18524 return std::nullopt; 18525 } 18526 18527 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 18528 18529 switch (MK) { 18530 case NSAPI::NSMutableDict_setObjectForKey: 18531 case NSAPI::NSMutableDict_setValueForKey: 18532 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 18533 return 0; 18534 18535 default: 18536 return std::nullopt; 18537 } 18538 18539 return std::nullopt; 18540 } 18541 18542 static std::optional<int> GetNSSetArgumentIndex(Sema &S, 18543 ObjCMessageExpr *Message) { 18544 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 18545 Message->getReceiverInterface(), 18546 NSAPI::ClassId_NSMutableSet); 18547 18548 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 18549 Message->getReceiverInterface(), 18550 NSAPI::ClassId_NSMutableOrderedSet); 18551 if (!IsMutableSet && !IsMutableOrderedSet) { 18552 return std::nullopt; 18553 } 18554 18555 Selector Sel = Message->getSelector(); 18556 18557 std::optional<NSAPI::NSSetMethodKind> MKOpt = 18558 S.NSAPIObj->getNSSetMethodKind(Sel); 18559 if (!MKOpt) { 18560 return std::nullopt; 18561 } 18562 18563 NSAPI::NSSetMethodKind MK = *MKOpt; 18564 18565 switch (MK) { 18566 case NSAPI::NSMutableSet_addObject: 18567 case NSAPI::NSOrderedSet_setObjectAtIndex: 18568 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 18569 case NSAPI::NSOrderedSet_insertObjectAtIndex: 18570 return 0; 18571 case 
NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 18572 return 1; 18573 } 18574 18575 return std::nullopt; 18576 } 18577 18578 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 18579 if (!Message->isInstanceMessage()) { 18580 return; 18581 } 18582 18583 std::optional<int> ArgOpt; 18584 18585 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 18586 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 18587 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 18588 return; 18589 } 18590 18591 int ArgIndex = *ArgOpt; 18592 18593 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 18594 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 18595 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 18596 } 18597 18598 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 18599 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 18600 if (ArgRE->isObjCSelfExpr()) { 18601 Diag(Message->getSourceRange().getBegin(), 18602 diag::warn_objc_circular_container) 18603 << ArgRE->getDecl() << StringRef("'super'"); 18604 } 18605 } 18606 } else { 18607 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 18608 18609 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 18610 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 18611 } 18612 18613 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 18614 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 18615 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 18616 ValueDecl *Decl = ReceiverRE->getDecl(); 18617 Diag(Message->getSourceRange().getBegin(), 18618 diag::warn_objc_circular_container) 18619 << Decl << Decl; 18620 if (!ArgRE->isObjCSelfExpr()) { 18621 Diag(Decl->getLocation(), 18622 diag::note_objc_circular_container_declared_here) 18623 << Decl; 18624 } 18625 } 18626 } 18627 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 18628 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 
18629 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 18630 ObjCIvarDecl *Decl = IvarRE->getDecl(); 18631 Diag(Message->getSourceRange().getBegin(), 18632 diag::warn_objc_circular_container) 18633 << Decl << Decl; 18634 Diag(Decl->getLocation(), 18635 diag::note_objc_circular_container_declared_here) 18636 << Decl; 18637 } 18638 } 18639 } 18640 } 18641 } 18642 18643 /// Check a message send to see if it's likely to cause a retain cycle. 18644 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 18645 // Only check instance methods whose selector looks like a setter. 18646 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 18647 return; 18648 18649 // Try to find a variable that the receiver is strongly owned by. 18650 RetainCycleOwner owner; 18651 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 18652 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 18653 return; 18654 } else { 18655 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 18656 owner.Variable = getCurMethodDecl()->getSelfDecl(); 18657 owner.Loc = msg->getSuperLoc(); 18658 owner.Range = msg->getSuperLoc(); 18659 } 18660 18661 // Check whether the receiver is captured by any of the arguments. 18662 const ObjCMethodDecl *MD = msg->getMethodDecl(); 18663 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 18664 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 18665 // noescape blocks should not be retained by the method. 18666 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 18667 continue; 18668 return diagnoseRetainCycle(*this, capturer, owner); 18669 } 18670 } 18671 } 18672 18673 /// Check a property assign to see if it's likely to cause a retain cycle. 
18674 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 18675 RetainCycleOwner owner; 18676 if (!findRetainCycleOwner(*this, receiver, owner)) 18677 return; 18678 18679 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 18680 diagnoseRetainCycle(*this, capturer, owner); 18681 } 18682 18683 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 18684 RetainCycleOwner Owner; 18685 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 18686 return; 18687 18688 // Because we don't have an expression for the variable, we have to set the 18689 // location explicitly here. 18690 Owner.Loc = Var->getLocation(); 18691 Owner.Range = Var->getSourceRange(); 18692 18693 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 18694 diagnoseRetainCycle(*this, Capturer, Owner); 18695 } 18696 18697 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 18698 Expr *RHS, bool isProperty) { 18699 // Check if RHS is an Objective-C object literal, which also can get 18700 // immediately zapped in a weak reference. Note that we explicitly 18701 // allow ObjCStringLiterals, since those are designed to never really die. 18702 RHS = RHS->IgnoreParenImpCasts(); 18703 18704 // This enum needs to match with the 'select' in 18705 // warn_objc_arc_literal_assign (off-by-1). 18706 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 18707 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 18708 return false; 18709 18710 S.Diag(Loc, diag::warn_arc_literal_assign) 18711 << (unsigned) Kind 18712 << (isProperty ? 0 : 1) 18713 << RHS->getSourceRange(); 18714 18715 return true; 18716 } 18717 18718 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 18719 Qualifiers::ObjCLifetime LT, 18720 Expr *RHS, bool isProperty) { 18721 // Strip off any implicit cast added to get to the one ARC-specific. 
18722 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 18723 if (cast->getCastKind() == CK_ARCConsumeObject) { 18724 S.Diag(Loc, diag::warn_arc_retained_assign) 18725 << (LT == Qualifiers::OCL_ExplicitNone) 18726 << (isProperty ? 0 : 1) 18727 << RHS->getSourceRange(); 18728 return true; 18729 } 18730 RHS = cast->getSubExpr(); 18731 } 18732 18733 if (LT == Qualifiers::OCL_Weak && 18734 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 18735 return true; 18736 18737 return false; 18738 } 18739 18740 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 18741 QualType LHS, Expr *RHS) { 18742 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 18743 18744 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 18745 return false; 18746 18747 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 18748 return true; 18749 18750 return false; 18751 } 18752 18753 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 18754 Expr *LHS, Expr *RHS) { 18755 QualType LHSType; 18756 // PropertyRef on LHS type need be directly obtained from 18757 // its declaration as it has a PseudoType. 18758 ObjCPropertyRefExpr *PRE 18759 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 18760 if (PRE && !PRE->isImplicitProperty()) { 18761 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 18762 if (PD) 18763 LHSType = PD->getType(); 18764 } 18765 18766 if (LHSType.isNull()) 18767 LHSType = LHS->getType(); 18768 18769 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 18770 18771 if (LT == Qualifiers::OCL_Weak) { 18772 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 18773 getCurFunction()->markSafeWeakUse(LHS); 18774 } 18775 18776 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 18777 return; 18778 18779 // FIXME. Check for other life times. 
18780 if (LT != Qualifiers::OCL_None) 18781 return; 18782 18783 if (PRE) { 18784 if (PRE->isImplicitProperty()) 18785 return; 18786 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 18787 if (!PD) 18788 return; 18789 18790 unsigned Attributes = PD->getPropertyAttributes(); 18791 if (Attributes & ObjCPropertyAttribute::kind_assign) { 18792 // when 'assign' attribute was not explicitly specified 18793 // by user, ignore it and rely on property type itself 18794 // for lifetime info. 18795 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 18796 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 18797 LHSType->isObjCRetainableType()) 18798 return; 18799 18800 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 18801 if (cast->getCastKind() == CK_ARCConsumeObject) { 18802 Diag(Loc, diag::warn_arc_retained_property_assign) 18803 << RHS->getSourceRange(); 18804 return; 18805 } 18806 RHS = cast->getSubExpr(); 18807 } 18808 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 18809 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 18810 return; 18811 } 18812 } 18813 } 18814 18815 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 18816 18817 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 18818 SourceLocation StmtLoc, 18819 const NullStmt *Body) { 18820 // Do not warn if the body is a macro that expands to nothing, e.g: 18821 // 18822 // #define CALL(x) 18823 // if (condition) 18824 // CALL(0); 18825 if (Body->hasLeadingEmptyMacro()) 18826 return false; 18827 18828 // Get line numbers of statement and body. 
18829 bool StmtLineInvalid; 18830 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 18831 &StmtLineInvalid); 18832 if (StmtLineInvalid) 18833 return false; 18834 18835 bool BodyLineInvalid; 18836 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 18837 &BodyLineInvalid); 18838 if (BodyLineInvalid) 18839 return false; 18840 18841 // Warn if null statement and body are on the same line. 18842 if (StmtLine != BodyLine) 18843 return false; 18844 18845 return true; 18846 } 18847 18848 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 18849 const Stmt *Body, 18850 unsigned DiagID) { 18851 // Since this is a syntactic check, don't emit diagnostic for template 18852 // instantiations, this just adds noise. 18853 if (CurrentInstantiationScope) 18854 return; 18855 18856 // The body should be a null statement. 18857 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 18858 if (!NBody) 18859 return; 18860 18861 // Do the usual checks. 18862 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 18863 return; 18864 18865 Diag(NBody->getSemiLoc(), DiagID); 18866 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 18867 } 18868 18869 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 18870 const Stmt *PossibleBody) { 18871 assert(!CurrentInstantiationScope); // Ensured by caller 18872 18873 SourceLocation StmtLoc; 18874 const Stmt *Body; 18875 unsigned DiagID; 18876 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 18877 StmtLoc = FS->getRParenLoc(); 18878 Body = FS->getBody(); 18879 DiagID = diag::warn_empty_for_body; 18880 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 18881 StmtLoc = WS->getRParenLoc(); 18882 Body = WS->getBody(); 18883 DiagID = diag::warn_empty_while_body; 18884 } else 18885 return; // Neither `for' nor `while'. 18886 18887 // The body should be a null statement. 
18888 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 18889 if (!NBody) 18890 return; 18891 18892 // Skip expensive checks if diagnostic is disabled. 18893 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 18894 return; 18895 18896 // Do the usual checks. 18897 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 18898 return; 18899 18900 // `for(...);' and `while(...);' are popular idioms, so in order to keep 18901 // noise level low, emit diagnostics only if for/while is followed by a 18902 // CompoundStmt, e.g.: 18903 // for (int i = 0; i < n; i++); 18904 // { 18905 // a(i); 18906 // } 18907 // or if for/while is followed by a statement with more indentation 18908 // than for/while itself: 18909 // for (int i = 0; i < n; i++); 18910 // a(i); 18911 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 18912 if (!ProbableTypo) { 18913 bool BodyColInvalid; 18914 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 18915 PossibleBody->getBeginLoc(), &BodyColInvalid); 18916 if (BodyColInvalid) 18917 return; 18918 18919 bool StmtColInvalid; 18920 unsigned StmtCol = 18921 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 18922 if (StmtColInvalid) 18923 return; 18924 18925 if (BodyCol > StmtCol) 18926 ProbableTypo = true; 18927 } 18928 18929 if (ProbableTypo) { 18930 Diag(NBody->getSemiLoc(), DiagID); 18931 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 18932 } 18933 } 18934 18935 //===--- CHECK: Warn on self move with std::move. -------------------------===// 18936 18937 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 18938 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 18939 SourceLocation OpLoc) { 18940 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 18941 return; 18942 18943 if (inTemplateInstantiation()) 18944 return; 18945 18946 // Strip parens and casts away. 
18947 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 18948 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 18949 18950 // Check for a call expression 18951 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 18952 if (!CE || CE->getNumArgs() != 1) 18953 return; 18954 18955 // Check for a call to std::move 18956 if (!CE->isCallToStdMove()) 18957 return; 18958 18959 // Get argument from std::move 18960 RHSExpr = CE->getArg(0); 18961 18962 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 18963 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 18964 18965 // Two DeclRefExpr's, check that the decls are the same. 18966 if (LHSDeclRef && RHSDeclRef) { 18967 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 18968 return; 18969 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 18970 RHSDeclRef->getDecl()->getCanonicalDecl()) 18971 return; 18972 18973 auto D = Diag(OpLoc, diag::warn_self_move) 18974 << LHSExpr->getType() << LHSExpr->getSourceRange() 18975 << RHSExpr->getSourceRange(); 18976 if (const FieldDecl *F = 18977 getSelfAssignmentClassMemberCandidate(RHSDeclRef->getDecl())) 18978 D << 1 << F 18979 << FixItHint::CreateInsertion(LHSDeclRef->getBeginLoc(), "this->"); 18980 else 18981 D << 0; 18982 return; 18983 } 18984 18985 // Member variables require a different approach to check for self moves. 18986 // MemberExpr's are the same if every nested MemberExpr refers to the same 18987 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 18988 // the base Expr's are CXXThisExpr's. 
18989 const Expr *LHSBase = LHSExpr; 18990 const Expr *RHSBase = RHSExpr; 18991 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 18992 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 18993 if (!LHSME || !RHSME) 18994 return; 18995 18996 while (LHSME && RHSME) { 18997 if (LHSME->getMemberDecl()->getCanonicalDecl() != 18998 RHSME->getMemberDecl()->getCanonicalDecl()) 18999 return; 19000 19001 LHSBase = LHSME->getBase(); 19002 RHSBase = RHSME->getBase(); 19003 LHSME = dyn_cast<MemberExpr>(LHSBase); 19004 RHSME = dyn_cast<MemberExpr>(RHSBase); 19005 } 19006 19007 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 19008 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 19009 if (LHSDeclRef && RHSDeclRef) { 19010 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 19011 return; 19012 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 19013 RHSDeclRef->getDecl()->getCanonicalDecl()) 19014 return; 19015 19016 Diag(OpLoc, diag::warn_self_move) 19017 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange() 19018 << RHSExpr->getSourceRange(); 19019 return; 19020 } 19021 19022 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 19023 Diag(OpLoc, diag::warn_self_move) 19024 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange() 19025 << RHSExpr->getSourceRange(); 19026 } 19027 19028 //===--- Layout compatibility ----------------------------------------------// 19029 19030 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 19031 19032 /// Check if two enumeration types are layout-compatible. 19033 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 19034 // C++11 [dcl.enum] p8: 19035 // Two enumeration types are layout-compatible if they have the same 19036 // underlying type. 19037 return ED1->isComplete() && ED2->isComplete() && 19038 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 19039 } 19040 19041 /// Check if two fields are layout-compatible. 
19042 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 19043 FieldDecl *Field2) { 19044 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 19045 return false; 19046 19047 if (Field1->isBitField() != Field2->isBitField()) 19048 return false; 19049 19050 if (Field1->isBitField()) { 19051 // Make sure that the bit-fields are the same length. 19052 unsigned Bits1 = Field1->getBitWidthValue(C); 19053 unsigned Bits2 = Field2->getBitWidthValue(C); 19054 19055 if (Bits1 != Bits2) 19056 return false; 19057 } 19058 19059 return true; 19060 } 19061 19062 /// Check if two standard-layout structs are layout-compatible. 19063 /// (C++11 [class.mem] p17) 19064 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 19065 RecordDecl *RD2) { 19066 // If both records are C++ classes, check that base classes match. 19067 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 19068 // If one of records is a CXXRecordDecl we are in C++ mode, 19069 // thus the other one is a CXXRecordDecl, too. 19070 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 19071 // Check number of base classes. 19072 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 19073 return false; 19074 19075 // Check the base classes. 19076 for (CXXRecordDecl::base_class_const_iterator 19077 Base1 = D1CXX->bases_begin(), 19078 BaseEnd1 = D1CXX->bases_end(), 19079 Base2 = D2CXX->bases_begin(); 19080 Base1 != BaseEnd1; 19081 ++Base1, ++Base2) { 19082 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 19083 return false; 19084 } 19085 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 19086 // If only RD2 is a C++ class, it should have zero base classes. 19087 if (D2CXX->getNumBases() > 0) 19088 return false; 19089 } 19090 19091 // Check the fields. 
19092 RecordDecl::field_iterator Field2 = RD2->field_begin(), 19093 Field2End = RD2->field_end(), 19094 Field1 = RD1->field_begin(), 19095 Field1End = RD1->field_end(); 19096 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 19097 if (!isLayoutCompatible(C, *Field1, *Field2)) 19098 return false; 19099 } 19100 if (Field1 != Field1End || Field2 != Field2End) 19101 return false; 19102 19103 return true; 19104 } 19105 19106 /// Check if two standard-layout unions are layout-compatible. 19107 /// (C++11 [class.mem] p18) 19108 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 19109 RecordDecl *RD2) { 19110 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 19111 for (auto *Field2 : RD2->fields()) 19112 UnmatchedFields.insert(Field2); 19113 19114 for (auto *Field1 : RD1->fields()) { 19115 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 19116 I = UnmatchedFields.begin(), 19117 E = UnmatchedFields.end(); 19118 19119 for ( ; I != E; ++I) { 19120 if (isLayoutCompatible(C, Field1, *I)) { 19121 bool Result = UnmatchedFields.erase(*I); 19122 (void) Result; 19123 assert(Result); 19124 break; 19125 } 19126 } 19127 if (I == E) 19128 return false; 19129 } 19130 19131 return UnmatchedFields.empty(); 19132 } 19133 19134 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 19135 RecordDecl *RD2) { 19136 if (RD1->isUnion() != RD2->isUnion()) 19137 return false; 19138 19139 if (RD1->isUnion()) 19140 return isLayoutCompatibleUnion(C, RD1, RD2); 19141 else 19142 return isLayoutCompatibleStruct(C, RD1, RD2); 19143 } 19144 19145 /// Check if two types are layout-compatible in C++11 sense. 19146 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 19147 if (T1.isNull() || T2.isNull()) 19148 return false; 19149 19150 // C++11 [basic.types] p11: 19151 // If two types T1 and T2 are the same type, then T1 and T2 are 19152 // layout-compatible types. 
19153 if (C.hasSameType(T1, T2)) 19154 return true; 19155 19156 T1 = T1.getCanonicalType().getUnqualifiedType(); 19157 T2 = T2.getCanonicalType().getUnqualifiedType(); 19158 19159 const Type::TypeClass TC1 = T1->getTypeClass(); 19160 const Type::TypeClass TC2 = T2->getTypeClass(); 19161 19162 if (TC1 != TC2) 19163 return false; 19164 19165 if (TC1 == Type::Enum) { 19166 return isLayoutCompatible(C, 19167 cast<EnumType>(T1)->getDecl(), 19168 cast<EnumType>(T2)->getDecl()); 19169 } else if (TC1 == Type::Record) { 19170 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 19171 return false; 19172 19173 return isLayoutCompatible(C, 19174 cast<RecordType>(T1)->getDecl(), 19175 cast<RecordType>(T2)->getDecl()); 19176 } 19177 19178 return false; 19179 } 19180 19181 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 19182 19183 /// Given a type tag expression find the type tag itself. 19184 /// 19185 /// \param TypeExpr Type tag expression, as it appears in user's code. 19186 /// 19187 /// \param VD Declaration of an identifier that appears in a type tag. 19188 /// 19189 /// \param MagicValue Type tag magic value. 19190 /// 19191 /// \param isConstantEvaluated whether the evalaution should be performed in 19192 19193 /// constant context. 
19194 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 19195 const ValueDecl **VD, uint64_t *MagicValue, 19196 bool isConstantEvaluated) { 19197 while(true) { 19198 if (!TypeExpr) 19199 return false; 19200 19201 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 19202 19203 switch (TypeExpr->getStmtClass()) { 19204 case Stmt::UnaryOperatorClass: { 19205 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 19206 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 19207 TypeExpr = UO->getSubExpr(); 19208 continue; 19209 } 19210 return false; 19211 } 19212 19213 case Stmt::DeclRefExprClass: { 19214 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 19215 *VD = DRE->getDecl(); 19216 return true; 19217 } 19218 19219 case Stmt::IntegerLiteralClass: { 19220 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 19221 llvm::APInt MagicValueAPInt = IL->getValue(); 19222 if (MagicValueAPInt.getActiveBits() <= 64) { 19223 *MagicValue = MagicValueAPInt.getZExtValue(); 19224 return true; 19225 } else 19226 return false; 19227 } 19228 19229 case Stmt::BinaryConditionalOperatorClass: 19230 case Stmt::ConditionalOperatorClass: { 19231 const AbstractConditionalOperator *ACO = 19232 cast<AbstractConditionalOperator>(TypeExpr); 19233 bool Result; 19234 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 19235 isConstantEvaluated)) { 19236 if (Result) 19237 TypeExpr = ACO->getTrueExpr(); 19238 else 19239 TypeExpr = ACO->getFalseExpr(); 19240 continue; 19241 } 19242 return false; 19243 } 19244 19245 case Stmt::BinaryOperatorClass: { 19246 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 19247 if (BO->getOpcode() == BO_Comma) { 19248 TypeExpr = BO->getRHS(); 19249 continue; 19250 } 19251 return false; 19252 } 19253 19254 default: 19255 return false; 19256 } 19257 } 19258 } 19259 19260 /// Retrieve the C type corresponding to type tag TypeExpr. 
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
/// kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
    const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
        *MagicValues,
    bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
    bool isConstantEvaluated) {
  FoundWrongKind = false;

  // Variable declaration that has type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
    return false;

  if (VD) {
    // The tag was an identifier: look up its type_tag_for_datatype attribute
    // and check that the attribute's kind matches the one being checked.
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  // The tag was a magic value: look it up among the registered values, if any.
  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}

void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                      uint64_t MagicValue, QualType Type,
                                      bool LayoutCompatible,
                                      bool MustBeNull) {
  // Lazily allocate the magic-value map on first registration.
  if (!TypeTagForDatatypeMagicValues)
    TypeTagForDatatypeMagicValues.reset(
        new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);

  TypeTagMagicValue Magic(ArgumentKind, MagicValue);
  (*TypeTagForDatatypeMagicValues)[Magic] =
      TypeTagData(Type, LayoutCompatible, MustBeNull);
}

// Returns true if T1 and T2 denote the same character type modulo the
// target-dependent signedness of plain `char' (Char_S/Char_U).
static bool IsSameCharType(QualType T1, QualType T2) {
  const BuiltinType *BT1 = T1->getAs<BuiltinType>();
  if (!BT1)
    return false;

  const BuiltinType *BT2 = T2->getAs<BuiltinType>();
  if (!BT2)
    return false;

  BuiltinType::Kind T1Kind = BT1->getKind();
  BuiltinType::Kind T2Kind = BT2->getKind();

  return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
         (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
         (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
         (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
}

// Checks a call against an argument_with_type_tag attribute: the argument at
// the attribute's arg_idx must have the C type registered for the type tag
// found at type_tag_idx.
void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
                        TypeInfo, isConstantEvaluatedContext())) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void*' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // Type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(Context,
                                             Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  QualType RequiredType = TypeInfo.Type;
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    // Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else
    if (IsPointerAttr)
      mismatch = !isLayoutCompatible(Context,
                                     ArgumentType->getPointeeType(),
                                     RequiredType->getPointeeType());
    else
      mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}

// Records a potentially misaligned member access; diagnosed later by
// DiagnoseMisalignedMembers unless discarded first.
void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

// Emits -Waddress-of-packed-member for every recorded misaligned member
// access, then clears the list.
void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    // Name anonymous records through their typedef, if any.
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

// Removes a pending misaligned-member record when E (an address-of of that
// member) is converted to a type T whose alignment requirement the member
// actually satisfies.
void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType() && !T->isDependentType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto *MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isDependentType() || T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}

// Walks a (possibly nested) member-access expression; if it refers to a
// member whose effective alignment is reduced by a packed attribute somewhere
// in the chain, invokes Action with the culprit field and its alignment.
void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (BaseType->isDependentType())
      return;
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain))
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD));

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // It may fulfill the offset it but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit of this.
    // Intuitively, watching the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type) but some packed attribute in that chain has reduced the alignment.
    // It may happen that another packed structure increases it again. But if
    // we are here such increase has not been enough. So pointing the first
    // FieldDecl that either is packed or else its RecordDecl is,
    // seems reasonable.
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

// Entry point for -Waddress-of-packed-member on the RHS of an assignment or
// initialization: records (does not immediately emit) the diagnostic.
void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}

// Converts and validates the single argument of a unary elementwise math
// builtin; the call's result type becomes the (converted) argument type.
// Returns true on error.
bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  QualType TyA = A.get()->getType();

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setType(TyA);
  return false;
}

// Checks a binary elementwise math builtin: both arguments must promote to
// the same type, which becomes the call's result type. Returns true on error.
bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult A = TheCall->getArg(0);
  ExprResult B = TheCall->getArg(1);
  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res =
      UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison);
  if (A.isInvalid() || B.isInvalid())
    return true;

  QualType TyA = A.get()->getType();
  QualType TyB = B.get()->getType();

  if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType())
    return Diag(A.get()->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << TyA << TyB;

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setArg(0, A.get());
  TheCall->setArg(1, B.get());
  TheCall->setType(Res);
  return false;
}

// Checks a ternary elementwise math builtin: all three arguments must have
// the same floating-point (element) type after promotion. Returns true on
// error.
bool Sema::SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 3))
    return true;

  Expr *Args[3];
  for (int I = 0; I < 3; ++I) {
    ExprResult Converted = UsualUnaryConversions(TheCall->getArg(I));
    if (Converted.isInvalid())
      return true;
    Args[I] = Converted.get();
  }

  int ArgOrdinal = 1;
  for (Expr *Arg : Args) {
    if (checkFPMathBuiltinElementType(*this, Arg->getBeginLoc(), Arg->getType(),
                                      ArgOrdinal++))
      return true;
  }

  for (int I = 1; I < 3; ++I) {
    if (Args[0]->getType().getCanonicalType() !=
        Args[I]->getType().getCanonicalType()) {
      return Diag(Args[0]->getBeginLoc(),
                  diag::err_typecheck_call_different_arg_types)
             << Args[0]->getType() << Args[I]->getType();
    }

    TheCall->setArg(I, Args[I]);
  }

  TheCall->setType(Args[0]->getType());
  return false;
}

// Applies the usual unary conversions to the single argument of a reduction
// math builtin. Returns true on error.
bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  return false;
}

bool Sema::SemaBuiltinNonDeterministicValue(CallExpr *TheCall) { 19693 if (checkArgCount(*this, TheCall, 1)) 19694 return true; 19695 19696 ExprResult Arg = TheCall->getArg(0); 19697 QualType TyArg = Arg.get()->getType(); 19698 19699 if (!TyArg->isBuiltinType() && !TyArg->isVectorType()) 19700 return Diag(TheCall->getArg(0)->getBeginLoc(), diag::err_builtin_invalid_arg_type) 19701 << 1 << /*vector, integer or floating point ty*/ 0 << TyArg; 19702 19703 TheCall->setType(TyArg); 19704 return false; 19705 } 19706 19707 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 19708 ExprResult CallResult) { 19709 if (checkArgCount(*this, TheCall, 1)) 19710 return ExprError(); 19711 19712 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 19713 if (MatrixArg.isInvalid()) 19714 return MatrixArg; 19715 Expr *Matrix = MatrixArg.get(); 19716 19717 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 19718 if (!MType) { 19719 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type) 19720 << 1 << /* matrix ty*/ 1 << Matrix->getType(); 19721 return ExprError(); 19722 } 19723 19724 // Create returned matrix type by swapping rows and columns of the argument 19725 // matrix type. 19726 QualType ResultType = Context.getConstantMatrixType( 19727 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 19728 19729 // Change the return type to the type of the returned matrix. 19730 TheCall->setType(ResultType); 19731 19732 // Update call argument to use the possibly converted matrix argument. 19733 TheCall->setArg(0, Matrix); 19734 return CallResult; 19735 } 19736 19737 // Get and verify the matrix dimensions. 
19738 static std::optional<unsigned> 19739 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 19740 SourceLocation ErrorPos; 19741 std::optional<llvm::APSInt> Value = 19742 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 19743 if (!Value) { 19744 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 19745 << Name; 19746 return {}; 19747 } 19748 uint64_t Dim = Value->getZExtValue(); 19749 if (!ConstantMatrixType::isDimensionValid(Dim)) { 19750 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 19751 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 19752 return {}; 19753 } 19754 return Dim; 19755 } 19756 19757 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 19758 ExprResult CallResult) { 19759 if (!getLangOpts().MatrixTypes) { 19760 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 19761 return ExprError(); 19762 } 19763 19764 if (checkArgCount(*this, TheCall, 4)) 19765 return ExprError(); 19766 19767 unsigned PtrArgIdx = 0; 19768 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 19769 Expr *RowsExpr = TheCall->getArg(1); 19770 Expr *ColumnsExpr = TheCall->getArg(2); 19771 Expr *StrideExpr = TheCall->getArg(3); 19772 19773 bool ArgError = false; 19774 19775 // Check pointer argument. 
19776 { 19777 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 19778 if (PtrConv.isInvalid()) 19779 return PtrConv; 19780 PtrExpr = PtrConv.get(); 19781 TheCall->setArg(0, PtrExpr); 19782 if (PtrExpr->isTypeDependent()) { 19783 TheCall->setType(Context.DependentTy); 19784 return TheCall; 19785 } 19786 } 19787 19788 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 19789 QualType ElementTy; 19790 if (!PtrTy) { 19791 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 19792 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 19793 ArgError = true; 19794 } else { 19795 ElementTy = PtrTy->getPointeeType().getUnqualifiedType(); 19796 19797 if (!ConstantMatrixType::isValidElementType(ElementTy)) { 19798 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 19799 << PtrArgIdx + 1 << /* pointer to element ty*/ 2 19800 << PtrExpr->getType(); 19801 ArgError = true; 19802 } 19803 } 19804 19805 // Apply default Lvalue conversions and convert the expression to size_t. 19806 auto ApplyArgumentConversions = [this](Expr *E) { 19807 ExprResult Conv = DefaultLvalueConversion(E); 19808 if (Conv.isInvalid()) 19809 return Conv; 19810 19811 return tryConvertExprToType(Conv.get(), Context.getSizeType()); 19812 }; 19813 19814 // Apply conversion to row and column expressions. 19815 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr); 19816 if (!RowsConv.isInvalid()) { 19817 RowsExpr = RowsConv.get(); 19818 TheCall->setArg(1, RowsExpr); 19819 } else 19820 RowsExpr = nullptr; 19821 19822 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr); 19823 if (!ColumnsConv.isInvalid()) { 19824 ColumnsExpr = ColumnsConv.get(); 19825 TheCall->setArg(2, ColumnsExpr); 19826 } else 19827 ColumnsExpr = nullptr; 19828 19829 // If any part of the result matrix type is still pending, just use 19830 // Context.DependentTy, until all parts are resolved. 
  // Propagate dependence: if either dimension is still type-dependent, the
  // result matrix type cannot be computed yet.
  if ((RowsExpr && RowsExpr->isTypeDependent()) ||
      (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  // Check row and column dimensions.
  std::optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  std::optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  // A constant stride smaller than the row count would make consecutive
  // columns overlap in memory; diagnose it here. Non-constant strides are
  // not checked.
  if (MaybeRows) {
    if (std::optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < *MaybeRows) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  // All diagnostics have been emitted above; fail if anything went wrong or
  // either dimension could not be validated.
  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}

/// Typecheck a call to __builtin_matrix_column_major_store(matrix, ptr,
/// stride).
///
/// Validates that the first argument is a constant matrix, that the pointer
/// argument points to a mutable type matching the matrix's element type, and
/// that the stride converts to size_t (a constant stride must be at least the
/// matrix's row count). Emits diagnostics for each violation and returns
/// ExprError() if any were found; otherwise returns CallResult.
ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  // Set when an argument is diagnosed as invalid; checking continues so all
  // argument errors are reported in one pass.
  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);
  }
  if (MatrixExpr->isTypeDependent()) {
    TheCall->setType(Context.DependentTy);
    return TheCall;
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /*matrix ty */ 1 << MatrixExpr->getType();
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    // Storing through a pointer-to-const is an error.
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    // Compare canonical, unqualified types so cv-qualifiers and sugar do not
    // cause spurious mismatches.
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
  if (MatrixTy) {
    if (std::optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      // A constant stride below the row count would overlap columns.
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}

/// Checks the argument at the given index is a WebAssembly table and if it
/// is, sets ElTy to the element type.
///
/// On failure, emits err_wasm_builtin_arg_must_be_table_type and returns
/// true; ElTy is left unmodified in that case.
static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
                                       QualType &ElTy) {
  Expr *ArgExpr = E->getArg(ArgIndex);
  // A WebAssembly table is represented as an array of a WebAssembly
  // reference type.
  const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
  if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
    return S.Diag(ArgExpr->getBeginLoc(),
                  diag::err_wasm_builtin_arg_must_be_table_type)
           << ArgIndex + 1 << ArgExpr->getSourceRange();
  }
  ElTy = ATy->getElementType();
  return false;
}

/// Checks the argument at the given index is an integer.
19987 static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E, 19988 unsigned ArgIndex) { 19989 Expr *ArgExpr = E->getArg(ArgIndex); 19990 if (!ArgExpr->getType()->isIntegerType()) { 19991 return S.Diag(ArgExpr->getBeginLoc(), 19992 diag::err_wasm_builtin_arg_must_be_integer_type) 19993 << ArgIndex + 1 << ArgExpr->getSourceRange(); 19994 } 19995 return false; 19996 } 19997 19998 /// Check that the first argument is a WebAssembly table, and the second 19999 /// is an index to use as index into the table. 20000 bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) { 20001 if (checkArgCount(*this, TheCall, 2)) 20002 return true; 20003 20004 QualType ElTy; 20005 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 20006 return true; 20007 20008 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1)) 20009 return true; 20010 20011 // If all is well, we set the type of TheCall to be the type of the 20012 // element of the table. 20013 // i.e. a table.get on an externref table has type externref, 20014 // or whatever the type of the table element is. 20015 TheCall->setType(ElTy); 20016 20017 return false; 20018 } 20019 20020 /// Check that the first argumnet is a WebAssembly table, the second is 20021 /// an index to use as index into the table and the third is the reference 20022 /// type to set into the table. 20023 bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) { 20024 if (checkArgCount(*this, TheCall, 3)) 20025 return true; 20026 20027 QualType ElTy; 20028 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 20029 return true; 20030 20031 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1)) 20032 return true; 20033 20034 if (!Context.hasSameType(ElTy, TheCall->getArg(2)->getType())) 20035 return true; 20036 20037 return false; 20038 } 20039 20040 /// Check that the argument is a WebAssembly table. 
20041 bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) { 20042 if (checkArgCount(*this, TheCall, 1)) 20043 return true; 20044 20045 QualType ElTy; 20046 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 20047 return true; 20048 20049 return false; 20050 } 20051 20052 /// Check that the first argument is a WebAssembly table, the second is the 20053 /// value to use for new elements (of a type matching the table type), the 20054 /// third value is an integer. 20055 bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) { 20056 if (checkArgCount(*this, TheCall, 3)) 20057 return true; 20058 20059 QualType ElTy; 20060 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 20061 return true; 20062 20063 Expr *NewElemArg = TheCall->getArg(1); 20064 if (!Context.hasSameType(ElTy, NewElemArg->getType())) { 20065 return Diag(NewElemArg->getBeginLoc(), 20066 diag::err_wasm_builtin_arg_must_match_table_element_type) 20067 << 2 << 1 << NewElemArg->getSourceRange(); 20068 } 20069 20070 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 2)) 20071 return true; 20072 20073 return false; 20074 } 20075 20076 /// Check that the first argument is a WebAssembly table, the second is an 20077 /// integer, the third is the value to use to fill the table (of a type 20078 /// matching the table type), and the fourth is an integer. 
20079 bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) { 20080 if (checkArgCount(*this, TheCall, 4)) 20081 return true; 20082 20083 QualType ElTy; 20084 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 20085 return true; 20086 20087 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1)) 20088 return true; 20089 20090 Expr *NewElemArg = TheCall->getArg(2); 20091 if (!Context.hasSameType(ElTy, NewElemArg->getType())) { 20092 return Diag(NewElemArg->getBeginLoc(), 20093 diag::err_wasm_builtin_arg_must_match_table_element_type) 20094 << 3 << 1 << NewElemArg->getSourceRange(); 20095 } 20096 20097 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 3)) 20098 return true; 20099 20100 return false; 20101 } 20102 20103 /// Check that the first argument is a WebAssembly table, the second is also a 20104 /// WebAssembly table (of the same element type), and the third to fifth 20105 /// arguments are integers. 20106 bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) { 20107 if (checkArgCount(*this, TheCall, 5)) 20108 return true; 20109 20110 QualType XElTy; 20111 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, XElTy)) 20112 return true; 20113 20114 QualType YElTy; 20115 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 1, YElTy)) 20116 return true; 20117 20118 Expr *TableYArg = TheCall->getArg(1); 20119 if (!Context.hasSameType(XElTy, YElTy)) { 20120 return Diag(TableYArg->getBeginLoc(), 20121 diag::err_wasm_builtin_arg_must_match_table_element_type) 20122 << 2 << 1 << TableYArg->getSourceRange(); 20123 } 20124 20125 for (int I = 2; I <= 4; I++) { 20126 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, I)) 20127 return true; 20128 } 20129 20130 return false; 20131 } 20132 20133 /// \brief Enforce the bounds of a TCB 20134 /// CheckTCBEnforcement - Enforces that every function in a named TCB only 20135 /// directly calls other functions in the same TCB as marked by the enforce_tcb 20136 /// and enforce_tcb_leaf attributes. 
20137 void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc, 20138 const NamedDecl *Callee) { 20139 // This warning does not make sense in code that has no runtime behavior. 20140 if (isUnevaluatedContext()) 20141 return; 20142 20143 const NamedDecl *Caller = getCurFunctionOrMethodDecl(); 20144 20145 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>()) 20146 return; 20147 20148 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 20149 // all TCBs the callee is a part of. 20150 llvm::StringSet<> CalleeTCBs; 20151 for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>()) 20152 CalleeTCBs.insert(A->getTCBName()); 20153 for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>()) 20154 CalleeTCBs.insert(A->getTCBName()); 20155 20156 // Go through the TCBs the caller is a part of and emit warnings if Caller 20157 // is in a TCB that the Callee is not. 20158 for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) { 20159 StringRef CallerTCB = A->getTCBName(); 20160 if (CalleeTCBs.count(CallerTCB) == 0) { 20161 this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation) 20162 << Callee << CallerTCB; 20163 } 20164 } 20165 } 20166