//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
57 #include "clang/Sema/Initialization.h" 58 #include "clang/Sema/Lookup.h" 59 #include "clang/Sema/Ownership.h" 60 #include "clang/Sema/Scope.h" 61 #include "clang/Sema/ScopeInfo.h" 62 #include "clang/Sema/Sema.h" 63 #include "clang/Sema/SemaInternal.h" 64 #include "llvm/ADT/APFloat.h" 65 #include "llvm/ADT/APInt.h" 66 #include "llvm/ADT/APSInt.h" 67 #include "llvm/ADT/ArrayRef.h" 68 #include "llvm/ADT/DenseMap.h" 69 #include "llvm/ADT/FoldingSet.h" 70 #include "llvm/ADT/STLExtras.h" 71 #include "llvm/ADT/SmallBitVector.h" 72 #include "llvm/ADT/SmallPtrSet.h" 73 #include "llvm/ADT/SmallString.h" 74 #include "llvm/ADT/SmallVector.h" 75 #include "llvm/ADT/StringExtras.h" 76 #include "llvm/ADT/StringRef.h" 77 #include "llvm/ADT/StringSet.h" 78 #include "llvm/ADT/StringSwitch.h" 79 #include "llvm/Support/AtomicOrdering.h" 80 #include "llvm/Support/Casting.h" 81 #include "llvm/Support/Compiler.h" 82 #include "llvm/Support/ConvertUTF.h" 83 #include "llvm/Support/ErrorHandling.h" 84 #include "llvm/Support/Format.h" 85 #include "llvm/Support/Locale.h" 86 #include "llvm/Support/MathExtras.h" 87 #include "llvm/Support/SaveAndRestore.h" 88 #include "llvm/Support/raw_ostream.h" 89 #include "llvm/TargetParser/Triple.h" 90 #include <algorithm> 91 #include <bitset> 92 #include <cassert> 93 #include <cctype> 94 #include <cstddef> 95 #include <cstdint> 96 #include <functional> 97 #include <limits> 98 #include <optional> 99 #include <string> 100 #include <tuple> 101 #include <utility> 102 103 using namespace clang; 104 using namespace sema; 105 106 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 107 unsigned ByteNo) const { 108 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 109 Context.getTargetInfo()); 110 } 111 112 static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A, 113 Sema::FormatArgumentPassingKind B) { 114 return (A << 8) | B; 115 } 116 117 /// Checks that a call expression's argument count is at least the desired 118 /// number. This is useful when doing custom type-checking on a variadic 119 /// function. Returns true on error. 120 static bool checkArgCountAtLeast(Sema &S, CallExpr *Call, 121 unsigned MinArgCount) { 122 unsigned ArgCount = Call->getNumArgs(); 123 if (ArgCount >= MinArgCount) 124 return false; 125 126 return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args) 127 << 0 /*function call*/ << MinArgCount << ArgCount 128 << Call->getSourceRange(); 129 } 130 131 /// Checks that a call expression's argument count is at most the desired 132 /// number. This is useful when doing custom type-checking on a variadic 133 /// function. Returns true on error. 134 static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) { 135 unsigned ArgCount = Call->getNumArgs(); 136 if (ArgCount <= MaxArgCount) 137 return false; 138 return S.Diag(Call->getEndLoc(), 139 diag::err_typecheck_call_too_many_args_at_most) 140 << 0 /*function call*/ << MaxArgCount << ArgCount 141 << Call->getSourceRange(); 142 } 143 144 /// Checks that a call expression's argument count is in the desired range. This 145 /// is useful when doing custom type-checking on a variadic function. Returns 146 /// true on error. 147 static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount, 148 unsigned MaxArgCount) { 149 return checkArgCountAtLeast(S, Call, MinArgCount) || 150 checkArgCountAtMost(S, Call, MaxArgCount); 151 } 152 153 /// Checks that a call expression's argument count is the desired number. 
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount == DesiredArgCount)
    return false;

  if (checkArgCountAtLeast(S, Call, DesiredArgCount))
    return true;
  assert(ArgCount > DesiredArgCount && "should have diagnosed this");

  // Highlight all the excess arguments.
  SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
                    Call->getArg(ArgCount - 1)->getEndLoc());

  return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << DesiredArgCount << ArgCount
         << Call->getArg(1)->getSourceRange();
}

static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) {
  if (Value->isTypeDependent())
    return false;

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, Ty, false);
  ExprResult Result =
      S.PerformCopyInitialization(Entity, SourceLocation(), Value);
  if (Result.isInvalid())
    return true;
  Value = Result.get();
  return false;
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isOrdinary()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check that the argument to __builtin_function_start is a function.
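/// A representative use (illustrative only):
///   void f();
///   const void *P = __builtin_function_start(f);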
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment)
/// and __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
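  // Illustrative values for the checks below: an alignment of 0 is rejected
  // as too small, 3 as not a power of two, 1 only triggers the "useless"
  // warning, and anything above 1 << (bit-width of the source type - 1) is
  // rejected as too big.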
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ?
                    Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}

namespace {
struct BuiltinDumpStructGenerator {
  Sema &S;
  CallExpr *TheCall;
  SourceLocation Loc = TheCall->getBeginLoc();
  SmallVector<Expr *, 32> Actions;
  DiagnosticErrorTrap ErrorTracker;
  PrintingPolicy Policy;

  BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
      : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
        Policy(S.Context.getPrintingPolicy()) {
    Policy.AnonymousTagLocations = false;
  }

  Expr *makeOpaqueValueExpr(Expr *Inner) {
    auto *OVE = new (S.Context)
        OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
                        Inner->getObjectKind(), Inner);
    Actions.push_back(OVE);
    return OVE;
  }

  Expr *getStringLiteral(llvm::StringRef Str) {
    Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str);
    // Wrap the literal in parentheses to attach a source location.
    return new (S.Context) ParenExpr(Loc, Loc, Lit);
  }

  bool callPrintFunction(llvm::StringRef Format,
                         llvm::ArrayRef<Expr *> Exprs = {}) {
    SmallVector<Expr *, 8> Args;
    assert(TheCall->getNumArgs() >= 2);
    Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
    Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
    Args.push_back(getStringLiteral(Format));
    Args.insert(Args.end(), Exprs.begin(), Exprs.end());

    // Register a note to explain why we're performing the call.
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
    Ctx.PointOfInstantiation = Loc;
    Ctx.CallArgs = Args.data();
    Ctx.NumCallArgs = Args.size();
    S.pushCodeSynthesisContext(Ctx);

    ExprResult RealCall =
        S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
                        TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());

    S.popCodeSynthesisContext();
    if (!RealCall.isInvalid())
      Actions.push_back(RealCall.get());
    // Bail out if we've hit any errors, even if we managed to build the
    // call. We don't want to produce more than one error.
    return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
  }

  Expr *getIndentString(unsigned Depth) {
    if (!Depth)
      return nullptr;

    llvm::SmallString<32> Indent;
    Indent.resize(Depth * Policy.Indentation, ' ');
    return getStringLiteral(Indent);
  }

  Expr *getTypeString(QualType T) {
    return getStringLiteral(T.getAsString(Policy));
  }

  bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
    llvm::raw_svector_ostream OS(Str);

    // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
    // than trying to print a single character.
    if (auto *BT = T->getAs<BuiltinType>()) {
      switch (BT->getKind()) {
      case BuiltinType::Bool:
        OS << "%d";
        return true;
      case BuiltinType::Char_U:
      case BuiltinType::UChar:
        OS << "%hhu";
        return true;
      case BuiltinType::Char_S:
      case BuiltinType::SChar:
        OS << "%hhd";
        return true;
      default:
        break;
      }
    }

    analyze_printf::PrintfSpecifier Specifier;
    if (Specifier.fixType(T, S.getLangOpts(), S.Context,
                          /*IsObjCLiteral=*/false)) {
      // We were able to guess how to format this.
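      // (Illustrative guess: a 'float' field would typically come back as
      // "%f" and an 'unsigned long' field as "%lu".)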
      if (Specifier.getConversionSpecifier().getKind() ==
          analyze_printf::PrintfConversionSpecifier::sArg) {
        // Wrap double-quotes around a '%s' specifier and limit its maximum
        // length. Ideally we'd also somehow escape special characters in the
        // contents but printf doesn't support that.
        // FIXME: '%s' formatting is not safe in general.
        OS << '"';
        Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
        Specifier.toString(OS);
        OS << '"';
        // FIXME: It would be nice to include a '...' if the string doesn't fit
        // in the length limit.
      } else {
        Specifier.toString(OS);
      }
      return true;
    }

    if (T->isPointerType()) {
      // Format all pointers with '%p'.
      OS << "%p";
      return true;
    }

    return false;
  }

  bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
    Expr *IndentLit = getIndentString(Depth);
    Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
    if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
                  : callPrintFunction("%s", {TypeLit}))
      return true;

    return dumpRecordValue(RD, E, IndentLit, Depth);
  }

  // Dump a record value. E should be a pointer or lvalue referring to an RD.
  bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
                       unsigned Depth) {
    // FIXME: Decide what to do if RD is a union. At least we should probably
    // turn off printing `const char*` members with `%s`, because that is very
    // likely to crash if that's not the active member. Whatever we decide, we
    // should document it.

    // Build an OpaqueValueExpr so we can refer to E more than once without
    // triggering re-evaluation.
    Expr *RecordArg = makeOpaqueValueExpr(E);
    bool RecordArgIsPtr = RecordArg->getType()->isPointerType();

    if (callPrintFunction(" {\n"))
      return true;

    // Dump each base class, regardless of whether they're aggregates.
    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &Base : CXXRD->bases()) {
        QualType BaseType =
            RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
                           : S.Context.getLValueReferenceType(Base.getType());
        ExprResult BasePtr = S.BuildCStyleCastExpr(
            Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
            RecordArg);
        if (BasePtr.isInvalid() ||
            dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
                              Depth + 1))
          return true;
      }
    }

    Expr *FieldIndentArg = getIndentString(Depth + 1);

    // Dump each field.
    for (auto *D : RD->decls()) {
      auto *IFD = dyn_cast<IndirectFieldDecl>(D);
      auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
      if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
        continue;

      llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
      llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
                                           getTypeString(FD->getType()),
                                           getStringLiteral(FD->getName())};

      if (FD->isBitField()) {
        Format += ": %zu ";
        QualType SizeT = S.Context.getSizeType();
        llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
                             FD->getBitWidthValue(S.Context));
        Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
      }

      Format += "=";

      ExprResult Field =
          IFD ?
              S.BuildAnonymousStructUnionMemberReference(
                  CXXScopeSpec(), Loc, IFD,
                  DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
              : S.BuildFieldReferenceExpr(
                    RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
                    DeclAccessPair::make(FD, AS_public),
                    DeclarationNameInfo(FD->getDeclName(), Loc));
      if (Field.isInvalid())
        return true;

      auto *InnerRD = FD->getType()->getAsRecordDecl();
      auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
      if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
        // Recursively print the values of members of aggregate record type.
        if (callPrintFunction(Format, Args) ||
            dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
          return true;
      } else {
        Format += " ";
        if (appendFormatSpecifier(FD->getType(), Format)) {
          // We know how to print this field.
          Args.push_back(Field.get());
        } else {
          // We don't know how to print this field. Print out its address
          // with a format specifier that a smart tool will be able to
          // recognize and treat specially.
          Format += "*%p";
          ExprResult FieldAddr =
              S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
          if (FieldAddr.isInvalid())
            return true;
          Args.push_back(FieldAddr.get());
        }
        Format += "\n";
        if (callPrintFunction(Format, Args))
          return true;
      }
    }

    return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
                        : callPrintFunction("}\n");
  }

  Expr *buildWrapper() {
    auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
                                             PseudoObjectExpr::NoResult);
    TheCall->setType(Wrapper->getType());
    TheCall->setValueKind(Wrapper->getValueKind());
    return Wrapper;
  }
};
} // namespace

static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
  if (checkArgCountAtLeast(S, TheCall, 2))
    return ExprError();

  ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
  if (PtrArgResult.isInvalid())
    return ExprError();
  TheCall->setArg(0, PtrArgResult.get());

  // First argument should be a pointer to a struct.
  QualType PtrArgType = PtrArgResult.get()->getType();
  if (!PtrArgType->isPointerType() ||
      !PtrArgType->getPointeeType()->isRecordType()) {
    S.Diag(PtrArgResult.get()->getBeginLoc(),
           diag::err_expected_struct_pointer_argument)
        << 1 << TheCall->getDirectCallee() << PtrArgType;
    return ExprError();
  }
  const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl();

  // Second argument is a callable, but we can't fully validate it until we try
  // calling it.
  QualType FnArgType = TheCall->getArg(1)->getType();
  if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
      !FnArgType->isBlockPointerType() &&
      !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
    auto *BT = FnArgType->getAs<BuiltinType>();
    switch (BT ? BT->getKind() : BuiltinType::Void) {
    case BuiltinType::Dependent:
    case BuiltinType::Overload:
    case BuiltinType::BoundMember:
    case BuiltinType::PseudoObject:
    case BuiltinType::UnknownAny:
    case BuiltinType::BuiltinFn:
      // This might be a callable.
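      // (For instance, an overload set or a bound member function reference;
      // the call built later performs the real validation.)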
      break;

    default:
      S.Diag(TheCall->getArg(1)->getBeginLoc(),
             diag::err_expected_callable_argument)
          << 2 << TheCall->getDirectCallee() << FnArgType;
      return ExprError();
    }
  }

  BuiltinDumpStructGenerator Generator(S, TheCall);

  // Wrap parentheses around the given pointer. This is not necessary for
  // correct code generation, but it means that when we pretty-print the call
  // arguments in our diagnostics we will produce '(&s)->n' instead of the
  // incorrect '&s->n'.
  Expr *PtrArg = PtrArgResult.get();
  PtrArg = new (S.Context)
      ParenExpr(PtrArg->getBeginLoc(),
                S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
  if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
    return ExprError();

  return Generator.buildWrapper();
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of the
  // argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size.
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    std::optional<llvm::APSInt> DestSizeAPS =
        ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
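    // For instance, printf("%f", x) behaves like printf("%.6f", x), so the
    // floating-point conversions below default to a precision of 6, while the
    // integer conversions default to 1 (at least one digit).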
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ?
                                DABAttr->argIndices_begin()[Index]
                            : Index - DABIndices + FD->getNumParams();
    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;
    return NewIndex;
  };

  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(NewIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return std::nullopt;
    llvm::APSInt Integer = Result.Val.getInt();
    Integer.setIsUnsigned(true);
    return Integer;
  };

  auto ComputeSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return std::nullopt;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  auto ComputeStrLenArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return std::nullopt;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  std::optional<llvm::APSInt> SourceSize;
  std::optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
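    // For instance, "__builtin___memcpy_chk" is reported simply as "memcpy",
    // and "__builtin_strcpy" as "strcpy".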
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else if (FunctionName.startswith("__builtin_")) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
    }
    return FunctionName;
  };

  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
    if (!Format)
      return;

    if (!Format->isOrdinary() && !Format->isUTF8())
      return;

    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    StringRef FormatStrRef = Format->getString();
    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    const ConstantArrayType *T =
        Context.getAsConstantArrayType(Format->getType());
    assert(T && "String literal not of constant array type!");
    size_t TypeSize = T->getSize().getZExtValue();

    // In case there's a null byte somewhere.
    size_t StrLen =
        std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));

    analyze_format_string::ParseScanfString(H, FormatBytes,
                                            FormatBytes + StrLen, getLangOpts(),
                                            Context.getTargetInfo());

    // Unlike the other cases, in this one we have already issued the diagnostic
    // here, so no need to continue (because unlike the other cases, here the
    // diagnostic refers to the argument number).
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ?
                             1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isOrdinary() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
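    // For example (illustrative): with 'char buf[4];', a call such as
    // 'strncpy(buf, src, 10)' is flagged because the bound 10 exceeds the
    // destination's size.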
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  }

  if (!SourceSize || !DestinationSize ||
      llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0)
    return;

  StringRef FunctionName = GetFunctionName();

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error.
      // If a block literal has been passed (BlockExpr) then we can point
      // straight to the offending argument, else we just point to the
      // variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  // OpenCL device can support extension but not the feature as extension
  // requires subgroup independent forward progress, but subgroup independent
  // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
      !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
                                        S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee()
        << "cl_khr_subgroups or __opencl_c_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
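/// For instance (illustrative), a call such as
///   enqueue_kernel(queue, flags, ndrange,
///                  ^(local void *a, local void *b){ ... }, 64, 128);
/// must pass one size argument per 'local void*' block parameter.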
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
1570 if (!isBlockPointer(Arg3)) {
1571 S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1572 << TheCall->getDirectCallee() << "block";
1573 return true;
1574 }
1575 // We have a block type; check the prototype.
1576 const BlockPointerType *BPT =
1577 cast<BlockPointerType>(Arg3->getType().getCanonicalType());
1578 if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
1579 S.Diag(Arg3->getBeginLoc(),
1580 diag::err_opencl_enqueue_kernel_blocks_no_args);
1581 return true;
1582 }
1583 return false;
1584 }
1585 // We can have block + varargs.
1586 if (isBlockPointer(Arg3))
1587 return (checkOpenCLBlockArgs(S, Arg3) ||
1588 checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
1589 // Last two cases: either exactly 7 args, or 7 args plus varargs.
1590 if (NumArgs >= 7) {
1591 // Check the common block argument.
1592 Expr *Arg6 = TheCall->getArg(6);
1593 if (!isBlockPointer(Arg6)) {
1594 S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1595 << TheCall->getDirectCallee() << "block";
1596 return true;
1597 }
1598 if (checkOpenCLBlockArgs(S, Arg6))
1599 return true;
1600
1601 // Fourth argument has to be an integer type.
1602 if (!Arg3->getType()->isIntegerType()) {
1603 S.Diag(TheCall->getArg(3)->getBeginLoc(),
1604 diag::err_opencl_builtin_expected_type)
1605 << TheCall->getDirectCallee() << "integer";
1606 return true;
1607 }
1608 // Check the remaining common arguments.
1609 Expr *Arg4 = TheCall->getArg(4);
1610 Expr *Arg5 = TheCall->getArg(5);
1611
1612 // Fifth argument is always passed as a pointer to clk_event_t.
1613 if (!Arg4->isNullPointerConstant(S.Context,
1614 Expr::NPC_ValueDependentIsNotNull) &&
1615 !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
1616 S.Diag(TheCall->getArg(4)->getBeginLoc(),
1617 diag::err_opencl_builtin_expected_type)
1618 << TheCall->getDirectCallee()
1619 << S.Context.getPointerType(S.Context.OCLClkEventTy);
1620 return true;
1621 }
1622
1623 // Sixth argument is always passed as a pointer to clk_event_t.
1624 if (!Arg5->isNullPointerConstant(S.Context,
1625 Expr::NPC_ValueDependentIsNotNull) &&
1626 !(Arg5->getType()->isPointerType() &&
1627 Arg5->getType()->getPointeeType()->isClkEventT())) {
1628 S.Diag(TheCall->getArg(5)->getBeginLoc(),
1629 diag::err_opencl_builtin_expected_type)
1630 << TheCall->getDirectCallee()
1631 << S.Context.getPointerType(S.Context.OCLClkEventTy);
1632 return true;
1633 }
1634
1635 if (NumArgs == 7)
1636 return false;
1637
1638 return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
1639 }
1640
1641 // None of the specific cases matched; give a generic error.
1642 S.Diag(TheCall->getBeginLoc(),
1643 diag::err_opencl_enqueue_kernel_incorrect_args);
1644 return true;
1645 }
1646
1647 /// Returns the OpenCL access qualifier attribute of the given declaration, if any.
1648 static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
1649 return D->getAttr<OpenCLAccessAttr>();
1650 }
1651
1652 /// Returns true if the first argument is not a pipe, or if its access qualifier is incompatible with the builtin being called.
1653 static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
1654 const Expr *Arg0 = Call->getArg(0);
1655 // First argument type should always be pipe.
1656 if (!Arg0->getType()->isPipeType()) {
1657 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1658 << Call->getDirectCallee() << Arg0->getSourceRange();
1659 return true;
1660 }
1661 OpenCLAccessAttr *AccessQual =
1662 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
1663 // Validates that the access qualifier is compatible with the call.
1664 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be 1665 // read_only and write_only, and assumed to be read_only if no qualifier is 1666 // specified. 1667 switch (Call->getDirectCallee()->getBuiltinID()) { 1668 case Builtin::BIread_pipe: 1669 case Builtin::BIreserve_read_pipe: 1670 case Builtin::BIcommit_read_pipe: 1671 case Builtin::BIwork_group_reserve_read_pipe: 1672 case Builtin::BIsub_group_reserve_read_pipe: 1673 case Builtin::BIwork_group_commit_read_pipe: 1674 case Builtin::BIsub_group_commit_read_pipe: 1675 if (!(!AccessQual || AccessQual->isReadOnly())) { 1676 S.Diag(Arg0->getBeginLoc(), 1677 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1678 << "read_only" << Arg0->getSourceRange(); 1679 return true; 1680 } 1681 break; 1682 case Builtin::BIwrite_pipe: 1683 case Builtin::BIreserve_write_pipe: 1684 case Builtin::BIcommit_write_pipe: 1685 case Builtin::BIwork_group_reserve_write_pipe: 1686 case Builtin::BIsub_group_reserve_write_pipe: 1687 case Builtin::BIwork_group_commit_write_pipe: 1688 case Builtin::BIsub_group_commit_write_pipe: 1689 if (!(AccessQual && AccessQual->isWriteOnly())) { 1690 S.Diag(Arg0->getBeginLoc(), 1691 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1692 << "write_only" << Arg0->getSourceRange(); 1693 return true; 1694 } 1695 break; 1696 default: 1697 break; 1698 } 1699 return false; 1700 } 1701 1702 /// Returns true if pipe element type is different from the pointer. 1703 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) { 1704 const Expr *Arg0 = Call->getArg(0); 1705 const Expr *ArgIdx = Call->getArg(Idx); 1706 const PipeType *PipeTy = cast<PipeType>(Arg0->getType()); 1707 const QualType EltTy = PipeTy->getElementType(); 1708 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>(); 1709 // The Idx argument should be a pointer and the type of the pointer and 1710 // the type of pipe element should also be the same. 1711 if (!ArgTy || 1712 !S.Context.hasSameType( 1713 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) { 1714 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1715 << Call->getDirectCallee() << S.Context.getPointerType(EltTy) 1716 << ArgIdx->getType() << ArgIdx->getSourceRange(); 1717 return true; 1718 } 1719 return false; 1720 } 1721 1722 // Performs semantic analysis for the read/write_pipe call. 1723 // \param S Reference to the semantic analyzer. 1724 // \param Call A pointer to the builtin call. 1725 // \return True if a semantic error has been found, false otherwise. 1726 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) { 1727 // OpenCL v2.0 s6.13.16.2 - The built-in read/write 1728 // functions have two forms. 1729 switch (Call->getNumArgs()) { 1730 case 2: 1731 if (checkOpenCLPipeArg(S, Call)) 1732 return true; 1733 // The call with 2 arguments should be 1734 // read/write_pipe(pipe T, T*). 1735 // Check packet type T. 1736 if (checkOpenCLPipePacketType(S, Call, 1)) 1737 return true; 1738 break; 1739 1740 case 4: { 1741 if (checkOpenCLPipeArg(S, Call)) 1742 return true; 1743 // The call with 4 arguments should be 1744 // read/write_pipe(pipe T, reserve_id_t, uint, T*). 1745 // Check reserve_id_t. 1746 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1747 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1748 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1749 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1750 return true; 1751 } 1752 1753 // Check the index. 
1754 const Expr *Arg2 = Call->getArg(2); 1755 if (!Arg2->getType()->isIntegerType() && 1756 !Arg2->getType()->isUnsignedIntegerType()) { 1757 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1758 << Call->getDirectCallee() << S.Context.UnsignedIntTy 1759 << Arg2->getType() << Arg2->getSourceRange(); 1760 return true; 1761 } 1762 1763 // Check packet type T. 1764 if (checkOpenCLPipePacketType(S, Call, 3)) 1765 return true; 1766 } break; 1767 default: 1768 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num) 1769 << Call->getDirectCallee() << Call->getSourceRange(); 1770 return true; 1771 } 1772 1773 return false; 1774 } 1775 1776 // Performs a semantic analysis on the {work_group_/sub_group_ 1777 // /_}reserve_{read/write}_pipe 1778 // \param S Reference to the semantic analyzer. 1779 // \param Call The call to the builtin function to be analyzed. 1780 // \return True if a semantic error was found, false otherwise. 1781 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) { 1782 if (checkArgCount(S, Call, 2)) 1783 return true; 1784 1785 if (checkOpenCLPipeArg(S, Call)) 1786 return true; 1787 1788 // Check the reserve size. 1789 if (!Call->getArg(1)->getType()->isIntegerType() && 1790 !Call->getArg(1)->getType()->isUnsignedIntegerType()) { 1791 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1792 << Call->getDirectCallee() << S.Context.UnsignedIntTy 1793 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1794 return true; 1795 } 1796 1797 // Since return type of reserve_read/write_pipe built-in function is 1798 // reserve_id_t, which is not defined in the builtin def file , we used int 1799 // as return type and need to override the return type of these functions. 1800 Call->setType(S.Context.OCLReserveIDTy); 1801 1802 return false; 1803 } 1804 1805 // Performs a semantic analysis on {work_group_/sub_group_ 1806 // /_}commit_{read/write}_pipe 1807 // \param S Reference to the semantic analyzer. 1808 // \param Call The call to the builtin function to be analyzed. 1809 // \return True if a semantic error was found, false otherwise. 1810 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) { 1811 if (checkArgCount(S, Call, 2)) 1812 return true; 1813 1814 if (checkOpenCLPipeArg(S, Call)) 1815 return true; 1816 1817 // Check reserve_id_t. 1818 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1819 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1820 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1821 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1822 return true; 1823 } 1824 1825 return false; 1826 } 1827 1828 // Performs a semantic analysis on the call to built-in Pipe 1829 // Query Functions. 1830 // \param S Reference to the semantic analyzer. 1831 // \param Call The call to the builtin function to be analyzed. 1832 // \return True if a semantic error was found, false otherwise. 1833 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) { 1834 if (checkArgCount(S, Call, 1)) 1835 return true; 1836 1837 if (!Call->getArg(0)->getType()->isPipeType()) { 1838 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg) 1839 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange(); 1840 return true; 1841 } 1842 1843 return false; 1844 } 1845 1846 // OpenCL v2.0 s6.13.9 - Address space qualifier functions. 1847 // Performs semantic analysis for the to_global/local/private call. 1848 // \param S Reference to the semantic analyzer. 
1849 // \param BuiltinID ID of the builtin function. 1850 // \param Call A pointer to the builtin call. 1851 // \return True if a semantic error has been found, false otherwise. 1852 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, 1853 CallExpr *Call) { 1854 if (checkArgCount(S, Call, 1)) 1855 return true; 1856 1857 auto RT = Call->getArg(0)->getType(); 1858 if (!RT->isPointerType() || RT->getPointeeType() 1859 .getAddressSpace() == LangAS::opencl_constant) { 1860 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg) 1861 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); 1862 return true; 1863 } 1864 1865 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) { 1866 S.Diag(Call->getArg(0)->getBeginLoc(), 1867 diag::warn_opencl_generic_address_space_arg) 1868 << Call->getDirectCallee()->getNameInfo().getAsString() 1869 << Call->getArg(0)->getSourceRange(); 1870 } 1871 1872 RT = RT->getPointeeType(); 1873 auto Qual = RT.getQualifiers(); 1874 switch (BuiltinID) { 1875 case Builtin::BIto_global: 1876 Qual.setAddressSpace(LangAS::opencl_global); 1877 break; 1878 case Builtin::BIto_local: 1879 Qual.setAddressSpace(LangAS::opencl_local); 1880 break; 1881 case Builtin::BIto_private: 1882 Qual.setAddressSpace(LangAS::opencl_private); 1883 break; 1884 default: 1885 llvm_unreachable("Invalid builtin function"); 1886 } 1887 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( 1888 RT.getUnqualifiedType(), Qual))); 1889 1890 return false; 1891 } 1892 1893 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) { 1894 if (checkArgCount(S, TheCall, 1)) 1895 return ExprError(); 1896 1897 // Compute __builtin_launder's parameter type from the argument. 1898 // The parameter type is: 1899 // * The type of the argument if it's not an array or function type, 1900 // Otherwise, 1901 // * The decayed argument type. 1902 QualType ParamTy = [&]() { 1903 QualType ArgTy = TheCall->getArg(0)->getType(); 1904 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe()) 1905 return S.Context.getPointerType(Ty->getElementType()); 1906 if (ArgTy->isFunctionType()) { 1907 return S.Context.getPointerType(ArgTy); 1908 } 1909 return ArgTy; 1910 }(); 1911 1912 TheCall->setType(ParamTy); 1913 1914 auto DiagSelect = [&]() -> std::optional<unsigned> { 1915 if (!ParamTy->isPointerType()) 1916 return 0; 1917 if (ParamTy->isFunctionPointerType()) 1918 return 1; 1919 if (ParamTy->isVoidPointerType()) 1920 return 2; 1921 return std::optional<unsigned>{}; 1922 }(); 1923 if (DiagSelect) { 1924 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg) 1925 << *DiagSelect << TheCall->getSourceRange(); 1926 return ExprError(); 1927 } 1928 1929 // We either have an incomplete class type, or we have a class template 1930 // whose instantiation has not been forced. 
Example: 1931 // 1932 // template <class T> struct Foo { T value; }; 1933 // Foo<int> *p = nullptr; 1934 // auto *d = __builtin_launder(p); 1935 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(), 1936 diag::err_incomplete_type)) 1937 return ExprError(); 1938 1939 assert(ParamTy->getPointeeType()->isObjectType() && 1940 "Unhandled non-object pointer case"); 1941 1942 InitializedEntity Entity = 1943 InitializedEntity::InitializeParameter(S.Context, ParamTy, false); 1944 ExprResult Arg = 1945 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0)); 1946 if (Arg.isInvalid()) 1947 return ExprError(); 1948 TheCall->setArg(0, Arg.get()); 1949 1950 return TheCall; 1951 } 1952 1953 // Emit an error and return true if the current object format type is in the 1954 // list of unsupported types. 1955 static bool CheckBuiltinTargetNotInUnsupported( 1956 Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1957 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) { 1958 llvm::Triple::ObjectFormatType CurObjFormat = 1959 S.getASTContext().getTargetInfo().getTriple().getObjectFormat(); 1960 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) { 1961 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1962 << TheCall->getSourceRange(); 1963 return true; 1964 } 1965 return false; 1966 } 1967 1968 // Emit an error and return true if the current architecture is not in the list 1969 // of supported architectures. 1970 static bool 1971 CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1972 ArrayRef<llvm::Triple::ArchType> SupportedArchs) { 1973 llvm::Triple::ArchType CurArch = 1974 S.getASTContext().getTargetInfo().getTriple().getArch(); 1975 if (llvm::is_contained(SupportedArchs, CurArch)) 1976 return false; 1977 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1978 << TheCall->getSourceRange(); 1979 return true; 1980 } 1981 1982 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, 1983 SourceLocation CallSiteLoc); 1984 1985 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 1986 CallExpr *TheCall) { 1987 switch (TI.getTriple().getArch()) { 1988 default: 1989 // Some builtins don't require additional checking, so just consider these 1990 // acceptable. 
1991 return false; 1992 case llvm::Triple::arm: 1993 case llvm::Triple::armeb: 1994 case llvm::Triple::thumb: 1995 case llvm::Triple::thumbeb: 1996 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 1997 case llvm::Triple::aarch64: 1998 case llvm::Triple::aarch64_32: 1999 case llvm::Triple::aarch64_be: 2000 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 2001 case llvm::Triple::bpfeb: 2002 case llvm::Triple::bpfel: 2003 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 2004 case llvm::Triple::hexagon: 2005 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 2006 case llvm::Triple::mips: 2007 case llvm::Triple::mipsel: 2008 case llvm::Triple::mips64: 2009 case llvm::Triple::mips64el: 2010 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 2011 case llvm::Triple::systemz: 2012 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 2013 case llvm::Triple::x86: 2014 case llvm::Triple::x86_64: 2015 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 2016 case llvm::Triple::ppc: 2017 case llvm::Triple::ppcle: 2018 case llvm::Triple::ppc64: 2019 case llvm::Triple::ppc64le: 2020 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 2021 case llvm::Triple::amdgcn: 2022 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 2023 case llvm::Triple::riscv32: 2024 case llvm::Triple::riscv64: 2025 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall); 2026 case llvm::Triple::loongarch32: 2027 case llvm::Triple::loongarch64: 2028 return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall); 2029 case llvm::Triple::wasm32: 2030 case llvm::Triple::wasm64: 2031 return CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall); 2032 case llvm::Triple::nvptx: 2033 case llvm::Triple::nvptx64: 2034 return CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall); 2035 } 2036 } 2037 2038 // Check if \p Ty is a valid type for the elementwise math builtins. If it is 2039 // not a valid type, emit an error message and return true. Otherwise return 2040 // false. 2041 static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc, 2042 QualType Ty) { 2043 if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) { 2044 return S.Diag(Loc, diag::err_builtin_invalid_arg_type) 2045 << 1 << /* vector, integer or float ty*/ 0 << Ty; 2046 } 2047 2048 return false; 2049 } 2050 2051 static bool checkFPMathBuiltinElementType(Sema &S, SourceLocation Loc, 2052 QualType ArgTy, int ArgIndex) { 2053 QualType EltTy = ArgTy; 2054 if (auto *VecTy = EltTy->getAs<VectorType>()) 2055 EltTy = VecTy->getElementType(); 2056 2057 if (!EltTy->isRealFloatingType()) { 2058 return S.Diag(Loc, diag::err_builtin_invalid_arg_type) 2059 << ArgIndex << /* vector or float ty*/ 5 << ArgTy; 2060 } 2061 2062 return false; 2063 } 2064 2065 ExprResult 2066 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, 2067 CallExpr *TheCall) { 2068 ExprResult TheCallResult(TheCall); 2069 2070 // Find out if any arguments are required to be integer constant expressions. 2071 unsigned ICEArguments = 0; 2072 ASTContext::GetBuiltinTypeError Error; 2073 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); 2074 if (Error != ASTContext::GE_None) 2075 ICEArguments = 0; // Don't diagnose previously diagnosed errors. 2076 2077 // If any arguments are required to be ICE's, check and diagnose. 2078 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { 2079 // Skip arguments not required to be ICE's. 
2080 if ((ICEArguments & (1 << ArgNo)) == 0) continue; 2081 2082 llvm::APSInt Result; 2083 // If we don't have enough arguments, continue so we can issue better 2084 // diagnostic in checkArgCount(...) 2085 if (ArgNo < TheCall->getNumArgs() && 2086 SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 2087 return true; 2088 ICEArguments &= ~(1 << ArgNo); 2089 } 2090 2091 switch (BuiltinID) { 2092 case Builtin::BI__builtin___CFStringMakeConstantString: 2093 // CFStringMakeConstantString is currently not implemented for GOFF (i.e., 2094 // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported 2095 if (CheckBuiltinTargetNotInUnsupported( 2096 *this, BuiltinID, TheCall, 2097 {llvm::Triple::GOFF, llvm::Triple::XCOFF})) 2098 return ExprError(); 2099 assert(TheCall->getNumArgs() == 1 && 2100 "Wrong # arguments to builtin CFStringMakeConstantString"); 2101 if (CheckObjCString(TheCall->getArg(0))) 2102 return ExprError(); 2103 break; 2104 case Builtin::BI__builtin_ms_va_start: 2105 case Builtin::BI__builtin_stdarg_start: 2106 case Builtin::BI__builtin_va_start: 2107 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2108 return ExprError(); 2109 break; 2110 case Builtin::BI__va_start: { 2111 switch (Context.getTargetInfo().getTriple().getArch()) { 2112 case llvm::Triple::aarch64: 2113 case llvm::Triple::arm: 2114 case llvm::Triple::thumb: 2115 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 2116 return ExprError(); 2117 break; 2118 default: 2119 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2120 return ExprError(); 2121 break; 2122 } 2123 break; 2124 } 2125 2126 // The acquire, release, and no fence variants are ARM and AArch64 only. 2127 case Builtin::BI_interlockedbittestandset_acq: 2128 case Builtin::BI_interlockedbittestandset_rel: 2129 case Builtin::BI_interlockedbittestandset_nf: 2130 case Builtin::BI_interlockedbittestandreset_acq: 2131 case Builtin::BI_interlockedbittestandreset_rel: 2132 case Builtin::BI_interlockedbittestandreset_nf: 2133 if (CheckBuiltinTargetInSupported( 2134 *this, BuiltinID, TheCall, 2135 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 2136 return ExprError(); 2137 break; 2138 2139 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 
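// As a hedged, MSVC-style usage sketch of one of the variants gated below
// (only meaningful when targeting x64, ARM, or AArch64):
//   __int64 Bits = 0;
//   unsigned char Old = _bittestandset64(&Bits, 3); // sets bit 3, returns its old value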
2140 case Builtin::BI_bittest64: 2141 case Builtin::BI_bittestandcomplement64: 2142 case Builtin::BI_bittestandreset64: 2143 case Builtin::BI_bittestandset64: 2144 case Builtin::BI_interlockedbittestandreset64: 2145 case Builtin::BI_interlockedbittestandset64: 2146 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 2147 {llvm::Triple::x86_64, llvm::Triple::arm, 2148 llvm::Triple::thumb, 2149 llvm::Triple::aarch64})) 2150 return ExprError(); 2151 break; 2152 2153 case Builtin::BI__builtin_set_flt_rounds: 2154 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 2155 {llvm::Triple::x86, llvm::Triple::x86_64, 2156 llvm::Triple::arm, llvm::Triple::thumb, 2157 llvm::Triple::aarch64})) 2158 return ExprError(); 2159 break; 2160 2161 case Builtin::BI__builtin_isgreater: 2162 case Builtin::BI__builtin_isgreaterequal: 2163 case Builtin::BI__builtin_isless: 2164 case Builtin::BI__builtin_islessequal: 2165 case Builtin::BI__builtin_islessgreater: 2166 case Builtin::BI__builtin_isunordered: 2167 if (SemaBuiltinUnorderedCompare(TheCall)) 2168 return ExprError(); 2169 break; 2170 case Builtin::BI__builtin_fpclassify: 2171 if (SemaBuiltinFPClassification(TheCall, 6)) 2172 return ExprError(); 2173 break; 2174 case Builtin::BI__builtin_isfpclass: 2175 if (SemaBuiltinFPClassification(TheCall, 2)) 2176 return ExprError(); 2177 break; 2178 case Builtin::BI__builtin_isfinite: 2179 case Builtin::BI__builtin_isinf: 2180 case Builtin::BI__builtin_isinf_sign: 2181 case Builtin::BI__builtin_isnan: 2182 case Builtin::BI__builtin_isnormal: 2183 case Builtin::BI__builtin_signbit: 2184 case Builtin::BI__builtin_signbitf: 2185 case Builtin::BI__builtin_signbitl: 2186 if (SemaBuiltinFPClassification(TheCall, 1)) 2187 return ExprError(); 2188 break; 2189 case Builtin::BI__builtin_shufflevector: 2190 return SemaBuiltinShuffleVector(TheCall); 2191 // TheCall will be freed by the smart pointer here, but that's fine, since 2192 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 
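// A hedged usage sketch of the builtin handled just above (the vector typedef
// is an assumption for illustration, not something defined in this file):
//   typedef int v4si __attribute__((vector_size(16)));
//   v4si A = {0, 1, 2, 3}, B = {4, 5, 6, 7};
//   v4si R = __builtin_shufflevector(A, B, 3, 2, 5, 7); // yields {3, 2, 5, 7}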
2193 case Builtin::BI__builtin_prefetch: 2194 if (SemaBuiltinPrefetch(TheCall)) 2195 return ExprError(); 2196 break; 2197 case Builtin::BI__builtin_alloca_with_align: 2198 case Builtin::BI__builtin_alloca_with_align_uninitialized: 2199 if (SemaBuiltinAllocaWithAlign(TheCall)) 2200 return ExprError(); 2201 [[fallthrough]]; 2202 case Builtin::BI__builtin_alloca: 2203 case Builtin::BI__builtin_alloca_uninitialized: 2204 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 2205 << TheCall->getDirectCallee(); 2206 break; 2207 case Builtin::BI__arithmetic_fence: 2208 if (SemaBuiltinArithmeticFence(TheCall)) 2209 return ExprError(); 2210 break; 2211 case Builtin::BI__assume: 2212 case Builtin::BI__builtin_assume: 2213 if (SemaBuiltinAssume(TheCall)) 2214 return ExprError(); 2215 break; 2216 case Builtin::BI__builtin_assume_aligned: 2217 if (SemaBuiltinAssumeAligned(TheCall)) 2218 return ExprError(); 2219 break; 2220 case Builtin::BI__builtin_dynamic_object_size: 2221 case Builtin::BI__builtin_object_size: 2222 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 2223 return ExprError(); 2224 break; 2225 case Builtin::BI__builtin_longjmp: 2226 if (SemaBuiltinLongjmp(TheCall)) 2227 return ExprError(); 2228 break; 2229 case Builtin::BI__builtin_setjmp: 2230 if (SemaBuiltinSetjmp(TheCall)) 2231 return ExprError(); 2232 break; 2233 case Builtin::BI__builtin_classify_type: 2234 if (checkArgCount(*this, TheCall, 1)) return true; 2235 TheCall->setType(Context.IntTy); 2236 break; 2237 case Builtin::BI__builtin_complex: 2238 if (SemaBuiltinComplex(TheCall)) 2239 return ExprError(); 2240 break; 2241 case Builtin::BI__builtin_constant_p: { 2242 if (checkArgCount(*this, TheCall, 1)) return true; 2243 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 2244 if (Arg.isInvalid()) return true; 2245 TheCall->setArg(0, Arg.get()); 2246 TheCall->setType(Context.IntTy); 2247 break; 2248 } 2249 case Builtin::BI__builtin_launder: 2250 return SemaBuiltinLaunder(*this, TheCall); 2251 case Builtin::BI__sync_fetch_and_add: 2252 case Builtin::BI__sync_fetch_and_add_1: 2253 case Builtin::BI__sync_fetch_and_add_2: 2254 case Builtin::BI__sync_fetch_and_add_4: 2255 case Builtin::BI__sync_fetch_and_add_8: 2256 case Builtin::BI__sync_fetch_and_add_16: 2257 case Builtin::BI__sync_fetch_and_sub: 2258 case Builtin::BI__sync_fetch_and_sub_1: 2259 case Builtin::BI__sync_fetch_and_sub_2: 2260 case Builtin::BI__sync_fetch_and_sub_4: 2261 case Builtin::BI__sync_fetch_and_sub_8: 2262 case Builtin::BI__sync_fetch_and_sub_16: 2263 case Builtin::BI__sync_fetch_and_or: 2264 case Builtin::BI__sync_fetch_and_or_1: 2265 case Builtin::BI__sync_fetch_and_or_2: 2266 case Builtin::BI__sync_fetch_and_or_4: 2267 case Builtin::BI__sync_fetch_and_or_8: 2268 case Builtin::BI__sync_fetch_and_or_16: 2269 case Builtin::BI__sync_fetch_and_and: 2270 case Builtin::BI__sync_fetch_and_and_1: 2271 case Builtin::BI__sync_fetch_and_and_2: 2272 case Builtin::BI__sync_fetch_and_and_4: 2273 case Builtin::BI__sync_fetch_and_and_8: 2274 case Builtin::BI__sync_fetch_and_and_16: 2275 case Builtin::BI__sync_fetch_and_xor: 2276 case Builtin::BI__sync_fetch_and_xor_1: 2277 case Builtin::BI__sync_fetch_and_xor_2: 2278 case Builtin::BI__sync_fetch_and_xor_4: 2279 case Builtin::BI__sync_fetch_and_xor_8: 2280 case Builtin::BI__sync_fetch_and_xor_16: 2281 case Builtin::BI__sync_fetch_and_nand: 2282 case Builtin::BI__sync_fetch_and_nand_1: 2283 case Builtin::BI__sync_fetch_and_nand_2: 2284 case Builtin::BI__sync_fetch_and_nand_4: 2285 case 
Builtin::BI__sync_fetch_and_nand_8: 2286 case Builtin::BI__sync_fetch_and_nand_16: 2287 case Builtin::BI__sync_add_and_fetch: 2288 case Builtin::BI__sync_add_and_fetch_1: 2289 case Builtin::BI__sync_add_and_fetch_2: 2290 case Builtin::BI__sync_add_and_fetch_4: 2291 case Builtin::BI__sync_add_and_fetch_8: 2292 case Builtin::BI__sync_add_and_fetch_16: 2293 case Builtin::BI__sync_sub_and_fetch: 2294 case Builtin::BI__sync_sub_and_fetch_1: 2295 case Builtin::BI__sync_sub_and_fetch_2: 2296 case Builtin::BI__sync_sub_and_fetch_4: 2297 case Builtin::BI__sync_sub_and_fetch_8: 2298 case Builtin::BI__sync_sub_and_fetch_16: 2299 case Builtin::BI__sync_and_and_fetch: 2300 case Builtin::BI__sync_and_and_fetch_1: 2301 case Builtin::BI__sync_and_and_fetch_2: 2302 case Builtin::BI__sync_and_and_fetch_4: 2303 case Builtin::BI__sync_and_and_fetch_8: 2304 case Builtin::BI__sync_and_and_fetch_16: 2305 case Builtin::BI__sync_or_and_fetch: 2306 case Builtin::BI__sync_or_and_fetch_1: 2307 case Builtin::BI__sync_or_and_fetch_2: 2308 case Builtin::BI__sync_or_and_fetch_4: 2309 case Builtin::BI__sync_or_and_fetch_8: 2310 case Builtin::BI__sync_or_and_fetch_16: 2311 case Builtin::BI__sync_xor_and_fetch: 2312 case Builtin::BI__sync_xor_and_fetch_1: 2313 case Builtin::BI__sync_xor_and_fetch_2: 2314 case Builtin::BI__sync_xor_and_fetch_4: 2315 case Builtin::BI__sync_xor_and_fetch_8: 2316 case Builtin::BI__sync_xor_and_fetch_16: 2317 case Builtin::BI__sync_nand_and_fetch: 2318 case Builtin::BI__sync_nand_and_fetch_1: 2319 case Builtin::BI__sync_nand_and_fetch_2: 2320 case Builtin::BI__sync_nand_and_fetch_4: 2321 case Builtin::BI__sync_nand_and_fetch_8: 2322 case Builtin::BI__sync_nand_and_fetch_16: 2323 case Builtin::BI__sync_val_compare_and_swap: 2324 case Builtin::BI__sync_val_compare_and_swap_1: 2325 case Builtin::BI__sync_val_compare_and_swap_2: 2326 case Builtin::BI__sync_val_compare_and_swap_4: 2327 case Builtin::BI__sync_val_compare_and_swap_8: 2328 case Builtin::BI__sync_val_compare_and_swap_16: 2329 case Builtin::BI__sync_bool_compare_and_swap: 2330 case Builtin::BI__sync_bool_compare_and_swap_1: 2331 case Builtin::BI__sync_bool_compare_and_swap_2: 2332 case Builtin::BI__sync_bool_compare_and_swap_4: 2333 case Builtin::BI__sync_bool_compare_and_swap_8: 2334 case Builtin::BI__sync_bool_compare_and_swap_16: 2335 case Builtin::BI__sync_lock_test_and_set: 2336 case Builtin::BI__sync_lock_test_and_set_1: 2337 case Builtin::BI__sync_lock_test_and_set_2: 2338 case Builtin::BI__sync_lock_test_and_set_4: 2339 case Builtin::BI__sync_lock_test_and_set_8: 2340 case Builtin::BI__sync_lock_test_and_set_16: 2341 case Builtin::BI__sync_lock_release: 2342 case Builtin::BI__sync_lock_release_1: 2343 case Builtin::BI__sync_lock_release_2: 2344 case Builtin::BI__sync_lock_release_4: 2345 case Builtin::BI__sync_lock_release_8: 2346 case Builtin::BI__sync_lock_release_16: 2347 case Builtin::BI__sync_swap: 2348 case Builtin::BI__sync_swap_1: 2349 case Builtin::BI__sync_swap_2: 2350 case Builtin::BI__sync_swap_4: 2351 case Builtin::BI__sync_swap_8: 2352 case Builtin::BI__sync_swap_16: 2353 return SemaBuiltinAtomicOverloaded(TheCallResult); 2354 case Builtin::BI__sync_synchronize: 2355 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 2356 << TheCall->getCallee()->getSourceRange(); 2357 break; 2358 case Builtin::BI__builtin_nontemporal_load: 2359 case Builtin::BI__builtin_nontemporal_store: 2360 return SemaBuiltinNontemporalOverloaded(TheCallResult); 2361 case Builtin::BI__builtin_memcpy_inline: { 2362 clang::Expr 
*SizeOp = TheCall->getArg(2); 2363 // We warn about copying to or from `nullptr` pointers when `size` is 2364 // greater than 0. When `size` is value dependent we cannot evaluate its 2365 // value so we bail out. 2366 if (SizeOp->isValueDependent()) 2367 break; 2368 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 2369 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2370 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 2371 } 2372 break; 2373 } 2374 case Builtin::BI__builtin_memset_inline: { 2375 clang::Expr *SizeOp = TheCall->getArg(2); 2376 // We warn about filling to `nullptr` pointers when `size` is greater than 2377 // 0. When `size` is value dependent we cannot evaluate its value so we bail 2378 // out. 2379 if (SizeOp->isValueDependent()) 2380 break; 2381 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) 2382 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2383 break; 2384 } 2385 #define BUILTIN(ID, TYPE, ATTRS) 2386 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 2387 case Builtin::BI##ID: \ 2388 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 2389 #include "clang/Basic/Builtins.def" 2390 case Builtin::BI__annotation: 2391 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 2392 return ExprError(); 2393 break; 2394 case Builtin::BI__builtin_annotation: 2395 if (SemaBuiltinAnnotation(*this, TheCall)) 2396 return ExprError(); 2397 break; 2398 case Builtin::BI__builtin_addressof: 2399 if (SemaBuiltinAddressof(*this, TheCall)) 2400 return ExprError(); 2401 break; 2402 case Builtin::BI__builtin_function_start: 2403 if (SemaBuiltinFunctionStart(*this, TheCall)) 2404 return ExprError(); 2405 break; 2406 case Builtin::BI__builtin_is_aligned: 2407 case Builtin::BI__builtin_align_up: 2408 case Builtin::BI__builtin_align_down: 2409 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 2410 return ExprError(); 2411 break; 2412 case Builtin::BI__builtin_add_overflow: 2413 case Builtin::BI__builtin_sub_overflow: 2414 case Builtin::BI__builtin_mul_overflow: 2415 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 2416 return ExprError(); 2417 break; 2418 case Builtin::BI__builtin_operator_new: 2419 case Builtin::BI__builtin_operator_delete: { 2420 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 2421 ExprResult Res = 2422 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 2423 if (Res.isInvalid()) 2424 CorrectDelayedTyposInExpr(TheCallResult.get()); 2425 return Res; 2426 } 2427 case Builtin::BI__builtin_dump_struct: 2428 return SemaBuiltinDumpStruct(*this, TheCall); 2429 case Builtin::BI__builtin_expect_with_probability: { 2430 // We first want to ensure we are called with 3 arguments 2431 if (checkArgCount(*this, TheCall, 3)) 2432 return ExprError(); 2433 // then check probability is constant float in range [0.0, 1.0] 2434 const Expr *ProbArg = TheCall->getArg(2); 2435 SmallVector<PartialDiagnosticAt, 8> Notes; 2436 Expr::EvalResult Eval; 2437 Eval.Diag = &Notes; 2438 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 2439 !Eval.Val.isFloat()) { 2440 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 2441 << ProbArg->getSourceRange(); 2442 for (const PartialDiagnosticAt &PDiag : Notes) 2443 Diag(PDiag.first, PDiag.second); 2444 return ExprError(); 2445 } 2446 llvm::APFloat Probability = Eval.Val.getFloat(); 2447 bool LoseInfo = false; 2448 Probability.convert(llvm::APFloat::IEEEdouble(), 2449 llvm::RoundingMode::Dynamic, &LoseInfo); 2450 if 
(!(Probability >= llvm::APFloat(0.0) && 2451 Probability <= llvm::APFloat(1.0))) { 2452 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 2453 << ProbArg->getSourceRange(); 2454 return ExprError(); 2455 } 2456 break; 2457 } 2458 case Builtin::BI__builtin_preserve_access_index: 2459 if (SemaBuiltinPreserveAI(*this, TheCall)) 2460 return ExprError(); 2461 break; 2462 case Builtin::BI__builtin_call_with_static_chain: 2463 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 2464 return ExprError(); 2465 break; 2466 case Builtin::BI__exception_code: 2467 case Builtin::BI_exception_code: 2468 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 2469 diag::err_seh___except_block)) 2470 return ExprError(); 2471 break; 2472 case Builtin::BI__exception_info: 2473 case Builtin::BI_exception_info: 2474 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2475 diag::err_seh___except_filter)) 2476 return ExprError(); 2477 break; 2478 case Builtin::BI__GetExceptionInfo: 2479 if (checkArgCount(*this, TheCall, 1)) 2480 return ExprError(); 2481 2482 if (CheckCXXThrowOperand( 2483 TheCall->getBeginLoc(), 2484 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2485 TheCall)) 2486 return ExprError(); 2487 2488 TheCall->setType(Context.VoidPtrTy); 2489 break; 2490 case Builtin::BIaddressof: 2491 case Builtin::BI__addressof: 2492 case Builtin::BIforward: 2493 case Builtin::BIforward_like: 2494 case Builtin::BImove: 2495 case Builtin::BImove_if_noexcept: 2496 case Builtin::BIas_const: { 2497 // These are all expected to be of the form 2498 // T &/&&/* f(U &/&&) 2499 // where T and U only differ in qualification. 2500 if (checkArgCount(*this, TheCall, 1)) 2501 return ExprError(); 2502 QualType Param = FDecl->getParamDecl(0)->getType(); 2503 QualType Result = FDecl->getReturnType(); 2504 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof || 2505 BuiltinID == Builtin::BI__addressof; 2506 if (!(Param->isReferenceType() && 2507 (ReturnsPointer ? Result->isAnyPointerType() 2508 : Result->isReferenceType()) && 2509 Context.hasSameUnqualifiedType(Param->getPointeeType(), 2510 Result->getPointeeType()))) { 2511 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported) 2512 << FDecl; 2513 return ExprError(); 2514 } 2515 break; 2516 } 2517 // OpenCL v2.0, s6.13.16 - Pipe functions 2518 case Builtin::BIread_pipe: 2519 case Builtin::BIwrite_pipe: 2520 // Since those two functions are declared with var args, we need a semantic 2521 // check for the argument. 
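// As a hedged OpenCL C sketch, these are the two call shapes SemaBuiltinRWPipe
// accepts ('p', 'rid', and 'idx' are illustrative names, not part of this file):
//   int v;
//   read_pipe(p, &v);            // pipe T, T*
//   read_pipe(p, rid, idx, &v);  // pipe T, reserve_id_t, uint, T*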
2522 if (SemaBuiltinRWPipe(*this, TheCall)) 2523 return ExprError(); 2524 break; 2525 case Builtin::BIreserve_read_pipe: 2526 case Builtin::BIreserve_write_pipe: 2527 case Builtin::BIwork_group_reserve_read_pipe: 2528 case Builtin::BIwork_group_reserve_write_pipe: 2529 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2530 return ExprError(); 2531 break; 2532 case Builtin::BIsub_group_reserve_read_pipe: 2533 case Builtin::BIsub_group_reserve_write_pipe: 2534 if (checkOpenCLSubgroupExt(*this, TheCall) || 2535 SemaBuiltinReserveRWPipe(*this, TheCall)) 2536 return ExprError(); 2537 break; 2538 case Builtin::BIcommit_read_pipe: 2539 case Builtin::BIcommit_write_pipe: 2540 case Builtin::BIwork_group_commit_read_pipe: 2541 case Builtin::BIwork_group_commit_write_pipe: 2542 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2543 return ExprError(); 2544 break; 2545 case Builtin::BIsub_group_commit_read_pipe: 2546 case Builtin::BIsub_group_commit_write_pipe: 2547 if (checkOpenCLSubgroupExt(*this, TheCall) || 2548 SemaBuiltinCommitRWPipe(*this, TheCall)) 2549 return ExprError(); 2550 break; 2551 case Builtin::BIget_pipe_num_packets: 2552 case Builtin::BIget_pipe_max_packets: 2553 if (SemaBuiltinPipePackets(*this, TheCall)) 2554 return ExprError(); 2555 break; 2556 case Builtin::BIto_global: 2557 case Builtin::BIto_local: 2558 case Builtin::BIto_private: 2559 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2560 return ExprError(); 2561 break; 2562 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 2563 case Builtin::BIenqueue_kernel: 2564 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 2565 return ExprError(); 2566 break; 2567 case Builtin::BIget_kernel_work_group_size: 2568 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 2569 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 2570 return ExprError(); 2571 break; 2572 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 2573 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 2574 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 2575 return ExprError(); 2576 break; 2577 case Builtin::BI__builtin_os_log_format: 2578 Cleanup.setExprNeedsCleanups(true); 2579 [[fallthrough]]; 2580 case Builtin::BI__builtin_os_log_format_buffer_size: 2581 if (SemaBuiltinOSLogFormat(TheCall)) 2582 return ExprError(); 2583 break; 2584 case Builtin::BI__builtin_frame_address: 2585 case Builtin::BI__builtin_return_address: { 2586 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 2587 return ExprError(); 2588 2589 // -Wframe-address warning if non-zero passed to builtin 2590 // return/frame address. 2591 Expr::EvalResult Result; 2592 if (!TheCall->getArg(0)->isValueDependent() && 2593 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 2594 Result.Val.getInt() != 0) 2595 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 2596 << ((BuiltinID == Builtin::BI__builtin_return_address) 2597 ? "__builtin_return_address" 2598 : "__builtin_frame_address") 2599 << TheCall->getSourceRange(); 2600 break; 2601 } 2602 2603 case Builtin::BI__builtin_nondeterministic_value: { 2604 if (SemaBuiltinNonDeterministicValue(TheCall)) 2605 return ExprError(); 2606 break; 2607 } 2608 2609 // __builtin_elementwise_abs restricts the element type to signed integers or 2610 // floating point types only. 
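// As a hedged sketch of what that restriction means in practice (the typedef
// is assumed for illustration):
//   typedef float v4f __attribute__((vector_size(16)));
//   v4f X = {1.0f, -2.0f, 3.0f, -4.0f};
//   v4f Y = __builtin_elementwise_abs(X);  // OK: floating-point elements
//   unsigned U = 1u;
//   __builtin_elementwise_abs(U);          // rejected: unsigned element type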
2611 case Builtin::BI__builtin_elementwise_abs: { 2612 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2613 return ExprError(); 2614 2615 QualType ArgTy = TheCall->getArg(0)->getType(); 2616 QualType EltTy = ArgTy; 2617 2618 if (auto *VecTy = EltTy->getAs<VectorType>()) 2619 EltTy = VecTy->getElementType(); 2620 if (EltTy->isUnsignedIntegerType()) { 2621 Diag(TheCall->getArg(0)->getBeginLoc(), 2622 diag::err_builtin_invalid_arg_type) 2623 << 1 << /* signed integer or float ty*/ 3 << ArgTy; 2624 return ExprError(); 2625 } 2626 break; 2627 } 2628 2629 // These builtins restrict the element type to floating point 2630 // types only. 2631 case Builtin::BI__builtin_elementwise_ceil: 2632 case Builtin::BI__builtin_elementwise_cos: 2633 case Builtin::BI__builtin_elementwise_exp: 2634 case Builtin::BI__builtin_elementwise_exp2: 2635 case Builtin::BI__builtin_elementwise_floor: 2636 case Builtin::BI__builtin_elementwise_log: 2637 case Builtin::BI__builtin_elementwise_log2: 2638 case Builtin::BI__builtin_elementwise_log10: 2639 case Builtin::BI__builtin_elementwise_roundeven: 2640 case Builtin::BI__builtin_elementwise_round: 2641 case Builtin::BI__builtin_elementwise_rint: 2642 case Builtin::BI__builtin_elementwise_nearbyint: 2643 case Builtin::BI__builtin_elementwise_sin: 2644 case Builtin::BI__builtin_elementwise_trunc: 2645 case Builtin::BI__builtin_elementwise_canonicalize: { 2646 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2647 return ExprError(); 2648 2649 QualType ArgTy = TheCall->getArg(0)->getType(); 2650 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(), 2651 ArgTy, 1)) 2652 return ExprError(); 2653 break; 2654 } 2655 case Builtin::BI__builtin_elementwise_fma: { 2656 if (SemaBuiltinElementwiseTernaryMath(TheCall)) 2657 return ExprError(); 2658 break; 2659 } 2660 2661 // These builtins restrict the element type to floating point 2662 // types only, and take in two arguments. 2663 case Builtin::BI__builtin_elementwise_pow: { 2664 if (SemaBuiltinElementwiseMath(TheCall)) 2665 return ExprError(); 2666 2667 QualType ArgTy = TheCall->getArg(0)->getType(); 2668 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(), 2669 ArgTy, 1) || 2670 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(), 2671 ArgTy, 2)) 2672 return ExprError(); 2673 break; 2674 } 2675 2676 // These builtins restrict the element type to integer 2677 // types only. 
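// A hedged sketch of the integer-only constraint enforced below (the typedef
// is assumed for illustration):
//   typedef int v4i __attribute__((vector_size(16)));
//   v4i A = {1, 2, 3, 4}, B = {5, 6, 7, 8};
//   v4i S = __builtin_elementwise_add_sat(A, B);  // OK: integer elements
//   __builtin_elementwise_add_sat(1.0f, 2.0f);    // rejected: floating point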
2678 case Builtin::BI__builtin_elementwise_add_sat: 2679 case Builtin::BI__builtin_elementwise_sub_sat: { 2680 if (SemaBuiltinElementwiseMath(TheCall)) 2681 return ExprError(); 2682 2683 const Expr *Arg = TheCall->getArg(0); 2684 QualType ArgTy = Arg->getType(); 2685 QualType EltTy = ArgTy; 2686 2687 if (auto *VecTy = EltTy->getAs<VectorType>()) 2688 EltTy = VecTy->getElementType(); 2689 2690 if (!EltTy->isIntegerType()) { 2691 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2692 << 1 << /* integer ty */ 6 << ArgTy; 2693 return ExprError(); 2694 } 2695 break; 2696 } 2697 2698 case Builtin::BI__builtin_elementwise_min: 2699 case Builtin::BI__builtin_elementwise_max: 2700 if (SemaBuiltinElementwiseMath(TheCall)) 2701 return ExprError(); 2702 break; 2703 case Builtin::BI__builtin_elementwise_copysign: { 2704 if (checkArgCount(*this, TheCall, 2)) 2705 return ExprError(); 2706 2707 ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0)); 2708 ExprResult Sign = UsualUnaryConversions(TheCall->getArg(1)); 2709 if (Magnitude.isInvalid() || Sign.isInvalid()) 2710 return ExprError(); 2711 2712 QualType MagnitudeTy = Magnitude.get()->getType(); 2713 QualType SignTy = Sign.get()->getType(); 2714 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(), 2715 MagnitudeTy, 1) || 2716 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(), 2717 SignTy, 2)) { 2718 return ExprError(); 2719 } 2720 2721 if (MagnitudeTy.getCanonicalType() != SignTy.getCanonicalType()) { 2722 return Diag(Sign.get()->getBeginLoc(), 2723 diag::err_typecheck_call_different_arg_types) 2724 << MagnitudeTy << SignTy; 2725 } 2726 2727 TheCall->setArg(0, Magnitude.get()); 2728 TheCall->setArg(1, Sign.get()); 2729 TheCall->setType(Magnitude.get()->getType()); 2730 break; 2731 } 2732 case Builtin::BI__builtin_reduce_max: 2733 case Builtin::BI__builtin_reduce_min: { 2734 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2735 return ExprError(); 2736 2737 const Expr *Arg = TheCall->getArg(0); 2738 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2739 if (!TyA) { 2740 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2741 << 1 << /* vector ty*/ 4 << Arg->getType(); 2742 return ExprError(); 2743 } 2744 2745 TheCall->setType(TyA->getElementType()); 2746 break; 2747 } 2748 2749 // These builtins support vectors of integers only. 2750 // TODO: ADD/MUL should support floating-point types. 
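// A hedged sketch of a reduction the check below accepts (typedef assumed):
//   typedef int v4i __attribute__((vector_size(16)));
//   v4i V = {1, 2, 3, 4};
//   int Sum = __builtin_reduce_add(V);  // result type is the vector element type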
2751 case Builtin::BI__builtin_reduce_add: 2752 case Builtin::BI__builtin_reduce_mul: 2753 case Builtin::BI__builtin_reduce_xor: 2754 case Builtin::BI__builtin_reduce_or: 2755 case Builtin::BI__builtin_reduce_and: { 2756 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2757 return ExprError(); 2758 2759 const Expr *Arg = TheCall->getArg(0); 2760 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2761 if (!TyA || !TyA->getElementType()->isIntegerType()) { 2762 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2763 << 1 << /* vector of integers */ 6 << Arg->getType(); 2764 return ExprError(); 2765 } 2766 TheCall->setType(TyA->getElementType()); 2767 break; 2768 } 2769 2770 case Builtin::BI__builtin_matrix_transpose: 2771 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 2772 2773 case Builtin::BI__builtin_matrix_column_major_load: 2774 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 2775 2776 case Builtin::BI__builtin_matrix_column_major_store: 2777 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 2778 2779 case Builtin::BI__builtin_get_device_side_mangled_name: { 2780 auto Check = [](CallExpr *TheCall) { 2781 if (TheCall->getNumArgs() != 1) 2782 return false; 2783 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 2784 if (!DRE) 2785 return false; 2786 auto *D = DRE->getDecl(); 2787 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 2788 return false; 2789 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 2790 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2791 }; 2792 if (!Check(TheCall)) { 2793 Diag(TheCall->getBeginLoc(), 2794 diag::err_hip_invalid_args_builtin_mangled_name); 2795 return ExprError(); 2796 } 2797 } 2798 } 2799 2800 // Since the target specific builtins for each arch overlap, only check those 2801 // of the arch we are compiling for. 2802 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 2803 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 2804 assert(Context.getAuxTargetInfo() && 2805 "Aux Target Builtin, but not an aux target?"); 2806 2807 if (CheckTSBuiltinFunctionCall( 2808 *Context.getAuxTargetInfo(), 2809 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2810 return ExprError(); 2811 } else { 2812 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2813 TheCall)) 2814 return ExprError(); 2815 } 2816 } 2817 2818 return TheCallResult; 2819 } 2820 2821 // Get the valid immediate range for the specified NEON type code. 2822 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2823 NeonTypeFlags Type(t); 2824 int IsQuad = ForceQuad ? true : Type.isQuad(); 2825 switch (Type.getEltType()) { 2826 case NeonTypeFlags::Int8: 2827 case NeonTypeFlags::Poly8: 2828 return shift ? 7 : (8 << IsQuad) - 1; 2829 case NeonTypeFlags::Int16: 2830 case NeonTypeFlags::Poly16: 2831 return shift ? 15 : (4 << IsQuad) - 1; 2832 case NeonTypeFlags::Int32: 2833 return shift ? 31 : (2 << IsQuad) - 1; 2834 case NeonTypeFlags::Int64: 2835 case NeonTypeFlags::Poly64: 2836 return shift ? 63 : (1 << IsQuad) - 1; 2837 case NeonTypeFlags::Poly128: 2838 return shift ? 
127 : (1 << IsQuad) - 1; 2839 case NeonTypeFlags::Float16: 2840 assert(!shift && "cannot shift float types!"); 2841 return (4 << IsQuad) - 1; 2842 case NeonTypeFlags::Float32: 2843 assert(!shift && "cannot shift float types!"); 2844 return (2 << IsQuad) - 1; 2845 case NeonTypeFlags::Float64: 2846 assert(!shift && "cannot shift float types!"); 2847 return (1 << IsQuad) - 1; 2848 case NeonTypeFlags::BFloat16: 2849 assert(!shift && "cannot shift float types!"); 2850 return (4 << IsQuad) - 1; 2851 } 2852 llvm_unreachable("Invalid NeonTypeFlag!"); 2853 } 2854 2855 /// getNeonEltType - Return the QualType corresponding to the elements of 2856 /// the vector type specified by the NeonTypeFlags. This is used to check 2857 /// the pointer arguments for Neon load/store intrinsics. 2858 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2859 bool IsPolyUnsigned, bool IsInt64Long) { 2860 switch (Flags.getEltType()) { 2861 case NeonTypeFlags::Int8: 2862 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2863 case NeonTypeFlags::Int16: 2864 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2865 case NeonTypeFlags::Int32: 2866 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2867 case NeonTypeFlags::Int64: 2868 if (IsInt64Long) 2869 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2870 else 2871 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2872 : Context.LongLongTy; 2873 case NeonTypeFlags::Poly8: 2874 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2875 case NeonTypeFlags::Poly16: 2876 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 2877 case NeonTypeFlags::Poly64: 2878 if (IsInt64Long) 2879 return Context.UnsignedLongTy; 2880 else 2881 return Context.UnsignedLongLongTy; 2882 case NeonTypeFlags::Poly128: 2883 break; 2884 case NeonTypeFlags::Float16: 2885 return Context.HalfTy; 2886 case NeonTypeFlags::Float32: 2887 return Context.FloatTy; 2888 case NeonTypeFlags::Float64: 2889 return Context.DoubleTy; 2890 case NeonTypeFlags::BFloat16: 2891 return Context.BFloat16Ty; 2892 } 2893 llvm_unreachable("Invalid NeonTypeFlag!"); 2894 } 2895 2896 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2897 // Range check SVE intrinsics that take immediate values. 2898 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2899 2900 switch (BuiltinID) { 2901 default: 2902 return false; 2903 #define GET_SVE_IMMEDIATE_CHECK 2904 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2905 #undef GET_SVE_IMMEDIATE_CHECK 2906 #define GET_SME_IMMEDIATE_CHECK 2907 #include "clang/Basic/arm_sme_sema_rangechecks.inc" 2908 #undef GET_SME_IMMEDIATE_CHECK 2909 } 2910 2911 // Perform all the immediate checks for this builtin call. 2912 bool HasError = false; 2913 for (auto &I : ImmChecks) { 2914 int ArgNum, CheckTy, ElementSizeInBits; 2915 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2916 2917 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2918 2919 // Function that checks whether the operand (ArgNum) is an immediate 2920 // that is one of the predefined values. 2921 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2922 int ErrDiag) -> bool { 2923 // We can't check the value of a dependent argument. 2924 Expr *Arg = TheCall->getArg(ArgNum); 2925 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2926 return false; 2927 2928 // Check constant-ness first. 
2929 llvm::APSInt Imm; 2930 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2931 return true; 2932 2933 if (!CheckImm(Imm.getSExtValue())) 2934 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2935 return false; 2936 }; 2937 2938 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2939 case SVETypeFlags::ImmCheck0_31: 2940 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2941 HasError = true; 2942 break; 2943 case SVETypeFlags::ImmCheck0_13: 2944 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2945 HasError = true; 2946 break; 2947 case SVETypeFlags::ImmCheck1_16: 2948 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2949 HasError = true; 2950 break; 2951 case SVETypeFlags::ImmCheck0_7: 2952 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2953 HasError = true; 2954 break; 2955 case SVETypeFlags::ImmCheckExtract: 2956 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2957 (2048 / ElementSizeInBits) - 1)) 2958 HasError = true; 2959 break; 2960 case SVETypeFlags::ImmCheckShiftRight: 2961 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2962 HasError = true; 2963 break; 2964 case SVETypeFlags::ImmCheckShiftRightNarrow: 2965 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2966 ElementSizeInBits / 2)) 2967 HasError = true; 2968 break; 2969 case SVETypeFlags::ImmCheckShiftLeft: 2970 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2971 ElementSizeInBits - 1)) 2972 HasError = true; 2973 break; 2974 case SVETypeFlags::ImmCheckLaneIndex: 2975 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2976 (128 / (1 * ElementSizeInBits)) - 1)) 2977 HasError = true; 2978 break; 2979 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2980 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2981 (128 / (2 * ElementSizeInBits)) - 1)) 2982 HasError = true; 2983 break; 2984 case SVETypeFlags::ImmCheckLaneIndexDot: 2985 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2986 (128 / (4 * ElementSizeInBits)) - 1)) 2987 HasError = true; 2988 break; 2989 case SVETypeFlags::ImmCheckComplexRot90_270: 2990 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2991 diag::err_rotation_argument_to_cadd)) 2992 HasError = true; 2993 break; 2994 case SVETypeFlags::ImmCheckComplexRotAll90: 2995 if (CheckImmediateInSet( 2996 [](int64_t V) { 2997 return V == 0 || V == 90 || V == 180 || V == 270; 2998 }, 2999 diag::err_rotation_argument_to_cmla)) 3000 HasError = true; 3001 break; 3002 case SVETypeFlags::ImmCheck0_1: 3003 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 3004 HasError = true; 3005 break; 3006 case SVETypeFlags::ImmCheck0_2: 3007 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 3008 HasError = true; 3009 break; 3010 case SVETypeFlags::ImmCheck0_3: 3011 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 3012 HasError = true; 3013 break; 3014 case SVETypeFlags::ImmCheck0_0: 3015 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 0)) 3016 HasError = true; 3017 break; 3018 case SVETypeFlags::ImmCheck0_15: 3019 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 15)) 3020 HasError = true; 3021 break; 3022 case SVETypeFlags::ImmCheck0_255: 3023 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 255)) 3024 HasError = true; 3025 break; 3026 } 3027 } 3028 3029 return HasError; 3030 } 3031 3032 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 3033 unsigned BuiltinID, CallExpr *TheCall) { 3034 llvm::APSInt Result; 3035 uint64_t mask = 0; 3036 unsigned TV = 0; 3037 int PtrArgNum = -1; 3038 bool 
HasConstPtr = false; 3039 switch (BuiltinID) { 3040 #define GET_NEON_OVERLOAD_CHECK 3041 #include "clang/Basic/arm_neon.inc" 3042 #include "clang/Basic/arm_fp16.inc" 3043 #undef GET_NEON_OVERLOAD_CHECK 3044 } 3045 3046 // For NEON intrinsics which are overloaded on vector element type, validate 3047 // the immediate which specifies which variant to emit. 3048 unsigned ImmArg = TheCall->getNumArgs()-1; 3049 if (mask) { 3050 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 3051 return true; 3052 3053 TV = Result.getLimitedValue(64); 3054 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 3055 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 3056 << TheCall->getArg(ImmArg)->getSourceRange(); 3057 } 3058 3059 if (PtrArgNum >= 0) { 3060 // Check that pointer arguments have the specified type. 3061 Expr *Arg = TheCall->getArg(PtrArgNum); 3062 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 3063 Arg = ICE->getSubExpr(); 3064 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 3065 QualType RHSTy = RHS.get()->getType(); 3066 3067 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 3068 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 3069 Arch == llvm::Triple::aarch64_32 || 3070 Arch == llvm::Triple::aarch64_be; 3071 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 3072 QualType EltTy = 3073 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 3074 if (HasConstPtr) 3075 EltTy = EltTy.withConst(); 3076 QualType LHSTy = Context.getPointerType(EltTy); 3077 AssignConvertType ConvTy; 3078 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 3079 if (RHS.isInvalid()) 3080 return true; 3081 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 3082 RHS.get(), AA_Assigning)) 3083 return true; 3084 } 3085 3086 // For NEON intrinsics which take an immediate value as part of the 3087 // instruction, range check them here. 3088 unsigned i = 0, l = 0, u = 0; 3089 switch (BuiltinID) { 3090 default: 3091 return false; 3092 #define GET_NEON_IMMEDIATE_CHECK 3093 #include "clang/Basic/arm_neon.inc" 3094 #include "clang/Basic/arm_fp16.inc" 3095 #undef GET_NEON_IMMEDIATE_CHECK 3096 } 3097 3098 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 3099 } 3100 3101 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 3102 switch (BuiltinID) { 3103 default: 3104 return false; 3105 #include "clang/Basic/arm_mve_builtin_sema.inc" 3106 } 3107 } 3108 3109 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3110 CallExpr *TheCall) { 3111 bool Err = false; 3112 switch (BuiltinID) { 3113 default: 3114 return false; 3115 #include "clang/Basic/arm_cde_builtin_sema.inc" 3116 } 3117 3118 if (Err) 3119 return true; 3120 3121 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 3122 } 3123 3124 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 3125 const Expr *CoprocArg, bool WantCDE) { 3126 if (isConstantEvaluated()) 3127 return false; 3128 3129 // We can't check the value of a dependent argument. 
3130 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 3131 return false; 3132 3133 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 3134 int64_t CoprocNo = CoprocNoAP.getExtValue(); 3135 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 3136 3137 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 3138 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 3139 3140 if (IsCDECoproc != WantCDE) 3141 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 3142 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 3143 3144 return false; 3145 } 3146 3147 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 3148 unsigned MaxWidth) { 3149 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 3150 BuiltinID == ARM::BI__builtin_arm_ldaex || 3151 BuiltinID == ARM::BI__builtin_arm_strex || 3152 BuiltinID == ARM::BI__builtin_arm_stlex || 3153 BuiltinID == AArch64::BI__builtin_arm_ldrex || 3154 BuiltinID == AArch64::BI__builtin_arm_ldaex || 3155 BuiltinID == AArch64::BI__builtin_arm_strex || 3156 BuiltinID == AArch64::BI__builtin_arm_stlex) && 3157 "unexpected ARM builtin"); 3158 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 3159 BuiltinID == ARM::BI__builtin_arm_ldaex || 3160 BuiltinID == AArch64::BI__builtin_arm_ldrex || 3161 BuiltinID == AArch64::BI__builtin_arm_ldaex; 3162 3163 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 3164 3165 // Ensure that we have the proper number of arguments. 3166 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 3167 return true; 3168 3169 // Inspect the pointer argument of the atomic builtin. This should always be 3170 // a pointer type, whose element is an integral scalar or pointer type. 3171 // Because it is a pointer type, we don't have to worry about any implicit 3172 // casts here. 3173 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 3174 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 3175 if (PointerArgRes.isInvalid()) 3176 return true; 3177 PointerArg = PointerArgRes.get(); 3178 3179 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 3180 if (!pointerType) { 3181 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 3182 << PointerArg->getType() << PointerArg->getSourceRange(); 3183 return true; 3184 } 3185 3186 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 3187 // task is to insert the appropriate casts into the AST. First work out just 3188 // what the appropriate type is. 3189 QualType ValType = pointerType->getPointeeType(); 3190 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 3191 if (IsLdrex) 3192 AddrType.addConst(); 3193 3194 // Issue a warning if the cast is dodgy. 3195 CastKind CastNeeded = CK_NoOp; 3196 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 3197 CastNeeded = CK_BitCast; 3198 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 3199 << PointerArg->getType() << Context.getPointerType(AddrType) 3200 << AA_Passing << PointerArg->getSourceRange(); 3201 } 3202 3203 // Finally, do the cast and replace the argument with the corrected version. 3204 AddrType = Context.getPointerType(AddrType); 3205 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 3206 if (PointerArgRes.isInvalid()) 3207 return true; 3208 PointerArg = PointerArgRes.get(); 3209 3210 TheCall->setArg(IsLdrex ? 
0 : 1, PointerArg); 3211 3212 // In general, we allow ints, floats and pointers to be loaded and stored. 3213 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 3214 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 3215 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 3216 << PointerArg->getType() << PointerArg->getSourceRange(); 3217 return true; 3218 } 3219 3220 // But ARM doesn't have instructions to deal with 128-bit versions. 3221 if (Context.getTypeSize(ValType) > MaxWidth) { 3222 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 3223 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 3224 << PointerArg->getType() << PointerArg->getSourceRange(); 3225 return true; 3226 } 3227 3228 switch (ValType.getObjCLifetime()) { 3229 case Qualifiers::OCL_None: 3230 case Qualifiers::OCL_ExplicitNone: 3231 // okay 3232 break; 3233 3234 case Qualifiers::OCL_Weak: 3235 case Qualifiers::OCL_Strong: 3236 case Qualifiers::OCL_Autoreleasing: 3237 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 3238 << ValType << PointerArg->getSourceRange(); 3239 return true; 3240 } 3241 3242 if (IsLdrex) { 3243 TheCall->setType(ValType); 3244 return false; 3245 } 3246 3247 // Initialize the argument to be stored. 3248 ExprResult ValArg = TheCall->getArg(0); 3249 InitializedEntity Entity = InitializedEntity::InitializeParameter( 3250 Context, ValType, /*consume*/ false); 3251 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 3252 if (ValArg.isInvalid()) 3253 return true; 3254 TheCall->setArg(0, ValArg.get()); 3255 3256 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 3257 // but the custom checker bypasses all default analysis. 3258 TheCall->setType(Context.IntTy); 3259 return false; 3260 } 3261 3262 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3263 CallExpr *TheCall) { 3264 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 3265 BuiltinID == ARM::BI__builtin_arm_ldaex || 3266 BuiltinID == ARM::BI__builtin_arm_strex || 3267 BuiltinID == ARM::BI__builtin_arm_stlex) { 3268 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 3269 } 3270 3271 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 3272 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3273 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 3274 } 3275 3276 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 3277 BuiltinID == ARM::BI__builtin_arm_wsr64) 3278 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 3279 3280 if (BuiltinID == ARM::BI__builtin_arm_rsr || 3281 BuiltinID == ARM::BI__builtin_arm_rsrp || 3282 BuiltinID == ARM::BI__builtin_arm_wsr || 3283 BuiltinID == ARM::BI__builtin_arm_wsrp) 3284 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3285 3286 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3287 return true; 3288 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 3289 return true; 3290 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3291 return true; 3292 3293 // For intrinsics which take an immediate value as part of the instruction, 3294 // range check them here. 3295 // FIXME: VFP Intrinsics should error if VFP not present. 
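// Note that __builtin_arm_ssat's saturation position is 1-based (1..32) while
// __builtin_arm_usat's is 0-based (0..31); the ranges checked below reflect
// that difference.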
3296 switch (BuiltinID) { 3297 default: return false; 3298 case ARM::BI__builtin_arm_ssat: 3299 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 3300 case ARM::BI__builtin_arm_usat: 3301 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 3302 case ARM::BI__builtin_arm_ssat16: 3303 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 3304 case ARM::BI__builtin_arm_usat16: 3305 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 3306 case ARM::BI__builtin_arm_vcvtr_f: 3307 case ARM::BI__builtin_arm_vcvtr_d: 3308 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3309 case ARM::BI__builtin_arm_dmb: 3310 case ARM::BI__builtin_arm_dsb: 3311 case ARM::BI__builtin_arm_isb: 3312 case ARM::BI__builtin_arm_dbg: 3313 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 3314 case ARM::BI__builtin_arm_cdp: 3315 case ARM::BI__builtin_arm_cdp2: 3316 case ARM::BI__builtin_arm_mcr: 3317 case ARM::BI__builtin_arm_mcr2: 3318 case ARM::BI__builtin_arm_mrc: 3319 case ARM::BI__builtin_arm_mrc2: 3320 case ARM::BI__builtin_arm_mcrr: 3321 case ARM::BI__builtin_arm_mcrr2: 3322 case ARM::BI__builtin_arm_mrrc: 3323 case ARM::BI__builtin_arm_mrrc2: 3324 case ARM::BI__builtin_arm_ldc: 3325 case ARM::BI__builtin_arm_ldcl: 3326 case ARM::BI__builtin_arm_ldc2: 3327 case ARM::BI__builtin_arm_ldc2l: 3328 case ARM::BI__builtin_arm_stc: 3329 case ARM::BI__builtin_arm_stcl: 3330 case ARM::BI__builtin_arm_stc2: 3331 case ARM::BI__builtin_arm_stc2l: 3332 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 3333 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 3334 /*WantCDE*/ false); 3335 } 3336 } 3337 3338 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 3339 unsigned BuiltinID, 3340 CallExpr *TheCall) { 3341 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 3342 BuiltinID == AArch64::BI__builtin_arm_ldaex || 3343 BuiltinID == AArch64::BI__builtin_arm_strex || 3344 BuiltinID == AArch64::BI__builtin_arm_stlex) { 3345 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 3346 } 3347 3348 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 3349 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3350 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3) || 3351 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 3352 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 3353 } 3354 3355 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 3356 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 3357 BuiltinID == AArch64::BI__builtin_arm_rsr128 || 3358 BuiltinID == AArch64::BI__builtin_arm_wsr128) 3359 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3360 3361 // Memory Tagging Extensions (MTE) Intrinsics 3362 if (BuiltinID == AArch64::BI__builtin_arm_irg || 3363 BuiltinID == AArch64::BI__builtin_arm_addg || 3364 BuiltinID == AArch64::BI__builtin_arm_gmi || 3365 BuiltinID == AArch64::BI__builtin_arm_ldg || 3366 BuiltinID == AArch64::BI__builtin_arm_stg || 3367 BuiltinID == AArch64::BI__builtin_arm_subp) { 3368 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 3369 } 3370 3371 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 3372 BuiltinID == AArch64::BI__builtin_arm_rsrp || 3373 BuiltinID == AArch64::BI__builtin_arm_wsr || 3374 BuiltinID == AArch64::BI__builtin_arm_wsrp) 3375 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3376 3377 // Only check the valid encoding range. Any constant in this range would be 3378 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 3379 // an exception for incorrect registers. 
This matches MSVC behavior. 3380 if (BuiltinID == AArch64::BI_ReadStatusReg || 3381 BuiltinID == AArch64::BI_WriteStatusReg) 3382 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 3383 3384 if (BuiltinID == AArch64::BI__getReg) 3385 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3386 3387 if (BuiltinID == AArch64::BI__break) 3388 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff); 3389 3390 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3391 return true; 3392 3393 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 3394 return true; 3395 3396 // For intrinsics which take an immediate value as part of the instruction, 3397 // range check them here. 3398 unsigned i = 0, l = 0, u = 0; 3399 switch (BuiltinID) { 3400 default: return false; 3401 case AArch64::BI__builtin_arm_dmb: 3402 case AArch64::BI__builtin_arm_dsb: 3403 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 3404 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 3405 } 3406 3407 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 3408 } 3409 3410 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 3411 if (Arg->getType()->getAsPlaceholderType()) 3412 return false; 3413 3414 // The first argument needs to be a record field access. 3415 // If it is an array element access, we delay decision 3416 // to BPF backend to check whether the access is a 3417 // field access or not. 3418 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 3419 isa<MemberExpr>(Arg->IgnoreParens()) || 3420 isa<ArraySubscriptExpr>(Arg->IgnoreParens())); 3421 } 3422 3423 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 3424 QualType ArgType = Arg->getType(); 3425 if (ArgType->getAsPlaceholderType()) 3426 return false; 3427 3428 // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type 3429 // format: 3430 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 3431 // 2. <type> var; 3432 // __builtin_preserve_type_info(var, flag); 3433 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) && 3434 !isa<UnaryOperator>(Arg->IgnoreParens())) 3435 return false; 3436 3437 // Typedef type. 3438 if (ArgType->getAs<TypedefType>()) 3439 return true; 3440 3441 // Record type or Enum type. 3442 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3443 if (const auto *RT = Ty->getAs<RecordType>()) { 3444 if (!RT->getDecl()->getDeclName().isEmpty()) 3445 return true; 3446 } else if (const auto *ET = Ty->getAs<EnumType>()) { 3447 if (!ET->getDecl()->getDeclName().isEmpty()) 3448 return true; 3449 } 3450 3451 return false; 3452 } 3453 3454 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 3455 QualType ArgType = Arg->getType(); 3456 if (ArgType->getAsPlaceholderType()) 3457 return false; 3458 3459 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 3460 // format: 3461 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 3462 // flag); 3463 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 3464 if (!UO) 3465 return false; 3466 3467 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 3468 if (!CE) 3469 return false; 3470 if (CE->getCastKind() != CK_IntegralToPointer && 3471 CE->getCastKind() != CK_NullToPointer) 3472 return false; 3473 3474 // The integer must be from an EnumConstantDecl. 3475 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 3476 if (!DR) 3477 return false; 3478 3479 const EnumConstantDecl *Enumerator = 3480 dyn_cast<EnumConstantDecl>(DR->getDecl()); 3481 if (!Enumerator) 3482 return false; 3483 3484 // The type must be EnumType. 
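// That is, in *(<enum_type> *)<enum_value> the cast target must name an enum
// type, and the enumerator supplying the value must belong to that same enum.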
3485 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3486 const auto *ET = Ty->getAs<EnumType>(); 3487 if (!ET) 3488 return false; 3489 3490 // The enum value must be supported. 3491 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); 3492 } 3493 3494 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 3495 CallExpr *TheCall) { 3496 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 3497 BuiltinID == BPF::BI__builtin_btf_type_id || 3498 BuiltinID == BPF::BI__builtin_preserve_type_info || 3499 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 3500 "unexpected BPF builtin"); 3501 3502 if (checkArgCount(*this, TheCall, 2)) 3503 return true; 3504 3505 // The second argument needs to be a constant int 3506 Expr *Arg = TheCall->getArg(1); 3507 std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 3508 diag::kind kind; 3509 if (!Value) { 3510 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 3511 kind = diag::err_preserve_field_info_not_const; 3512 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 3513 kind = diag::err_btf_type_id_not_const; 3514 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 3515 kind = diag::err_preserve_type_info_not_const; 3516 else 3517 kind = diag::err_preserve_enum_value_not_const; 3518 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 3519 return true; 3520 } 3521 3522 // The first argument 3523 Arg = TheCall->getArg(0); 3524 bool InvalidArg = false; 3525 bool ReturnUnsignedInt = true; 3526 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 3527 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 3528 InvalidArg = true; 3529 kind = diag::err_preserve_field_info_not_field; 3530 } 3531 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 3532 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 3533 InvalidArg = true; 3534 kind = diag::err_preserve_type_info_invalid; 3535 } 3536 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 3537 if (!isValidBPFPreserveEnumValueArg(Arg)) { 3538 InvalidArg = true; 3539 kind = diag::err_preserve_enum_value_invalid; 3540 } 3541 ReturnUnsignedInt = false; 3542 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) { 3543 ReturnUnsignedInt = false; 3544 } 3545 3546 if (InvalidArg) { 3547 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3548 return true; 3549 } 3550 3551 if (ReturnUnsignedInt) 3552 TheCall->setType(Context.UnsignedIntTy); 3553 else 3554 TheCall->setType(Context.UnsignedLongTy); 3555 return false; 3556 } 3557 3558 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3559 struct ArgInfo { 3560 uint8_t OpNum; 3561 bool IsSigned; 3562 uint8_t BitWidth; 3563 uint8_t Align; 3564 }; 3565 struct BuiltinInfo { 3566 unsigned BuiltinID; 3567 ArgInfo Infos[2]; 3568 }; 3569 3570 static BuiltinInfo Infos[] = { 3571 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3572 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3573 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3574 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3575 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3576 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3577 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3578 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3579 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3580 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3581 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3582 3583 { 
Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3584 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3585 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3586 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3587 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3588 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3589 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3590 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3591 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3592 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3593 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3594 3595 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3596 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3597 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3598 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3599 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3600 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3601 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3602 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3603 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3604 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3605 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3606 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3607 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3608 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3609 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3610 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3611 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3612 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3613 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3614 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3615 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3616 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3617 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3618 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3619 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3620 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3621 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3622 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3623 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3624 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3625 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3626 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3627 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3628 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3629 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3630 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3631 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3632 { 
Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3633 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3634 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3635 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3636 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3637 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3638 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3639 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3640 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3641 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3642 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3643 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3644 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3645 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 3646 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3647 {{ 1, false, 6, 0 }} }, 3648 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3649 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3650 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3651 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3652 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3653 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3654 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3655 {{ 1, false, 5, 0 }} }, 3656 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3657 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3658 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3659 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3660 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3661 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3662 { 2, false, 5, 0 }} }, 3663 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 3664 { 2, false, 6, 0 }} }, 3665 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3666 { 3, false, 5, 0 }} }, 3667 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3668 { 3, false, 6, 0 }} }, 3669 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3670 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3671 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3672 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3673 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3674 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3675 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3676 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 3677 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3678 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3679 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3680 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3681 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3682 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3683 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 
5, 0 }} }, 3684 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3685 {{ 2, false, 4, 0 }, 3686 { 3, false, 5, 0 }} }, 3687 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3688 {{ 2, false, 4, 0 }, 3689 { 3, false, 5, 0 }} }, 3690 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3691 {{ 2, false, 4, 0 }, 3692 { 3, false, 5, 0 }} }, 3693 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3694 {{ 2, false, 4, 0 }, 3695 { 3, false, 5, 0 }} }, 3696 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3697 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3698 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3699 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3700 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3701 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3702 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3703 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3704 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3705 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3706 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3707 { 2, false, 5, 0 }} }, 3708 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3709 { 2, false, 6, 0 }} }, 3710 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 3711 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3712 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3713 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3714 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3715 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3716 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3717 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3718 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3719 {{ 1, false, 4, 0 }} }, 3720 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3721 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3722 {{ 1, false, 4, 0 }} }, 3723 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3724 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3725 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3726 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3727 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3728 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3729 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3730 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3731 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3732 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3733 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3734 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3735 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3736 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3737 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3738 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 3739 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3740 { 
Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3741 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3742 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3743 {{ 3, false, 1, 0 }} }, 3744 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3745 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3746 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3747 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3748 {{ 3, false, 1, 0 }} }, 3749 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3750 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3751 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3752 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3753 {{ 3, false, 1, 0 }} }, 3754 3755 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, {{ 2, false, 2, 0 }} }, 3756 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B, 3757 {{ 2, false, 2, 0 }} }, 3758 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx, 3759 {{ 3, false, 2, 0 }} }, 3760 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B, 3761 {{ 3, false, 2, 0 }} }, 3762 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, {{ 2, false, 2, 0 }} }, 3763 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B, 3764 {{ 2, false, 2, 0 }} }, 3765 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx, 3766 {{ 3, false, 2, 0 }} }, 3767 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B, 3768 {{ 3, false, 2, 0 }} }, 3769 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {{ 2, false, 3, 0 }} }, 3770 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {{ 2, false, 3, 0 }} }, 3771 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} }, 3772 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, 3773 {{ 3, false, 3, 0 }} }, 3774 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {{ 2, false, 3, 0 }} }, 3775 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {{ 2, false, 3, 0 }} }, 3776 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {{ 3, false, 3, 0 }} }, 3777 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, 3778 {{ 3, false, 3, 0 }} }, 3779 }; 3780 3781 // Use a dynamically initialized static to sort the table exactly once on 3782 // first run. 3783 static const bool SortOnce = 3784 (llvm::sort(Infos, 3785 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3786 return LHS.BuiltinID < RHS.BuiltinID; 3787 }), 3788 true); 3789 (void)SortOnce; 3790 3791 const BuiltinInfo *F = llvm::partition_point( 3792 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3793 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3794 return false; 3795 3796 bool Error = false; 3797 3798 for (const ArgInfo &A : F->Infos) { 3799 // Ignore empty ArgInfo elements. 3800 if (A.BitWidth == 0) 3801 continue; 3802 3803 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3804 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1; 3805 if (!A.Align) { 3806 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3807 } else { 3808 unsigned M = 1 << A.Align; 3809 Min *= M; 3810 Max *= M; 3811 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3812 Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 3813 } 3814 } 3815 return Error; 3816 } 3817 3818 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 3819 CallExpr *TheCall) { 3820 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3821 } 3822 3823 bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI, 3824 unsigned BuiltinID, 3825 CallExpr *TheCall) { 3826 switch (BuiltinID) { 3827 default: 3828 break; 3829 case LoongArch::BI__builtin_loongarch_cacop_d: 3830 if (!TI.hasFeature("64bit")) 3831 return Diag(TheCall->getBeginLoc(), 3832 diag::err_loongarch_builtin_requires_la64) 3833 << TheCall->getSourceRange(); 3834 [[fallthrough]]; 3835 case LoongArch::BI__builtin_loongarch_cacop_w: { 3836 if (BuiltinID == LoongArch::BI__builtin_loongarch_cacop_w && 3837 !TI.hasFeature("32bit")) 3838 return Diag(TheCall->getBeginLoc(), 3839 diag::err_loongarch_builtin_requires_la32) 3840 << TheCall->getSourceRange(); 3841 SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5)); 3842 SemaBuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12), 3843 llvm::maxIntN(12)); 3844 break; 3845 } 3846 case LoongArch::BI__builtin_loongarch_crc_w_b_w: 3847 case LoongArch::BI__builtin_loongarch_crc_w_h_w: 3848 case LoongArch::BI__builtin_loongarch_crc_w_w_w: 3849 case LoongArch::BI__builtin_loongarch_crc_w_d_w: 3850 case LoongArch::BI__builtin_loongarch_crcc_w_b_w: 3851 case LoongArch::BI__builtin_loongarch_crcc_w_h_w: 3852 case LoongArch::BI__builtin_loongarch_crcc_w_w_w: 3853 case LoongArch::BI__builtin_loongarch_crcc_w_d_w: 3854 case LoongArch::BI__builtin_loongarch_iocsrrd_d: 3855 case LoongArch::BI__builtin_loongarch_iocsrwr_d: 3856 case LoongArch::BI__builtin_loongarch_asrtle_d: 3857 case LoongArch::BI__builtin_loongarch_asrtgt_d: 3858 if (!TI.hasFeature("64bit")) 3859 return Diag(TheCall->getBeginLoc(), 3860 diag::err_loongarch_builtin_requires_la64) 3861 << TheCall->getSourceRange(); 3862 break; 3863 case LoongArch::BI__builtin_loongarch_break: 3864 case LoongArch::BI__builtin_loongarch_dbar: 3865 case LoongArch::BI__builtin_loongarch_ibar: 3866 case LoongArch::BI__builtin_loongarch_syscall: 3867 // Check if immediate is in [0, 32767]. 
3868 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 32767); 3869 case LoongArch::BI__builtin_loongarch_csrrd_w: 3870 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383); 3871 case LoongArch::BI__builtin_loongarch_csrwr_w: 3872 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383); 3873 case LoongArch::BI__builtin_loongarch_csrxchg_w: 3874 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383); 3875 case LoongArch::BI__builtin_loongarch_csrrd_d: 3876 if (!TI.hasFeature("64bit")) 3877 return Diag(TheCall->getBeginLoc(), 3878 diag::err_loongarch_builtin_requires_la64) 3879 << TheCall->getSourceRange(); 3880 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383); 3881 case LoongArch::BI__builtin_loongarch_csrwr_d: 3882 if (!TI.hasFeature("64bit")) 3883 return Diag(TheCall->getBeginLoc(), 3884 diag::err_loongarch_builtin_requires_la64) 3885 << TheCall->getSourceRange(); 3886 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383); 3887 case LoongArch::BI__builtin_loongarch_csrxchg_d: 3888 if (!TI.hasFeature("64bit")) 3889 return Diag(TheCall->getBeginLoc(), 3890 diag::err_loongarch_builtin_requires_la64) 3891 << TheCall->getSourceRange(); 3892 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383); 3893 case LoongArch::BI__builtin_loongarch_lddir_d: 3894 case LoongArch::BI__builtin_loongarch_ldpte_d: 3895 if (!TI.hasFeature("64bit")) 3896 return Diag(TheCall->getBeginLoc(), 3897 diag::err_loongarch_builtin_requires_la64) 3898 << TheCall->getSourceRange(); 3899 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 3900 case LoongArch::BI__builtin_loongarch_movfcsr2gr: 3901 case LoongArch::BI__builtin_loongarch_movgr2fcsr: 3902 return SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2)); 3903 } 3904 3905 return false; 3906 } 3907 3908 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 3909 unsigned BuiltinID, CallExpr *TheCall) { 3910 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 3911 CheckMipsBuiltinArgument(BuiltinID, TheCall); 3912 } 3913 3914 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 3915 CallExpr *TheCall) { 3916 3917 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 3918 BuiltinID <= Mips::BI__builtin_mips_lwx) { 3919 if (!TI.hasFeature("dsp")) 3920 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 3921 } 3922 3923 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 3924 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 3925 if (!TI.hasFeature("dspr2")) 3926 return Diag(TheCall->getBeginLoc(), 3927 diag::err_mips_builtin_requires_dspr2); 3928 } 3929 3930 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 3931 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 3932 if (!TI.hasFeature("msa")) 3933 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 3934 } 3935 3936 return false; 3937 } 3938 3939 // CheckMipsBuiltinArgument - Checks the constant value passed to the 3940 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 3941 // ordering for DSP is unspecified. MSA is ordered by the data format used 3942 // by the underlying instruction i.e., df/m, df/n and then by size. 3943 // 3944 // FIXME: The size tests here should instead be tablegen'd along with the 3945 // definitions from include/clang/Basic/BuiltinsMips.def. 3946 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 3947 // be too. 
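// In the switch below, 'i' is the index of the checked operand, [l, u] is the
// allowed range for the immediate, and a non-zero 'm' additionally requires
// the immediate to be a multiple of 'm'.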
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics map to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually has an unsigned 5 bit field,
  // not a df/n field.
3990 case Mips::BI__builtin_msa_cfcmsa: 3991 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3992 case Mips::BI__builtin_msa_clei_u_b: 3993 case Mips::BI__builtin_msa_clei_u_h: 3994 case Mips::BI__builtin_msa_clei_u_w: 3995 case Mips::BI__builtin_msa_clei_u_d: 3996 case Mips::BI__builtin_msa_clti_u_b: 3997 case Mips::BI__builtin_msa_clti_u_h: 3998 case Mips::BI__builtin_msa_clti_u_w: 3999 case Mips::BI__builtin_msa_clti_u_d: 4000 case Mips::BI__builtin_msa_maxi_u_b: 4001 case Mips::BI__builtin_msa_maxi_u_h: 4002 case Mips::BI__builtin_msa_maxi_u_w: 4003 case Mips::BI__builtin_msa_maxi_u_d: 4004 case Mips::BI__builtin_msa_mini_u_b: 4005 case Mips::BI__builtin_msa_mini_u_h: 4006 case Mips::BI__builtin_msa_mini_u_w: 4007 case Mips::BI__builtin_msa_mini_u_d: 4008 case Mips::BI__builtin_msa_addvi_b: 4009 case Mips::BI__builtin_msa_addvi_h: 4010 case Mips::BI__builtin_msa_addvi_w: 4011 case Mips::BI__builtin_msa_addvi_d: 4012 case Mips::BI__builtin_msa_bclri_w: 4013 case Mips::BI__builtin_msa_bnegi_w: 4014 case Mips::BI__builtin_msa_bseti_w: 4015 case Mips::BI__builtin_msa_sat_s_w: 4016 case Mips::BI__builtin_msa_sat_u_w: 4017 case Mips::BI__builtin_msa_slli_w: 4018 case Mips::BI__builtin_msa_srai_w: 4019 case Mips::BI__builtin_msa_srari_w: 4020 case Mips::BI__builtin_msa_srli_w: 4021 case Mips::BI__builtin_msa_srlri_w: 4022 case Mips::BI__builtin_msa_subvi_b: 4023 case Mips::BI__builtin_msa_subvi_h: 4024 case Mips::BI__builtin_msa_subvi_w: 4025 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 4026 case Mips::BI__builtin_msa_binsli_w: 4027 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 4028 // These intrinsics take an unsigned 6 bit immediate. 4029 case Mips::BI__builtin_msa_bclri_d: 4030 case Mips::BI__builtin_msa_bnegi_d: 4031 case Mips::BI__builtin_msa_bseti_d: 4032 case Mips::BI__builtin_msa_sat_s_d: 4033 case Mips::BI__builtin_msa_sat_u_d: 4034 case Mips::BI__builtin_msa_slli_d: 4035 case Mips::BI__builtin_msa_srai_d: 4036 case Mips::BI__builtin_msa_srari_d: 4037 case Mips::BI__builtin_msa_srli_d: 4038 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 4039 case Mips::BI__builtin_msa_binsli_d: 4040 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 4041 // These intrinsics take a signed 5 bit immediate. 4042 case Mips::BI__builtin_msa_ceqi_b: 4043 case Mips::BI__builtin_msa_ceqi_h: 4044 case Mips::BI__builtin_msa_ceqi_w: 4045 case Mips::BI__builtin_msa_ceqi_d: 4046 case Mips::BI__builtin_msa_clti_s_b: 4047 case Mips::BI__builtin_msa_clti_s_h: 4048 case Mips::BI__builtin_msa_clti_s_w: 4049 case Mips::BI__builtin_msa_clti_s_d: 4050 case Mips::BI__builtin_msa_clei_s_b: 4051 case Mips::BI__builtin_msa_clei_s_h: 4052 case Mips::BI__builtin_msa_clei_s_w: 4053 case Mips::BI__builtin_msa_clei_s_d: 4054 case Mips::BI__builtin_msa_maxi_s_b: 4055 case Mips::BI__builtin_msa_maxi_s_h: 4056 case Mips::BI__builtin_msa_maxi_s_w: 4057 case Mips::BI__builtin_msa_maxi_s_d: 4058 case Mips::BI__builtin_msa_mini_s_b: 4059 case Mips::BI__builtin_msa_mini_s_h: 4060 case Mips::BI__builtin_msa_mini_s_w: 4061 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 4062 // These intrinsics take an unsigned 8 bit immediate. 
4063 case Mips::BI__builtin_msa_andi_b: 4064 case Mips::BI__builtin_msa_nori_b: 4065 case Mips::BI__builtin_msa_ori_b: 4066 case Mips::BI__builtin_msa_shf_b: 4067 case Mips::BI__builtin_msa_shf_h: 4068 case Mips::BI__builtin_msa_shf_w: 4069 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 4070 case Mips::BI__builtin_msa_bseli_b: 4071 case Mips::BI__builtin_msa_bmnzi_b: 4072 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 4073 // df/n format 4074 // These intrinsics take an unsigned 4 bit immediate. 4075 case Mips::BI__builtin_msa_copy_s_b: 4076 case Mips::BI__builtin_msa_copy_u_b: 4077 case Mips::BI__builtin_msa_insve_b: 4078 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 4079 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 4080 // These intrinsics take an unsigned 3 bit immediate. 4081 case Mips::BI__builtin_msa_copy_s_h: 4082 case Mips::BI__builtin_msa_copy_u_h: 4083 case Mips::BI__builtin_msa_insve_h: 4084 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 4085 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 4086 // These intrinsics take an unsigned 2 bit immediate. 4087 case Mips::BI__builtin_msa_copy_s_w: 4088 case Mips::BI__builtin_msa_copy_u_w: 4089 case Mips::BI__builtin_msa_insve_w: 4090 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 4091 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 4092 // These intrinsics take an unsigned 1 bit immediate. 4093 case Mips::BI__builtin_msa_copy_s_d: 4094 case Mips::BI__builtin_msa_copy_u_d: 4095 case Mips::BI__builtin_msa_insve_d: 4096 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 4097 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 4098 // Memory offsets and immediate loads. 4099 // These intrinsics take a signed 10 bit immediate. 4100 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 4101 case Mips::BI__builtin_msa_ldi_h: 4102 case Mips::BI__builtin_msa_ldi_w: 4103 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 4104 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 4105 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 4106 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 4107 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 4108 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 4109 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 4110 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 4111 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 4112 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 4113 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 4114 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 4115 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 4116 } 4117 4118 if (!m) 4119 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4120 4121 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 4122 SemaBuiltinConstantArgMultiple(TheCall, i, m); 4123 } 4124 4125 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 4126 /// advancing the pointer over the consumed characters. The decoded type is 4127 /// returned. 
If the decoded type represents a constant integer with a 4128 /// constraint on its value then Mask is set to that value. The type descriptors 4129 /// used in Str are specific to PPC MMA builtins and are documented in the file 4130 /// defining the PPC builtins. 4131 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 4132 unsigned &Mask) { 4133 bool RequireICE = false; 4134 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 4135 switch (*Str++) { 4136 case 'V': 4137 return Context.getVectorType(Context.UnsignedCharTy, 16, 4138 VectorType::VectorKind::AltiVecVector); 4139 case 'i': { 4140 char *End; 4141 unsigned size = strtoul(Str, &End, 10); 4142 assert(End != Str && "Missing constant parameter constraint"); 4143 Str = End; 4144 Mask = size; 4145 return Context.IntTy; 4146 } 4147 case 'W': { 4148 char *End; 4149 unsigned size = strtoul(Str, &End, 10); 4150 assert(End != Str && "Missing PowerPC MMA type size"); 4151 Str = End; 4152 QualType Type; 4153 switch (size) { 4154 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 4155 case size: Type = Context.Id##Ty; break; 4156 #include "clang/Basic/PPCTypes.def" 4157 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 4158 } 4159 bool CheckVectorArgs = false; 4160 while (!CheckVectorArgs) { 4161 switch (*Str++) { 4162 case '*': 4163 Type = Context.getPointerType(Type); 4164 break; 4165 case 'C': 4166 Type = Type.withConst(); 4167 break; 4168 default: 4169 CheckVectorArgs = true; 4170 --Str; 4171 break; 4172 } 4173 } 4174 return Type; 4175 } 4176 default: 4177 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 4178 } 4179 } 4180 4181 static bool isPPC_64Builtin(unsigned BuiltinID) { 4182 // These builtins only work on PPC 64bit targets. 4183 switch (BuiltinID) { 4184 case PPC::BI__builtin_divde: 4185 case PPC::BI__builtin_divdeu: 4186 case PPC::BI__builtin_bpermd: 4187 case PPC::BI__builtin_pdepd: 4188 case PPC::BI__builtin_pextd: 4189 case PPC::BI__builtin_ppc_ldarx: 4190 case PPC::BI__builtin_ppc_stdcx: 4191 case PPC::BI__builtin_ppc_tdw: 4192 case PPC::BI__builtin_ppc_trapd: 4193 case PPC::BI__builtin_ppc_cmpeqb: 4194 case PPC::BI__builtin_ppc_setb: 4195 case PPC::BI__builtin_ppc_mulhd: 4196 case PPC::BI__builtin_ppc_mulhdu: 4197 case PPC::BI__builtin_ppc_maddhd: 4198 case PPC::BI__builtin_ppc_maddhdu: 4199 case PPC::BI__builtin_ppc_maddld: 4200 case PPC::BI__builtin_ppc_load8r: 4201 case PPC::BI__builtin_ppc_store8r: 4202 case PPC::BI__builtin_ppc_insert_exp: 4203 case PPC::BI__builtin_ppc_extract_sig: 4204 case PPC::BI__builtin_ppc_addex: 4205 case PPC::BI__builtin_darn: 4206 case PPC::BI__builtin_darn_raw: 4207 case PPC::BI__builtin_ppc_compare_and_swaplp: 4208 case PPC::BI__builtin_ppc_fetch_and_addlp: 4209 case PPC::BI__builtin_ppc_fetch_and_andlp: 4210 case PPC::BI__builtin_ppc_fetch_and_orlp: 4211 case PPC::BI__builtin_ppc_fetch_and_swaplp: 4212 return true; 4213 } 4214 return false; 4215 } 4216 4217 /// Returns true if the argument consists of one contiguous run of 1s with any 4218 /// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so 4219 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 4220 /// since all 1s are not contiguous. 4221 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) { 4222 llvm::APSInt Result; 4223 // We can't check the value of a dependent argument. 
4224 Expr *Arg = TheCall->getArg(ArgNum); 4225 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4226 return false; 4227 4228 // Check constant-ness first. 4229 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4230 return true; 4231 4232 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s. 4233 if (Result.isShiftedMask() || (~Result).isShiftedMask()) 4234 return false; 4235 4236 return Diag(TheCall->getBeginLoc(), 4237 diag::err_argument_not_contiguous_bit_field) 4238 << ArgNum << Arg->getSourceRange(); 4239 } 4240 4241 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 4242 CallExpr *TheCall) { 4243 unsigned i = 0, l = 0, u = 0; 4244 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 4245 llvm::APSInt Result; 4246 4247 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit) 4248 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 4249 << TheCall->getSourceRange(); 4250 4251 switch (BuiltinID) { 4252 default: return false; 4253 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 4254 case PPC::BI__builtin_altivec_crypto_vshasigmad: 4255 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 4256 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4257 case PPC::BI__builtin_altivec_dss: 4258 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 4259 case PPC::BI__builtin_tbegin: 4260 case PPC::BI__builtin_tend: 4261 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 4262 case PPC::BI__builtin_tsr: 4263 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7); 4264 case PPC::BI__builtin_tabortwc: 4265 case PPC::BI__builtin_tabortdc: 4266 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 4267 case PPC::BI__builtin_tabortwci: 4268 case PPC::BI__builtin_tabortdci: 4269 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 4270 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 4271 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05', 4272 // __builtin_(un)pack_longdouble are available only if long double uses IBM 4273 // extended double representation. 
4274 case PPC::BI__builtin_unpack_longdouble: 4275 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1)) 4276 return true; 4277 [[fallthrough]]; 4278 case PPC::BI__builtin_pack_longdouble: 4279 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble()) 4280 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi) 4281 << "ibmlongdouble"; 4282 return false; 4283 case PPC::BI__builtin_altivec_dst: 4284 case PPC::BI__builtin_altivec_dstt: 4285 case PPC::BI__builtin_altivec_dstst: 4286 case PPC::BI__builtin_altivec_dststt: 4287 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4288 case PPC::BI__builtin_vsx_xxpermdi: 4289 case PPC::BI__builtin_vsx_xxsldwi: 4290 return SemaBuiltinVSX(TheCall); 4291 case PPC::BI__builtin_unpack_vector_int128: 4292 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4293 case PPC::BI__builtin_altivec_vgnb: 4294 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 4295 case PPC::BI__builtin_vsx_xxeval: 4296 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 4297 case PPC::BI__builtin_altivec_vsldbi: 4298 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4299 case PPC::BI__builtin_altivec_vsrdbi: 4300 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4301 case PPC::BI__builtin_vsx_xxpermx: 4302 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 4303 case PPC::BI__builtin_ppc_tw: 4304 case PPC::BI__builtin_ppc_tdw: 4305 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 4306 case PPC::BI__builtin_ppc_cmprb: 4307 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 4308 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 4309 // be a constant that represents a contiguous bit field. 4310 case PPC::BI__builtin_ppc_rlwnm: 4311 return SemaValueIsRunOfOnes(TheCall, 2); 4312 case PPC::BI__builtin_ppc_rlwimi: 4313 case PPC::BI__builtin_ppc_rldimi: 4314 return SemaBuiltinConstantArg(TheCall, 2, Result) || 4315 SemaValueIsRunOfOnes(TheCall, 3); 4316 case PPC::BI__builtin_ppc_addex: { 4317 if (SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 4318 return true; 4319 // Output warning for reserved values 1 to 3. 
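  // The range check above has already verified that the argument is an
  // integer constant expression in [0, 3], so its value can be read directly
  // here.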
4320 int ArgValue = 4321 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 4322 if (ArgValue != 0) 4323 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 4324 << ArgValue; 4325 return false; 4326 } 4327 case PPC::BI__builtin_ppc_mtfsb0: 4328 case PPC::BI__builtin_ppc_mtfsb1: 4329 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 4330 case PPC::BI__builtin_ppc_mtfsf: 4331 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 4332 case PPC::BI__builtin_ppc_mtfsfi: 4333 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 4334 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 4335 case PPC::BI__builtin_ppc_alignx: 4336 return SemaBuiltinConstantArgPower2(TheCall, 0); 4337 case PPC::BI__builtin_ppc_rdlam: 4338 return SemaValueIsRunOfOnes(TheCall, 2); 4339 case PPC::BI__builtin_vsx_ldrmb: 4340 case PPC::BI__builtin_vsx_strmb: 4341 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 4342 case PPC::BI__builtin_altivec_vcntmbb: 4343 case PPC::BI__builtin_altivec_vcntmbh: 4344 case PPC::BI__builtin_altivec_vcntmbw: 4345 case PPC::BI__builtin_altivec_vcntmbd: 4346 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4347 case PPC::BI__builtin_vsx_xxgenpcvbm: 4348 case PPC::BI__builtin_vsx_xxgenpcvhm: 4349 case PPC::BI__builtin_vsx_xxgenpcvwm: 4350 case PPC::BI__builtin_vsx_xxgenpcvdm: 4351 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 4352 case PPC::BI__builtin_ppc_test_data_class: { 4353 // Check if the first argument of the __builtin_ppc_test_data_class call is 4354 // valid. The argument must be 'float' or 'double' or '__float128'. 4355 QualType ArgType = TheCall->getArg(0)->getType(); 4356 if (ArgType != QualType(Context.FloatTy) && 4357 ArgType != QualType(Context.DoubleTy) && 4358 ArgType != QualType(Context.Float128Ty)) 4359 return Diag(TheCall->getBeginLoc(), 4360 diag::err_ppc_invalid_test_data_class_type); 4361 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 127); 4362 } 4363 case PPC::BI__builtin_ppc_maxfe: 4364 case PPC::BI__builtin_ppc_minfe: 4365 case PPC::BI__builtin_ppc_maxfl: 4366 case PPC::BI__builtin_ppc_minfl: 4367 case PPC::BI__builtin_ppc_maxfs: 4368 case PPC::BI__builtin_ppc_minfs: { 4369 if (Context.getTargetInfo().getTriple().isOSAIX() && 4370 (BuiltinID == PPC::BI__builtin_ppc_maxfe || 4371 BuiltinID == PPC::BI__builtin_ppc_minfe)) 4372 return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type) 4373 << "builtin" << true << 128 << QualType(Context.LongDoubleTy) 4374 << false << Context.getTargetInfo().getTriple().str(); 4375 // Argument type should be exact. 
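    // maxfe/minfe operate on long double, maxfl/minfl on double, and
    // maxfs/minfs on float; every argument must have exactly that type.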
    QualType ArgType = QualType(Context.LongDoubleTy);
    if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
        BuiltinID == PPC::BI__builtin_ppc_minfl)
      ArgType = QualType(Context.DoubleTy);
    else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
             BuiltinID == PPC::BI__builtin_ppc_minfs)
      ArgType = QualType(Context.FloatTy);
    for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
      if (TheCall->getArg(I)->getType() != ArgType)
        return Diag(TheCall->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
    return false;
  }
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
  case PPC::BI__builtin_##Name: \
    return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
#include "clang/Basic/BuiltinsPPC.def"
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}

bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // Position of the memory order and scope arguments in the builtin.
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    return false;
  }

  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of memory ordering as per C11 / C++11's memory model.
  // Only fence needs this check; atomic dec/inc allow all memory orders.
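  // Relaxed and consume orderings are additionally rejected below when the
  // builtin is __builtin_amdgcn_fence, presumably because a fence with those
  // orderings cannot be lowered to a meaningful fence instruction.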
4446 if (!llvm::isValidAtomicOrderingCABI(Ord)) 4447 return Diag(ArgExpr->getBeginLoc(), 4448 diag::warn_atomic_op_has_invalid_memory_order) 4449 << ArgExpr->getSourceRange(); 4450 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 4451 case llvm::AtomicOrderingCABI::relaxed: 4452 case llvm::AtomicOrderingCABI::consume: 4453 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 4454 return Diag(ArgExpr->getBeginLoc(), 4455 diag::warn_atomic_op_has_invalid_memory_order) 4456 << ArgExpr->getSourceRange(); 4457 break; 4458 case llvm::AtomicOrderingCABI::acquire: 4459 case llvm::AtomicOrderingCABI::release: 4460 case llvm::AtomicOrderingCABI::acq_rel: 4461 case llvm::AtomicOrderingCABI::seq_cst: 4462 break; 4463 } 4464 4465 Arg = TheCall->getArg(ScopeIndex); 4466 ArgExpr = Arg.get(); 4467 Expr::EvalResult ArgResult1; 4468 // Check that sync scope is a constant literal 4469 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 4470 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 4471 << ArgExpr->getType(); 4472 4473 return false; 4474 } 4475 4476 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 4477 llvm::APSInt Result; 4478 4479 // We can't check the value of a dependent argument. 4480 Expr *Arg = TheCall->getArg(ArgNum); 4481 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4482 return false; 4483 4484 // Check constant-ness first. 4485 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4486 return true; 4487 4488 int64_t Val = Result.getSExtValue(); 4489 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 4490 return false; 4491 4492 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 4493 << Arg->getSourceRange(); 4494 } 4495 4496 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 4497 unsigned BuiltinID, 4498 CallExpr *TheCall) { 4499 // CodeGenFunction can also detect this, but this gives a better error 4500 // message. 4501 bool FeatureMissing = false; 4502 SmallVector<StringRef> ReqFeatures; 4503 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 4504 Features.split(ReqFeatures, ',', -1, false); 4505 4506 // Check if each required feature is included 4507 for (StringRef F : ReqFeatures) { 4508 SmallVector<StringRef> ReqOpFeatures; 4509 F.split(ReqOpFeatures, '|'); 4510 4511 if (llvm::none_of(ReqOpFeatures, 4512 [&TI](StringRef OF) { return TI.hasFeature(OF); })) { 4513 std::string FeatureStrs; 4514 bool IsExtension = true; 4515 for (StringRef OF : ReqOpFeatures) { 4516 // If the feature is 64bit, alter the string so it will print better in 4517 // the diagnostic. 4518 if (OF == "64bit") { 4519 assert(ReqOpFeatures.size() == 1 && "Expected '64bit' to be alone"); 4520 OF = "RV64"; 4521 IsExtension = false; 4522 } 4523 if (OF == "32bit") { 4524 assert(ReqOpFeatures.size() == 1 && "Expected '32bit' to be alone"); 4525 OF = "RV32"; 4526 IsExtension = false; 4527 } 4528 4529 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 4530 OF.consume_front("experimental-"); 4531 std::string FeatureStr = OF.str(); 4532 FeatureStr[0] = std::toupper(FeatureStr[0]); 4533 // Combine strings. 4534 FeatureStrs += FeatureStrs.empty() ? 
"" : ", "; 4535 FeatureStrs += "'"; 4536 FeatureStrs += FeatureStr; 4537 FeatureStrs += "'"; 4538 } 4539 // Error message 4540 FeatureMissing = true; 4541 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 4542 << IsExtension 4543 << TheCall->getSourceRange() << StringRef(FeatureStrs); 4544 } 4545 } 4546 4547 if (FeatureMissing) 4548 return true; 4549 4550 // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx, 4551 // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*. 4552 switch (BuiltinID) { 4553 default: 4554 break; 4555 case RISCVVector::BI__builtin_rvv_vmulhsu_vv: 4556 case RISCVVector::BI__builtin_rvv_vmulhsu_vx: 4557 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu: 4558 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu: 4559 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m: 4560 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m: 4561 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu: 4562 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu: 4563 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum: 4564 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum: 4565 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu: 4566 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu: 4567 case RISCVVector::BI__builtin_rvv_vmulhu_vv: 4568 case RISCVVector::BI__builtin_rvv_vmulhu_vx: 4569 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu: 4570 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu: 4571 case RISCVVector::BI__builtin_rvv_vmulhu_vv_m: 4572 case RISCVVector::BI__builtin_rvv_vmulhu_vx_m: 4573 case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu: 4574 case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu: 4575 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum: 4576 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum: 4577 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu: 4578 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu: 4579 case RISCVVector::BI__builtin_rvv_vmulh_vv: 4580 case RISCVVector::BI__builtin_rvv_vmulh_vx: 4581 case RISCVVector::BI__builtin_rvv_vmulh_vv_tu: 4582 case RISCVVector::BI__builtin_rvv_vmulh_vx_tu: 4583 case RISCVVector::BI__builtin_rvv_vmulh_vv_m: 4584 case RISCVVector::BI__builtin_rvv_vmulh_vx_m: 4585 case RISCVVector::BI__builtin_rvv_vmulh_vv_mu: 4586 case RISCVVector::BI__builtin_rvv_vmulh_vx_mu: 4587 case RISCVVector::BI__builtin_rvv_vmulh_vv_tum: 4588 case RISCVVector::BI__builtin_rvv_vmulh_vx_tum: 4589 case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu: 4590 case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu: 4591 case RISCVVector::BI__builtin_rvv_vsmul_vv: 4592 case RISCVVector::BI__builtin_rvv_vsmul_vx: 4593 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu: 4594 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu: 4595 case RISCVVector::BI__builtin_rvv_vsmul_vv_m: 4596 case RISCVVector::BI__builtin_rvv_vsmul_vx_m: 4597 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu: 4598 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu: 4599 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum: 4600 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum: 4601 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu: 4602 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: { 4603 bool RequireV = false; 4604 for (unsigned ArgNum = 0; ArgNum < TheCall->getNumArgs(); ++ArgNum) 4605 RequireV |= TheCall->getArg(ArgNum)->getType()->isRVVType( 4606 /* Bitwidth */ 64, /* IsFloat */ false); 4607 4608 if (RequireV && !TI.hasFeature("v")) 4609 return Diag(TheCall->getBeginLoc(), 4610 diag::err_riscv_builtin_requires_extension) 4611 << /* IsExtension */ false << TheCall->getSourceRange() << "v"; 4612 4613 break; 4614 } 4615 } 4616 4617 
  switch (BuiltinID) {
  case RISCVVector::BI__builtin_rvv_vsetvli:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
           CheckRISCVLMUL(TheCall, 2);
  case RISCVVector::BI__builtin_rvv_vsetvlimax:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           CheckRISCVLMUL(TheCall, 1);
  case RISCVVector::BI__builtin_rvv_vget_v: {
    ASTContext::BuiltinVectorTypeInfo ResVecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getType().getCanonicalType().getTypePtr()));
    ASTContext::BuiltinVectorTypeInfo VecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
    unsigned MaxIndex;
    if (VecInfo.NumVectors != 1) // vget for tuple type
      MaxIndex = VecInfo.NumVectors;
    else // vget for non-tuple type
      MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
                 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
  }
  case RISCVVector::BI__builtin_rvv_vset_v: {
    ASTContext::BuiltinVectorTypeInfo ResVecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getType().getCanonicalType().getTypePtr()));
    ASTContext::BuiltinVectorTypeInfo VecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
    unsigned MaxIndex;
    if (ResVecInfo.NumVectors != 1) // vset for tuple type
      MaxIndex = ResVecInfo.NumVectors;
    else // vset for non-tuple type
      MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
                 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
  }
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf8:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m8:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m8:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32mf2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m8:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m1:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m2:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m4:
  case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m8:
    // bit_27_26, bit_24_20, bit_11_7, simm5
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
  case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
    // bit_27_26, bit_11_7, vs2, simm5
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall,
1, 0, 31) || 4685 SemaBuiltinConstantArgRange(TheCall, 3, -16, 15); 4686 case RISCVVector::BI__builtin_rvv_sf_vc_v_i: 4687 case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se: 4688 // bit_27_26, bit_24_20, simm5 4689 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4690 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) || 4691 SemaBuiltinConstantArgRange(TheCall, 2, -16, 15); 4692 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv: 4693 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se: 4694 // bit_27_26, vs2, simm5 4695 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4696 SemaBuiltinConstantArgRange(TheCall, 2, -16, 15); 4697 case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se: 4698 case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se: 4699 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv: 4700 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw: 4701 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se: 4702 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se: 4703 // bit_27_26, vd, vs2, simm5 4704 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4705 SemaBuiltinConstantArgRange(TheCall, 3, -16, 15); 4706 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf8: 4707 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf4: 4708 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf2: 4709 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m1: 4710 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m2: 4711 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m4: 4712 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m8: 4713 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf4: 4714 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf2: 4715 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m1: 4716 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m2: 4717 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m4: 4718 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m8: 4719 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32mf2: 4720 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m1: 4721 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m2: 4722 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m4: 4723 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m8: 4724 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m1: 4725 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m2: 4726 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m4: 4727 case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m8: 4728 // bit_27_26, bit_24_20, bit_11_7, xs1 4729 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4730 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) || 4731 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 4732 case RISCVVector::BI__builtin_rvv_sf_vc_xv_se: 4733 case RISCVVector::BI__builtin_rvv_sf_vc_vv_se: 4734 // bit_27_26, bit_11_7, vs2, xs1/vs1 4735 case RISCVVector::BI__builtin_rvv_sf_vc_v_x: 4736 case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se: 4737 // bit_27_26, bit_24-20, xs1 4738 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4739 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 4740 case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se: 4741 case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se: 4742 case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se: 4743 case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se: 4744 // bit_27_26, vd, vs2, xs1 4745 case RISCVVector::BI__builtin_rvv_sf_vc_v_xv: 4746 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv: 4747 case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se: 4748 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se: 4749 // bit_27_26, vs2, xs1/vs1 4750 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv: 4751 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv: 
4752 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw: 4753 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw: 4754 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se: 4755 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se: 4756 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se: 4757 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se: 4758 // bit_27_26, vd, vs2, xs1/vs1 4759 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 4760 case RISCVVector::BI__builtin_rvv_sf_vc_fv_se: 4761 // bit_26, bit_11_7, vs2, fs1 4762 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) || 4763 SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 4764 case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se: 4765 case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se: 4766 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv: 4767 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw: 4768 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se: 4769 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se: 4770 // bit_26, vd, vs2, fs1 4771 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv: 4772 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se: 4773 // bit_26, vs2, fs1 4774 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 4775 // Check if byteselect is in [0, 3] 4776 case RISCV::BI__builtin_riscv_aes32dsi: 4777 case RISCV::BI__builtin_riscv_aes32dsmi: 4778 case RISCV::BI__builtin_riscv_aes32esi: 4779 case RISCV::BI__builtin_riscv_aes32esmi: 4780 case RISCV::BI__builtin_riscv_sm4ks: 4781 case RISCV::BI__builtin_riscv_sm4ed: 4782 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4783 // Check if rnum is in [0, 10] 4784 case RISCV::BI__builtin_riscv_aes64ks1i: 4785 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10); 4786 // Check if value range for vxrm is in [0, 3] 4787 case RISCVVector::BI__builtin_rvv_vaaddu_vv: 4788 case RISCVVector::BI__builtin_rvv_vaaddu_vx: 4789 case RISCVVector::BI__builtin_rvv_vaadd_vv: 4790 case RISCVVector::BI__builtin_rvv_vaadd_vx: 4791 case RISCVVector::BI__builtin_rvv_vasubu_vv: 4792 case RISCVVector::BI__builtin_rvv_vasubu_vx: 4793 case RISCVVector::BI__builtin_rvv_vasub_vv: 4794 case RISCVVector::BI__builtin_rvv_vasub_vx: 4795 case RISCVVector::BI__builtin_rvv_vsmul_vv: 4796 case RISCVVector::BI__builtin_rvv_vsmul_vx: 4797 case RISCVVector::BI__builtin_rvv_vssra_vv: 4798 case RISCVVector::BI__builtin_rvv_vssra_vx: 4799 case RISCVVector::BI__builtin_rvv_vssrl_vv: 4800 case RISCVVector::BI__builtin_rvv_vssrl_vx: 4801 case RISCVVector::BI__builtin_rvv_vnclip_wv: 4802 case RISCVVector::BI__builtin_rvv_vnclip_wx: 4803 case RISCVVector::BI__builtin_rvv_vnclipu_wv: 4804 case RISCVVector::BI__builtin_rvv_vnclipu_wx: 4805 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4806 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu: 4807 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu: 4808 case RISCVVector::BI__builtin_rvv_vaadd_vv_tu: 4809 case RISCVVector::BI__builtin_rvv_vaadd_vx_tu: 4810 case RISCVVector::BI__builtin_rvv_vasubu_vv_tu: 4811 case RISCVVector::BI__builtin_rvv_vasubu_vx_tu: 4812 case RISCVVector::BI__builtin_rvv_vasub_vv_tu: 4813 case RISCVVector::BI__builtin_rvv_vasub_vx_tu: 4814 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu: 4815 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu: 4816 case RISCVVector::BI__builtin_rvv_vssra_vv_tu: 4817 case RISCVVector::BI__builtin_rvv_vssra_vx_tu: 4818 case RISCVVector::BI__builtin_rvv_vssrl_vv_tu: 4819 case RISCVVector::BI__builtin_rvv_vssrl_vx_tu: 4820 case RISCVVector::BI__builtin_rvv_vnclip_wv_tu: 4821 case RISCVVector::BI__builtin_rvv_vnclip_wx_tu: 4822 case 
RISCVVector::BI__builtin_rvv_vnclipu_wv_tu: 4823 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu: 4824 case RISCVVector::BI__builtin_rvv_vaaddu_vv_m: 4825 case RISCVVector::BI__builtin_rvv_vaaddu_vx_m: 4826 case RISCVVector::BI__builtin_rvv_vaadd_vv_m: 4827 case RISCVVector::BI__builtin_rvv_vaadd_vx_m: 4828 case RISCVVector::BI__builtin_rvv_vasubu_vv_m: 4829 case RISCVVector::BI__builtin_rvv_vasubu_vx_m: 4830 case RISCVVector::BI__builtin_rvv_vasub_vv_m: 4831 case RISCVVector::BI__builtin_rvv_vasub_vx_m: 4832 case RISCVVector::BI__builtin_rvv_vsmul_vv_m: 4833 case RISCVVector::BI__builtin_rvv_vsmul_vx_m: 4834 case RISCVVector::BI__builtin_rvv_vssra_vv_m: 4835 case RISCVVector::BI__builtin_rvv_vssra_vx_m: 4836 case RISCVVector::BI__builtin_rvv_vssrl_vv_m: 4837 case RISCVVector::BI__builtin_rvv_vssrl_vx_m: 4838 case RISCVVector::BI__builtin_rvv_vnclip_wv_m: 4839 case RISCVVector::BI__builtin_rvv_vnclip_wx_m: 4840 case RISCVVector::BI__builtin_rvv_vnclipu_wv_m: 4841 case RISCVVector::BI__builtin_rvv_vnclipu_wx_m: 4842 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 3); 4843 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum: 4844 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu: 4845 case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu: 4846 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum: 4847 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu: 4848 case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu: 4849 case RISCVVector::BI__builtin_rvv_vaadd_vv_tum: 4850 case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu: 4851 case RISCVVector::BI__builtin_rvv_vaadd_vv_mu: 4852 case RISCVVector::BI__builtin_rvv_vaadd_vx_tum: 4853 case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu: 4854 case RISCVVector::BI__builtin_rvv_vaadd_vx_mu: 4855 case RISCVVector::BI__builtin_rvv_vasubu_vv_tum: 4856 case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu: 4857 case RISCVVector::BI__builtin_rvv_vasubu_vv_mu: 4858 case RISCVVector::BI__builtin_rvv_vasubu_vx_tum: 4859 case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu: 4860 case RISCVVector::BI__builtin_rvv_vasubu_vx_mu: 4861 case RISCVVector::BI__builtin_rvv_vasub_vv_tum: 4862 case RISCVVector::BI__builtin_rvv_vasub_vv_tumu: 4863 case RISCVVector::BI__builtin_rvv_vasub_vv_mu: 4864 case RISCVVector::BI__builtin_rvv_vasub_vx_tum: 4865 case RISCVVector::BI__builtin_rvv_vasub_vx_tumu: 4866 case RISCVVector::BI__builtin_rvv_vasub_vx_mu: 4867 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu: 4868 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu: 4869 case RISCVVector::BI__builtin_rvv_vssra_vv_mu: 4870 case RISCVVector::BI__builtin_rvv_vssra_vx_mu: 4871 case RISCVVector::BI__builtin_rvv_vssrl_vv_mu: 4872 case RISCVVector::BI__builtin_rvv_vssrl_vx_mu: 4873 case RISCVVector::BI__builtin_rvv_vnclip_wv_mu: 4874 case RISCVVector::BI__builtin_rvv_vnclip_wx_mu: 4875 case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu: 4876 case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu: 4877 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum: 4878 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum: 4879 case RISCVVector::BI__builtin_rvv_vssra_vv_tum: 4880 case RISCVVector::BI__builtin_rvv_vssra_vx_tum: 4881 case RISCVVector::BI__builtin_rvv_vssrl_vv_tum: 4882 case RISCVVector::BI__builtin_rvv_vssrl_vx_tum: 4883 case RISCVVector::BI__builtin_rvv_vnclip_wv_tum: 4884 case RISCVVector::BI__builtin_rvv_vnclip_wx_tum: 4885 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum: 4886 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum: 4887 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu: 4888 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: 4889 case 
RISCVVector::BI__builtin_rvv_vssra_vv_tumu: 4890 case RISCVVector::BI__builtin_rvv_vssra_vx_tumu: 4891 case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu: 4892 case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu: 4893 case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu: 4894 case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu: 4895 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu: 4896 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu: 4897 return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3); 4898 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm: 4899 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm: 4900 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm: 4901 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm: 4902 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm: 4903 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm: 4904 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm: 4905 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm: 4906 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm: 4907 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm: 4908 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm: 4909 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm: 4910 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm: 4911 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 4); 4912 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm: 4913 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm: 4914 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm: 4915 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm: 4916 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm: 4917 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm: 4918 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm: 4919 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm: 4920 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm: 4921 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm: 4922 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm: 4923 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm: 4924 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm: 4925 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm: 4926 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm: 4927 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm: 4928 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm: 4929 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm: 4930 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm: 4931 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm: 4932 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm: 4933 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm: 4934 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm: 4935 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm: 4936 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu: 4937 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu: 4938 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu: 4939 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu: 4940 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu: 4941 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu: 4942 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu: 4943 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu: 4944 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu: 4945 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu: 4946 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu: 4947 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu: 4948 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu: 4949 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m: 4950 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m: 4951 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m: 4952 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m: 4953 case 
RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m: 4954 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m: 4955 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m: 4956 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m: 4957 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m: 4958 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m: 4959 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m: 4960 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m: 4961 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m: 4962 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4); 4963 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu: 4964 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu: 4965 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu: 4966 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu: 4967 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu: 4968 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu: 4969 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu: 4970 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu: 4971 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu: 4972 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu: 4973 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu: 4974 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu: 4975 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu: 4976 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu: 4977 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu: 4978 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu: 4979 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu: 4980 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu: 4981 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu: 4982 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu: 4983 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu: 4984 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu: 4985 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu: 4986 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu: 4987 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm: 4988 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm: 4989 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm: 4990 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm: 4991 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm: 4992 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm: 4993 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm: 4994 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm: 4995 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm: 4996 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm: 4997 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm: 4998 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm: 4999 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm: 5000 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm: 5001 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm: 5002 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm: 5003 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm: 5004 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm: 5005 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm: 5006 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm: 5007 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm: 5008 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm: 5009 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm: 5010 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm: 5011 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu: 5012 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu: 5013 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu: 5014 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu: 5015 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu: 5016 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu: 
5017 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu: 5018 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu: 5019 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu: 5020 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu: 5021 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu: 5022 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu: 5023 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu: 5024 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu: 5025 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu: 5026 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu: 5027 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu: 5028 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu: 5029 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu: 5030 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu: 5031 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu: 5032 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu: 5033 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu: 5034 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu: 5035 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m: 5036 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m: 5037 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m: 5038 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m: 5039 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m: 5040 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m: 5041 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m: 5042 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m: 5043 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m: 5044 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m: 5045 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m: 5046 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m: 5047 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m: 5048 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m: 5049 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m: 5050 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m: 5051 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m: 5052 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m: 5053 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m: 5054 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m: 5055 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m: 5056 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m: 5057 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m: 5058 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m: 5059 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum: 5060 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum: 5061 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum: 5062 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum: 5063 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum: 5064 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum: 5065 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum: 5066 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum: 5067 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum: 5068 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum: 5069 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum: 5070 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum: 5071 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum: 5072 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu: 5073 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu: 5074 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu: 5075 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu: 5076 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu: 5077 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu: 5078 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu: 5079 case 
RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu: 5080 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu: 5081 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu: 5082 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu: 5083 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu: 5084 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu: 5085 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu: 5086 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu: 5087 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu: 5088 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu: 5089 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu: 5090 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu: 5091 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu: 5092 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu: 5093 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu: 5094 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu: 5095 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu: 5096 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu: 5097 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu: 5098 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4); 5099 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m: 5100 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m: 5101 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m: 5102 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m: 5103 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m: 5104 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m: 5105 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m: 5106 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m: 5107 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m: 5108 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m: 5109 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m: 5110 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m: 5111 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m: 5112 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m: 5113 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m: 5114 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m: 5115 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m: 5116 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m: 5117 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m: 5118 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m: 5119 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m: 5120 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m: 5121 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m: 5122 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m: 5123 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum: 5124 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum: 5125 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum: 5126 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum: 5127 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum: 5128 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum: 5129 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum: 5130 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum: 5131 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum: 5132 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum: 5133 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum: 5134 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum: 5135 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum: 5136 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum: 5137 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum: 5138 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum: 5139 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum: 5140 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum: 5141 case 
RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum: 5142 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum: 5143 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum: 5144 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum: 5145 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum: 5146 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum: 5147 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum: 5148 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum: 5149 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum: 5150 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum: 5151 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum: 5152 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum: 5153 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum: 5154 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum: 5155 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum: 5156 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum: 5157 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum: 5158 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum: 5159 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum: 5160 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum: 5161 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum: 5162 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum: 5163 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum: 5164 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum: 5165 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum: 5166 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum: 5167 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum: 5168 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum: 5169 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum: 5170 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum: 5171 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu: 5172 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu: 5173 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu: 5174 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu: 5175 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu: 5176 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu: 5177 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu: 5178 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu: 5179 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu: 5180 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu: 5181 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu: 5182 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu: 5183 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu: 5184 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu: 5185 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu: 5186 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu: 5187 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu: 5188 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu: 5189 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu: 5190 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu: 5191 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu: 5192 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu: 5193 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu: 5194 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu: 5195 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu: 5196 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu: 5197 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu: 5198 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu: 5199 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu: 5200 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu: 5201 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu: 5202 case 
RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu: 5203 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu: 5204 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu: 5205 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu: 5206 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu: 5207 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu: 5208 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu: 5209 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu: 5210 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu: 5211 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu: 5212 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu: 5213 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu: 5214 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu: 5215 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu: 5216 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu: 5217 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu: 5218 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu: 5219 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu: 5220 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu: 5221 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu: 5222 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu: 5223 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu: 5224 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu: 5225 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu: 5226 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu: 5227 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu: 5228 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu: 5229 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu: 5230 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu: 5231 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu: 5232 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu: 5233 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu: 5234 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu: 5235 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu: 5236 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu: 5237 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu: 5238 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu: 5239 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu: 5240 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu: 5241 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu: 5242 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu: 5243 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu: 5244 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu: 5245 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu: 5246 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu: 5247 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu: 5248 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu: 5249 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu: 5250 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu: 5251 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu: 5252 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu: 5253 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu: 5254 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu: 5255 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu: 5256 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu: 5257 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu: 5258 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu: 5259 return SemaBuiltinConstantArgRange(TheCall, 4, 0, 4); 5260 case RISCV::BI__builtin_riscv_ntl_load: 5261 case RISCV::BI__builtin_riscv_ntl_store: 5262 DeclRefExpr *DRE = 5263 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5264 assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store || 5265 BuiltinID 
== RISCV::BI__builtin_riscv_ntl_load) && 5266 "Unexpected RISC-V nontemporal load/store builtin!"); 5267 bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store; 5268 unsigned NumArgs = IsStore ? 3 : 2; 5269 5270 if (checkArgCount(*this, TheCall, NumArgs)) 5271 return true; 5272 5273 // Domain value should be compile-time constant. 5274 // 2 <= domain <= 5 5275 if (SemaBuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5)) 5276 return true; 5277 5278 Expr *PointerArg = TheCall->getArg(0); 5279 ExprResult PointerArgResult = 5280 DefaultFunctionArrayLvalueConversion(PointerArg); 5281 5282 if (PointerArgResult.isInvalid()) 5283 return true; 5284 PointerArg = PointerArgResult.get(); 5285 5286 const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>(); 5287 if (!PtrType) { 5288 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 5289 << PointerArg->getType() << PointerArg->getSourceRange(); 5290 return true; 5291 } 5292 5293 QualType ValType = PtrType->getPointeeType(); 5294 ValType = ValType.getUnqualifiedType(); 5295 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5296 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 5297 !ValType->isVectorType() && !ValType->isRVVType()) { 5298 Diag(DRE->getBeginLoc(), 5299 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 5300 << PointerArg->getType() << PointerArg->getSourceRange(); 5301 return true; 5302 } 5303 5304 if (!IsStore) { 5305 TheCall->setType(ValType); 5306 return false; 5307 } 5308 5309 ExprResult ValArg = TheCall->getArg(1); 5310 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5311 Context, ValType, /*consume*/ false); 5312 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 5313 if (ValArg.isInvalid()) 5314 return true; 5315 5316 TheCall->setArg(1, ValArg.get()); 5317 TheCall->setType(Context.VoidTy); 5318 return false; 5319 } 5320 5321 return false; 5322 } 5323 5324 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 5325 CallExpr *TheCall) { 5326 if (BuiltinID == SystemZ::BI__builtin_tabort) { 5327 Expr *Arg = TheCall->getArg(0); 5328 if (std::optional<llvm::APSInt> AbortCode = 5329 Arg->getIntegerConstantExpr(Context)) 5330 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 5331 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 5332 << Arg->getSourceRange(); 5333 } 5334 5335 // For intrinsics which take an immediate value as part of the instruction, 5336 // range check them here. 
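  // For example, __builtin_s390_vpdi requires its third operand (i = 2) to be
  // an integer constant in the range [0, 15].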
5337 unsigned i = 0, l = 0, u = 0; 5338 switch (BuiltinID) { 5339 default: return false; 5340 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 5341 case SystemZ::BI__builtin_s390_verimb: 5342 case SystemZ::BI__builtin_s390_verimh: 5343 case SystemZ::BI__builtin_s390_verimf: 5344 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 5345 case SystemZ::BI__builtin_s390_vfaeb: 5346 case SystemZ::BI__builtin_s390_vfaeh: 5347 case SystemZ::BI__builtin_s390_vfaef: 5348 case SystemZ::BI__builtin_s390_vfaebs: 5349 case SystemZ::BI__builtin_s390_vfaehs: 5350 case SystemZ::BI__builtin_s390_vfaefs: 5351 case SystemZ::BI__builtin_s390_vfaezb: 5352 case SystemZ::BI__builtin_s390_vfaezh: 5353 case SystemZ::BI__builtin_s390_vfaezf: 5354 case SystemZ::BI__builtin_s390_vfaezbs: 5355 case SystemZ::BI__builtin_s390_vfaezhs: 5356 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 5357 case SystemZ::BI__builtin_s390_vfisb: 5358 case SystemZ::BI__builtin_s390_vfidb: 5359 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 5360 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 5361 case SystemZ::BI__builtin_s390_vftcisb: 5362 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 5363 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 5364 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 5365 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 5366 case SystemZ::BI__builtin_s390_vstrcb: 5367 case SystemZ::BI__builtin_s390_vstrch: 5368 case SystemZ::BI__builtin_s390_vstrcf: 5369 case SystemZ::BI__builtin_s390_vstrczb: 5370 case SystemZ::BI__builtin_s390_vstrczh: 5371 case SystemZ::BI__builtin_s390_vstrczf: 5372 case SystemZ::BI__builtin_s390_vstrcbs: 5373 case SystemZ::BI__builtin_s390_vstrchs: 5374 case SystemZ::BI__builtin_s390_vstrcfs: 5375 case SystemZ::BI__builtin_s390_vstrczbs: 5376 case SystemZ::BI__builtin_s390_vstrczhs: 5377 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 5378 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 5379 case SystemZ::BI__builtin_s390_vfminsb: 5380 case SystemZ::BI__builtin_s390_vfmaxsb: 5381 case SystemZ::BI__builtin_s390_vfmindb: 5382 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 5383 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 5384 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 5385 case SystemZ::BI__builtin_s390_vclfnhs: 5386 case SystemZ::BI__builtin_s390_vclfnls: 5387 case SystemZ::BI__builtin_s390_vcfn: 5388 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 5389 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 5390 } 5391 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 5392 } 5393 5394 bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI, 5395 unsigned BuiltinID, 5396 CallExpr *TheCall) { 5397 switch (BuiltinID) { 5398 case WebAssembly::BI__builtin_wasm_ref_null_extern: 5399 return BuiltinWasmRefNullExtern(TheCall); 5400 case WebAssembly::BI__builtin_wasm_ref_null_func: 5401 return BuiltinWasmRefNullFunc(TheCall); 5402 case WebAssembly::BI__builtin_wasm_table_get: 5403 return BuiltinWasmTableGet(TheCall); 5404 case WebAssembly::BI__builtin_wasm_table_set: 5405 return BuiltinWasmTableSet(TheCall); 5406 case WebAssembly::BI__builtin_wasm_table_size: 5407 return BuiltinWasmTableSize(TheCall); 5408 case WebAssembly::BI__builtin_wasm_table_grow: 5409 return BuiltinWasmTableGrow(TheCall); 5410 case 
WebAssembly::BI__builtin_wasm_table_fill:
    return BuiltinWasmTableFill(TheCall);
  case WebAssembly::BI__builtin_wasm_table_copy:
    return BuiltinWasmTableCopy(TheCall);
  }

  return false;
}

void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
  const TargetInfo &TI = Context.getTargetInfo();
  // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) require at
  // least zve64x.
  if ((Ty->isRVVType(/* Bitwidth */ 64, /* IsFloat */ false) ||
       Ty->isRVVType(/* ElementCount */ 1)) &&
      !TI.hasFeature("zve64x"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
  if (Ty->isRVVType(/* Bitwidth */ 16, /* IsFloat */ true) &&
      !TI.hasFeature("zvfh"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfh";
  if (Ty->isRVVType(/* Bitwidth */ 32, /* IsFloat */ true) &&
      !TI.hasFeature("zve32f"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
  if (Ty->isRVVType(/* Bitwidth */ 64, /* IsFloat */ true) &&
      !TI.hasFeature("zve64d"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
  // The caller has already checked isRVVType() before calling this function,
  // so every RVV type needs at least zve32x; emit an error if even that
  // extension is missing.
  if (!TI.hasFeature("zve32x"))
    Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
}

bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
                                         unsigned BuiltinID,
                                         CallExpr *TheCall) {
  switch (BuiltinID) {
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
  case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
    return checkArgCountAtMost(*this, TheCall, 3);
  }

  return false;
}

/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
/// This checks that the target supports __builtin_cpu_supports and
/// that the string argument is constant and valid.
static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
                                   CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string.
  StringRef Feature =
      cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  if (!TI.validateCpuSupports(Feature))
    return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
           << Arg->getSourceRange();
  return false;
}

/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
/// This checks that the target supports __builtin_cpu_is and
/// that the string argument is constant and valid.
static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string.
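  // For example, __builtin_cpu_is("intel") is accepted only if the target
  // recognizes that CPU name; unknown names are diagnosed below.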
5489 StringRef Feature = 5490 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 5491 if (!TI.validateCpuIs(Feature)) 5492 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 5493 << Arg->getSourceRange(); 5494 return false; 5495 } 5496 5497 // Check if the rounding mode is legal. 5498 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 5499 // Indicates if this instruction has rounding control or just SAE. 5500 bool HasRC = false; 5501 5502 unsigned ArgNum = 0; 5503 switch (BuiltinID) { 5504 default: 5505 return false; 5506 case X86::BI__builtin_ia32_vcvttsd2si32: 5507 case X86::BI__builtin_ia32_vcvttsd2si64: 5508 case X86::BI__builtin_ia32_vcvttsd2usi32: 5509 case X86::BI__builtin_ia32_vcvttsd2usi64: 5510 case X86::BI__builtin_ia32_vcvttss2si32: 5511 case X86::BI__builtin_ia32_vcvttss2si64: 5512 case X86::BI__builtin_ia32_vcvttss2usi32: 5513 case X86::BI__builtin_ia32_vcvttss2usi64: 5514 case X86::BI__builtin_ia32_vcvttsh2si32: 5515 case X86::BI__builtin_ia32_vcvttsh2si64: 5516 case X86::BI__builtin_ia32_vcvttsh2usi32: 5517 case X86::BI__builtin_ia32_vcvttsh2usi64: 5518 ArgNum = 1; 5519 break; 5520 case X86::BI__builtin_ia32_maxpd512: 5521 case X86::BI__builtin_ia32_maxps512: 5522 case X86::BI__builtin_ia32_minpd512: 5523 case X86::BI__builtin_ia32_minps512: 5524 case X86::BI__builtin_ia32_maxph512: 5525 case X86::BI__builtin_ia32_minph512: 5526 ArgNum = 2; 5527 break; 5528 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 5529 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 5530 case X86::BI__builtin_ia32_cvtps2pd512_mask: 5531 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 5532 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 5533 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 5534 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 5535 case X86::BI__builtin_ia32_cvttps2dq512_mask: 5536 case X86::BI__builtin_ia32_cvttps2qq512_mask: 5537 case X86::BI__builtin_ia32_cvttps2udq512_mask: 5538 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 5539 case X86::BI__builtin_ia32_vcvttph2w512_mask: 5540 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 5541 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 5542 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 5543 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 5544 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 5545 case X86::BI__builtin_ia32_exp2pd_mask: 5546 case X86::BI__builtin_ia32_exp2ps_mask: 5547 case X86::BI__builtin_ia32_getexppd512_mask: 5548 case X86::BI__builtin_ia32_getexpps512_mask: 5549 case X86::BI__builtin_ia32_getexpph512_mask: 5550 case X86::BI__builtin_ia32_rcp28pd_mask: 5551 case X86::BI__builtin_ia32_rcp28ps_mask: 5552 case X86::BI__builtin_ia32_rsqrt28pd_mask: 5553 case X86::BI__builtin_ia32_rsqrt28ps_mask: 5554 case X86::BI__builtin_ia32_vcomisd: 5555 case X86::BI__builtin_ia32_vcomiss: 5556 case X86::BI__builtin_ia32_vcomish: 5557 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 5558 ArgNum = 3; 5559 break; 5560 case X86::BI__builtin_ia32_cmppd512_mask: 5561 case X86::BI__builtin_ia32_cmpps512_mask: 5562 case X86::BI__builtin_ia32_cmpsd_mask: 5563 case X86::BI__builtin_ia32_cmpss_mask: 5564 case X86::BI__builtin_ia32_cmpsh_mask: 5565 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 5566 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 5567 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 5568 case X86::BI__builtin_ia32_getexpsd128_round_mask: 5569 case X86::BI__builtin_ia32_getexpss128_round_mask: 5570 case X86::BI__builtin_ia32_getexpsh128_round_mask: 5571 case 
X86::BI__builtin_ia32_getmantpd512_mask: 5572 case X86::BI__builtin_ia32_getmantps512_mask: 5573 case X86::BI__builtin_ia32_getmantph512_mask: 5574 case X86::BI__builtin_ia32_maxsd_round_mask: 5575 case X86::BI__builtin_ia32_maxss_round_mask: 5576 case X86::BI__builtin_ia32_maxsh_round_mask: 5577 case X86::BI__builtin_ia32_minsd_round_mask: 5578 case X86::BI__builtin_ia32_minss_round_mask: 5579 case X86::BI__builtin_ia32_minsh_round_mask: 5580 case X86::BI__builtin_ia32_rcp28sd_round_mask: 5581 case X86::BI__builtin_ia32_rcp28ss_round_mask: 5582 case X86::BI__builtin_ia32_reducepd512_mask: 5583 case X86::BI__builtin_ia32_reduceps512_mask: 5584 case X86::BI__builtin_ia32_reduceph512_mask: 5585 case X86::BI__builtin_ia32_rndscalepd_mask: 5586 case X86::BI__builtin_ia32_rndscaleps_mask: 5587 case X86::BI__builtin_ia32_rndscaleph_mask: 5588 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 5589 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 5590 ArgNum = 4; 5591 break; 5592 case X86::BI__builtin_ia32_fixupimmpd512_mask: 5593 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 5594 case X86::BI__builtin_ia32_fixupimmps512_mask: 5595 case X86::BI__builtin_ia32_fixupimmps512_maskz: 5596 case X86::BI__builtin_ia32_fixupimmsd_mask: 5597 case X86::BI__builtin_ia32_fixupimmsd_maskz: 5598 case X86::BI__builtin_ia32_fixupimmss_mask: 5599 case X86::BI__builtin_ia32_fixupimmss_maskz: 5600 case X86::BI__builtin_ia32_getmantsd_round_mask: 5601 case X86::BI__builtin_ia32_getmantss_round_mask: 5602 case X86::BI__builtin_ia32_getmantsh_round_mask: 5603 case X86::BI__builtin_ia32_rangepd512_mask: 5604 case X86::BI__builtin_ia32_rangeps512_mask: 5605 case X86::BI__builtin_ia32_rangesd128_round_mask: 5606 case X86::BI__builtin_ia32_rangess128_round_mask: 5607 case X86::BI__builtin_ia32_reducesd_mask: 5608 case X86::BI__builtin_ia32_reducess_mask: 5609 case X86::BI__builtin_ia32_reducesh_mask: 5610 case X86::BI__builtin_ia32_rndscalesd_round_mask: 5611 case X86::BI__builtin_ia32_rndscaless_round_mask: 5612 case X86::BI__builtin_ia32_rndscalesh_round_mask: 5613 ArgNum = 5; 5614 break; 5615 case X86::BI__builtin_ia32_vcvtsd2si64: 5616 case X86::BI__builtin_ia32_vcvtsd2si32: 5617 case X86::BI__builtin_ia32_vcvtsd2usi32: 5618 case X86::BI__builtin_ia32_vcvtsd2usi64: 5619 case X86::BI__builtin_ia32_vcvtss2si32: 5620 case X86::BI__builtin_ia32_vcvtss2si64: 5621 case X86::BI__builtin_ia32_vcvtss2usi32: 5622 case X86::BI__builtin_ia32_vcvtss2usi64: 5623 case X86::BI__builtin_ia32_vcvtsh2si32: 5624 case X86::BI__builtin_ia32_vcvtsh2si64: 5625 case X86::BI__builtin_ia32_vcvtsh2usi32: 5626 case X86::BI__builtin_ia32_vcvtsh2usi64: 5627 case X86::BI__builtin_ia32_sqrtpd512: 5628 case X86::BI__builtin_ia32_sqrtps512: 5629 case X86::BI__builtin_ia32_sqrtph512: 5630 ArgNum = 1; 5631 HasRC = true; 5632 break; 5633 case X86::BI__builtin_ia32_addph512: 5634 case X86::BI__builtin_ia32_divph512: 5635 case X86::BI__builtin_ia32_mulph512: 5636 case X86::BI__builtin_ia32_subph512: 5637 case X86::BI__builtin_ia32_addpd512: 5638 case X86::BI__builtin_ia32_addps512: 5639 case X86::BI__builtin_ia32_divpd512: 5640 case X86::BI__builtin_ia32_divps512: 5641 case X86::BI__builtin_ia32_mulpd512: 5642 case X86::BI__builtin_ia32_mulps512: 5643 case X86::BI__builtin_ia32_subpd512: 5644 case X86::BI__builtin_ia32_subps512: 5645 case X86::BI__builtin_ia32_cvtsi2sd64: 5646 case X86::BI__builtin_ia32_cvtsi2ss32: 5647 case X86::BI__builtin_ia32_cvtsi2ss64: 5648 case X86::BI__builtin_ia32_cvtusi2sd64: 5649 case X86::BI__builtin_ia32_cvtusi2ss32: 5650 case 
X86::BI__builtin_ia32_cvtusi2ss64: 5651 case X86::BI__builtin_ia32_vcvtusi2sh: 5652 case X86::BI__builtin_ia32_vcvtusi642sh: 5653 case X86::BI__builtin_ia32_vcvtsi2sh: 5654 case X86::BI__builtin_ia32_vcvtsi642sh: 5655 ArgNum = 2; 5656 HasRC = true; 5657 break; 5658 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 5659 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 5660 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 5661 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 5662 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 5663 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 5664 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 5665 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 5666 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 5667 case X86::BI__builtin_ia32_cvtps2dq512_mask: 5668 case X86::BI__builtin_ia32_cvtps2qq512_mask: 5669 case X86::BI__builtin_ia32_cvtps2udq512_mask: 5670 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 5671 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 5672 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 5673 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 5674 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 5675 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 5676 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 5677 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 5678 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 5679 case X86::BI__builtin_ia32_vcvtph2w512_mask: 5680 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 5681 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 5682 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 5683 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 5684 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 5685 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 5686 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 5687 ArgNum = 3; 5688 HasRC = true; 5689 break; 5690 case X86::BI__builtin_ia32_addsh_round_mask: 5691 case X86::BI__builtin_ia32_addss_round_mask: 5692 case X86::BI__builtin_ia32_addsd_round_mask: 5693 case X86::BI__builtin_ia32_divsh_round_mask: 5694 case X86::BI__builtin_ia32_divss_round_mask: 5695 case X86::BI__builtin_ia32_divsd_round_mask: 5696 case X86::BI__builtin_ia32_mulsh_round_mask: 5697 case X86::BI__builtin_ia32_mulss_round_mask: 5698 case X86::BI__builtin_ia32_mulsd_round_mask: 5699 case X86::BI__builtin_ia32_subsh_round_mask: 5700 case X86::BI__builtin_ia32_subss_round_mask: 5701 case X86::BI__builtin_ia32_subsd_round_mask: 5702 case X86::BI__builtin_ia32_scalefph512_mask: 5703 case X86::BI__builtin_ia32_scalefpd512_mask: 5704 case X86::BI__builtin_ia32_scalefps512_mask: 5705 case X86::BI__builtin_ia32_scalefsd_round_mask: 5706 case X86::BI__builtin_ia32_scalefss_round_mask: 5707 case X86::BI__builtin_ia32_scalefsh_round_mask: 5708 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 5709 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 5710 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 5711 case X86::BI__builtin_ia32_sqrtsd_round_mask: 5712 case X86::BI__builtin_ia32_sqrtss_round_mask: 5713 case X86::BI__builtin_ia32_sqrtsh_round_mask: 5714 case X86::BI__builtin_ia32_vfmaddsd3_mask: 5715 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 5716 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 5717 case X86::BI__builtin_ia32_vfmaddss3_mask: 5718 case X86::BI__builtin_ia32_vfmaddss3_maskz: 5719 case X86::BI__builtin_ia32_vfmaddss3_mask3: 5720 case X86::BI__builtin_ia32_vfmaddsh3_mask: 5721 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 5722 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 5723 case X86::BI__builtin_ia32_vfmaddpd512_mask: 5724 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 5725 case 
X86::BI__builtin_ia32_vfmaddpd512_mask3: 5726 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 5727 case X86::BI__builtin_ia32_vfmaddps512_mask: 5728 case X86::BI__builtin_ia32_vfmaddps512_maskz: 5729 case X86::BI__builtin_ia32_vfmaddps512_mask3: 5730 case X86::BI__builtin_ia32_vfmsubps512_mask3: 5731 case X86::BI__builtin_ia32_vfmaddph512_mask: 5732 case X86::BI__builtin_ia32_vfmaddph512_maskz: 5733 case X86::BI__builtin_ia32_vfmaddph512_mask3: 5734 case X86::BI__builtin_ia32_vfmsubph512_mask3: 5735 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 5736 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 5737 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 5738 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 5739 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 5740 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 5741 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 5742 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 5743 case X86::BI__builtin_ia32_vfmaddsubph512_mask: 5744 case X86::BI__builtin_ia32_vfmaddsubph512_maskz: 5745 case X86::BI__builtin_ia32_vfmaddsubph512_mask3: 5746 case X86::BI__builtin_ia32_vfmsubaddph512_mask3: 5747 case X86::BI__builtin_ia32_vfmaddcsh_mask: 5748 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: 5749 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: 5750 case X86::BI__builtin_ia32_vfmaddcph512_mask: 5751 case X86::BI__builtin_ia32_vfmaddcph512_maskz: 5752 case X86::BI__builtin_ia32_vfmaddcph512_mask3: 5753 case X86::BI__builtin_ia32_vfcmaddcsh_mask: 5754 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask: 5755 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3: 5756 case X86::BI__builtin_ia32_vfcmaddcph512_mask: 5757 case X86::BI__builtin_ia32_vfcmaddcph512_maskz: 5758 case X86::BI__builtin_ia32_vfcmaddcph512_mask3: 5759 case X86::BI__builtin_ia32_vfmulcsh_mask: 5760 case X86::BI__builtin_ia32_vfmulcph512_mask: 5761 case X86::BI__builtin_ia32_vfcmulcsh_mask: 5762 case X86::BI__builtin_ia32_vfcmulcph512_mask: 5763 ArgNum = 4; 5764 HasRC = true; 5765 break; 5766 } 5767 5768 llvm::APSInt Result; 5769 5770 // We can't check the value of a dependent argument. 5771 Expr *Arg = TheCall->getArg(ArgNum); 5772 if (Arg->isTypeDependent() || Arg->isValueDependent()) 5773 return false; 5774 5775 // Check constant-ness first. 5776 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 5777 return true; 5778 5779 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 5780 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 5781 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 5782 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 5783 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 5784 Result == 8/*ROUND_NO_EXC*/ || 5785 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 5786 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 5787 return false; 5788 5789 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 5790 << Arg->getSourceRange(); 5791 } 5792 5793 // Check if the gather/scatter scale is legal. 
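// Illustrative sketch (not part of the check itself): the immediate validated
// by the function below is the scale operand that the gather/scatter intrinsic
// wrappers forward, e.g.
//   __m512d v = _mm512_i32gather_pd(idx, base, 8); // scale 8: accepted
//   __m512d w = _mm512_i32gather_pd(idx, base, 3); // scale 3: diagnosed
// Only 1, 2, 4, and 8 are accepted; anything else is reported with
// err_x86_builtin_invalid_scale.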
5794 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 5795 CallExpr *TheCall) { 5796 unsigned ArgNum = 0; 5797 switch (BuiltinID) { 5798 default: 5799 return false; 5800 case X86::BI__builtin_ia32_gatherpfdpd: 5801 case X86::BI__builtin_ia32_gatherpfdps: 5802 case X86::BI__builtin_ia32_gatherpfqpd: 5803 case X86::BI__builtin_ia32_gatherpfqps: 5804 case X86::BI__builtin_ia32_scatterpfdpd: 5805 case X86::BI__builtin_ia32_scatterpfdps: 5806 case X86::BI__builtin_ia32_scatterpfqpd: 5807 case X86::BI__builtin_ia32_scatterpfqps: 5808 ArgNum = 3; 5809 break; 5810 case X86::BI__builtin_ia32_gatherd_pd: 5811 case X86::BI__builtin_ia32_gatherd_pd256: 5812 case X86::BI__builtin_ia32_gatherq_pd: 5813 case X86::BI__builtin_ia32_gatherq_pd256: 5814 case X86::BI__builtin_ia32_gatherd_ps: 5815 case X86::BI__builtin_ia32_gatherd_ps256: 5816 case X86::BI__builtin_ia32_gatherq_ps: 5817 case X86::BI__builtin_ia32_gatherq_ps256: 5818 case X86::BI__builtin_ia32_gatherd_q: 5819 case X86::BI__builtin_ia32_gatherd_q256: 5820 case X86::BI__builtin_ia32_gatherq_q: 5821 case X86::BI__builtin_ia32_gatherq_q256: 5822 case X86::BI__builtin_ia32_gatherd_d: 5823 case X86::BI__builtin_ia32_gatherd_d256: 5824 case X86::BI__builtin_ia32_gatherq_d: 5825 case X86::BI__builtin_ia32_gatherq_d256: 5826 case X86::BI__builtin_ia32_gather3div2df: 5827 case X86::BI__builtin_ia32_gather3div2di: 5828 case X86::BI__builtin_ia32_gather3div4df: 5829 case X86::BI__builtin_ia32_gather3div4di: 5830 case X86::BI__builtin_ia32_gather3div4sf: 5831 case X86::BI__builtin_ia32_gather3div4si: 5832 case X86::BI__builtin_ia32_gather3div8sf: 5833 case X86::BI__builtin_ia32_gather3div8si: 5834 case X86::BI__builtin_ia32_gather3siv2df: 5835 case X86::BI__builtin_ia32_gather3siv2di: 5836 case X86::BI__builtin_ia32_gather3siv4df: 5837 case X86::BI__builtin_ia32_gather3siv4di: 5838 case X86::BI__builtin_ia32_gather3siv4sf: 5839 case X86::BI__builtin_ia32_gather3siv4si: 5840 case X86::BI__builtin_ia32_gather3siv8sf: 5841 case X86::BI__builtin_ia32_gather3siv8si: 5842 case X86::BI__builtin_ia32_gathersiv8df: 5843 case X86::BI__builtin_ia32_gathersiv16sf: 5844 case X86::BI__builtin_ia32_gatherdiv8df: 5845 case X86::BI__builtin_ia32_gatherdiv16sf: 5846 case X86::BI__builtin_ia32_gathersiv8di: 5847 case X86::BI__builtin_ia32_gathersiv16si: 5848 case X86::BI__builtin_ia32_gatherdiv8di: 5849 case X86::BI__builtin_ia32_gatherdiv16si: 5850 case X86::BI__builtin_ia32_scatterdiv2df: 5851 case X86::BI__builtin_ia32_scatterdiv2di: 5852 case X86::BI__builtin_ia32_scatterdiv4df: 5853 case X86::BI__builtin_ia32_scatterdiv4di: 5854 case X86::BI__builtin_ia32_scatterdiv4sf: 5855 case X86::BI__builtin_ia32_scatterdiv4si: 5856 case X86::BI__builtin_ia32_scatterdiv8sf: 5857 case X86::BI__builtin_ia32_scatterdiv8si: 5858 case X86::BI__builtin_ia32_scattersiv2df: 5859 case X86::BI__builtin_ia32_scattersiv2di: 5860 case X86::BI__builtin_ia32_scattersiv4df: 5861 case X86::BI__builtin_ia32_scattersiv4di: 5862 case X86::BI__builtin_ia32_scattersiv4sf: 5863 case X86::BI__builtin_ia32_scattersiv4si: 5864 case X86::BI__builtin_ia32_scattersiv8sf: 5865 case X86::BI__builtin_ia32_scattersiv8si: 5866 case X86::BI__builtin_ia32_scattersiv8df: 5867 case X86::BI__builtin_ia32_scattersiv16sf: 5868 case X86::BI__builtin_ia32_scatterdiv8df: 5869 case X86::BI__builtin_ia32_scatterdiv16sf: 5870 case X86::BI__builtin_ia32_scattersiv8di: 5871 case X86::BI__builtin_ia32_scattersiv16si: 5872 case X86::BI__builtin_ia32_scatterdiv8di: 5873 case X86::BI__builtin_ia32_scatterdiv16si: 5874 
ArgNum = 4; 5875 break; 5876 } 5877 5878 llvm::APSInt Result; 5879 5880 // We can't check the value of a dependent argument. 5881 Expr *Arg = TheCall->getArg(ArgNum); 5882 if (Arg->isTypeDependent() || Arg->isValueDependent()) 5883 return false; 5884 5885 // Check constant-ness first. 5886 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 5887 return true; 5888 5889 if (Result == 1 || Result == 2 || Result == 4 || Result == 8) 5890 return false; 5891 5892 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale) 5893 << Arg->getSourceRange(); 5894 } 5895 5896 enum { TileRegLow = 0, TileRegHigh = 7 }; 5897 5898 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, 5899 ArrayRef<int> ArgNums) { 5900 for (int ArgNum : ArgNums) { 5901 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh)) 5902 return true; 5903 } 5904 return false; 5905 } 5906 5907 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall, 5908 ArrayRef<int> ArgNums) { 5909 // Tile register numbers range from TileRegLow to TileRegHigh, so a bitset of 5910 // TileRegHigh + 1 bits is enough to track which registers have been used. 5911 std::bitset<TileRegHigh + 1> ArgValues; 5912 for (int ArgNum : ArgNums) { 5913 Expr *Arg = TheCall->getArg(ArgNum); 5914 if (Arg->isTypeDependent() || Arg->isValueDependent()) 5915 continue; 5916 5917 llvm::APSInt Result; 5918 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 5919 return true; 5920 int ArgExtValue = Result.getExtValue(); 5921 assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) && 5922 "Incorrect tile register num."); 5923 if (ArgValues.test(ArgExtValue)) 5924 return Diag(TheCall->getBeginLoc(), 5925 diag::err_x86_builtin_tile_arg_duplicate) 5926 << TheCall->getArg(ArgNum)->getSourceRange(); 5927 ArgValues.set(ArgExtValue); 5928 } 5929 return false; 5930 } 5931 5932 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, 5933 ArrayRef<int> ArgNums) { 5934 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) || 5935 CheckX86BuiltinTileDuplicate(TheCall, ArgNums); 5936 } 5937 5938 bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) { 5939 switch (BuiltinID) { 5940 default: 5941 return false; 5942 case X86::BI__builtin_ia32_tileloadd64: 5943 case X86::BI__builtin_ia32_tileloaddt164: 5944 case X86::BI__builtin_ia32_tilestored64: 5945 case X86::BI__builtin_ia32_tilezero: 5946 return CheckX86BuiltinTileArgumentsRange(TheCall, 0); 5947 case X86::BI__builtin_ia32_tdpbssd: 5948 case X86::BI__builtin_ia32_tdpbsud: 5949 case X86::BI__builtin_ia32_tdpbusd: 5950 case X86::BI__builtin_ia32_tdpbuud: 5951 case X86::BI__builtin_ia32_tdpbf16ps: 5952 case X86::BI__builtin_ia32_tdpfp16ps: 5953 case X86::BI__builtin_ia32_tcmmimfp16ps: 5954 case X86::BI__builtin_ia32_tcmmrlfp16ps: 5955 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2}); 5956 } 5957 } 5958 static bool isX86_32Builtin(unsigned BuiltinID) { 5959 // These builtins only work on x86-32 targets.
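// Illustration (the 64-bit counterpart named here is an assumption about the
// builtin set, not checked in this function): on x86-64 these builtins have
// _u64 forms instead, so code such as
//   unsigned int e = __builtin_ia32_readeflags_u32();
// compiled for a 64-bit target is rejected below via
// err_32_bit_builtin_64_bit_tgt, while __builtin_ia32_readeflags_u64() is the
// spelling expected there.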
5960 switch (BuiltinID) { 5961 case X86::BI__builtin_ia32_readeflags_u32: 5962 case X86::BI__builtin_ia32_writeeflags_u32: 5963 return true; 5964 } 5965 5966 return false; 5967 } 5968 5969 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 5970 CallExpr *TheCall) { 5971 if (BuiltinID == X86::BI__builtin_cpu_supports) 5972 return SemaBuiltinCpuSupports(*this, TI, TheCall); 5973 5974 if (BuiltinID == X86::BI__builtin_cpu_is) 5975 return SemaBuiltinCpuIs(*this, TI, TheCall); 5976 5977 // Check for 32-bit only builtins on a 64-bit target. 5978 const llvm::Triple &TT = TI.getTriple(); 5979 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 5980 return Diag(TheCall->getCallee()->getBeginLoc(), 5981 diag::err_32_bit_builtin_64_bit_tgt); 5982 5983 // If the intrinsic has rounding or SAE make sure its valid. 5984 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 5985 return true; 5986 5987 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 5988 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 5989 return true; 5990 5991 // If the intrinsic has a tile arguments, make sure they are valid. 5992 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall)) 5993 return true; 5994 5995 // For intrinsics which take an immediate value as part of the instruction, 5996 // range check them here. 5997 int i = 0, l = 0, u = 0; 5998 switch (BuiltinID) { 5999 default: 6000 return false; 6001 case X86::BI__builtin_ia32_vec_ext_v2si: 6002 case X86::BI__builtin_ia32_vec_ext_v2di: 6003 case X86::BI__builtin_ia32_vextractf128_pd256: 6004 case X86::BI__builtin_ia32_vextractf128_ps256: 6005 case X86::BI__builtin_ia32_vextractf128_si256: 6006 case X86::BI__builtin_ia32_extract128i256: 6007 case X86::BI__builtin_ia32_extractf64x4_mask: 6008 case X86::BI__builtin_ia32_extracti64x4_mask: 6009 case X86::BI__builtin_ia32_extractf32x8_mask: 6010 case X86::BI__builtin_ia32_extracti32x8_mask: 6011 case X86::BI__builtin_ia32_extractf64x2_256_mask: 6012 case X86::BI__builtin_ia32_extracti64x2_256_mask: 6013 case X86::BI__builtin_ia32_extractf32x4_256_mask: 6014 case X86::BI__builtin_ia32_extracti32x4_256_mask: 6015 i = 1; l = 0; u = 1; 6016 break; 6017 case X86::BI__builtin_ia32_vec_set_v2di: 6018 case X86::BI__builtin_ia32_vinsertf128_pd256: 6019 case X86::BI__builtin_ia32_vinsertf128_ps256: 6020 case X86::BI__builtin_ia32_vinsertf128_si256: 6021 case X86::BI__builtin_ia32_insert128i256: 6022 case X86::BI__builtin_ia32_insertf32x8: 6023 case X86::BI__builtin_ia32_inserti32x8: 6024 case X86::BI__builtin_ia32_insertf64x4: 6025 case X86::BI__builtin_ia32_inserti64x4: 6026 case X86::BI__builtin_ia32_insertf64x2_256: 6027 case X86::BI__builtin_ia32_inserti64x2_256: 6028 case X86::BI__builtin_ia32_insertf32x4_256: 6029 case X86::BI__builtin_ia32_inserti32x4_256: 6030 i = 2; l = 0; u = 1; 6031 break; 6032 case X86::BI__builtin_ia32_vpermilpd: 6033 case X86::BI__builtin_ia32_vec_ext_v4hi: 6034 case X86::BI__builtin_ia32_vec_ext_v4si: 6035 case X86::BI__builtin_ia32_vec_ext_v4sf: 6036 case X86::BI__builtin_ia32_vec_ext_v4di: 6037 case X86::BI__builtin_ia32_extractf32x4_mask: 6038 case X86::BI__builtin_ia32_extracti32x4_mask: 6039 case X86::BI__builtin_ia32_extractf64x2_512_mask: 6040 case X86::BI__builtin_ia32_extracti64x2_512_mask: 6041 i = 1; l = 0; u = 3; 6042 break; 6043 case X86::BI_mm_prefetch: 6044 case X86::BI__builtin_ia32_vec_ext_v8hi: 6045 case X86::BI__builtin_ia32_vec_ext_v8si: 6046 i = 1; l = 0; u = 7; 6047 break; 6048 case 
X86::BI__builtin_ia32_sha1rnds4: 6049 case X86::BI__builtin_ia32_blendpd: 6050 case X86::BI__builtin_ia32_shufpd: 6051 case X86::BI__builtin_ia32_vec_set_v4hi: 6052 case X86::BI__builtin_ia32_vec_set_v4si: 6053 case X86::BI__builtin_ia32_vec_set_v4di: 6054 case X86::BI__builtin_ia32_shuf_f32x4_256: 6055 case X86::BI__builtin_ia32_shuf_f64x2_256: 6056 case X86::BI__builtin_ia32_shuf_i32x4_256: 6057 case X86::BI__builtin_ia32_shuf_i64x2_256: 6058 case X86::BI__builtin_ia32_insertf64x2_512: 6059 case X86::BI__builtin_ia32_inserti64x2_512: 6060 case X86::BI__builtin_ia32_insertf32x4: 6061 case X86::BI__builtin_ia32_inserti32x4: 6062 i = 2; l = 0; u = 3; 6063 break; 6064 case X86::BI__builtin_ia32_vpermil2pd: 6065 case X86::BI__builtin_ia32_vpermil2pd256: 6066 case X86::BI__builtin_ia32_vpermil2ps: 6067 case X86::BI__builtin_ia32_vpermil2ps256: 6068 i = 3; l = 0; u = 3; 6069 break; 6070 case X86::BI__builtin_ia32_cmpb128_mask: 6071 case X86::BI__builtin_ia32_cmpw128_mask: 6072 case X86::BI__builtin_ia32_cmpd128_mask: 6073 case X86::BI__builtin_ia32_cmpq128_mask: 6074 case X86::BI__builtin_ia32_cmpb256_mask: 6075 case X86::BI__builtin_ia32_cmpw256_mask: 6076 case X86::BI__builtin_ia32_cmpd256_mask: 6077 case X86::BI__builtin_ia32_cmpq256_mask: 6078 case X86::BI__builtin_ia32_cmpb512_mask: 6079 case X86::BI__builtin_ia32_cmpw512_mask: 6080 case X86::BI__builtin_ia32_cmpd512_mask: 6081 case X86::BI__builtin_ia32_cmpq512_mask: 6082 case X86::BI__builtin_ia32_ucmpb128_mask: 6083 case X86::BI__builtin_ia32_ucmpw128_mask: 6084 case X86::BI__builtin_ia32_ucmpd128_mask: 6085 case X86::BI__builtin_ia32_ucmpq128_mask: 6086 case X86::BI__builtin_ia32_ucmpb256_mask: 6087 case X86::BI__builtin_ia32_ucmpw256_mask: 6088 case X86::BI__builtin_ia32_ucmpd256_mask: 6089 case X86::BI__builtin_ia32_ucmpq256_mask: 6090 case X86::BI__builtin_ia32_ucmpb512_mask: 6091 case X86::BI__builtin_ia32_ucmpw512_mask: 6092 case X86::BI__builtin_ia32_ucmpd512_mask: 6093 case X86::BI__builtin_ia32_ucmpq512_mask: 6094 case X86::BI__builtin_ia32_vpcomub: 6095 case X86::BI__builtin_ia32_vpcomuw: 6096 case X86::BI__builtin_ia32_vpcomud: 6097 case X86::BI__builtin_ia32_vpcomuq: 6098 case X86::BI__builtin_ia32_vpcomb: 6099 case X86::BI__builtin_ia32_vpcomw: 6100 case X86::BI__builtin_ia32_vpcomd: 6101 case X86::BI__builtin_ia32_vpcomq: 6102 case X86::BI__builtin_ia32_vec_set_v8hi: 6103 case X86::BI__builtin_ia32_vec_set_v8si: 6104 i = 2; l = 0; u = 7; 6105 break; 6106 case X86::BI__builtin_ia32_vpermilpd256: 6107 case X86::BI__builtin_ia32_roundps: 6108 case X86::BI__builtin_ia32_roundpd: 6109 case X86::BI__builtin_ia32_roundps256: 6110 case X86::BI__builtin_ia32_roundpd256: 6111 case X86::BI__builtin_ia32_getmantpd128_mask: 6112 case X86::BI__builtin_ia32_getmantpd256_mask: 6113 case X86::BI__builtin_ia32_getmantps128_mask: 6114 case X86::BI__builtin_ia32_getmantps256_mask: 6115 case X86::BI__builtin_ia32_getmantpd512_mask: 6116 case X86::BI__builtin_ia32_getmantps512_mask: 6117 case X86::BI__builtin_ia32_getmantph128_mask: 6118 case X86::BI__builtin_ia32_getmantph256_mask: 6119 case X86::BI__builtin_ia32_getmantph512_mask: 6120 case X86::BI__builtin_ia32_vec_ext_v16qi: 6121 case X86::BI__builtin_ia32_vec_ext_v16hi: 6122 i = 1; l = 0; u = 15; 6123 break; 6124 case X86::BI__builtin_ia32_pblendd128: 6125 case X86::BI__builtin_ia32_blendps: 6126 case X86::BI__builtin_ia32_blendpd256: 6127 case X86::BI__builtin_ia32_shufpd256: 6128 case X86::BI__builtin_ia32_roundss: 6129 case X86::BI__builtin_ia32_roundsd: 6130 case 
X86::BI__builtin_ia32_rangepd128_mask: 6131 case X86::BI__builtin_ia32_rangepd256_mask: 6132 case X86::BI__builtin_ia32_rangepd512_mask: 6133 case X86::BI__builtin_ia32_rangeps128_mask: 6134 case X86::BI__builtin_ia32_rangeps256_mask: 6135 case X86::BI__builtin_ia32_rangeps512_mask: 6136 case X86::BI__builtin_ia32_getmantsd_round_mask: 6137 case X86::BI__builtin_ia32_getmantss_round_mask: 6138 case X86::BI__builtin_ia32_getmantsh_round_mask: 6139 case X86::BI__builtin_ia32_vec_set_v16qi: 6140 case X86::BI__builtin_ia32_vec_set_v16hi: 6141 i = 2; l = 0; u = 15; 6142 break; 6143 case X86::BI__builtin_ia32_vec_ext_v32qi: 6144 i = 1; l = 0; u = 31; 6145 break; 6146 case X86::BI__builtin_ia32_cmpps: 6147 case X86::BI__builtin_ia32_cmpss: 6148 case X86::BI__builtin_ia32_cmppd: 6149 case X86::BI__builtin_ia32_cmpsd: 6150 case X86::BI__builtin_ia32_cmpps256: 6151 case X86::BI__builtin_ia32_cmppd256: 6152 case X86::BI__builtin_ia32_cmpps128_mask: 6153 case X86::BI__builtin_ia32_cmppd128_mask: 6154 case X86::BI__builtin_ia32_cmpps256_mask: 6155 case X86::BI__builtin_ia32_cmppd256_mask: 6156 case X86::BI__builtin_ia32_cmpps512_mask: 6157 case X86::BI__builtin_ia32_cmppd512_mask: 6158 case X86::BI__builtin_ia32_cmpsd_mask: 6159 case X86::BI__builtin_ia32_cmpss_mask: 6160 case X86::BI__builtin_ia32_vec_set_v32qi: 6161 i = 2; l = 0; u = 31; 6162 break; 6163 case X86::BI__builtin_ia32_permdf256: 6164 case X86::BI__builtin_ia32_permdi256: 6165 case X86::BI__builtin_ia32_permdf512: 6166 case X86::BI__builtin_ia32_permdi512: 6167 case X86::BI__builtin_ia32_vpermilps: 6168 case X86::BI__builtin_ia32_vpermilps256: 6169 case X86::BI__builtin_ia32_vpermilpd512: 6170 case X86::BI__builtin_ia32_vpermilps512: 6171 case X86::BI__builtin_ia32_pshufd: 6172 case X86::BI__builtin_ia32_pshufd256: 6173 case X86::BI__builtin_ia32_pshufd512: 6174 case X86::BI__builtin_ia32_pshufhw: 6175 case X86::BI__builtin_ia32_pshufhw256: 6176 case X86::BI__builtin_ia32_pshufhw512: 6177 case X86::BI__builtin_ia32_pshuflw: 6178 case X86::BI__builtin_ia32_pshuflw256: 6179 case X86::BI__builtin_ia32_pshuflw512: 6180 case X86::BI__builtin_ia32_vcvtps2ph: 6181 case X86::BI__builtin_ia32_vcvtps2ph_mask: 6182 case X86::BI__builtin_ia32_vcvtps2ph256: 6183 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 6184 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 6185 case X86::BI__builtin_ia32_rndscaleps_128_mask: 6186 case X86::BI__builtin_ia32_rndscalepd_128_mask: 6187 case X86::BI__builtin_ia32_rndscaleps_256_mask: 6188 case X86::BI__builtin_ia32_rndscalepd_256_mask: 6189 case X86::BI__builtin_ia32_rndscaleps_mask: 6190 case X86::BI__builtin_ia32_rndscalepd_mask: 6191 case X86::BI__builtin_ia32_rndscaleph_mask: 6192 case X86::BI__builtin_ia32_reducepd128_mask: 6193 case X86::BI__builtin_ia32_reducepd256_mask: 6194 case X86::BI__builtin_ia32_reducepd512_mask: 6195 case X86::BI__builtin_ia32_reduceps128_mask: 6196 case X86::BI__builtin_ia32_reduceps256_mask: 6197 case X86::BI__builtin_ia32_reduceps512_mask: 6198 case X86::BI__builtin_ia32_reduceph128_mask: 6199 case X86::BI__builtin_ia32_reduceph256_mask: 6200 case X86::BI__builtin_ia32_reduceph512_mask: 6201 case X86::BI__builtin_ia32_prold512: 6202 case X86::BI__builtin_ia32_prolq512: 6203 case X86::BI__builtin_ia32_prold128: 6204 case X86::BI__builtin_ia32_prold256: 6205 case X86::BI__builtin_ia32_prolq128: 6206 case X86::BI__builtin_ia32_prolq256: 6207 case X86::BI__builtin_ia32_prord512: 6208 case X86::BI__builtin_ia32_prorq512: 6209 case X86::BI__builtin_ia32_prord128: 6210 case 
X86::BI__builtin_ia32_prord256: 6211 case X86::BI__builtin_ia32_prorq128: 6212 case X86::BI__builtin_ia32_prorq256: 6213 case X86::BI__builtin_ia32_fpclasspd128_mask: 6214 case X86::BI__builtin_ia32_fpclasspd256_mask: 6215 case X86::BI__builtin_ia32_fpclassps128_mask: 6216 case X86::BI__builtin_ia32_fpclassps256_mask: 6217 case X86::BI__builtin_ia32_fpclassps512_mask: 6218 case X86::BI__builtin_ia32_fpclasspd512_mask: 6219 case X86::BI__builtin_ia32_fpclassph128_mask: 6220 case X86::BI__builtin_ia32_fpclassph256_mask: 6221 case X86::BI__builtin_ia32_fpclassph512_mask: 6222 case X86::BI__builtin_ia32_fpclasssd_mask: 6223 case X86::BI__builtin_ia32_fpclassss_mask: 6224 case X86::BI__builtin_ia32_fpclasssh_mask: 6225 case X86::BI__builtin_ia32_pslldqi128_byteshift: 6226 case X86::BI__builtin_ia32_pslldqi256_byteshift: 6227 case X86::BI__builtin_ia32_pslldqi512_byteshift: 6228 case X86::BI__builtin_ia32_psrldqi128_byteshift: 6229 case X86::BI__builtin_ia32_psrldqi256_byteshift: 6230 case X86::BI__builtin_ia32_psrldqi512_byteshift: 6231 case X86::BI__builtin_ia32_kshiftliqi: 6232 case X86::BI__builtin_ia32_kshiftlihi: 6233 case X86::BI__builtin_ia32_kshiftlisi: 6234 case X86::BI__builtin_ia32_kshiftlidi: 6235 case X86::BI__builtin_ia32_kshiftriqi: 6236 case X86::BI__builtin_ia32_kshiftrihi: 6237 case X86::BI__builtin_ia32_kshiftrisi: 6238 case X86::BI__builtin_ia32_kshiftridi: 6239 i = 1; l = 0; u = 255; 6240 break; 6241 case X86::BI__builtin_ia32_vperm2f128_pd256: 6242 case X86::BI__builtin_ia32_vperm2f128_ps256: 6243 case X86::BI__builtin_ia32_vperm2f128_si256: 6244 case X86::BI__builtin_ia32_permti256: 6245 case X86::BI__builtin_ia32_pblendw128: 6246 case X86::BI__builtin_ia32_pblendw256: 6247 case X86::BI__builtin_ia32_blendps256: 6248 case X86::BI__builtin_ia32_pblendd256: 6249 case X86::BI__builtin_ia32_palignr128: 6250 case X86::BI__builtin_ia32_palignr256: 6251 case X86::BI__builtin_ia32_palignr512: 6252 case X86::BI__builtin_ia32_alignq512: 6253 case X86::BI__builtin_ia32_alignd512: 6254 case X86::BI__builtin_ia32_alignd128: 6255 case X86::BI__builtin_ia32_alignd256: 6256 case X86::BI__builtin_ia32_alignq128: 6257 case X86::BI__builtin_ia32_alignq256: 6258 case X86::BI__builtin_ia32_vcomisd: 6259 case X86::BI__builtin_ia32_vcomiss: 6260 case X86::BI__builtin_ia32_shuf_f32x4: 6261 case X86::BI__builtin_ia32_shuf_f64x2: 6262 case X86::BI__builtin_ia32_shuf_i32x4: 6263 case X86::BI__builtin_ia32_shuf_i64x2: 6264 case X86::BI__builtin_ia32_shufpd512: 6265 case X86::BI__builtin_ia32_shufps: 6266 case X86::BI__builtin_ia32_shufps256: 6267 case X86::BI__builtin_ia32_shufps512: 6268 case X86::BI__builtin_ia32_dbpsadbw128: 6269 case X86::BI__builtin_ia32_dbpsadbw256: 6270 case X86::BI__builtin_ia32_dbpsadbw512: 6271 case X86::BI__builtin_ia32_vpshldd128: 6272 case X86::BI__builtin_ia32_vpshldd256: 6273 case X86::BI__builtin_ia32_vpshldd512: 6274 case X86::BI__builtin_ia32_vpshldq128: 6275 case X86::BI__builtin_ia32_vpshldq256: 6276 case X86::BI__builtin_ia32_vpshldq512: 6277 case X86::BI__builtin_ia32_vpshldw128: 6278 case X86::BI__builtin_ia32_vpshldw256: 6279 case X86::BI__builtin_ia32_vpshldw512: 6280 case X86::BI__builtin_ia32_vpshrdd128: 6281 case X86::BI__builtin_ia32_vpshrdd256: 6282 case X86::BI__builtin_ia32_vpshrdd512: 6283 case X86::BI__builtin_ia32_vpshrdq128: 6284 case X86::BI__builtin_ia32_vpshrdq256: 6285 case X86::BI__builtin_ia32_vpshrdq512: 6286 case X86::BI__builtin_ia32_vpshrdw128: 6287 case X86::BI__builtin_ia32_vpshrdw256: 6288 case X86::BI__builtin_ia32_vpshrdw512: 6289 i 
= 2; l = 0; u = 255; 6290 break; 6291 case X86::BI__builtin_ia32_fixupimmpd512_mask: 6292 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 6293 case X86::BI__builtin_ia32_fixupimmps512_mask: 6294 case X86::BI__builtin_ia32_fixupimmps512_maskz: 6295 case X86::BI__builtin_ia32_fixupimmsd_mask: 6296 case X86::BI__builtin_ia32_fixupimmsd_maskz: 6297 case X86::BI__builtin_ia32_fixupimmss_mask: 6298 case X86::BI__builtin_ia32_fixupimmss_maskz: 6299 case X86::BI__builtin_ia32_fixupimmpd128_mask: 6300 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 6301 case X86::BI__builtin_ia32_fixupimmpd256_mask: 6302 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 6303 case X86::BI__builtin_ia32_fixupimmps128_mask: 6304 case X86::BI__builtin_ia32_fixupimmps128_maskz: 6305 case X86::BI__builtin_ia32_fixupimmps256_mask: 6306 case X86::BI__builtin_ia32_fixupimmps256_maskz: 6307 case X86::BI__builtin_ia32_pternlogd512_mask: 6308 case X86::BI__builtin_ia32_pternlogd512_maskz: 6309 case X86::BI__builtin_ia32_pternlogq512_mask: 6310 case X86::BI__builtin_ia32_pternlogq512_maskz: 6311 case X86::BI__builtin_ia32_pternlogd128_mask: 6312 case X86::BI__builtin_ia32_pternlogd128_maskz: 6313 case X86::BI__builtin_ia32_pternlogd256_mask: 6314 case X86::BI__builtin_ia32_pternlogd256_maskz: 6315 case X86::BI__builtin_ia32_pternlogq128_mask: 6316 case X86::BI__builtin_ia32_pternlogq128_maskz: 6317 case X86::BI__builtin_ia32_pternlogq256_mask: 6318 case X86::BI__builtin_ia32_pternlogq256_maskz: 6319 case X86::BI__builtin_ia32_vsm3rnds2: 6320 i = 3; l = 0; u = 255; 6321 break; 6322 case X86::BI__builtin_ia32_gatherpfdpd: 6323 case X86::BI__builtin_ia32_gatherpfdps: 6324 case X86::BI__builtin_ia32_gatherpfqpd: 6325 case X86::BI__builtin_ia32_gatherpfqps: 6326 case X86::BI__builtin_ia32_scatterpfdpd: 6327 case X86::BI__builtin_ia32_scatterpfdps: 6328 case X86::BI__builtin_ia32_scatterpfqpd: 6329 case X86::BI__builtin_ia32_scatterpfqps: 6330 i = 4; l = 2; u = 3; 6331 break; 6332 case X86::BI__builtin_ia32_reducesd_mask: 6333 case X86::BI__builtin_ia32_reducess_mask: 6334 case X86::BI__builtin_ia32_rndscalesd_round_mask: 6335 case X86::BI__builtin_ia32_rndscaless_round_mask: 6336 case X86::BI__builtin_ia32_rndscalesh_round_mask: 6337 case X86::BI__builtin_ia32_reducesh_mask: 6338 i = 4; l = 0; u = 255; 6339 break; 6340 case X86::BI__builtin_ia32_cmpccxadd32: 6341 case X86::BI__builtin_ia32_cmpccxadd64: 6342 i = 3; l = 0; u = 15; 6343 break; 6344 } 6345 6346 // Note that we don't force a hard error on the range check here, allowing 6347 // template-generated or macro-generated dead code to potentially have out-of- 6348 // range values. Such code still needs to code generate, but it doesn't have to 6349 // make any sense. We use a warning that defaults to an error. 6350 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false); 6351 } 6352 6353 /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo 6354 /// parameter with the FormatAttr's correct format_idx and firstDataArg. 6355 /// Returns true when the format fits the function and the FormatStringInfo has 6356 /// been populated.
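// Worked example (a sketch of the common case, not normative): for a free
// function declared as
//   void log_msg(int level, const char *fmt, ...)
//       __attribute__((format(printf, 2, 3)));
// the attribute's 1-based indices become FormatIdx == 1 and FirstDataArg == 2
// after the adjustments below, and ArgPassingKind is FAPK_Variadic. For a
// non-static member function GCC also counts the implicit 'this' parameter,
// so both values are shifted down once more.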
6357 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, 6358 bool IsVariadic, FormatStringInfo *FSI) { 6359 if (Format->getFirstArg() == 0) 6360 FSI->ArgPassingKind = FAPK_VAList; 6361 else if (IsVariadic) 6362 FSI->ArgPassingKind = FAPK_Variadic; 6363 else 6364 FSI->ArgPassingKind = FAPK_Fixed; 6365 FSI->FormatIdx = Format->getFormatIdx() - 1; 6366 FSI->FirstDataArg = 6367 FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1; 6368 6369 // The way the format attribute works in GCC, the implicit this argument 6370 // of member functions is counted. However, it doesn't appear in our own 6371 // lists, so decrement format_idx in that case. 6372 if (IsCXXMember) { 6373 if (FSI->FormatIdx == 0) 6374 return false; 6375 --FSI->FormatIdx; 6376 if (FSI->FirstDataArg != 0) 6377 --FSI->FirstDataArg; 6378 } 6379 return true; 6380 } 6381 6382 /// Checks if the given expression evaluates to null. 6383 /// 6384 /// Returns true if the value evaluates to null. 6385 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { 6386 // If the expression has non-null type, it doesn't evaluate to null. 6387 if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) { 6388 if (*nullability == NullabilityKind::NonNull) 6389 return false; 6390 } 6391 6392 // As a special case, transparent unions initialized with zero are 6393 // considered null for the purposes of the nonnull attribute. 6394 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 6395 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 6396 if (const CompoundLiteralExpr *CLE = 6397 dyn_cast<CompoundLiteralExpr>(Expr)) 6398 if (const InitListExpr *ILE = 6399 dyn_cast<InitListExpr>(CLE->getInitializer())) 6400 Expr = ILE->getInit(0); 6401 } 6402 6403 bool Result; 6404 return (!Expr->isValueDependent() && 6405 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 6406 !Result); 6407 } 6408 6409 static void CheckNonNullArgument(Sema &S, 6410 const Expr *ArgExpr, 6411 SourceLocation CallSiteLoc) { 6412 if (CheckNonNullExpr(S, ArgExpr)) 6413 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 6414 S.PDiag(diag::warn_null_arg) 6415 << ArgExpr->getSourceRange()); 6416 } 6417 6418 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 6419 FormatStringInfo FSI; 6420 if ((GetFormatStringType(Format) == FST_NSString) && 6421 getFormatStringInfo(Format, false, true, &FSI)) { 6422 Idx = FSI.FormatIdx; 6423 return true; 6424 } 6425 return false; 6426 } 6427 6428 /// Diagnose use of the %s directive in an NSString which is being passed 6429 /// as a format string to a formatting method.
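// A sketch of the pattern being diagnosed (the call below is illustrative):
//   CFStringCreateWithFormat(kCFAllocatorDefault, NULL, CFSTR("%s"), name);
// The format argument of the CFString formatting family sits at index 2; if it
// contains a %s directive, warn_objc_cdirective_format_string is emitted at
// the format expression and a note points at the callee's declaration.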
6430 static void 6431 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 6432 const NamedDecl *FDecl, 6433 Expr **Args, 6434 unsigned NumArgs) { 6435 unsigned Idx = 0; 6436 bool Format = false; 6437 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 6438 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 6439 Idx = 2; 6440 Format = true; 6441 } 6442 else 6443 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 6444 if (S.GetFormatNSStringIdx(I, Idx)) { 6445 Format = true; 6446 break; 6447 } 6448 } 6449 if (!Format || NumArgs <= Idx) 6450 return; 6451 const Expr *FormatExpr = Args[Idx]; 6452 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 6453 FormatExpr = CSCE->getSubExpr(); 6454 const StringLiteral *FormatString; 6455 if (const ObjCStringLiteral *OSL = 6456 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 6457 FormatString = OSL->getString(); 6458 else 6459 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 6460 if (!FormatString) 6461 return; 6462 if (S.FormatStringHasSArg(FormatString)) { 6463 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 6464 << "%s" << 1 << 1; 6465 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 6466 << FDecl->getDeclName(); 6467 } 6468 } 6469 6470 /// Determine whether the given type has a non-null nullability annotation. 6471 static bool isNonNullType(QualType type) { 6472 if (auto nullability = type->getNullability()) 6473 return *nullability == NullabilityKind::NonNull; 6474 6475 return false; 6476 } 6477 6478 static void CheckNonNullArguments(Sema &S, 6479 const NamedDecl *FDecl, 6480 const FunctionProtoType *Proto, 6481 ArrayRef<const Expr *> Args, 6482 SourceLocation CallSiteLoc) { 6483 assert((FDecl || Proto) && "Need a function declaration or prototype"); 6484 6485 // Already checked by constant evaluator. 6486 if (S.isConstantEvaluated()) 6487 return; 6488 // Check the attributes attached to the method/function itself. 6489 llvm::SmallBitVector NonNullArgs; 6490 if (FDecl) { 6491 // Handle the nonnull attribute on the function/method declaration itself. 6492 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 6493 if (!NonNull->args_size()) { 6494 // Easy case: all pointer arguments are nonnull. 6495 for (const auto *Arg : Args) 6496 if (S.isValidPointerAttrType(Arg->getType())) 6497 CheckNonNullArgument(S, Arg, CallSiteLoc); 6498 return; 6499 } 6500 6501 for (const ParamIdx &Idx : NonNull->args()) { 6502 unsigned IdxAST = Idx.getASTIndex(); 6503 if (IdxAST >= Args.size()) 6504 continue; 6505 if (NonNullArgs.empty()) 6506 NonNullArgs.resize(Args.size()); 6507 NonNullArgs.set(IdxAST); 6508 } 6509 } 6510 } 6511 6512 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 6513 // Handle the nonnull attribute on the parameters of the 6514 // function/method. 
6515 ArrayRef<ParmVarDecl*> parms; 6516 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 6517 parms = FD->parameters(); 6518 else 6519 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 6520 6521 unsigned ParamIndex = 0; 6522 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 6523 I != E; ++I, ++ParamIndex) { 6524 const ParmVarDecl *PVD = *I; 6525 if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) { 6526 if (NonNullArgs.empty()) 6527 NonNullArgs.resize(Args.size()); 6528 6529 NonNullArgs.set(ParamIndex); 6530 } 6531 } 6532 } else { 6533 // If we have a non-function, non-method declaration but no 6534 // function prototype, try to dig out the function prototype. 6535 if (!Proto) { 6536 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 6537 QualType type = VD->getType().getNonReferenceType(); 6538 if (auto pointerType = type->getAs<PointerType>()) 6539 type = pointerType->getPointeeType(); 6540 else if (auto blockType = type->getAs<BlockPointerType>()) 6541 type = blockType->getPointeeType(); 6542 // FIXME: data member pointers? 6543 6544 // Dig out the function prototype, if there is one. 6545 Proto = type->getAs<FunctionProtoType>(); 6546 } 6547 } 6548 6549 // Fill in non-null argument information from the nullability 6550 // information on the parameter types (if we have them). 6551 if (Proto) { 6552 unsigned Index = 0; 6553 for (auto paramType : Proto->getParamTypes()) { 6554 if (isNonNullType(paramType)) { 6555 if (NonNullArgs.empty()) 6556 NonNullArgs.resize(Args.size()); 6557 6558 NonNullArgs.set(Index); 6559 } 6560 6561 ++Index; 6562 } 6563 } 6564 } 6565 6566 // Check for non-null arguments. 6567 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 6568 ArgIndex != ArgIndexEnd; ++ArgIndex) { 6569 if (NonNullArgs[ArgIndex]) 6570 CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc()); 6571 } 6572 } 6573 6574 // 16 byte ByVal alignment not due to a vector member is not honoured by XL 6575 // on AIX. Emit a warning here that users are generating binary incompatible 6576 // code to be safe. 6577 // Here we try to get information about the alignment of the struct member 6578 // from the struct passed to the caller function. We only warn when the struct 6579 // is passed byval, hence the series of checks and early returns if we are a not 6580 // passing a struct byval. 6581 void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) { 6582 const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens()); 6583 if (!ICE) 6584 return; 6585 6586 const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr()); 6587 if (!DR) 6588 return; 6589 6590 const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl()); 6591 if (!PD || !PD->getType()->isRecordType()) 6592 return; 6593 6594 QualType ArgType = Arg->getType(); 6595 for (const FieldDecl *FD : 6596 ArgType->castAs<RecordType>()->getDecl()->fields()) { 6597 if (const auto *AA = FD->getAttr<AlignedAttr>()) { 6598 CharUnits Alignment = 6599 Context.toCharUnitsFromBits(AA->getAlignment(Context)); 6600 if (Alignment.getQuantity() == 16) { 6601 Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD; 6602 Diag(Loc, diag::note_misaligned_member_used_here) << PD; 6603 } 6604 } 6605 } 6606 } 6607 6608 /// Warn if a pointer or reference argument passed to a function points to an 6609 /// object that is less aligned than the parameter. 
This can happen when 6610 /// creating a typedef with a lower alignment than the original type and then 6611 /// calling functions defined in terms of the original type. 6612 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 6613 StringRef ParamName, QualType ArgTy, 6614 QualType ParamTy) { 6615 6616 // If a function accepts a pointer or reference type 6617 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 6618 return; 6619 6620 // If the parameter is a pointer type, get the pointee type for the 6621 // argument too. If the parameter is a reference type, don't try to get 6622 // the pointee type for the argument. 6623 if (ParamTy->isPointerType()) 6624 ArgTy = ArgTy->getPointeeType(); 6625 6626 // Remove reference or pointer 6627 ParamTy = ParamTy->getPointeeType(); 6628 6629 // Find expected alignment, and the actual alignment of the passed object. 6630 // getTypeAlignInChars requires complete types 6631 if (ArgTy.isNull() || ParamTy->isDependentType() || 6632 ParamTy->isIncompleteType() || ArgTy->isIncompleteType() || 6633 ParamTy->isUndeducedType() || ArgTy->isUndeducedType()) 6634 return; 6635 6636 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 6637 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 6638 6639 // If the argument is less aligned than the parameter, there is a 6640 // potential alignment issue. 6641 if (ArgAlign < ParamAlign) 6642 Diag(Loc, diag::warn_param_mismatched_alignment) 6643 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 6644 << ParamName << (FDecl != nullptr) << FDecl; 6645 } 6646 6647 /// Handles the checks for format strings, non-POD arguments to vararg 6648 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 6649 /// attributes. 6650 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 6651 const Expr *ThisArg, ArrayRef<const Expr *> Args, 6652 bool IsMemberFunction, SourceLocation Loc, 6653 SourceRange Range, VariadicCallType CallType) { 6654 // FIXME: We should check as much as we can in the template definition. 6655 if (CurContext->isDependentContext()) 6656 return; 6657 6658 // Printf and scanf checking. 6659 llvm::SmallBitVector CheckedVarArgs; 6660 if (FDecl) { 6661 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 6662 // Only create vector if there are format attributes. 6663 CheckedVarArgs.resize(Args.size()); 6664 6665 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 6666 CheckedVarArgs); 6667 } 6668 } 6669 6670 // Refuse POD arguments that weren't caught by the format string 6671 // checks above. 6672 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 6673 if (CallType != VariadicDoesNotApply && 6674 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 6675 unsigned NumParams = Proto ? Proto->getNumParams() 6676 : FDecl && isa<FunctionDecl>(FDecl) 6677 ? cast<FunctionDecl>(FDecl)->getNumParams() 6678 : FDecl && isa<ObjCMethodDecl>(FDecl) 6679 ? cast<ObjCMethodDecl>(FDecl)->param_size() 6680 : 0; 6681 6682 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 6683 // Args[ArgIdx] can be null in malformed code. 6684 if (const Expr *Arg = Args[ArgIdx]) { 6685 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 6686 checkVariadicArgument(Arg, CallType); 6687 } 6688 } 6689 } 6690 6691 if (FDecl || Proto) { 6692 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 6693 6694 // Type safety checking. 
6695 if (FDecl) { 6696 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 6697 CheckArgumentWithTypeTag(I, Args, Loc); 6698 } 6699 } 6700 6701 // Check that passed arguments match the alignment of original arguments. 6702 // Try to get the missing prototype from the declaration. 6703 if (!Proto && FDecl) { 6704 const auto *FT = FDecl->getFunctionType(); 6705 if (isa_and_nonnull<FunctionProtoType>(FT)) 6706 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 6707 } 6708 if (Proto) { 6709 // For variadic functions, we may have more args than parameters. 6710 // For some K&R functions, we may have less args than parameters. 6711 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 6712 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 6713 // Args[ArgIdx] can be null in malformed code. 6714 if (const Expr *Arg = Args[ArgIdx]) { 6715 if (Arg->containsErrors()) 6716 continue; 6717 6718 if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg && 6719 FDecl->hasLinkage() && 6720 FDecl->getFormalLinkage() != InternalLinkage && 6721 CallType == VariadicDoesNotApply) 6722 checkAIXMemberAlignment((Arg->getExprLoc()), Arg); 6723 6724 QualType ParamTy = Proto->getParamType(ArgIdx); 6725 QualType ArgTy = Arg->getType(); 6726 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 6727 ArgTy, ParamTy); 6728 } 6729 } 6730 } 6731 6732 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 6733 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 6734 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 6735 if (!Arg->isValueDependent()) { 6736 Expr::EvalResult Align; 6737 if (Arg->EvaluateAsInt(Align, Context)) { 6738 const llvm::APSInt &I = Align.Val.getInt(); 6739 if (!I.isPowerOf2()) 6740 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 6741 << Arg->getSourceRange(); 6742 6743 if (I > Sema::MaximumAlignment) 6744 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 6745 << Arg->getSourceRange() << Sema::MaximumAlignment; 6746 } 6747 } 6748 } 6749 6750 if (FD) 6751 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 6752 } 6753 6754 /// CheckConstructorCall - Check a constructor call for correctness and safety 6755 /// properties not enforced by the C type system. 6756 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 6757 ArrayRef<const Expr *> Args, 6758 const FunctionProtoType *Proto, 6759 SourceLocation Loc) { 6760 VariadicCallType CallType = 6761 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 6762 6763 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 6764 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 6765 Context.getPointerType(Ctor->getThisObjectType())); 6766 6767 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 6768 Loc, SourceRange(), CallType); 6769 } 6770 6771 /// CheckFunctionCall - Check a direct function call for various correctness 6772 /// and safety properties not strictly enforced by the C type system. 
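// Illustrative examples of the kinds of calls handled here (hedged; the exact
// diagnostics live in the helpers dispatched below):
//   char buf[8]; strlcpy(buf, src, sizeof(src)); // routed to CheckStrlcpycatArguments
//   memcpy(dst, src, sizeof(dst));               // routed to CheckMemaccessArguments
// in addition to the format-string, nonnull, and alignment checks performed in
// checkCall.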
6773 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 6774 const FunctionProtoType *Proto) { 6775 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 6776 isa<CXXMethodDecl>(FDecl); 6777 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 6778 IsMemberOperatorCall; 6779 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 6780 TheCall->getCallee()); 6781 Expr** Args = TheCall->getArgs(); 6782 unsigned NumArgs = TheCall->getNumArgs(); 6783 6784 Expr *ImplicitThis = nullptr; 6785 if (IsMemberOperatorCall && !FDecl->isStatic()) { 6786 // If this is a call to a non-static member operator, hide the first 6787 // argument from checkCall. 6788 // FIXME: Our choice of AST representation here is less than ideal. 6789 ImplicitThis = Args[0]; 6790 ++Args; 6791 --NumArgs; 6792 } else if (IsMemberFunction && !FDecl->isStatic()) 6793 ImplicitThis = 6794 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 6795 6796 if (ImplicitThis) { 6797 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 6798 // used. 6799 QualType ThisType = ImplicitThis->getType(); 6800 if (!ThisType->isPointerType()) { 6801 assert(!ThisType->isReferenceType()); 6802 ThisType = Context.getPointerType(ThisType); 6803 } 6804 6805 QualType ThisTypeFromDecl = 6806 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 6807 6808 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 6809 ThisTypeFromDecl); 6810 } 6811 6812 checkCall(FDecl, Proto, ImplicitThis, llvm::ArrayRef(Args, NumArgs), 6813 IsMemberFunction, TheCall->getRParenLoc(), 6814 TheCall->getCallee()->getSourceRange(), CallType); 6815 6816 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 6817 // None of the checks below are needed for functions that don't have 6818 // simple names (e.g., C++ conversion functions). 6819 if (!FnInfo) 6820 return false; 6821 6822 // Enforce TCB except for builtin calls, which are always allowed. 6823 if (FDecl->getBuiltinID() == 0) 6824 CheckTCBEnforcement(TheCall->getExprLoc(), FDecl); 6825 6826 CheckAbsoluteValueFunction(TheCall, FDecl); 6827 CheckMaxUnsignedZero(TheCall, FDecl); 6828 6829 if (getLangOpts().ObjC) 6830 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 6831 6832 unsigned CMId = FDecl->getMemoryFunctionKind(); 6833 6834 // Handle memory setting and copying functions. 6835 switch (CMId) { 6836 case 0: 6837 return false; 6838 case Builtin::BIstrlcpy: // fallthrough 6839 case Builtin::BIstrlcat: 6840 CheckStrlcpycatArguments(TheCall, FnInfo); 6841 break; 6842 case Builtin::BIstrncat: 6843 CheckStrncatArguments(TheCall, FnInfo); 6844 break; 6845 case Builtin::BIfree: 6846 CheckFreeArguments(TheCall); 6847 break; 6848 default: 6849 CheckMemaccessArguments(TheCall, CMId, FnInfo); 6850 } 6851 6852 return false; 6853 } 6854 6855 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 6856 ArrayRef<const Expr *> Args) { 6857 VariadicCallType CallType = 6858 Method->isVariadic() ? 
VariadicMethod : VariadicDoesNotApply; 6859 6860 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 6861 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 6862 CallType); 6863 6864 CheckTCBEnforcement(lbrac, Method); 6865 6866 return false; 6867 } 6868 6869 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 6870 const FunctionProtoType *Proto) { 6871 QualType Ty; 6872 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 6873 Ty = V->getType().getNonReferenceType(); 6874 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 6875 Ty = F->getType().getNonReferenceType(); 6876 else 6877 return false; 6878 6879 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 6880 !Ty->isFunctionProtoType()) 6881 return false; 6882 6883 VariadicCallType CallType; 6884 if (!Proto || !Proto->isVariadic()) { 6885 CallType = VariadicDoesNotApply; 6886 } else if (Ty->isBlockPointerType()) { 6887 CallType = VariadicBlock; 6888 } else { // Ty->isFunctionPointerType() 6889 CallType = VariadicFunction; 6890 } 6891 6892 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 6893 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 6894 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 6895 TheCall->getCallee()->getSourceRange(), CallType); 6896 6897 return false; 6898 } 6899 6900 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 6901 /// such as function pointers returned from functions. 6902 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 6903 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 6904 TheCall->getCallee()); 6905 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 6906 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 6907 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 6908 TheCall->getCallee()->getSourceRange(), CallType); 6909 6910 return false; 6911 } 6912 6913 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 6914 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 6915 return false; 6916 6917 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 6918 switch (Op) { 6919 case AtomicExpr::AO__c11_atomic_init: 6920 case AtomicExpr::AO__opencl_atomic_init: 6921 llvm_unreachable("There is no ordering argument for an init"); 6922 6923 case AtomicExpr::AO__c11_atomic_load: 6924 case AtomicExpr::AO__opencl_atomic_load: 6925 case AtomicExpr::AO__hip_atomic_load: 6926 case AtomicExpr::AO__atomic_load_n: 6927 case AtomicExpr::AO__atomic_load: 6928 return OrderingCABI != llvm::AtomicOrderingCABI::release && 6929 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 6930 6931 case AtomicExpr::AO__c11_atomic_store: 6932 case AtomicExpr::AO__opencl_atomic_store: 6933 case AtomicExpr::AO__hip_atomic_store: 6934 case AtomicExpr::AO__atomic_store: 6935 case AtomicExpr::AO__atomic_store_n: 6936 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 6937 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 6938 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 6939 6940 default: 6941 return true; 6942 } 6943 } 6944 6945 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 6946 AtomicExpr::AtomicOp Op) { 6947 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 6948 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6949 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 6950 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 6951 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 
6952 Op); 6953 } 6954 6955 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 6956 SourceLocation RParenLoc, MultiExprArg Args, 6957 AtomicExpr::AtomicOp Op, 6958 AtomicArgumentOrder ArgOrder) { 6959 // All the non-OpenCL operations take one of the following forms. 6960 // The OpenCL operations take the __c11 forms with one extra argument for 6961 // synchronization scope. 6962 enum { 6963 // C __c11_atomic_init(A *, C) 6964 Init, 6965 6966 // C __c11_atomic_load(A *, int) 6967 Load, 6968 6969 // void __atomic_load(A *, CP, int) 6970 LoadCopy, 6971 6972 // void __atomic_store(A *, CP, int) 6973 Copy, 6974 6975 // C __c11_atomic_add(A *, M, int) 6976 Arithmetic, 6977 6978 // C __atomic_exchange_n(A *, CP, int) 6979 Xchg, 6980 6981 // void __atomic_exchange(A *, C *, CP, int) 6982 GNUXchg, 6983 6984 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 6985 C11CmpXchg, 6986 6987 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 6988 GNUCmpXchg 6989 } Form = Init; 6990 6991 const unsigned NumForm = GNUCmpXchg + 1; 6992 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 6993 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 6994 // where: 6995 // C is an appropriate type, 6996 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 6997 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 6998 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 6999 // the int parameters are for orderings. 7000 7001 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 7002 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 7003 "need to update code for modified forms"); 7004 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 7005 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 7006 AtomicExpr::AO__atomic_load, 7007 "need to update code for modified C11 atomics"); 7008 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 7009 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 7010 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 7011 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 7012 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 7013 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 7014 IsOpenCL; 7015 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 7016 Op == AtomicExpr::AO__atomic_store_n || 7017 Op == AtomicExpr::AO__atomic_exchange_n || 7018 Op == AtomicExpr::AO__atomic_compare_exchange_n; 7019 // Bit mask for extra allowed value types other than integers for atomic 7020 // arithmetic operations. Add/sub allow pointer and floating point. Min/max 7021 // allow floating point. 
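// Concretely (derived from the switch below): __atomic_fetch_add on a double
// or on a pointer is accepted (AOEVT_Pointer | AOEVT_FP), __atomic_fetch_max
// on a double is accepted (AOEVT_FP), while bitwise operations such as
// __atomic_fetch_and remain integer-only and are rejected for floating-point
// or pointer operands via the err_atomic_op_needs_atomic_int* diagnostics.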
7022 enum ArithOpExtraValueType { 7023 AOEVT_None = 0, 7024 AOEVT_Pointer = 1, 7025 AOEVT_FP = 2, 7026 }; 7027 unsigned ArithAllows = AOEVT_None; 7028 7029 switch (Op) { 7030 case AtomicExpr::AO__c11_atomic_init: 7031 case AtomicExpr::AO__opencl_atomic_init: 7032 Form = Init; 7033 break; 7034 7035 case AtomicExpr::AO__c11_atomic_load: 7036 case AtomicExpr::AO__opencl_atomic_load: 7037 case AtomicExpr::AO__hip_atomic_load: 7038 case AtomicExpr::AO__atomic_load_n: 7039 Form = Load; 7040 break; 7041 7042 case AtomicExpr::AO__atomic_load: 7043 Form = LoadCopy; 7044 break; 7045 7046 case AtomicExpr::AO__c11_atomic_store: 7047 case AtomicExpr::AO__opencl_atomic_store: 7048 case AtomicExpr::AO__hip_atomic_store: 7049 case AtomicExpr::AO__atomic_store: 7050 case AtomicExpr::AO__atomic_store_n: 7051 Form = Copy; 7052 break; 7053 case AtomicExpr::AO__atomic_fetch_add: 7054 case AtomicExpr::AO__atomic_fetch_sub: 7055 case AtomicExpr::AO__atomic_add_fetch: 7056 case AtomicExpr::AO__atomic_sub_fetch: 7057 case AtomicExpr::AO__c11_atomic_fetch_add: 7058 case AtomicExpr::AO__c11_atomic_fetch_sub: 7059 case AtomicExpr::AO__opencl_atomic_fetch_add: 7060 case AtomicExpr::AO__opencl_atomic_fetch_sub: 7061 case AtomicExpr::AO__hip_atomic_fetch_add: 7062 case AtomicExpr::AO__hip_atomic_fetch_sub: 7063 ArithAllows = AOEVT_Pointer | AOEVT_FP; 7064 Form = Arithmetic; 7065 break; 7066 case AtomicExpr::AO__atomic_fetch_max: 7067 case AtomicExpr::AO__atomic_fetch_min: 7068 case AtomicExpr::AO__atomic_max_fetch: 7069 case AtomicExpr::AO__atomic_min_fetch: 7070 case AtomicExpr::AO__c11_atomic_fetch_max: 7071 case AtomicExpr::AO__c11_atomic_fetch_min: 7072 case AtomicExpr::AO__opencl_atomic_fetch_max: 7073 case AtomicExpr::AO__opencl_atomic_fetch_min: 7074 case AtomicExpr::AO__hip_atomic_fetch_max: 7075 case AtomicExpr::AO__hip_atomic_fetch_min: 7076 ArithAllows = AOEVT_FP; 7077 Form = Arithmetic; 7078 break; 7079 case AtomicExpr::AO__c11_atomic_fetch_and: 7080 case AtomicExpr::AO__c11_atomic_fetch_or: 7081 case AtomicExpr::AO__c11_atomic_fetch_xor: 7082 case AtomicExpr::AO__hip_atomic_fetch_and: 7083 case AtomicExpr::AO__hip_atomic_fetch_or: 7084 case AtomicExpr::AO__hip_atomic_fetch_xor: 7085 case AtomicExpr::AO__c11_atomic_fetch_nand: 7086 case AtomicExpr::AO__opencl_atomic_fetch_and: 7087 case AtomicExpr::AO__opencl_atomic_fetch_or: 7088 case AtomicExpr::AO__opencl_atomic_fetch_xor: 7089 case AtomicExpr::AO__atomic_fetch_and: 7090 case AtomicExpr::AO__atomic_fetch_or: 7091 case AtomicExpr::AO__atomic_fetch_xor: 7092 case AtomicExpr::AO__atomic_fetch_nand: 7093 case AtomicExpr::AO__atomic_and_fetch: 7094 case AtomicExpr::AO__atomic_or_fetch: 7095 case AtomicExpr::AO__atomic_xor_fetch: 7096 case AtomicExpr::AO__atomic_nand_fetch: 7097 Form = Arithmetic; 7098 break; 7099 7100 case AtomicExpr::AO__c11_atomic_exchange: 7101 case AtomicExpr::AO__hip_atomic_exchange: 7102 case AtomicExpr::AO__opencl_atomic_exchange: 7103 case AtomicExpr::AO__atomic_exchange_n: 7104 Form = Xchg; 7105 break; 7106 7107 case AtomicExpr::AO__atomic_exchange: 7108 Form = GNUXchg; 7109 break; 7110 7111 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 7112 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 7113 case AtomicExpr::AO__hip_atomic_compare_exchange_strong: 7114 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 7115 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 7116 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 7117 Form = C11CmpXchg; 7118 break; 7119 7120 case 
AtomicExpr::AO__atomic_compare_exchange: 7121 case AtomicExpr::AO__atomic_compare_exchange_n: 7122 Form = GNUCmpXchg; 7123 break; 7124 } 7125 7126 unsigned AdjustedNumArgs = NumArgs[Form]; 7127 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init) 7128 ++AdjustedNumArgs; 7129 // Check we have the right number of arguments. 7130 if (Args.size() < AdjustedNumArgs) { 7131 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 7132 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 7133 << ExprRange; 7134 return ExprError(); 7135 } else if (Args.size() > AdjustedNumArgs) { 7136 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 7137 diag::err_typecheck_call_too_many_args) 7138 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 7139 << ExprRange; 7140 return ExprError(); 7141 } 7142 7143 // Inspect the first argument of the atomic operation. 7144 Expr *Ptr = Args[0]; 7145 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 7146 if (ConvertedPtr.isInvalid()) 7147 return ExprError(); 7148 7149 Ptr = ConvertedPtr.get(); 7150 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 7151 if (!pointerType) { 7152 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 7153 << Ptr->getType() << Ptr->getSourceRange(); 7154 return ExprError(); 7155 } 7156 7157 // For a __c11 builtin, this should be a pointer to an _Atomic type. 7158 QualType AtomTy = pointerType->getPointeeType(); // 'A' 7159 QualType ValType = AtomTy; // 'C' 7160 if (IsC11) { 7161 if (!AtomTy->isAtomicType()) { 7162 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 7163 << Ptr->getType() << Ptr->getSourceRange(); 7164 return ExprError(); 7165 } 7166 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 7167 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 7168 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 7169 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 7170 << Ptr->getSourceRange(); 7171 return ExprError(); 7172 } 7173 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 7174 } else if (Form != Load && Form != LoadCopy) { 7175 if (ValType.isConstQualified()) { 7176 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 7177 << Ptr->getType() << Ptr->getSourceRange(); 7178 return ExprError(); 7179 } 7180 } 7181 7182 // For an arithmetic operation, the implied arithmetic must be well-formed. 7183 if (Form == Arithmetic) { 7184 // GCC does not enforce these rules for GNU atomics, but we do to help catch 7185 // trivial type errors. 7186 auto IsAllowedValueType = [&](QualType ValType, 7187 unsigned AllowedType) -> bool { 7188 if (ValType->isIntegerType()) 7189 return true; 7190 if (ValType->isPointerType()) 7191 return AllowedType & AOEVT_Pointer; 7192 if (!(ValType->isFloatingType() && (AllowedType & AOEVT_FP))) 7193 return false; 7194 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 7195 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 7196 &Context.getTargetInfo().getLongDoubleFormat() == 7197 &llvm::APFloat::x87DoubleExtended()) 7198 return false; 7199 return true; 7200 }; 7201 if (!IsAllowedValueType(ValType, ArithAllows)) { 7202 auto DID = ArithAllows & AOEVT_FP 7203 ? (ArithAllows & AOEVT_Pointer 7204 ? 
diag::err_atomic_op_needs_atomic_int_ptr_or_fp 7205 : diag::err_atomic_op_needs_atomic_int_or_fp) 7206 : diag::err_atomic_op_needs_atomic_int; 7207 Diag(ExprRange.getBegin(), DID) 7208 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 7209 return ExprError(); 7210 } 7211 if (IsC11 && ValType->isPointerType() && 7212 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 7213 diag::err_incomplete_type)) { 7214 return ExprError(); 7215 } 7216 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 7217 // For __atomic_*_n operations, the value type must be a scalar integral or 7218 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 7219 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 7220 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 7221 return ExprError(); 7222 } 7223 7224 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 7225 !AtomTy->isScalarType()) { 7226 // For GNU atomics, require a trivially-copyable type. This is not part of 7227 // the GNU atomics specification but we enforce it for consistency with 7228 // other atomics which generally all require a trivially-copyable type. This 7229 // is because atomics just copy bits. 7230 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 7231 << Ptr->getType() << Ptr->getSourceRange(); 7232 return ExprError(); 7233 } 7234 7235 switch (ValType.getObjCLifetime()) { 7236 case Qualifiers::OCL_None: 7237 case Qualifiers::OCL_ExplicitNone: 7238 // okay 7239 break; 7240 7241 case Qualifiers::OCL_Weak: 7242 case Qualifiers::OCL_Strong: 7243 case Qualifiers::OCL_Autoreleasing: 7244 // FIXME: Can this happen? By this point, ValType should be known 7245 // to be trivially copyable. 7246 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 7247 << ValType << Ptr->getSourceRange(); 7248 return ExprError(); 7249 } 7250 7251 // All atomic operations have an overload which takes a pointer to a volatile 7252 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 7253 // into the result or the other operands. Similarly atomic_load takes a 7254 // pointer to a const 'A'. 7255 ValType.removeLocalVolatile(); 7256 ValType.removeLocalConst(); 7257 QualType ResultType = ValType; 7258 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 7259 Form == Init) 7260 ResultType = Context.VoidTy; 7261 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 7262 ResultType = Context.BoolTy; 7263 7264 // The type of a parameter passed 'by value'. In the GNU atomics, such 7265 // arguments are actually passed as pointers. 
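  // For example (illustrative): __c11_atomic_exchange(p, val, order) receives
  // 'val' by value, so 'CP' stays 'C'; the GNU form
  // __atomic_exchange(p, &val, &ret, order) passes the value through a
  // pointer, so 'CP' becomes 'C *' below.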
7266 QualType ByValType = ValType; // 'CP' 7267 bool IsPassedByAddress = false; 7268 if (!IsC11 && !IsHIP && !IsN) { 7269 ByValType = Ptr->getType(); 7270 IsPassedByAddress = true; 7271 } 7272 7273 SmallVector<Expr *, 5> APIOrderedArgs; 7274 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 7275 APIOrderedArgs.push_back(Args[0]); 7276 switch (Form) { 7277 case Init: 7278 case Load: 7279 APIOrderedArgs.push_back(Args[1]); // Val1/Order 7280 break; 7281 case LoadCopy: 7282 case Copy: 7283 case Arithmetic: 7284 case Xchg: 7285 APIOrderedArgs.push_back(Args[2]); // Val1 7286 APIOrderedArgs.push_back(Args[1]); // Order 7287 break; 7288 case GNUXchg: 7289 APIOrderedArgs.push_back(Args[2]); // Val1 7290 APIOrderedArgs.push_back(Args[3]); // Val2 7291 APIOrderedArgs.push_back(Args[1]); // Order 7292 break; 7293 case C11CmpXchg: 7294 APIOrderedArgs.push_back(Args[2]); // Val1 7295 APIOrderedArgs.push_back(Args[4]); // Val2 7296 APIOrderedArgs.push_back(Args[1]); // Order 7297 APIOrderedArgs.push_back(Args[3]); // OrderFail 7298 break; 7299 case GNUCmpXchg: 7300 APIOrderedArgs.push_back(Args[2]); // Val1 7301 APIOrderedArgs.push_back(Args[4]); // Val2 7302 APIOrderedArgs.push_back(Args[5]); // Weak 7303 APIOrderedArgs.push_back(Args[1]); // Order 7304 APIOrderedArgs.push_back(Args[3]); // OrderFail 7305 break; 7306 } 7307 } else 7308 APIOrderedArgs.append(Args.begin(), Args.end()); 7309 7310 // The first argument's non-CV pointer type is used to deduce the type of 7311 // subsequent arguments, except for: 7312 // - weak flag (always converted to bool) 7313 // - memory order (always converted to int) 7314 // - scope (always converted to int) 7315 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 7316 QualType Ty; 7317 if (i < NumVals[Form] + 1) { 7318 switch (i) { 7319 case 0: 7320 // The first argument is always a pointer. It has a fixed type. 7321 // It is always dereferenced, a nullptr is undefined. 7322 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 7323 // Nothing else to do: we already know all we want about this pointer. 7324 continue; 7325 case 1: 7326 // The second argument is the non-atomic operand. For arithmetic, this 7327 // is always passed by value, and for a compare_exchange it is always 7328 // passed by address. For the rest, GNU uses by-address and C11 uses 7329 // by-value. 7330 assert(Form != Load); 7331 if (Form == Arithmetic && ValType->isPointerType()) 7332 Ty = Context.getPointerDiffType(); 7333 else if (Form == Init || Form == Arithmetic) 7334 Ty = ValType; 7335 else if (Form == Copy || Form == Xchg) { 7336 if (IsPassedByAddress) { 7337 // The value pointer is always dereferenced, a nullptr is undefined. 7338 CheckNonNullArgument(*this, APIOrderedArgs[i], 7339 ExprRange.getBegin()); 7340 } 7341 Ty = ByValType; 7342 } else { 7343 Expr *ValArg = APIOrderedArgs[i]; 7344 // The value pointer is always dereferenced, a nullptr is undefined. 7345 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 7346 LangAS AS = LangAS::Default; 7347 // Keep address space of non-atomic pointer type. 7348 if (const PointerType *PtrTy = 7349 ValArg->getType()->getAs<PointerType>()) { 7350 AS = PtrTy->getPointeeType().getAddressSpace(); 7351 } 7352 Ty = Context.getPointerType( 7353 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 7354 } 7355 break; 7356 case 2: 7357 // The third argument to compare_exchange / GNU exchange is the desired 7358 // value, either by-value (for the C11 and *_n variant) or as a pointer. 
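      // e.g. (illustrative): __atomic_compare_exchange(p, &expected, &desired,
      // weak, success_order, failure_order) passes 'desired' by address,
      // whereas __c11_atomic_compare_exchange_strong passes it by value.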
7359 if (IsPassedByAddress) 7360 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 7361 Ty = ByValType; 7362 break; 7363 case 3: 7364 // The fourth argument to GNU compare_exchange is a 'weak' flag. 7365 Ty = Context.BoolTy; 7366 break; 7367 } 7368 } else { 7369 // The order(s) and scope are always converted to int. 7370 Ty = Context.IntTy; 7371 } 7372 7373 InitializedEntity Entity = 7374 InitializedEntity::InitializeParameter(Context, Ty, false); 7375 ExprResult Arg = APIOrderedArgs[i]; 7376 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7377 if (Arg.isInvalid()) 7378 return true; 7379 APIOrderedArgs[i] = Arg.get(); 7380 } 7381 7382 // Permute the arguments into a 'consistent' order. 7383 SmallVector<Expr*, 5> SubExprs; 7384 SubExprs.push_back(Ptr); 7385 switch (Form) { 7386 case Init: 7387 // Note, AtomicExpr::getVal1() has a special case for this atomic. 7388 SubExprs.push_back(APIOrderedArgs[1]); // Val1 7389 break; 7390 case Load: 7391 SubExprs.push_back(APIOrderedArgs[1]); // Order 7392 break; 7393 case LoadCopy: 7394 case Copy: 7395 case Arithmetic: 7396 case Xchg: 7397 SubExprs.push_back(APIOrderedArgs[2]); // Order 7398 SubExprs.push_back(APIOrderedArgs[1]); // Val1 7399 break; 7400 case GNUXchg: 7401 // Note, AtomicExpr::getVal2() has a special case for this atomic. 7402 SubExprs.push_back(APIOrderedArgs[3]); // Order 7403 SubExprs.push_back(APIOrderedArgs[1]); // Val1 7404 SubExprs.push_back(APIOrderedArgs[2]); // Val2 7405 break; 7406 case C11CmpXchg: 7407 SubExprs.push_back(APIOrderedArgs[3]); // Order 7408 SubExprs.push_back(APIOrderedArgs[1]); // Val1 7409 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 7410 SubExprs.push_back(APIOrderedArgs[2]); // Val2 7411 break; 7412 case GNUCmpXchg: 7413 SubExprs.push_back(APIOrderedArgs[4]); // Order 7414 SubExprs.push_back(APIOrderedArgs[1]); // Val1 7415 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 7416 SubExprs.push_back(APIOrderedArgs[2]); // Val2 7417 SubExprs.push_back(APIOrderedArgs[3]); // Weak 7418 break; 7419 } 7420 7421 if (SubExprs.size() >= 2 && Form != Init) { 7422 if (std::optional<llvm::APSInt> Result = 7423 SubExprs[1]->getIntegerConstantExpr(Context)) 7424 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 7425 Diag(SubExprs[1]->getBeginLoc(), 7426 diag::warn_atomic_op_has_invalid_memory_order) 7427 << SubExprs[1]->getSourceRange(); 7428 } 7429 7430 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 7431 auto *Scope = Args[Args.size() - 1]; 7432 if (std::optional<llvm::APSInt> Result = 7433 Scope->getIntegerConstantExpr(Context)) { 7434 if (!ScopeModel->isValid(Result->getZExtValue())) 7435 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 7436 << Scope->getSourceRange(); 7437 } 7438 SubExprs.push_back(Scope); 7439 } 7440 7441 AtomicExpr *AE = new (Context) 7442 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 7443 7444 if ((Op == AtomicExpr::AO__c11_atomic_load || 7445 Op == AtomicExpr::AO__c11_atomic_store || 7446 Op == AtomicExpr::AO__opencl_atomic_load || 7447 Op == AtomicExpr::AO__hip_atomic_load || 7448 Op == AtomicExpr::AO__opencl_atomic_store || 7449 Op == AtomicExpr::AO__hip_atomic_store) && 7450 Context.AtomicUsesUnsupportedLibcall(AE)) 7451 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 7452 << ((Op == AtomicExpr::AO__c11_atomic_load || 7453 Op == AtomicExpr::AO__opencl_atomic_load || 7454 Op == AtomicExpr::AO__hip_atomic_load) 7455 ? 
0 7456 : 1); 7457 7458 if (ValType->isBitIntType()) { 7459 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit); 7460 return ExprError(); 7461 } 7462 7463 return AE; 7464 } 7465 7466 /// checkBuiltinArgument - Given a call to a builtin function, perform 7467 /// normal type-checking on the given argument, updating the call in 7468 /// place. This is useful when a builtin function requires custom 7469 /// type-checking for some of its arguments but not necessarily all of 7470 /// them. 7471 /// 7472 /// Returns true on error. 7473 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 7474 FunctionDecl *Fn = E->getDirectCallee(); 7475 assert(Fn && "builtin call without direct callee!"); 7476 7477 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 7478 InitializedEntity Entity = 7479 InitializedEntity::InitializeParameter(S.Context, Param); 7480 7481 ExprResult Arg = E->getArg(ArgIndex); 7482 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 7483 if (Arg.isInvalid()) 7484 return true; 7485 7486 E->setArg(ArgIndex, Arg.get()); 7487 return false; 7488 } 7489 7490 bool Sema::BuiltinWasmRefNullExtern(CallExpr *TheCall) { 7491 if (TheCall->getNumArgs() != 0) 7492 return true; 7493 7494 TheCall->setType(Context.getWebAssemblyExternrefType()); 7495 7496 return false; 7497 } 7498 7499 bool Sema::BuiltinWasmRefNullFunc(CallExpr *TheCall) { 7500 if (TheCall->getNumArgs() != 0) { 7501 Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args) 7502 << 0 /*function call*/ << 0 << TheCall->getNumArgs(); 7503 return true; 7504 } 7505 7506 // This custom type checking code ensures that the nodes are as expected 7507 // in order to later on generate the necessary builtin. 7508 QualType Pointee = Context.getFunctionType(Context.VoidTy, {}, {}); 7509 QualType Type = Context.getPointerType(Pointee); 7510 Pointee = Context.getAddrSpaceQualType(Pointee, LangAS::wasm_funcref); 7511 Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type, 7512 Context.getPointerType(Pointee)); 7513 TheCall->setType(Type); 7514 7515 return false; 7516 } 7517 7518 /// We have a call to a function like __sync_fetch_and_add, which is an 7519 /// overloaded function based on the pointer type of its first argument. 7520 /// The main BuildCallExpr routines have already promoted the types of 7521 /// arguments because all of these calls are prototyped as void(...). 7522 /// 7523 /// This function goes through and does final semantic checking for these 7524 /// builtins, as well as generating any warnings. 7525 ExprResult 7526 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 7527 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 7528 Expr *Callee = TheCall->getCallee(); 7529 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 7530 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 7531 7532 // Ensure that we have at least one argument to do type inference from. 7533 if (TheCall->getNumArgs() < 1) { 7534 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 7535 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 7536 return ExprError(); 7537 } 7538 7539 // Inspect the first argument of the atomic builtin. This should always be 7540 // a pointer type, whose element is an integral scalar or pointer type. 7541 // Because it is a pointer type, we don't have to worry about any implicit 7542 // casts here. 7543 // FIXME: We don't allow floating point scalars as input. 
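  // Illustrative example of what this routine rebuilds: given 'short *p;' on a
  // target where short is 2 bytes, __sync_fetch_and_add(p, 1) is re-targeted
  // below at the size-specific builtin __sync_fetch_and_add_2 and the call's
  // result type is set back to 'short'.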
7544 Expr *FirstArg = TheCall->getArg(0); 7545 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 7546 if (FirstArgResult.isInvalid()) 7547 return ExprError(); 7548 FirstArg = FirstArgResult.get(); 7549 TheCall->setArg(0, FirstArg); 7550 7551 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 7552 if (!pointerType) { 7553 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 7554 << FirstArg->getType() << FirstArg->getSourceRange(); 7555 return ExprError(); 7556 } 7557 7558 QualType ValType = pointerType->getPointeeType(); 7559 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 7560 !ValType->isBlockPointerType()) { 7561 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 7562 << FirstArg->getType() << FirstArg->getSourceRange(); 7563 return ExprError(); 7564 } 7565 7566 if (ValType.isConstQualified()) { 7567 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 7568 << FirstArg->getType() << FirstArg->getSourceRange(); 7569 return ExprError(); 7570 } 7571 7572 switch (ValType.getObjCLifetime()) { 7573 case Qualifiers::OCL_None: 7574 case Qualifiers::OCL_ExplicitNone: 7575 // okay 7576 break; 7577 7578 case Qualifiers::OCL_Weak: 7579 case Qualifiers::OCL_Strong: 7580 case Qualifiers::OCL_Autoreleasing: 7581 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 7582 << ValType << FirstArg->getSourceRange(); 7583 return ExprError(); 7584 } 7585 7586 // Strip any qualifiers off ValType. 7587 ValType = ValType.getUnqualifiedType(); 7588 7589 // The majority of builtins return a value, but a few have special return 7590 // types, so allow them to override appropriately below. 7591 QualType ResultType = ValType; 7592 7593 // We need to figure out which concrete builtin this maps onto. For example, 7594 // __sync_fetch_and_add with a 2 byte object turns into 7595 // __sync_fetch_and_add_2. 7596 #define BUILTIN_ROW(x) \ 7597 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 7598 Builtin::BI##x##_8, Builtin::BI##x##_16 } 7599 7600 static const unsigned BuiltinIndices[][5] = { 7601 BUILTIN_ROW(__sync_fetch_and_add), 7602 BUILTIN_ROW(__sync_fetch_and_sub), 7603 BUILTIN_ROW(__sync_fetch_and_or), 7604 BUILTIN_ROW(__sync_fetch_and_and), 7605 BUILTIN_ROW(__sync_fetch_and_xor), 7606 BUILTIN_ROW(__sync_fetch_and_nand), 7607 7608 BUILTIN_ROW(__sync_add_and_fetch), 7609 BUILTIN_ROW(__sync_sub_and_fetch), 7610 BUILTIN_ROW(__sync_and_and_fetch), 7611 BUILTIN_ROW(__sync_or_and_fetch), 7612 BUILTIN_ROW(__sync_xor_and_fetch), 7613 BUILTIN_ROW(__sync_nand_and_fetch), 7614 7615 BUILTIN_ROW(__sync_val_compare_and_swap), 7616 BUILTIN_ROW(__sync_bool_compare_and_swap), 7617 BUILTIN_ROW(__sync_lock_test_and_set), 7618 BUILTIN_ROW(__sync_lock_release), 7619 BUILTIN_ROW(__sync_swap) 7620 }; 7621 #undef BUILTIN_ROW 7622 7623 // Determine the index of the size. 7624 unsigned SizeIndex; 7625 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 7626 case 1: SizeIndex = 0; break; 7627 case 2: SizeIndex = 1; break; 7628 case 4: SizeIndex = 2; break; 7629 case 8: SizeIndex = 3; break; 7630 case 16: SizeIndex = 4; break; 7631 default: 7632 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 7633 << FirstArg->getType() << FirstArg->getSourceRange(); 7634 return ExprError(); 7635 } 7636 7637 // Each of these builtins has one pointer argument, followed by some number of 7638 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 7639 // that we ignore. 
Find out which row of BuiltinIndices to read from as well 7640 // as the number of fixed args. 7641 unsigned BuiltinID = FDecl->getBuiltinID(); 7642 unsigned BuiltinIndex, NumFixed = 1; 7643 bool WarnAboutSemanticsChange = false; 7644 switch (BuiltinID) { 7645 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 7646 case Builtin::BI__sync_fetch_and_add: 7647 case Builtin::BI__sync_fetch_and_add_1: 7648 case Builtin::BI__sync_fetch_and_add_2: 7649 case Builtin::BI__sync_fetch_and_add_4: 7650 case Builtin::BI__sync_fetch_and_add_8: 7651 case Builtin::BI__sync_fetch_and_add_16: 7652 BuiltinIndex = 0; 7653 break; 7654 7655 case Builtin::BI__sync_fetch_and_sub: 7656 case Builtin::BI__sync_fetch_and_sub_1: 7657 case Builtin::BI__sync_fetch_and_sub_2: 7658 case Builtin::BI__sync_fetch_and_sub_4: 7659 case Builtin::BI__sync_fetch_and_sub_8: 7660 case Builtin::BI__sync_fetch_and_sub_16: 7661 BuiltinIndex = 1; 7662 break; 7663 7664 case Builtin::BI__sync_fetch_and_or: 7665 case Builtin::BI__sync_fetch_and_or_1: 7666 case Builtin::BI__sync_fetch_and_or_2: 7667 case Builtin::BI__sync_fetch_and_or_4: 7668 case Builtin::BI__sync_fetch_and_or_8: 7669 case Builtin::BI__sync_fetch_and_or_16: 7670 BuiltinIndex = 2; 7671 break; 7672 7673 case Builtin::BI__sync_fetch_and_and: 7674 case Builtin::BI__sync_fetch_and_and_1: 7675 case Builtin::BI__sync_fetch_and_and_2: 7676 case Builtin::BI__sync_fetch_and_and_4: 7677 case Builtin::BI__sync_fetch_and_and_8: 7678 case Builtin::BI__sync_fetch_and_and_16: 7679 BuiltinIndex = 3; 7680 break; 7681 7682 case Builtin::BI__sync_fetch_and_xor: 7683 case Builtin::BI__sync_fetch_and_xor_1: 7684 case Builtin::BI__sync_fetch_and_xor_2: 7685 case Builtin::BI__sync_fetch_and_xor_4: 7686 case Builtin::BI__sync_fetch_and_xor_8: 7687 case Builtin::BI__sync_fetch_and_xor_16: 7688 BuiltinIndex = 4; 7689 break; 7690 7691 case Builtin::BI__sync_fetch_and_nand: 7692 case Builtin::BI__sync_fetch_and_nand_1: 7693 case Builtin::BI__sync_fetch_and_nand_2: 7694 case Builtin::BI__sync_fetch_and_nand_4: 7695 case Builtin::BI__sync_fetch_and_nand_8: 7696 case Builtin::BI__sync_fetch_and_nand_16: 7697 BuiltinIndex = 5; 7698 WarnAboutSemanticsChange = true; 7699 break; 7700 7701 case Builtin::BI__sync_add_and_fetch: 7702 case Builtin::BI__sync_add_and_fetch_1: 7703 case Builtin::BI__sync_add_and_fetch_2: 7704 case Builtin::BI__sync_add_and_fetch_4: 7705 case Builtin::BI__sync_add_and_fetch_8: 7706 case Builtin::BI__sync_add_and_fetch_16: 7707 BuiltinIndex = 6; 7708 break; 7709 7710 case Builtin::BI__sync_sub_and_fetch: 7711 case Builtin::BI__sync_sub_and_fetch_1: 7712 case Builtin::BI__sync_sub_and_fetch_2: 7713 case Builtin::BI__sync_sub_and_fetch_4: 7714 case Builtin::BI__sync_sub_and_fetch_8: 7715 case Builtin::BI__sync_sub_and_fetch_16: 7716 BuiltinIndex = 7; 7717 break; 7718 7719 case Builtin::BI__sync_and_and_fetch: 7720 case Builtin::BI__sync_and_and_fetch_1: 7721 case Builtin::BI__sync_and_and_fetch_2: 7722 case Builtin::BI__sync_and_and_fetch_4: 7723 case Builtin::BI__sync_and_and_fetch_8: 7724 case Builtin::BI__sync_and_and_fetch_16: 7725 BuiltinIndex = 8; 7726 break; 7727 7728 case Builtin::BI__sync_or_and_fetch: 7729 case Builtin::BI__sync_or_and_fetch_1: 7730 case Builtin::BI__sync_or_and_fetch_2: 7731 case Builtin::BI__sync_or_and_fetch_4: 7732 case Builtin::BI__sync_or_and_fetch_8: 7733 case Builtin::BI__sync_or_and_fetch_16: 7734 BuiltinIndex = 9; 7735 break; 7736 7737 case Builtin::BI__sync_xor_and_fetch: 7738 case Builtin::BI__sync_xor_and_fetch_1: 7739 case 
Builtin::BI__sync_xor_and_fetch_2: 7740 case Builtin::BI__sync_xor_and_fetch_4: 7741 case Builtin::BI__sync_xor_and_fetch_8: 7742 case Builtin::BI__sync_xor_and_fetch_16: 7743 BuiltinIndex = 10; 7744 break; 7745 7746 case Builtin::BI__sync_nand_and_fetch: 7747 case Builtin::BI__sync_nand_and_fetch_1: 7748 case Builtin::BI__sync_nand_and_fetch_2: 7749 case Builtin::BI__sync_nand_and_fetch_4: 7750 case Builtin::BI__sync_nand_and_fetch_8: 7751 case Builtin::BI__sync_nand_and_fetch_16: 7752 BuiltinIndex = 11; 7753 WarnAboutSemanticsChange = true; 7754 break; 7755 7756 case Builtin::BI__sync_val_compare_and_swap: 7757 case Builtin::BI__sync_val_compare_and_swap_1: 7758 case Builtin::BI__sync_val_compare_and_swap_2: 7759 case Builtin::BI__sync_val_compare_and_swap_4: 7760 case Builtin::BI__sync_val_compare_and_swap_8: 7761 case Builtin::BI__sync_val_compare_and_swap_16: 7762 BuiltinIndex = 12; 7763 NumFixed = 2; 7764 break; 7765 7766 case Builtin::BI__sync_bool_compare_and_swap: 7767 case Builtin::BI__sync_bool_compare_and_swap_1: 7768 case Builtin::BI__sync_bool_compare_and_swap_2: 7769 case Builtin::BI__sync_bool_compare_and_swap_4: 7770 case Builtin::BI__sync_bool_compare_and_swap_8: 7771 case Builtin::BI__sync_bool_compare_and_swap_16: 7772 BuiltinIndex = 13; 7773 NumFixed = 2; 7774 ResultType = Context.BoolTy; 7775 break; 7776 7777 case Builtin::BI__sync_lock_test_and_set: 7778 case Builtin::BI__sync_lock_test_and_set_1: 7779 case Builtin::BI__sync_lock_test_and_set_2: 7780 case Builtin::BI__sync_lock_test_and_set_4: 7781 case Builtin::BI__sync_lock_test_and_set_8: 7782 case Builtin::BI__sync_lock_test_and_set_16: 7783 BuiltinIndex = 14; 7784 break; 7785 7786 case Builtin::BI__sync_lock_release: 7787 case Builtin::BI__sync_lock_release_1: 7788 case Builtin::BI__sync_lock_release_2: 7789 case Builtin::BI__sync_lock_release_4: 7790 case Builtin::BI__sync_lock_release_8: 7791 case Builtin::BI__sync_lock_release_16: 7792 BuiltinIndex = 15; 7793 NumFixed = 0; 7794 ResultType = Context.VoidTy; 7795 break; 7796 7797 case Builtin::BI__sync_swap: 7798 case Builtin::BI__sync_swap_1: 7799 case Builtin::BI__sync_swap_2: 7800 case Builtin::BI__sync_swap_4: 7801 case Builtin::BI__sync_swap_8: 7802 case Builtin::BI__sync_swap_16: 7803 BuiltinIndex = 16; 7804 break; 7805 } 7806 7807 // Now that we know how many fixed arguments we expect, first check that we 7808 // have at least that many. 7809 if (TheCall->getNumArgs() < 1+NumFixed) { 7810 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 7811 << 0 << 1 + NumFixed << TheCall->getNumArgs() 7812 << Callee->getSourceRange(); 7813 return ExprError(); 7814 } 7815 7816 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 7817 << Callee->getSourceRange(); 7818 7819 if (WarnAboutSemanticsChange) { 7820 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 7821 << Callee->getSourceRange(); 7822 } 7823 7824 // Get the decl for the concrete builtin from this, we can tell what the 7825 // concrete integer type we should convert to is. 7826 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 7827 StringRef NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 7828 FunctionDecl *NewBuiltinDecl; 7829 if (NewBuiltinID == BuiltinID) 7830 NewBuiltinDecl = FDecl; 7831 else { 7832 // Perform builtin lookup to avoid redeclaring it. 
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly. Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type. Check
    // to see if there is a potentially weird extension going on here. This can
    // happen when you do an atomic operation on something like a char* and
    // pass in 42. The 42 gets converted to char. This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit problematic uses of bit-precise integer types with atomic
  // builtins. The arguments would have already been converted to the first
  // argument's type, so we only need to check the first argument.
  const auto *BitIntValType = ValType->getAs<BitIntType>();
  if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
7902 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 7903 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 7904 DeclRefExpr *DRE = 7905 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 7906 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 7907 unsigned BuiltinID = FDecl->getBuiltinID(); 7908 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 7909 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 7910 "Unexpected nontemporal load/store builtin!"); 7911 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 7912 unsigned numArgs = isStore ? 2 : 1; 7913 7914 // Ensure that we have the proper number of arguments. 7915 if (checkArgCount(*this, TheCall, numArgs)) 7916 return ExprError(); 7917 7918 // Inspect the last argument of the nontemporal builtin. This should always 7919 // be a pointer type, from which we imply the type of the memory access. 7920 // Because it is a pointer type, we don't have to worry about any implicit 7921 // casts here. 7922 Expr *PointerArg = TheCall->getArg(numArgs - 1); 7923 ExprResult PointerArgResult = 7924 DefaultFunctionArrayLvalueConversion(PointerArg); 7925 7926 if (PointerArgResult.isInvalid()) 7927 return ExprError(); 7928 PointerArg = PointerArgResult.get(); 7929 TheCall->setArg(numArgs - 1, PointerArg); 7930 7931 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 7932 if (!pointerType) { 7933 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 7934 << PointerArg->getType() << PointerArg->getSourceRange(); 7935 return ExprError(); 7936 } 7937 7938 QualType ValType = pointerType->getPointeeType(); 7939 7940 // Strip any qualifiers off ValType. 7941 ValType = ValType.getUnqualifiedType(); 7942 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 7943 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 7944 !ValType->isVectorType()) { 7945 Diag(DRE->getBeginLoc(), 7946 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 7947 << PointerArg->getType() << PointerArg->getSourceRange(); 7948 return ExprError(); 7949 } 7950 7951 if (!isStore) { 7952 TheCall->setType(ValType); 7953 return TheCallResult; 7954 } 7955 7956 ExprResult ValArg = TheCall->getArg(0); 7957 InitializedEntity Entity = InitializedEntity::InitializeParameter( 7958 Context, ValType, /*consume*/ false); 7959 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 7960 if (ValArg.isInvalid()) 7961 return ExprError(); 7962 7963 TheCall->setArg(0, ValArg.get()); 7964 TheCall->setType(Context.VoidTy); 7965 return TheCallResult; 7966 } 7967 7968 /// CheckObjCString - Checks that the argument to the builtin 7969 /// CFString constructor is correct 7970 /// Note: It might also make sense to do the UTF-16 conversion here (would 7971 /// simplify the backend). 
bool Sema::CheckObjCString(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);

  if (!Literal || !Literal->isOrdinary()) {
    Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
        << Arg->getSourceRange();
    return true;
  }

  if (Literal->containsNonAsciiOrNull()) {
    StringRef String = Literal->getString();
    unsigned NumBytes = String.size();
    SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
    const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
    llvm::UTF16 *ToPtr = &ToBuf[0];

    llvm::ConversionResult Result =
        llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
                                 ToPtr + NumBytes, llvm::strictConversion);
    // Check for conversion failure.
    if (Result != llvm::conversionOK)
      Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
          << Arg->getSourceRange();
  }
  return false;
}

/// CheckOSLogFormatStringArg - Checks that the format string argument to the
/// os_log() and os_trace() functions is correct, and converts it to
/// const char *.
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}

/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
  bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
                    TT.getArch() == llvm::Triple::aarch64_32);
  bool IsWindows = TT.isOSWindows();
  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
  if (IsX64 || IsAArch64) {
    CallingConv CC = CC_C;
    if (const FunctionDecl *FD = S.getCurFunctionDecl())
      CC = FD->getType()->castAs<FunctionType>()->getCallConv();
    if (IsMSVAStart) {
      // Don't allow this in System V ABI functions.
      if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_ms_va_start_used_in_sysv_function);
    } else {
      // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
      // On x64 Windows, don't allow this in System V ABI functions.
      // (Yes, that means there's no corresponding way to support variadic
      // System V ABI functions on Windows.)
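      // For example (illustrative): on x86-64 Linux, a variadic function
      // declared with __attribute__((ms_abi)) must use __builtin_ms_va_start;
      // plain va_start is rejected here, and conversely for sysv_abi
      // functions on Windows.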
8048 if ((IsWindows && CC == CC_X86_64SysV) || 8049 (!IsWindows && CC == CC_Win64)) 8050 return S.Diag(Fn->getBeginLoc(), 8051 diag::err_va_start_used_in_wrong_abi_function) 8052 << !IsWindows; 8053 } 8054 return false; 8055 } 8056 8057 if (IsMSVAStart) 8058 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 8059 return false; 8060 } 8061 8062 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 8063 ParmVarDecl **LastParam = nullptr) { 8064 // Determine whether the current function, block, or obj-c method is variadic 8065 // and get its parameter list. 8066 bool IsVariadic = false; 8067 ArrayRef<ParmVarDecl *> Params; 8068 DeclContext *Caller = S.CurContext; 8069 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 8070 IsVariadic = Block->isVariadic(); 8071 Params = Block->parameters(); 8072 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 8073 IsVariadic = FD->isVariadic(); 8074 Params = FD->parameters(); 8075 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 8076 IsVariadic = MD->isVariadic(); 8077 // FIXME: This isn't correct for methods (results in bogus warning). 8078 Params = MD->parameters(); 8079 } else if (isa<CapturedDecl>(Caller)) { 8080 // We don't support va_start in a CapturedDecl. 8081 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 8082 return true; 8083 } else { 8084 // This must be some other declcontext that parses exprs. 8085 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 8086 return true; 8087 } 8088 8089 if (!IsVariadic) { 8090 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 8091 return true; 8092 } 8093 8094 if (LastParam) 8095 *LastParam = Params.empty() ? nullptr : Params.back(); 8096 8097 return false; 8098 } 8099 8100 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 8101 /// for validity. Emit an error and return true on failure; return false 8102 /// on success. 8103 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 8104 Expr *Fn = TheCall->getCallee(); 8105 8106 if (checkVAStartABI(*this, BuiltinID, Fn)) 8107 return true; 8108 8109 // In C2x mode, va_start only needs one argument. However, the builtin still 8110 // requires two arguments (which matches the behavior of the GCC builtin), 8111 // <stdarg.h> passes `0` as the second argument in C2x mode. 8112 if (checkArgCount(*this, TheCall, 2)) 8113 return true; 8114 8115 // Type-check the first argument normally. 8116 if (checkBuiltinArgument(*this, TheCall, 0)) 8117 return true; 8118 8119 // Check that the current function is variadic, and get its last parameter. 8120 ParmVarDecl *LastParam; 8121 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 8122 return true; 8123 8124 // Verify that the second argument to the builtin is the last argument of the 8125 // current function or method. In C2x mode, if the second argument is an 8126 // integer constant expression with value 0, then we don't bother with this 8127 // check. 8128 bool SecondArgIsLastNamedArgument = false; 8129 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 8130 if (std::optional<llvm::APSInt> Val = 8131 TheCall->getArg(1)->getIntegerConstantExpr(Context); 8132 Val && LangOpts.C2x && *Val == 0) 8133 return false; 8134 8135 // These are valid if SecondArgIsLastNamedArgument is false after the next 8136 // block. 
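  // Illustrative example of the check below: in 'void f(int a, int b, ...)',
  // 'va_start(ap, b)' names the last parameter and is accepted silently,
  // while 'va_start(ap, a)' triggers the "not last named parameter" warning.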
8137 QualType Type; 8138 SourceLocation ParamLoc; 8139 bool IsCRegister = false; 8140 8141 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 8142 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 8143 SecondArgIsLastNamedArgument = PV == LastParam; 8144 8145 Type = PV->getType(); 8146 ParamLoc = PV->getLocation(); 8147 IsCRegister = 8148 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 8149 } 8150 } 8151 8152 if (!SecondArgIsLastNamedArgument) 8153 Diag(TheCall->getArg(1)->getBeginLoc(), 8154 diag::warn_second_arg_of_va_start_not_last_named_param); 8155 else if (IsCRegister || Type->isReferenceType() || 8156 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 8157 // Promotable integers are UB, but enumerations need a bit of 8158 // extra checking to see what their promotable type actually is. 8159 if (!Context.isPromotableIntegerType(Type)) 8160 return false; 8161 if (!Type->isEnumeralType()) 8162 return true; 8163 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 8164 return !(ED && 8165 Context.typesAreCompatible(ED->getPromotionType(), Type)); 8166 }()) { 8167 unsigned Reason = 0; 8168 if (Type->isReferenceType()) Reason = 1; 8169 else if (IsCRegister) Reason = 2; 8170 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 8171 Diag(ParamLoc, diag::note_parameter_type) << Type; 8172 } 8173 8174 return false; 8175 } 8176 8177 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 8178 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool { 8179 const LangOptions &LO = getLangOpts(); 8180 8181 if (LO.CPlusPlus) 8182 return Arg->getType() 8183 .getCanonicalType() 8184 .getTypePtr() 8185 ->getPointeeType() 8186 .withoutLocalFastQualifiers() == Context.CharTy; 8187 8188 // In C, allow aliasing through `char *`, this is required for AArch64 at 8189 // least. 8190 return true; 8191 }; 8192 8193 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 8194 // const char *named_addr); 8195 8196 Expr *Func = Call->getCallee(); 8197 8198 if (Call->getNumArgs() < 3) 8199 return Diag(Call->getEndLoc(), 8200 diag::err_typecheck_call_too_few_args_at_least) 8201 << 0 /*function call*/ << 3 << Call->getNumArgs(); 8202 8203 // Type-check the first argument normally. 8204 if (checkBuiltinArgument(*this, Call, 0)) 8205 return true; 8206 8207 // Check that the current function is variadic. 
  if (checkVAStartIsInVariadicFunction(*this, Func))
    return true;

  // __va_start on Windows does not validate the parameter qualifiers

  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();

  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();

  const QualType &ConstCharPtrTy =
      Context.getPointerType(Context.CharTy.withConst());
  if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
    Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 2 << Arg1->getType() << ConstCharPtrTy;

  const QualType SizeTy = Context.getSizeType();
  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
    Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg2->getType() << SizeTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 3 << Arg2->getType() << SizeTy;

  return false;
}

/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends. This is declared to take (...), so we have to check everything.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like __builtin_isnan and
/// friends. This is declared to take (...), so we have to check everything.
/// We expect the last argument to be a floating point value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  // Find out the position of the floating-point argument.
  unsigned FPArgNo = (NumArgs == 2) ? 0 : NumArgs - 1;

  // We can count on all parameters preceding the floating-point argument just
  // being int. Try all of those.
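  // Illustrative examples of the argument layout (not exhaustive):
  // __builtin_isnan(x) and __builtin_isinf(x) take only the value;
  // __builtin_isfpclass(x, mask) takes the value first and the test mask
  // second; __builtin_fpclassify(nan, inf, norm, subnorm, zero, x) takes the
  // value last, after five int classification results.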
  for (unsigned i = 0; i < FPArgNo; ++i) {
    Expr *Arg = TheCall->getArg(i);

    if (Arg->isTypeDependent())
      return false;

    ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);

    if (Res.isInvalid())
      return true;
    TheCall->setArg(i, Res.get());
  }

  Expr *OrigArg = TheCall->getArg(FPArgNo);

  if (OrigArg->isTypeDependent())
    return false;

  // Usual Unary Conversions will convert half to float, which we want for
  // machines that use fp16 conversion intrinsics. Else, we want to leave the
  // type as it is, but do normal L->Rvalue conversions.
  if (Context.getTargetInfo().useFP16ConversionIntrinsics())
    OrigArg = UsualUnaryConversions(OrigArg).get();
  else
    OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
  TheCall->setArg(FPArgNo, OrigArg);

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  // __builtin_isfpclass has an integer parameter that specifies the test mask.
  // It is passed in (...), so it should be analyzed completely here.
  if (NumArgs == 2)
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, llvm::fcAllFlags))
      return true;

  return false;
}

/// Perform semantic analysis for a call to __builtin_complex.
bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  bool Dependent = false;
  for (unsigned I = 0; I != 2; ++I) {
    Expr *Arg = TheCall->getArg(I);
    QualType T = Arg->getType();
    if (T->isDependentType()) {
      Dependent = true;
      continue;
    }

    // Despite supporting _Complex int, GCC requires a real floating point type
    // for the operands of __builtin_complex.
    if (!T->isRealFloatingType()) {
      return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
             << Arg->getType() << Arg->getSourceRange();
    }

    ExprResult Converted = DefaultLvalueConversion(Arg);
    if (Converted.isInvalid())
      return true;
    TheCall->setArg(I, Converted.get());
  }

  if (Dependent) {
    TheCall->setType(Context.DependentTy);
    return false;
  }

  Expr *Real = TheCall->getArg(0);
  Expr *Imag = TheCall->getArg(1);
  if (!Context.hasSameType(Real->getType(), Imag->getType())) {
    return Diag(Real->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << Real->getType() << Imag->getType()
           << Real->getSourceRange() << Imag->getSourceRange();
  }

  // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
  // don't allow this builtin to form those types either.
  // FIXME: Should we allow these types?
  if (Real->getType()->isFloat16Type())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
           << "_Float16";
  if (Real->getType()->isHalfType())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
           << "half";

  TheCall->setType(Context.getComplexType(Real->getType()));
  return false;
}

// Customized Sema Checking for VSX builtins that have the following signature:
// vector [...]
builtinName(vector [...], vector [...], const int); 8388 // Which takes the same type of vectors (any legal vector type) for the first 8389 // two arguments and takes compile time constant for the third argument. 8390 // Example builtins are : 8391 // vector double vec_xxpermdi(vector double, vector double, int); 8392 // vector short vec_xxsldwi(vector short, vector short, int); 8393 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 8394 unsigned ExpectedNumArgs = 3; 8395 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 8396 return true; 8397 8398 // Check the third argument is a compile time constant 8399 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 8400 return Diag(TheCall->getBeginLoc(), 8401 diag::err_vsx_builtin_nonconstant_argument) 8402 << 3 /* argument index */ << TheCall->getDirectCallee() 8403 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 8404 TheCall->getArg(2)->getEndLoc()); 8405 8406 QualType Arg1Ty = TheCall->getArg(0)->getType(); 8407 QualType Arg2Ty = TheCall->getArg(1)->getType(); 8408 8409 // Check the type of argument 1 and argument 2 are vectors. 8410 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 8411 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 8412 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 8413 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 8414 << TheCall->getDirectCallee() 8415 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 8416 TheCall->getArg(1)->getEndLoc()); 8417 } 8418 8419 // Check the first two arguments are the same type. 8420 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 8421 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 8422 << TheCall->getDirectCallee() 8423 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 8424 TheCall->getArg(1)->getEndLoc()); 8425 } 8426 8427 // When default clang type checking is turned off and the customized type 8428 // checking is used, the returning type of the function must be explicitly 8429 // set. Otherwise it is _Bool by default. 8430 TheCall->setType(Arg1Ty); 8431 8432 return false; 8433 } 8434 8435 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 8436 // This is declared to take (...), so we have to check everything. 8437 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 8438 if (TheCall->getNumArgs() < 2) 8439 return ExprError(Diag(TheCall->getEndLoc(), 8440 diag::err_typecheck_call_too_few_args_at_least) 8441 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 8442 << TheCall->getSourceRange()); 8443 8444 // Determine which of the following types of shufflevector we're checking: 8445 // 1) unary, vector mask: (lhs, mask) 8446 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 8447 QualType resType = TheCall->getArg(0)->getType(); 8448 unsigned numElements = 0; 8449 8450 if (!TheCall->getArg(0)->isTypeDependent() && 8451 !TheCall->getArg(1)->isTypeDependent()) { 8452 QualType LHSType = TheCall->getArg(0)->getType(); 8453 QualType RHSType = TheCall->getArg(1)->getType(); 8454 8455 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 8456 return ExprError( 8457 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 8458 << TheCall->getDirectCallee() 8459 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 8460 TheCall->getArg(1)->getEndLoc())); 8461 8462 numElements = LHSType->castAs<VectorType>()->getNumElements(); 8463 unsigned numResElements = TheCall->getNumArgs() - 2; 8464 8465 // Check to see if we have a call with 2 vector arguments, the unary shuffle 8466 // with mask. 
If so, verify that RHS is an integer vector type with the 8467 // same number of elts as lhs. 8468 if (TheCall->getNumArgs() == 2) { 8469 if (!RHSType->hasIntegerRepresentation() || 8470 RHSType->castAs<VectorType>()->getNumElements() != numElements) 8471 return ExprError(Diag(TheCall->getBeginLoc(), 8472 diag::err_vec_builtin_incompatible_vector) 8473 << TheCall->getDirectCallee() 8474 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 8475 TheCall->getArg(1)->getEndLoc())); 8476 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 8477 return ExprError(Diag(TheCall->getBeginLoc(), 8478 diag::err_vec_builtin_incompatible_vector) 8479 << TheCall->getDirectCallee() 8480 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 8481 TheCall->getArg(1)->getEndLoc())); 8482 } else if (numElements != numResElements) { 8483 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 8484 resType = Context.getVectorType(eltType, numResElements, 8485 VectorType::GenericVector); 8486 } 8487 } 8488 8489 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 8490 if (TheCall->getArg(i)->isTypeDependent() || 8491 TheCall->getArg(i)->isValueDependent()) 8492 continue; 8493 8494 std::optional<llvm::APSInt> Result; 8495 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 8496 return ExprError(Diag(TheCall->getBeginLoc(), 8497 diag::err_shufflevector_nonconstant_argument) 8498 << TheCall->getArg(i)->getSourceRange()); 8499 8500 // Allow -1 which will be translated to undef in the IR. 8501 if (Result->isSigned() && Result->isAllOnes()) 8502 continue; 8503 8504 if (Result->getActiveBits() > 64 || 8505 Result->getZExtValue() >= numElements * 2) 8506 return ExprError(Diag(TheCall->getBeginLoc(), 8507 diag::err_shufflevector_argument_too_large) 8508 << TheCall->getArg(i)->getSourceRange()); 8509 } 8510 8511 SmallVector<Expr*, 32> exprs; 8512 8513 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 8514 exprs.push_back(TheCall->getArg(i)); 8515 TheCall->setArg(i, nullptr); 8516 } 8517 8518 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 8519 TheCall->getCallee()->getBeginLoc(), 8520 TheCall->getRParenLoc()); 8521 } 8522 8523 /// SemaConvertVectorExpr - Handle __builtin_convertvector 8524 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 8525 SourceLocation BuiltinLoc, 8526 SourceLocation RParenLoc) { 8527 ExprValueKind VK = VK_PRValue; 8528 ExprObjectKind OK = OK_Ordinary; 8529 QualType DstTy = TInfo->getType(); 8530 QualType SrcTy = E->getType(); 8531 8532 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 8533 return ExprError(Diag(BuiltinLoc, 8534 diag::err_convertvector_non_vector) 8535 << E->getSourceRange()); 8536 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 8537 return ExprError(Diag(BuiltinLoc, 8538 diag::err_convertvector_non_vector_type)); 8539 8540 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 8541 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 8542 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 8543 if (SrcElts != DstElts) 8544 return ExprError(Diag(BuiltinLoc, 8545 diag::err_convertvector_incompatible_vector) 8546 << E->getSourceRange()); 8547 } 8548 8549 return new (Context) 8550 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 8551 } 8552 8553 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 8554 // This is declared to take (const void*, ...) and can take two 8555 // optional constant int args. 
8556 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 8557 unsigned NumArgs = TheCall->getNumArgs(); 8558 8559 if (NumArgs > 3) 8560 return Diag(TheCall->getEndLoc(), 8561 diag::err_typecheck_call_too_many_args_at_most) 8562 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 8563 8564 // Argument 0 is checked for us and the remaining arguments must be 8565 // constant integers. 8566 for (unsigned i = 1; i != NumArgs; ++i) 8567 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 8568 return true; 8569 8570 return false; 8571 } 8572 8573 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 8574 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 8575 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 8576 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 8577 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8578 if (checkArgCount(*this, TheCall, 1)) 8579 return true; 8580 Expr *Arg = TheCall->getArg(0); 8581 if (Arg->isInstantiationDependent()) 8582 return false; 8583 8584 QualType ArgTy = Arg->getType(); 8585 if (!ArgTy->hasFloatingRepresentation()) 8586 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 8587 << ArgTy; 8588 if (Arg->isLValue()) { 8589 ExprResult FirstArg = DefaultLvalueConversion(Arg); 8590 TheCall->setArg(0, FirstArg.get()); 8591 } 8592 TheCall->setType(TheCall->getArg(0)->getType()); 8593 return false; 8594 } 8595 8596 /// SemaBuiltinAssume - Handle __assume (MS Extension). 8597 // __assume does not evaluate its arguments, and should warn if its argument 8598 // has side effects. 8599 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 8600 Expr *Arg = TheCall->getArg(0); 8601 if (Arg->isInstantiationDependent()) return false; 8602 8603 if (Arg->HasSideEffects(Context)) 8604 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 8605 << Arg->getSourceRange() 8606 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 8607 8608 return false; 8609 } 8610 8611 /// Handle __builtin_alloca_with_align. This is declared 8612 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 8613 /// than 8. 8614 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 8615 // The alignment must be a constant integer. 8616 Expr *Arg = TheCall->getArg(1); 8617 8618 // We can't check the value of a dependent argument. 8619 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 8620 if (const auto *UE = 8621 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 8622 if (UE->getKind() == UETT_AlignOf || 8623 UE->getKind() == UETT_PreferredAlignOf) 8624 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 8625 << Arg->getSourceRange(); 8626 8627 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 8628 8629 if (!Result.isPowerOf2()) 8630 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 8631 << Arg->getSourceRange(); 8632 8633 if (Result < Context.getCharWidth()) 8634 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 8635 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 8636 8637 if (Result > std::numeric_limits<int32_t>::max()) 8638 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 8639 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 8640 } 8641 8642 return false; 8643 } 8644 8645 /// Handle __builtin_assume_aligned. This is declared 8646 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
8647 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 8648 if (checkArgCountRange(*this, TheCall, 2, 3)) 8649 return true; 8650 8651 unsigned NumArgs = TheCall->getNumArgs(); 8652 Expr *FirstArg = TheCall->getArg(0); 8653 8654 { 8655 ExprResult FirstArgResult = 8656 DefaultFunctionArrayLvalueConversion(FirstArg); 8657 if (checkBuiltinArgument(*this, TheCall, 0)) 8658 return true; 8659 /// In-place updation of FirstArg by checkBuiltinArgument is ignored. 8660 TheCall->setArg(0, FirstArgResult.get()); 8661 } 8662 8663 // The alignment must be a constant integer. 8664 Expr *SecondArg = TheCall->getArg(1); 8665 8666 // We can't check the value of a dependent argument. 8667 if (!SecondArg->isValueDependent()) { 8668 llvm::APSInt Result; 8669 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 8670 return true; 8671 8672 if (!Result.isPowerOf2()) 8673 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 8674 << SecondArg->getSourceRange(); 8675 8676 if (Result > Sema::MaximumAlignment) 8677 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 8678 << SecondArg->getSourceRange() << Sema::MaximumAlignment; 8679 } 8680 8681 if (NumArgs > 2) { 8682 Expr *ThirdArg = TheCall->getArg(2); 8683 if (convertArgumentToType(*this, ThirdArg, Context.getSizeType())) 8684 return true; 8685 TheCall->setArg(2, ThirdArg); 8686 } 8687 8688 return false; 8689 } 8690 8691 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 8692 unsigned BuiltinID = 8693 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 8694 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 8695 8696 unsigned NumArgs = TheCall->getNumArgs(); 8697 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 8698 if (NumArgs < NumRequiredArgs) { 8699 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 8700 << 0 /* function call */ << NumRequiredArgs << NumArgs 8701 << TheCall->getSourceRange(); 8702 } 8703 if (NumArgs >= NumRequiredArgs + 0x100) { 8704 return Diag(TheCall->getEndLoc(), 8705 diag::err_typecheck_call_too_many_args_at_most) 8706 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 8707 << TheCall->getSourceRange(); 8708 } 8709 unsigned i = 0; 8710 8711 // For formatting call, check buffer arg. 8712 if (!IsSizeCall) { 8713 ExprResult Arg(TheCall->getArg(i)); 8714 InitializedEntity Entity = InitializedEntity::InitializeParameter( 8715 Context, Context.VoidPtrTy, false); 8716 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 8717 if (Arg.isInvalid()) 8718 return true; 8719 TheCall->setArg(i, Arg.get()); 8720 i++; 8721 } 8722 8723 // Check string literal arg. 8724 unsigned FormatIdx = i; 8725 { 8726 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 8727 if (Arg.isInvalid()) 8728 return true; 8729 TheCall->setArg(i, Arg.get()); 8730 i++; 8731 } 8732 8733 // Make sure variadic args are scalar. 8734 unsigned FirstDataArg = i; 8735 while (i < NumArgs) { 8736 ExprResult Arg = DefaultVariadicArgumentPromotion( 8737 TheCall->getArg(i), VariadicFunction, nullptr); 8738 if (Arg.isInvalid()) 8739 return true; 8740 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 8741 if (ArgSize.getQuantity() >= 0x100) { 8742 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 8743 << i << (int)ArgSize.getQuantity() << 0xff 8744 << TheCall->getSourceRange(); 8745 } 8746 TheCall->setArg(i, Arg.get()); 8747 i++; 8748 } 8749 8750 // Check formatting specifiers. 
NOTE: We're only doing this for the non-size 8751 // call to avoid duplicate diagnostics. 8752 if (!IsSizeCall) { 8753 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 8754 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 8755 bool Success = CheckFormatArguments( 8756 Args, FAPK_Variadic, FormatIdx, FirstDataArg, FST_OSLog, 8757 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 8758 CheckedVarArgs); 8759 if (!Success) 8760 return true; 8761 } 8762 8763 if (IsSizeCall) { 8764 TheCall->setType(Context.getSizeType()); 8765 } else { 8766 TheCall->setType(Context.VoidPtrTy); 8767 } 8768 return false; 8769 } 8770 8771 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 8772 /// TheCall is a constant expression. 8773 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 8774 llvm::APSInt &Result) { 8775 Expr *Arg = TheCall->getArg(ArgNum); 8776 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 8777 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 8778 8779 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 8780 8781 std::optional<llvm::APSInt> R; 8782 if (!(R = Arg->getIntegerConstantExpr(Context))) 8783 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 8784 << FDecl->getDeclName() << Arg->getSourceRange(); 8785 Result = *R; 8786 return false; 8787 } 8788 8789 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 8790 /// TheCall is a constant expression in the range [Low, High]. 8791 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 8792 int Low, int High, bool RangeIsError) { 8793 if (isConstantEvaluated()) 8794 return false; 8795 llvm::APSInt Result; 8796 8797 // We can't check the value of a dependent argument. 8798 Expr *Arg = TheCall->getArg(ArgNum); 8799 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8800 return false; 8801 8802 // Check constant-ness first. 8803 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 8804 return true; 8805 8806 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 8807 if (RangeIsError) 8808 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 8809 << toString(Result, 10) << Low << High << Arg->getSourceRange(); 8810 else 8811 // Defer the warning until we know if the code will be emitted so that 8812 // dead code can ignore this. 8813 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 8814 PDiag(diag::warn_argument_invalid_range) 8815 << toString(Result, 10) << Low << High 8816 << Arg->getSourceRange()); 8817 } 8818 8819 return false; 8820 } 8821 8822 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 8823 /// TheCall is a constant expression is a multiple of Num.. 8824 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 8825 unsigned Num) { 8826 llvm::APSInt Result; 8827 8828 // We can't check the value of a dependent argument. 8829 Expr *Arg = TheCall->getArg(ArgNum); 8830 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8831 return false; 8832 8833 // Check constant-ness first. 
8834 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 8835 return true; 8836 8837 if (Result.getSExtValue() % Num != 0) 8838 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 8839 << Num << Arg->getSourceRange(); 8840 8841 return false; 8842 } 8843 8844 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a 8845 /// constant expression representing a power of 2. 8846 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 8847 llvm::APSInt Result; 8848 8849 // We can't check the value of a dependent argument. 8850 Expr *Arg = TheCall->getArg(ArgNum); 8851 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8852 return false; 8853 8854 // Check constant-ness first. 8855 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 8856 return true; 8857 8858 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 8859 // and only if x is a power of 2. 8860 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 8861 return false; 8862 8863 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 8864 << Arg->getSourceRange(); 8865 } 8866 8867 static bool IsShiftedByte(llvm::APSInt Value) { 8868 if (Value.isNegative()) 8869 return false; 8870 8871 // Check if it's a shifted byte, by shifting it down 8872 while (true) { 8873 // If the value fits in the bottom byte, the check passes. 8874 if (Value < 0x100) 8875 return true; 8876 8877 // Otherwise, if the value has _any_ bits in the bottom byte, the check 8878 // fails. 8879 if ((Value & 0xFF) != 0) 8880 return false; 8881 8882 // If the bottom 8 bits are all 0, but something above that is nonzero, 8883 // then shifting the value right by 8 bits won't affect whether it's a 8884 // shifted byte or not. So do that, and go round again. 8885 Value >>= 8; 8886 } 8887 } 8888 8889 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 8890 /// a constant expression representing an arbitrary byte value shifted left by 8891 /// a multiple of 8 bits. 8892 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 8893 unsigned ArgBits) { 8894 llvm::APSInt Result; 8895 8896 // We can't check the value of a dependent argument. 8897 Expr *Arg = TheCall->getArg(ArgNum); 8898 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8899 return false; 8900 8901 // Check constant-ness first. 8902 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 8903 return true; 8904 8905 // Truncate to the given size. 8906 Result = Result.getLoBits(ArgBits); 8907 Result.setIsUnsigned(true); 8908 8909 if (IsShiftedByte(Result)) 8910 return false; 8911 8912 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 8913 << Arg->getSourceRange(); 8914 } 8915 8916 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 8917 /// TheCall is a constant expression representing either a shifted byte value, 8918 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 8919 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 8920 /// Arm MVE intrinsics. 8921 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 8922 int ArgNum, 8923 unsigned ArgBits) { 8924 llvm::APSInt Result; 8925 8926 // We can't check the value of a dependent argument. 8927 Expr *Arg = TheCall->getArg(ArgNum); 8928 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8929 return false; 8930 8931 // Check constant-ness first. 
8932 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 8933 return true; 8934 8935 // Truncate to the given size. 8936 Result = Result.getLoBits(ArgBits); 8937 Result.setIsUnsigned(true); 8938 8939 // Check to see if it's in either of the required forms. 8940 if (IsShiftedByte(Result) || 8941 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 8942 return false; 8943 8944 return Diag(TheCall->getBeginLoc(), 8945 diag::err_argument_not_shifted_byte_or_xxff) 8946 << Arg->getSourceRange(); 8947 } 8948 8949 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 8950 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 8951 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 8952 if (checkArgCount(*this, TheCall, 2)) 8953 return true; 8954 Expr *Arg0 = TheCall->getArg(0); 8955 Expr *Arg1 = TheCall->getArg(1); 8956 8957 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 8958 if (FirstArg.isInvalid()) 8959 return true; 8960 QualType FirstArgType = FirstArg.get()->getType(); 8961 if (!FirstArgType->isAnyPointerType()) 8962 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 8963 << "first" << FirstArgType << Arg0->getSourceRange(); 8964 TheCall->setArg(0, FirstArg.get()); 8965 8966 ExprResult SecArg = DefaultLvalueConversion(Arg1); 8967 if (SecArg.isInvalid()) 8968 return true; 8969 QualType SecArgType = SecArg.get()->getType(); 8970 if (!SecArgType->isIntegerType()) 8971 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 8972 << "second" << SecArgType << Arg1->getSourceRange(); 8973 8974 // Derive the return type from the pointer argument. 8975 TheCall->setType(FirstArgType); 8976 return false; 8977 } 8978 8979 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 8980 if (checkArgCount(*this, TheCall, 2)) 8981 return true; 8982 8983 Expr *Arg0 = TheCall->getArg(0); 8984 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 8985 if (FirstArg.isInvalid()) 8986 return true; 8987 QualType FirstArgType = FirstArg.get()->getType(); 8988 if (!FirstArgType->isAnyPointerType()) 8989 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 8990 << "first" << FirstArgType << Arg0->getSourceRange(); 8991 TheCall->setArg(0, FirstArg.get()); 8992 8993 // Derive the return type from the pointer argument. 
8994 TheCall->setType(FirstArgType); 8995 8996 // Second arg must be an constant in range [0,15] 8997 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 8998 } 8999 9000 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 9001 if (checkArgCount(*this, TheCall, 2)) 9002 return true; 9003 Expr *Arg0 = TheCall->getArg(0); 9004 Expr *Arg1 = TheCall->getArg(1); 9005 9006 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 9007 if (FirstArg.isInvalid()) 9008 return true; 9009 QualType FirstArgType = FirstArg.get()->getType(); 9010 if (!FirstArgType->isAnyPointerType()) 9011 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 9012 << "first" << FirstArgType << Arg0->getSourceRange(); 9013 9014 QualType SecArgType = Arg1->getType(); 9015 if (!SecArgType->isIntegerType()) 9016 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 9017 << "second" << SecArgType << Arg1->getSourceRange(); 9018 TheCall->setType(Context.IntTy); 9019 return false; 9020 } 9021 9022 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 9023 BuiltinID == AArch64::BI__builtin_arm_stg) { 9024 if (checkArgCount(*this, TheCall, 1)) 9025 return true; 9026 Expr *Arg0 = TheCall->getArg(0); 9027 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 9028 if (FirstArg.isInvalid()) 9029 return true; 9030 9031 QualType FirstArgType = FirstArg.get()->getType(); 9032 if (!FirstArgType->isAnyPointerType()) 9033 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 9034 << "first" << FirstArgType << Arg0->getSourceRange(); 9035 TheCall->setArg(0, FirstArg.get()); 9036 9037 // Derive the return type from the pointer argument. 9038 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 9039 TheCall->setType(FirstArgType); 9040 return false; 9041 } 9042 9043 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 9044 Expr *ArgA = TheCall->getArg(0); 9045 Expr *ArgB = TheCall->getArg(1); 9046 9047 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 9048 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 9049 9050 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 9051 return true; 9052 9053 QualType ArgTypeA = ArgExprA.get()->getType(); 9054 QualType ArgTypeB = ArgExprB.get()->getType(); 9055 9056 auto isNull = [&] (Expr *E) -> bool { 9057 return E->isNullPointerConstant( 9058 Context, Expr::NPC_ValueDependentIsNotNull); }; 9059 9060 // argument should be either a pointer or null 9061 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 9062 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 9063 << "first" << ArgTypeA << ArgA->getSourceRange(); 9064 9065 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 9066 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 9067 << "second" << ArgTypeB << ArgB->getSourceRange(); 9068 9069 // Ensure Pointee types are compatible 9070 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 9071 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 9072 QualType pointeeA = ArgTypeA->getPointeeType(); 9073 QualType pointeeB = ArgTypeB->getPointeeType(); 9074 if (!Context.typesAreCompatible( 9075 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 9076 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 9077 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 9078 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 9079 << ArgB->getSourceRange(); 9080 } 9081 } 9082 9083 // at least one argument should be pointer type 9084 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 9085 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 9086 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 9087 9088 if (isNull(ArgA)) // adopt type of the other pointer 9089 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 9090 9091 if (isNull(ArgB)) 9092 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 9093 9094 TheCall->setArg(0, ArgExprA.get()); 9095 TheCall->setArg(1, ArgExprB.get()); 9096 TheCall->setType(Context.LongLongTy); 9097 return false; 9098 } 9099 assert(false && "Unhandled ARM MTE intrinsic"); 9100 return true; 9101 } 9102 9103 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 9104 /// TheCall is an ARM/AArch64 special register string literal. 9105 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 9106 int ArgNum, unsigned ExpectedFieldNum, 9107 bool AllowName) { 9108 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 9109 BuiltinID == ARM::BI__builtin_arm_wsr64 || 9110 BuiltinID == ARM::BI__builtin_arm_rsr || 9111 BuiltinID == ARM::BI__builtin_arm_rsrp || 9112 BuiltinID == ARM::BI__builtin_arm_wsr || 9113 BuiltinID == ARM::BI__builtin_arm_wsrp; 9114 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 9115 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 9116 BuiltinID == AArch64::BI__builtin_arm_rsr128 || 9117 BuiltinID == AArch64::BI__builtin_arm_wsr128 || 9118 BuiltinID == AArch64::BI__builtin_arm_rsr || 9119 BuiltinID == AArch64::BI__builtin_arm_rsrp || 9120 BuiltinID == AArch64::BI__builtin_arm_wsr || 9121 BuiltinID == AArch64::BI__builtin_arm_wsrp; 9122 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 9123 9124 // We can't check the value of a dependent argument. 9125 Expr *Arg = TheCall->getArg(ArgNum); 9126 if (Arg->isTypeDependent() || Arg->isValueDependent()) 9127 return false; 9128 9129 // Check if the argument is a string literal. 9130 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 9131 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 9132 << Arg->getSourceRange(); 9133 9134 // Check the type of special register given. 9135 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 9136 SmallVector<StringRef, 6> Fields; 9137 Reg.split(Fields, ":"); 9138 9139 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 9140 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 9141 << Arg->getSourceRange(); 9142 9143 // If the string is the name of a register then we cannot check that it is 9144 // valid here but if the string is of one the forms described in ACLE then we 9145 // can check that the supplied fields are integers and within the valid 9146 // ranges. 9147 if (Fields.size() > 1) { 9148 bool FiveFields = Fields.size() == 5; 9149 9150 bool ValidString = true; 9151 if (IsARMBuiltin) { 9152 ValidString &= Fields[0].starts_with_insensitive("cp") || 9153 Fields[0].starts_with_insensitive("p"); 9154 if (ValidString) 9155 Fields[0] = Fields[0].drop_front( 9156 Fields[0].starts_with_insensitive("cp") ? 
2 : 1); 9157 9158 ValidString &= Fields[2].starts_with_insensitive("c"); 9159 if (ValidString) 9160 Fields[2] = Fields[2].drop_front(1); 9161 9162 if (FiveFields) { 9163 ValidString &= Fields[3].starts_with_insensitive("c"); 9164 if (ValidString) 9165 Fields[3] = Fields[3].drop_front(1); 9166 } 9167 } 9168 9169 SmallVector<int, 5> Ranges; 9170 if (FiveFields) 9171 Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7}); 9172 else 9173 Ranges.append({15, 7, 15}); 9174 9175 for (unsigned i=0; i<Fields.size(); ++i) { 9176 int IntField; 9177 ValidString &= !Fields[i].getAsInteger(10, IntField); 9178 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 9179 } 9180 9181 if (!ValidString) 9182 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 9183 << Arg->getSourceRange(); 9184 } else if (IsAArch64Builtin && Fields.size() == 1) { 9185 // This code validates writes to PSTATE registers. 9186 9187 // Not a write. 9188 if (TheCall->getNumArgs() != 2) 9189 return false; 9190 9191 // The 128-bit system register accesses do not touch PSTATE. 9192 if (BuiltinID == AArch64::BI__builtin_arm_rsr128 || 9193 BuiltinID == AArch64::BI__builtin_arm_wsr128) 9194 return false; 9195 9196 // These are the named PSTATE accesses using "MSR (immediate)" instructions, 9197 // along with the upper limit on the immediates allowed. 9198 auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg) 9199 .CaseLower("spsel", 15) 9200 .CaseLower("daifclr", 15) 9201 .CaseLower("daifset", 15) 9202 .CaseLower("pan", 15) 9203 .CaseLower("uao", 15) 9204 .CaseLower("dit", 15) 9205 .CaseLower("ssbs", 15) 9206 .CaseLower("tco", 15) 9207 .CaseLower("allint", 1) 9208 .CaseLower("pm", 1) 9209 .Default(std::nullopt); 9210 9211 // If this is not a named PSTATE, just continue without validating, as this 9212 // will be lowered to an "MSR (register)" instruction directly 9213 if (!MaxLimit) 9214 return false; 9215 9216 // Here we only allow constants in the range for that pstate, as required by 9217 // the ACLE. 9218 // 9219 // While clang also accepts the names of system registers in its ACLE 9220 // intrinsics, we prevent this with the PSTATE names used in MSR (immediate) 9221 // as the value written via a register is different to the value used as an 9222 // immediate to have the same effect. e.g., for the instruction `msr tco, 9223 // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but 9224 // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO. 9225 // 9226 // If a programmer wants to codegen the MSR (register) form of `msr tco, 9227 // xN`, they can still do so by specifying the register using five 9228 // colon-separated numbers in a string. 9229 return SemaBuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit); 9230 } 9231 9232 return false; 9233 } 9234 9235 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 9236 /// Emit an error and return true on failure; return false on success. 9237 /// TypeStr is a string containing the type descriptor of the value returned by 9238 /// the builtin and the descriptors of the expected type of the arguments. 9239 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, 9240 const char *TypeStr) { 9241 9242 assert((TypeStr[0] != '\0') && 9243 "Invalid types in PPC MMA builtin declaration"); 9244 9245 unsigned Mask = 0; 9246 unsigned ArgNum = 0; 9247 9248 // The first type in TypeStr is the type of the value returned by the 9249 // builtin. So we first read that type and change the type of TheCall. 
9250 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 9251 TheCall->setType(type); 9252 9253 while (*TypeStr != '\0') { 9254 Mask = 0; 9255 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 9256 if (ArgNum >= TheCall->getNumArgs()) { 9257 ArgNum++; 9258 break; 9259 } 9260 9261 Expr *Arg = TheCall->getArg(ArgNum); 9262 QualType PassedType = Arg->getType(); 9263 QualType StrippedRVType = PassedType.getCanonicalType(); 9264 9265 // Strip Restrict/Volatile qualifiers. 9266 if (StrippedRVType.isRestrictQualified() || 9267 StrippedRVType.isVolatileQualified()) 9268 StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType(); 9269 9270 // The only case where the argument type and expected type are allowed to 9271 // mismatch is if the argument type is a non-void pointer (or array) and 9272 // expected type is a void pointer. 9273 if (StrippedRVType != ExpectedType) 9274 if (!(ExpectedType->isVoidPointerType() && 9275 (StrippedRVType->isPointerType() || StrippedRVType->isArrayType()))) 9276 return Diag(Arg->getBeginLoc(), 9277 diag::err_typecheck_convert_incompatible) 9278 << PassedType << ExpectedType << 1 << 0 << 0; 9279 9280 // If the value of the Mask is not 0, we have a constraint in the size of 9281 // the integer argument so here we ensure the argument is a constant that 9282 // is in the valid range. 9283 if (Mask != 0 && 9284 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 9285 return true; 9286 9287 ArgNum++; 9288 } 9289 9290 // In case we exited early from the previous loop, there are other types to 9291 // read from TypeStr. So we need to read them all to ensure we have the right 9292 // number of arguments in TheCall and if it is not the case, to display a 9293 // better error message. 9294 while (*TypeStr != '\0') { 9295 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 9296 ArgNum++; 9297 } 9298 if (checkArgCount(*this, TheCall, ArgNum)) 9299 return true; 9300 9301 return false; 9302 } 9303 9304 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 9305 /// This checks that the target supports __builtin_longjmp and 9306 /// that val is a constant 1. 9307 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 9308 if (!Context.getTargetInfo().hasSjLjLowering()) 9309 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 9310 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 9311 9312 Expr *Arg = TheCall->getArg(1); 9313 llvm::APSInt Result; 9314 9315 // TODO: This is less than ideal. Overload this to take a value. 9316 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 9317 return true; 9318 9319 if (Result != 1) 9320 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 9321 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 9322 9323 return false; 9324 } 9325 9326 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 9327 /// This checks that the target supports __builtin_setjmp. 
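// For illustration (placeholder names), the intended pairing with
// __builtin_longjmp checked above is:
//   void *Env[5];
//   if (__builtin_setjmp(Env) == 0)
//     __builtin_longjmp(Env, 1); // the second argument must be the constant 1
// Both builtins are rejected on targets without SjLj lowering.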
9328 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 9329 if (!Context.getTargetInfo().hasSjLjLowering()) 9330 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 9331 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 9332 return false; 9333 } 9334 9335 namespace { 9336 9337 class UncoveredArgHandler { 9338 enum { Unknown = -1, AllCovered = -2 }; 9339 9340 signed FirstUncoveredArg = Unknown; 9341 SmallVector<const Expr *, 4> DiagnosticExprs; 9342 9343 public: 9344 UncoveredArgHandler() = default; 9345 9346 bool hasUncoveredArg() const { 9347 return (FirstUncoveredArg >= 0); 9348 } 9349 9350 unsigned getUncoveredArg() const { 9351 assert(hasUncoveredArg() && "no uncovered argument"); 9352 return FirstUncoveredArg; 9353 } 9354 9355 void setAllCovered() { 9356 // A string has been found with all arguments covered, so clear out 9357 // the diagnostics. 9358 DiagnosticExprs.clear(); 9359 FirstUncoveredArg = AllCovered; 9360 } 9361 9362 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 9363 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 9364 9365 // Don't update if a previous string covers all arguments. 9366 if (FirstUncoveredArg == AllCovered) 9367 return; 9368 9369 // UncoveredArgHandler tracks the highest uncovered argument index 9370 // and with it all the strings that match this index. 9371 if (NewFirstUncoveredArg == FirstUncoveredArg) 9372 DiagnosticExprs.push_back(StrExpr); 9373 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 9374 DiagnosticExprs.clear(); 9375 DiagnosticExprs.push_back(StrExpr); 9376 FirstUncoveredArg = NewFirstUncoveredArg; 9377 } 9378 } 9379 9380 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 9381 }; 9382 9383 enum StringLiteralCheckType { 9384 SLCT_NotALiteral, 9385 SLCT_UncheckedLiteral, 9386 SLCT_CheckedLiteral 9387 }; 9388 9389 } // namespace 9390 9391 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 9392 BinaryOperatorKind BinOpKind, 9393 bool AddendIsRight) { 9394 unsigned BitWidth = Offset.getBitWidth(); 9395 unsigned AddendBitWidth = Addend.getBitWidth(); 9396 // There might be negative interim results. 9397 if (Addend.isUnsigned()) { 9398 Addend = Addend.zext(++AddendBitWidth); 9399 Addend.setIsSigned(true); 9400 } 9401 // Adjust the bit width of the APSInts. 9402 if (AddendBitWidth > BitWidth) { 9403 Offset = Offset.sext(AddendBitWidth); 9404 BitWidth = AddendBitWidth; 9405 } else if (BitWidth > AddendBitWidth) { 9406 Addend = Addend.sext(BitWidth); 9407 } 9408 9409 bool Ov = false; 9410 llvm::APSInt ResOffset = Offset; 9411 if (BinOpKind == BO_Add) 9412 ResOffset = Offset.sadd_ov(Addend, Ov); 9413 else { 9414 assert(AddendIsRight && BinOpKind == BO_Sub && 9415 "operator must be add or sub with addend on the right"); 9416 ResOffset = Offset.ssub_ov(Addend, Ov); 9417 } 9418 9419 // We add an offset to a pointer here so we should support an offset as big as 9420 // possible. 9421 if (Ov) { 9422 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 9423 "index (intermediate) result too big"); 9424 Offset = Offset.sext(2 * BitWidth); 9425 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 9426 return; 9427 } 9428 9429 Offset = ResOffset; 9430 } 9431 9432 namespace { 9433 9434 // This is a wrapper class around StringLiteral to support offsetted string 9435 // literals as format strings. It takes the offset into account when returning 9436 // the string and its length or the source locations to display notes correctly. 
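// For illustration, in a call such as
//   printf("%s: %d" + 4, Value);
// the effective format string is "%d", so checking and diagnostics must be
// based on the tail of the literal rather than on its first byte.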
9437 class FormatStringLiteral { 9438 const StringLiteral *FExpr; 9439 int64_t Offset; 9440 9441 public: 9442 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 9443 : FExpr(fexpr), Offset(Offset) {} 9444 9445 StringRef getString() const { 9446 return FExpr->getString().drop_front(Offset); 9447 } 9448 9449 unsigned getByteLength() const { 9450 return FExpr->getByteLength() - getCharByteWidth() * Offset; 9451 } 9452 9453 unsigned getLength() const { return FExpr->getLength() - Offset; } 9454 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 9455 9456 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 9457 9458 QualType getType() const { return FExpr->getType(); } 9459 9460 bool isAscii() const { return FExpr->isOrdinary(); } 9461 bool isWide() const { return FExpr->isWide(); } 9462 bool isUTF8() const { return FExpr->isUTF8(); } 9463 bool isUTF16() const { return FExpr->isUTF16(); } 9464 bool isUTF32() const { return FExpr->isUTF32(); } 9465 bool isPascal() const { return FExpr->isPascal(); } 9466 9467 SourceLocation getLocationOfByte( 9468 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 9469 const TargetInfo &Target, unsigned *StartToken = nullptr, 9470 unsigned *StartTokenByteOffset = nullptr) const { 9471 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 9472 StartToken, StartTokenByteOffset); 9473 } 9474 9475 SourceLocation getBeginLoc() const LLVM_READONLY { 9476 return FExpr->getBeginLoc().getLocWithOffset(Offset); 9477 } 9478 9479 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 9480 }; 9481 9482 } // namespace 9483 9484 static void CheckFormatString( 9485 Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr, 9486 ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK, 9487 unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type, 9488 bool inFunctionCall, Sema::VariadicCallType CallType, 9489 llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg, 9490 bool IgnoreStringsWithoutSpecifiers); 9491 9492 static const Expr *maybeConstEvalStringLiteral(ASTContext &Context, 9493 const Expr *E); 9494 9495 // Determine if an expression is a string literal or constant string. 9496 // If this function returns false on the arguments to a function expecting a 9497 // format string, we will usually need to emit a warning. 9498 // True string literals are then checked by CheckFormatString. 9499 static StringLiteralCheckType 9500 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 9501 Sema::FormatArgumentPassingKind APK, unsigned format_idx, 9502 unsigned firstDataArg, Sema::FormatStringType Type, 9503 Sema::VariadicCallType CallType, bool InFunctionCall, 9504 llvm::SmallBitVector &CheckedVarArgs, 9505 UncoveredArgHandler &UncoveredArg, llvm::APSInt Offset, 9506 bool IgnoreStringsWithoutSpecifiers = false) { 9507 if (S.isConstantEvaluated()) 9508 return SLCT_NotALiteral; 9509 tryAgain: 9510 assert(Offset.isSigned() && "invalid offset"); 9511 9512 if (E->isTypeDependent() || E->isValueDependent()) 9513 return SLCT_NotALiteral; 9514 9515 E = E->IgnoreParenCasts(); 9516 9517 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 9518 // Technically -Wformat-nonliteral does not warn about this case. 9519 // The behavior of printf and friends in this case is implementation 9520 // dependent. 
Ideally if the format string cannot be null then 9521 // it should have a 'nonnull' attribute in the function prototype. 9522 return SLCT_UncheckedLiteral; 9523 9524 switch (E->getStmtClass()) { 9525 case Stmt::InitListExprClass: 9526 // Handle expressions like {"foobar"}. 9527 if (const clang::Expr *SLE = maybeConstEvalStringLiteral(S.Context, E)) { 9528 return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg, 9529 Type, CallType, /*InFunctionCall*/ false, 9530 CheckedVarArgs, UncoveredArg, Offset, 9531 IgnoreStringsWithoutSpecifiers); 9532 } 9533 return SLCT_NotALiteral; 9534 case Stmt::BinaryConditionalOperatorClass: 9535 case Stmt::ConditionalOperatorClass: { 9536 // The expression is a literal if both sub-expressions were, and it was 9537 // completely checked only if both sub-expressions were checked. 9538 const AbstractConditionalOperator *C = 9539 cast<AbstractConditionalOperator>(E); 9540 9541 // Determine whether it is necessary to check both sub-expressions, for 9542 // example, because the condition expression is a constant that can be 9543 // evaluated at compile time. 9544 bool CheckLeft = true, CheckRight = true; 9545 9546 bool Cond; 9547 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 9548 S.isConstantEvaluated())) { 9549 if (Cond) 9550 CheckRight = false; 9551 else 9552 CheckLeft = false; 9553 } 9554 9555 // We need to maintain the offsets for the right and the left hand side 9556 // separately to check if every possible indexed expression is a valid 9557 // string literal. They might have different offsets for different string 9558 // literals in the end. 9559 StringLiteralCheckType Left; 9560 if (!CheckLeft) 9561 Left = SLCT_UncheckedLiteral; 9562 else { 9563 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, APK, format_idx, 9564 firstDataArg, Type, CallType, InFunctionCall, 9565 CheckedVarArgs, UncoveredArg, Offset, 9566 IgnoreStringsWithoutSpecifiers); 9567 if (Left == SLCT_NotALiteral || !CheckRight) { 9568 return Left; 9569 } 9570 } 9571 9572 StringLiteralCheckType Right = checkFormatStringExpr( 9573 S, C->getFalseExpr(), Args, APK, format_idx, firstDataArg, Type, 9574 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 9575 IgnoreStringsWithoutSpecifiers); 9576 9577 return (CheckLeft && Left < Right) ? Left : Right; 9578 } 9579 9580 case Stmt::ImplicitCastExprClass: 9581 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 9582 goto tryAgain; 9583 9584 case Stmt::OpaqueValueExprClass: 9585 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 9586 E = src; 9587 goto tryAgain; 9588 } 9589 return SLCT_NotALiteral; 9590 9591 case Stmt::PredefinedExprClass: 9592 // While __func__, etc., are technically not string literals, they 9593 // cannot contain format specifiers and thus are not a security 9594 // liability. 9595 return SLCT_UncheckedLiteral; 9596 9597 case Stmt::DeclRefExprClass: { 9598 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 9599 9600 // As an exception, do not flag errors for variables binding to 9601 // const string literals. 
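// For illustration (placeholder names), a binding such as
//   static const char Fmt[] = "%d: %s";
//   printf(Fmt, Count, Name);
// is followed through the initializer below and checked, rather than being
// reported under -Wformat-nonliteral.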
9602 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 9603 bool isConstant = false; 9604 QualType T = DR->getType(); 9605 9606 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 9607 isConstant = AT->getElementType().isConstant(S.Context); 9608 } else if (const PointerType *PT = T->getAs<PointerType>()) { 9609 isConstant = T.isConstant(S.Context) && 9610 PT->getPointeeType().isConstant(S.Context); 9611 } else if (T->isObjCObjectPointerType()) { 9612 // In ObjC, there is usually no "const ObjectPointer" type, 9613 // so don't check if the pointee type is constant. 9614 isConstant = T.isConstant(S.Context); 9615 } 9616 9617 if (isConstant) { 9618 if (const Expr *Init = VD->getAnyInitializer()) { 9619 // Look through initializers like const char c[] = { "foo" } 9620 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 9621 if (InitList->isStringLiteralInit()) 9622 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 9623 } 9624 return checkFormatStringExpr( 9625 S, Init, Args, APK, format_idx, firstDataArg, Type, CallType, 9626 /*InFunctionCall*/ false, CheckedVarArgs, UncoveredArg, Offset); 9627 } 9628 } 9629 9630 // When the format argument is an argument of this function, and this 9631 // function also has the format attribute, there are several interactions 9632 // for which there shouldn't be a warning. For instance, when calling 9633 // v*printf from a function that has the printf format attribute, we 9634 // should not emit a warning about using `fmt`, even though it's not 9635 // constant, because the arguments have already been checked for the 9636 // caller of `logmessage`: 9637 // 9638 // __attribute__((format(printf, 1, 2))) 9639 // void logmessage(char const *fmt, ...) { 9640 // va_list ap; 9641 // va_start(ap, fmt); 9642 // vprintf(fmt, ap); /* do not emit a warning about "fmt" */ 9643 // ... 9644 // } 9645 // 9646 // Another interaction that we need to support is calling a variadic 9647 // format function from a format function that has fixed arguments. For 9648 // instance: 9649 // 9650 // __attribute__((format(printf, 1, 2))) 9651 // void logstring(char const *fmt, char const *str) { 9652 // printf(fmt, str); /* do not emit a warning about "fmt" */ 9653 // } 9654 // 9655 // Same (and perhaps more relatably) for the variadic template case: 9656 // 9657 // template<typename... Args> 9658 // __attribute__((format(printf, 1, 2))) 9659 // void log(const char *fmt, Args&&... args) { 9660 // printf(fmt, forward<Args>(args)...); 9661 // /* do not emit a warning about "fmt" */ 9662 // } 9663 // 9664 // Due to implementation difficulty, we only check the format, not the 9665 // format arguments, in all cases. 9666 // 9667 if (const auto *PV = dyn_cast<ParmVarDecl>(VD)) { 9668 if (const auto *D = dyn_cast<Decl>(PV->getDeclContext())) { 9669 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) { 9670 bool IsCXXMember = false; 9671 if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) 9672 IsCXXMember = MD->isInstance(); 9673 9674 bool IsVariadic = false; 9675 if (const FunctionType *FnTy = D->getFunctionType()) 9676 IsVariadic = cast<FunctionProtoType>(FnTy)->isVariadic(); 9677 else if (const auto *BD = dyn_cast<BlockDecl>(D)) 9678 IsVariadic = BD->isVariadic(); 9679 else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D)) 9680 IsVariadic = OMD->isVariadic(); 9681 9682 Sema::FormatStringInfo CallerFSI; 9683 if (Sema::getFormatStringInfo(PVFormat, IsCXXMember, IsVariadic, 9684 &CallerFSI)) { 9685 // We also check if the formats are compatible. 
9686 // We can't pass a 'scanf' string to a 'printf' function. 9687 if (PV->getFunctionScopeIndex() == CallerFSI.FormatIdx && 9688 Type == S.GetFormatStringType(PVFormat)) { 9689 // Lastly, check that argument passing kinds transition in a 9690 // way that makes sense: 9691 // from a caller with FAPK_VAList, allow FAPK_VAList 9692 // from a caller with FAPK_Fixed, allow FAPK_Fixed 9693 // from a caller with FAPK_Fixed, allow FAPK_Variadic 9694 // from a caller with FAPK_Variadic, allow FAPK_VAList 9695 switch (combineFAPK(CallerFSI.ArgPassingKind, APK)) { 9696 case combineFAPK(Sema::FAPK_VAList, Sema::FAPK_VAList): 9697 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Fixed): 9698 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Variadic): 9699 case combineFAPK(Sema::FAPK_Variadic, Sema::FAPK_VAList): 9700 return SLCT_UncheckedLiteral; 9701 } 9702 } 9703 } 9704 } 9705 } 9706 } 9707 } 9708 9709 return SLCT_NotALiteral; 9710 } 9711 9712 case Stmt::CallExprClass: 9713 case Stmt::CXXMemberCallExprClass: { 9714 const CallExpr *CE = cast<CallExpr>(E); 9715 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 9716 bool IsFirst = true; 9717 StringLiteralCheckType CommonResult; 9718 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 9719 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 9720 StringLiteralCheckType Result = checkFormatStringExpr( 9721 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType, 9722 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 9723 IgnoreStringsWithoutSpecifiers); 9724 if (IsFirst) { 9725 CommonResult = Result; 9726 IsFirst = false; 9727 } 9728 } 9729 if (!IsFirst) 9730 return CommonResult; 9731 9732 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 9733 unsigned BuiltinID = FD->getBuiltinID(); 9734 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 9735 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 9736 const Expr *Arg = CE->getArg(0); 9737 return checkFormatStringExpr( 9738 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType, 9739 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 9740 IgnoreStringsWithoutSpecifiers); 9741 } 9742 } 9743 } 9744 if (const Expr *SLE = maybeConstEvalStringLiteral(S.Context, E)) 9745 return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg, 9746 Type, CallType, /*InFunctionCall*/ false, 9747 CheckedVarArgs, UncoveredArg, Offset, 9748 IgnoreStringsWithoutSpecifiers); 9749 return SLCT_NotALiteral; 9750 } 9751 case Stmt::ObjCMessageExprClass: { 9752 const auto *ME = cast<ObjCMessageExpr>(E); 9753 if (const auto *MD = ME->getMethodDecl()) { 9754 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 9755 // As a special case heuristic, if we're using the method -[NSBundle 9756 // localizedStringForKey:value:table:], ignore any key strings that lack 9757 // format specifiers. The idea is that if the key doesn't have any 9758 // format specifiers then its probably just a key to map to the 9759 // localized strings. If it does have format specifiers though, then its 9760 // likely that the text of the key is the format string in the 9761 // programmer's language, and should be checked. 
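// For illustration, a key that carries a specifier, e.g.
//   [Bundle localizedStringForKey:@"%lu items" value:nil table:nil],
// is still treated as a format string and checked, while a plain key such as
// @"WelcomeMessage" is skipped.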
9762 const ObjCInterfaceDecl *IFace; 9763 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) && 9764 IFace->getIdentifier()->isStr("NSBundle") && 9765 MD->getSelector().isKeywordSelector( 9766 {"localizedStringForKey", "value", "table"})) { 9767 IgnoreStringsWithoutSpecifiers = true; 9768 } 9769 9770 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 9771 return checkFormatStringExpr( 9772 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType, 9773 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 9774 IgnoreStringsWithoutSpecifiers); 9775 } 9776 } 9777 9778 return SLCT_NotALiteral; 9779 } 9780 case Stmt::ObjCStringLiteralClass: 9781 case Stmt::StringLiteralClass: { 9782 const StringLiteral *StrE = nullptr; 9783 9784 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 9785 StrE = ObjCFExpr->getString(); 9786 else 9787 StrE = cast<StringLiteral>(E); 9788 9789 if (StrE) { 9790 if (Offset.isNegative() || Offset > StrE->getLength()) { 9791 // TODO: It would be better to have an explicit warning for out of 9792 // bounds literals. 9793 return SLCT_NotALiteral; 9794 } 9795 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 9796 CheckFormatString(S, &FStr, E, Args, APK, format_idx, firstDataArg, Type, 9797 InFunctionCall, CallType, CheckedVarArgs, UncoveredArg, 9798 IgnoreStringsWithoutSpecifiers); 9799 return SLCT_CheckedLiteral; 9800 } 9801 9802 return SLCT_NotALiteral; 9803 } 9804 case Stmt::BinaryOperatorClass: { 9805 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 9806 9807 // A string literal + an int offset is still a string literal. 9808 if (BinOp->isAdditiveOp()) { 9809 Expr::EvalResult LResult, RResult; 9810 9811 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 9812 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 9813 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 9814 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 9815 9816 if (LIsInt != RIsInt) { 9817 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 9818 9819 if (LIsInt) { 9820 if (BinOpKind == BO_Add) { 9821 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 9822 E = BinOp->getRHS(); 9823 goto tryAgain; 9824 } 9825 } else { 9826 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 9827 E = BinOp->getLHS(); 9828 goto tryAgain; 9829 } 9830 } 9831 } 9832 9833 return SLCT_NotALiteral; 9834 } 9835 case Stmt::UnaryOperatorClass: { 9836 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 9837 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 9838 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 9839 Expr::EvalResult IndexResult; 9840 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 9841 Expr::SE_NoSideEffects, 9842 S.isConstantEvaluated())) { 9843 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 9844 /*RHS is int*/ true); 9845 E = ASE->getBase(); 9846 goto tryAgain; 9847 } 9848 } 9849 9850 return SLCT_NotALiteral; 9851 } 9852 9853 default: 9854 return SLCT_NotALiteral; 9855 } 9856 } 9857 9858 // If this expression can be evaluated at compile-time, 9859 // check if the result is a StringLiteral and return it 9860 // otherwise return nullptr 9861 static const Expr *maybeConstEvalStringLiteral(ASTContext &Context, 9862 const Expr *E) { 9863 Expr::EvalResult Result; 9864 if (E->EvaluateAsRValue(Result, Context) && Result.Val.isLValue()) { 9865 const auto *LVE = Result.Val.getLValueBase().dyn_cast<const Expr *>(); 9866 if (isa_and_nonnull<StringLiteral>(LVE)) 9867 return LVE; 9868 
} 9869 return nullptr; 9870 } 9871 9872 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 9873 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 9874 .Case("scanf", FST_Scanf) 9875 .Cases("printf", "printf0", FST_Printf) 9876 .Cases("NSString", "CFString", FST_NSString) 9877 .Case("strftime", FST_Strftime) 9878 .Case("strfmon", FST_Strfmon) 9879 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 9880 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 9881 .Case("os_trace", FST_OSLog) 9882 .Case("os_log", FST_OSLog) 9883 .Default(FST_Unknown); 9884 } 9885 9886 /// CheckFormatArguments - Check calls to printf and scanf (and similar 9887 /// functions) for correct use of format strings. 9888 /// Returns true if a format string has been fully checked. 9889 bool Sema::CheckFormatArguments(const FormatAttr *Format, 9890 ArrayRef<const Expr *> Args, bool IsCXXMember, 9891 VariadicCallType CallType, SourceLocation Loc, 9892 SourceRange Range, 9893 llvm::SmallBitVector &CheckedVarArgs) { 9894 FormatStringInfo FSI; 9895 if (getFormatStringInfo(Format, IsCXXMember, CallType != VariadicDoesNotApply, 9896 &FSI)) 9897 return CheckFormatArguments(Args, FSI.ArgPassingKind, FSI.FormatIdx, 9898 FSI.FirstDataArg, GetFormatStringType(Format), 9899 CallType, Loc, Range, CheckedVarArgs); 9900 return false; 9901 } 9902 9903 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 9904 Sema::FormatArgumentPassingKind APK, 9905 unsigned format_idx, unsigned firstDataArg, 9906 FormatStringType Type, 9907 VariadicCallType CallType, SourceLocation Loc, 9908 SourceRange Range, 9909 llvm::SmallBitVector &CheckedVarArgs) { 9910 // CHECK: printf/scanf-like function is called with no format string. 9911 if (format_idx >= Args.size()) { 9912 Diag(Loc, diag::warn_missing_format_string) << Range; 9913 return false; 9914 } 9915 9916 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 9917 9918 // CHECK: format string is not a string literal. 9919 // 9920 // Dynamically generated format strings are difficult to 9921 // automatically vet at compile time. Requiring that format strings 9922 // are string literals: (1) permits the checking of format strings by 9923 // the compiler and thereby (2) can practically remove the source of 9924 // many format string exploits. 9925 9926 // Format string can be either ObjC string (e.g. @"%d") or 9927 // C string (e.g. "%d") 9928 // ObjC string uses the same format specifiers as C string, so we can use 9929 // the same format string checking logic for both ObjC and C strings. 9930 UncoveredArgHandler UncoveredArg; 9931 StringLiteralCheckType CT = checkFormatStringExpr( 9932 *this, OrigFormatExpr, Args, APK, format_idx, firstDataArg, Type, 9933 CallType, 9934 /*IsFunctionCall*/ true, CheckedVarArgs, UncoveredArg, 9935 /*no string offset*/ llvm::APSInt(64, false) = 0); 9936 9937 // Generate a diagnostic where an uncovered argument is detected. 9938 if (UncoveredArg.hasUncoveredArg()) { 9939 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 9940 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 9941 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 9942 } 9943 9944 if (CT != SLCT_NotALiteral) 9945 // Literal format string found, check done! 9946 return CT == SLCT_CheckedLiteral; 9947 9948 // Strftime is particular as it always uses a single 'time' argument, 9949 // so it is safe to pass a non-literal string. 
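// For illustration (placeholder names), a call such as
//   strftime(Buf, sizeof(Buf), UserFormat, &TM);
// is not diagnosed even though UserFormat is not a literal, because every
// conversion reads from the same struct tm argument.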
9950 if (Type == FST_Strftime) 9951 return false; 9952 9953 // Do not emit diag when the string param is a macro expansion and the 9954 // format is either NSString or CFString. This is a hack to prevent 9955 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 9956 // which are usually used in place of NS and CF string literals. 9957 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 9958 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 9959 return false; 9960 9961 // If there are no arguments specified, warn with -Wformat-security, otherwise 9962 // warn only with -Wformat-nonliteral. 9963 if (Args.size() == firstDataArg) { 9964 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 9965 << OrigFormatExpr->getSourceRange(); 9966 switch (Type) { 9967 default: 9968 break; 9969 case FST_Kprintf: 9970 case FST_FreeBSDKPrintf: 9971 case FST_Printf: 9972 Diag(FormatLoc, diag::note_format_security_fixit) 9973 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 9974 break; 9975 case FST_NSString: 9976 Diag(FormatLoc, diag::note_format_security_fixit) 9977 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 9978 break; 9979 } 9980 } else { 9981 Diag(FormatLoc, diag::warn_format_nonliteral) 9982 << OrigFormatExpr->getSourceRange(); 9983 } 9984 return false; 9985 } 9986 9987 namespace { 9988 9989 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 9990 protected: 9991 Sema &S; 9992 const FormatStringLiteral *FExpr; 9993 const Expr *OrigFormatExpr; 9994 const Sema::FormatStringType FSType; 9995 const unsigned FirstDataArg; 9996 const unsigned NumDataArgs; 9997 const char *Beg; // Start of format string. 9998 const Sema::FormatArgumentPassingKind ArgPassingKind; 9999 ArrayRef<const Expr *> Args; 10000 unsigned FormatIdx; 10001 llvm::SmallBitVector CoveredArgs; 10002 bool usesPositionalArgs = false; 10003 bool atFirstArg = true; 10004 bool inFunctionCall; 10005 Sema::VariadicCallType CallType; 10006 llvm::SmallBitVector &CheckedVarArgs; 10007 UncoveredArgHandler &UncoveredArg; 10008 10009 public: 10010 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 10011 const Expr *origFormatExpr, 10012 const Sema::FormatStringType type, unsigned firstDataArg, 10013 unsigned numDataArgs, const char *beg, 10014 Sema::FormatArgumentPassingKind APK, 10015 ArrayRef<const Expr *> Args, unsigned formatIdx, 10016 bool inFunctionCall, Sema::VariadicCallType callType, 10017 llvm::SmallBitVector &CheckedVarArgs, 10018 UncoveredArgHandler &UncoveredArg) 10019 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 10020 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 10021 ArgPassingKind(APK), Args(Args), FormatIdx(formatIdx), 10022 inFunctionCall(inFunctionCall), CallType(callType), 10023 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 10024 CoveredArgs.resize(numDataArgs); 10025 CoveredArgs.reset(); 10026 } 10027 10028 void DoneProcessing(); 10029 10030 void HandleIncompleteSpecifier(const char *startSpecifier, 10031 unsigned specifierLen) override; 10032 10033 void HandleInvalidLengthModifier( 10034 const analyze_format_string::FormatSpecifier &FS, 10035 const analyze_format_string::ConversionSpecifier &CS, 10036 const char *startSpecifier, unsigned specifierLen, 10037 unsigned DiagID); 10038 10039 void HandleNonStandardLengthModifier( 10040 const analyze_format_string::FormatSpecifier &FS, 10041 const char *startSpecifier, unsigned specifierLen); 10042 10043 void 
HandleNonStandardConversionSpecifier( 10044 const analyze_format_string::ConversionSpecifier &CS, 10045 const char *startSpecifier, unsigned specifierLen); 10046 10047 void HandlePosition(const char *startPos, unsigned posLen) override; 10048 10049 void HandleInvalidPosition(const char *startSpecifier, 10050 unsigned specifierLen, 10051 analyze_format_string::PositionContext p) override; 10052 10053 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 10054 10055 void HandleNullChar(const char *nullCharacter) override; 10056 10057 template <typename Range> 10058 static void 10059 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 10060 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 10061 bool IsStringLocation, Range StringRange, 10062 ArrayRef<FixItHint> Fixit = std::nullopt); 10063 10064 protected: 10065 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 10066 const char *startSpec, 10067 unsigned specifierLen, 10068 const char *csStart, unsigned csLen); 10069 10070 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 10071 const char *startSpec, 10072 unsigned specifierLen); 10073 10074 SourceRange getFormatStringRange(); 10075 CharSourceRange getSpecifierRange(const char *startSpecifier, 10076 unsigned specifierLen); 10077 SourceLocation getLocationOfByte(const char *x); 10078 10079 const Expr *getDataArg(unsigned i) const; 10080 10081 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 10082 const analyze_format_string::ConversionSpecifier &CS, 10083 const char *startSpecifier, unsigned specifierLen, 10084 unsigned argIndex); 10085 10086 template <typename Range> 10087 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 10088 bool IsStringLocation, Range StringRange, 10089 ArrayRef<FixItHint> Fixit = std::nullopt); 10090 }; 10091 10092 } // namespace 10093 10094 SourceRange CheckFormatHandler::getFormatStringRange() { 10095 return OrigFormatExpr->getSourceRange(); 10096 } 10097 10098 CharSourceRange CheckFormatHandler:: 10099 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 10100 SourceLocation Start = getLocationOfByte(startSpecifier); 10101 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 10102 10103 // Advance the end SourceLocation by one due to half-open ranges. 10104 End = End.getLocWithOffset(1); 10105 10106 return CharSourceRange::getCharRange(Start, End); 10107 } 10108 10109 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 10110 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 10111 S.getLangOpts(), S.Context.getTargetInfo()); 10112 } 10113 10114 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 10115 unsigned specifierLen){ 10116 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 10117 getLocationOfByte(startSpecifier), 10118 /*IsStringLocation*/true, 10119 getSpecifierRange(startSpecifier, specifierLen)); 10120 } 10121 10122 void CheckFormatHandler::HandleInvalidLengthModifier( 10123 const analyze_format_string::FormatSpecifier &FS, 10124 const analyze_format_string::ConversionSpecifier &CS, 10125 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 10126 using namespace analyze_format_string; 10127 10128 const LengthModifier &LM = FS.getLengthModifier(); 10129 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 10130 10131 // See if we know how to fix this length modifier. 
10132 std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 10133 if (FixedLM) { 10134 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 10135 getLocationOfByte(LM.getStart()), 10136 /*IsStringLocation*/true, 10137 getSpecifierRange(startSpecifier, specifierLen)); 10138 10139 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 10140 << FixedLM->toString() 10141 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 10142 10143 } else { 10144 FixItHint Hint; 10145 if (DiagID == diag::warn_format_nonsensical_length) 10146 Hint = FixItHint::CreateRemoval(LMRange); 10147 10148 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 10149 getLocationOfByte(LM.getStart()), 10150 /*IsStringLocation*/true, 10151 getSpecifierRange(startSpecifier, specifierLen), 10152 Hint); 10153 } 10154 } 10155 10156 void CheckFormatHandler::HandleNonStandardLengthModifier( 10157 const analyze_format_string::FormatSpecifier &FS, 10158 const char *startSpecifier, unsigned specifierLen) { 10159 using namespace analyze_format_string; 10160 10161 const LengthModifier &LM = FS.getLengthModifier(); 10162 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 10163 10164 // See if we know how to fix this length modifier. 10165 std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 10166 if (FixedLM) { 10167 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 10168 << LM.toString() << 0, 10169 getLocationOfByte(LM.getStart()), 10170 /*IsStringLocation*/true, 10171 getSpecifierRange(startSpecifier, specifierLen)); 10172 10173 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 10174 << FixedLM->toString() 10175 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 10176 10177 } else { 10178 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 10179 << LM.toString() << 0, 10180 getLocationOfByte(LM.getStart()), 10181 /*IsStringLocation*/true, 10182 getSpecifierRange(startSpecifier, specifierLen)); 10183 } 10184 } 10185 10186 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 10187 const analyze_format_string::ConversionSpecifier &CS, 10188 const char *startSpecifier, unsigned specifierLen) { 10189 using namespace analyze_format_string; 10190 10191 // See if we know how to fix this conversion specifier. 
10192 std::optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 10193 if (FixedCS) { 10194 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 10195 << CS.toString() << /*conversion specifier*/1, 10196 getLocationOfByte(CS.getStart()), 10197 /*IsStringLocation*/true, 10198 getSpecifierRange(startSpecifier, specifierLen)); 10199 10200 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 10201 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 10202 << FixedCS->toString() 10203 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 10204 } else { 10205 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 10206 << CS.toString() << /*conversion specifier*/1, 10207 getLocationOfByte(CS.getStart()), 10208 /*IsStringLocation*/true, 10209 getSpecifierRange(startSpecifier, specifierLen)); 10210 } 10211 } 10212 10213 void CheckFormatHandler::HandlePosition(const char *startPos, 10214 unsigned posLen) { 10215 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 10216 getLocationOfByte(startPos), 10217 /*IsStringLocation*/true, 10218 getSpecifierRange(startPos, posLen)); 10219 } 10220 10221 void CheckFormatHandler::HandleInvalidPosition( 10222 const char *startSpecifier, unsigned specifierLen, 10223 analyze_format_string::PositionContext p) { 10224 EmitFormatDiagnostic( 10225 S.PDiag(diag::warn_format_invalid_positional_specifier) << (unsigned)p, 10226 getLocationOfByte(startSpecifier), /*IsStringLocation*/ true, 10227 getSpecifierRange(startSpecifier, specifierLen)); 10228 } 10229 10230 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 10231 unsigned posLen) { 10232 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 10233 getLocationOfByte(startPos), 10234 /*IsStringLocation*/true, 10235 getSpecifierRange(startPos, posLen)); 10236 } 10237 10238 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 10239 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 10240 // The presence of a null character is likely an error. 10241 EmitFormatDiagnostic( 10242 S.PDiag(diag::warn_printf_format_string_contains_null_char), 10243 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 10244 getFormatStringRange()); 10245 } 10246 } 10247 10248 // Note that this may return NULL if there was an error parsing or building 10249 // one of the argument expressions. 10250 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 10251 return Args[FirstDataArg + i]; 10252 } 10253 10254 void CheckFormatHandler::DoneProcessing() { 10255 // Does the number of data arguments exceed the number of 10256 // format conversions in the format string? 10257 if (ArgPassingKind != Sema::FAPK_VAList) { 10258 // Find any arguments that weren't covered. 
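// For example (illustrative): in printf("%d", x, y) the argument 'y' is never
// consumed by any specifier; flipping CoveredArgs below exposes it so that
// UncoveredArgHandler can later report it as unused.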
10259 CoveredArgs.flip();
10260 signed notCoveredArg = CoveredArgs.find_first();
10261 if (notCoveredArg >= 0) {
10262 assert((unsigned)notCoveredArg < NumDataArgs);
10263 UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
10264 } else {
10265 UncoveredArg.setAllCovered();
10266 }
10267 }
10268 }
10269
10270 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
10271 const Expr *ArgExpr) {
10272 assert(hasUncoveredArg() && !DiagnosticExprs.empty() &&
10273 "Invalid state");
10274
10275 if (!ArgExpr)
10276 return;
10277
10278 SourceLocation Loc = ArgExpr->getBeginLoc();
10279
10280 if (S.getSourceManager().isInSystemMacro(Loc))
10281 return;
10282
10283 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
10284 for (auto E : DiagnosticExprs)
10285 PDiag << E->getSourceRange();
10286
10287 CheckFormatHandler::EmitFormatDiagnostic(
10288 S, IsFunctionCall, DiagnosticExprs[0],
10289 PDiag, Loc, /*IsStringLocation*/false,
10290 DiagnosticExprs[0]->getSourceRange());
10291 }
10292
10293 bool
10294 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
10295 SourceLocation Loc,
10296 const char *startSpec,
10297 unsigned specifierLen,
10298 const char *csStart,
10299 unsigned csLen) {
10300 bool keepGoing = true;
10301 if (argIndex < NumDataArgs) {
10302 // Consider the argument covered, even though the specifier doesn't
10303 // make sense.
10304 CoveredArgs.set(argIndex);
10305 }
10306 else {
10307 // If argIndex exceeds the number of data arguments we
10308 // don't issue a warning because that is just a cascade of warnings (and
10309 // they may have intended '%%' anyway). We don't want to continue processing
10310 // the format string after this point, however, as we will likely just get
10311 // gibberish when trying to match arguments.
10312 keepGoing = false;
10313 }
10314
10315 StringRef Specifier(csStart, csLen);
10316
10317 // If the specifier is non-printable, it could be the first byte of a UTF-8
10318 // sequence. In that case, print the UTF-8 code point. If not, print the
10319 // byte's hex value.
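// For example (illustrative): a stray control byte such as 0x01 is rendered as
// "\x01", while a well-formed UTF-8 sequence for U+20AC is rendered as "\u20ac".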
10320 std::string CodePointStr; 10321 if (!llvm::sys::locale::isPrint(*csStart)) { 10322 llvm::UTF32 CodePoint; 10323 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 10324 const llvm::UTF8 *E = 10325 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 10326 llvm::ConversionResult Result = 10327 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 10328 10329 if (Result != llvm::conversionOK) { 10330 unsigned char FirstChar = *csStart; 10331 CodePoint = (llvm::UTF32)FirstChar; 10332 } 10333 10334 llvm::raw_string_ostream OS(CodePointStr); 10335 if (CodePoint < 256) 10336 OS << "\\x" << llvm::format("%02x", CodePoint); 10337 else if (CodePoint <= 0xFFFF) 10338 OS << "\\u" << llvm::format("%04x", CodePoint); 10339 else 10340 OS << "\\U" << llvm::format("%08x", CodePoint); 10341 OS.flush(); 10342 Specifier = CodePointStr; 10343 } 10344 10345 EmitFormatDiagnostic( 10346 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 10347 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 10348 10349 return keepGoing; 10350 } 10351 10352 void 10353 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 10354 const char *startSpec, 10355 unsigned specifierLen) { 10356 EmitFormatDiagnostic( 10357 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 10358 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 10359 } 10360 10361 bool 10362 CheckFormatHandler::CheckNumArgs( 10363 const analyze_format_string::FormatSpecifier &FS, 10364 const analyze_format_string::ConversionSpecifier &CS, 10365 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 10366 10367 if (argIndex >= NumDataArgs) { 10368 PartialDiagnostic PDiag = FS.usesPositionalArg() 10369 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 10370 << (argIndex+1) << NumDataArgs) 10371 : S.PDiag(diag::warn_printf_insufficient_data_args); 10372 EmitFormatDiagnostic( 10373 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 10374 getSpecifierRange(startSpecifier, specifierLen)); 10375 10376 // Since more arguments than conversion tokens are given, by extension 10377 // all arguments are covered, so mark this as so. 10378 UncoveredArg.setAllCovered(); 10379 return false; 10380 } 10381 return true; 10382 } 10383 10384 template<typename Range> 10385 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 10386 SourceLocation Loc, 10387 bool IsStringLocation, 10388 Range StringRange, 10389 ArrayRef<FixItHint> FixIt) { 10390 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 10391 Loc, IsStringLocation, StringRange, FixIt); 10392 } 10393 10394 /// If the format string is not within the function call, emit a note 10395 /// so that the function call and string are in diagnostic messages. 10396 /// 10397 /// \param InFunctionCall if true, the format string is within the function 10398 /// call and only one diagnostic message will be produced. Otherwise, an 10399 /// extra note will be emitted pointing to location of the format string. 10400 /// 10401 /// \param ArgumentExpr the expression that is passed as the format string 10402 /// argument in the function call. Used for getting locations when two 10403 /// diagnostics are emitted. 10404 /// 10405 /// \param PDiag the callee should already have provided any strings for the 10406 /// diagnostic message. This function only adds locations and fixits 10407 /// to diagnostics. 
10408 /// 10409 /// \param Loc primary location for diagnostic. If two diagnostics are 10410 /// required, one will be at Loc and a new SourceLocation will be created for 10411 /// the other one. 10412 /// 10413 /// \param IsStringLocation if true, Loc points to the format string should be 10414 /// used for the note. Otherwise, Loc points to the argument list and will 10415 /// be used with PDiag. 10416 /// 10417 /// \param StringRange some or all of the string to highlight. This is 10418 /// templated so it can accept either a CharSourceRange or a SourceRange. 10419 /// 10420 /// \param FixIt optional fix it hint for the format string. 10421 template <typename Range> 10422 void CheckFormatHandler::EmitFormatDiagnostic( 10423 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 10424 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 10425 Range StringRange, ArrayRef<FixItHint> FixIt) { 10426 if (InFunctionCall) { 10427 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 10428 D << StringRange; 10429 D << FixIt; 10430 } else { 10431 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 10432 << ArgumentExpr->getSourceRange(); 10433 10434 const Sema::SemaDiagnosticBuilder &Note = 10435 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 10436 diag::note_format_string_defined); 10437 10438 Note << StringRange; 10439 Note << FixIt; 10440 } 10441 } 10442 10443 //===--- CHECK: Printf format string checking ------------------------------===// 10444 10445 namespace { 10446 10447 class CheckPrintfHandler : public CheckFormatHandler { 10448 public: 10449 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 10450 const Expr *origFormatExpr, 10451 const Sema::FormatStringType type, unsigned firstDataArg, 10452 unsigned numDataArgs, bool isObjC, const char *beg, 10453 Sema::FormatArgumentPassingKind APK, 10454 ArrayRef<const Expr *> Args, unsigned formatIdx, 10455 bool inFunctionCall, Sema::VariadicCallType CallType, 10456 llvm::SmallBitVector &CheckedVarArgs, 10457 UncoveredArgHandler &UncoveredArg) 10458 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 10459 numDataArgs, beg, APK, Args, formatIdx, 10460 inFunctionCall, CallType, CheckedVarArgs, 10461 UncoveredArg) {} 10462 10463 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 10464 10465 /// Returns true if '%@' specifiers are allowed in the format string. 
10466 bool allowsObjCArg() const { 10467 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 10468 FSType == Sema::FST_OSTrace; 10469 } 10470 10471 bool HandleInvalidPrintfConversionSpecifier( 10472 const analyze_printf::PrintfSpecifier &FS, 10473 const char *startSpecifier, 10474 unsigned specifierLen) override; 10475 10476 void handleInvalidMaskType(StringRef MaskType) override; 10477 10478 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 10479 const char *startSpecifier, unsigned specifierLen, 10480 const TargetInfo &Target) override; 10481 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 10482 const char *StartSpecifier, 10483 unsigned SpecifierLen, 10484 const Expr *E); 10485 10486 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 10487 const char *startSpecifier, unsigned specifierLen); 10488 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 10489 const analyze_printf::OptionalAmount &Amt, 10490 unsigned type, 10491 const char *startSpecifier, unsigned specifierLen); 10492 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 10493 const analyze_printf::OptionalFlag &flag, 10494 const char *startSpecifier, unsigned specifierLen); 10495 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 10496 const analyze_printf::OptionalFlag &ignoredFlag, 10497 const analyze_printf::OptionalFlag &flag, 10498 const char *startSpecifier, unsigned specifierLen); 10499 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 10500 const Expr *E); 10501 10502 void HandleEmptyObjCModifierFlag(const char *startFlag, 10503 unsigned flagLen) override; 10504 10505 void HandleInvalidObjCModifierFlag(const char *startFlag, 10506 unsigned flagLen) override; 10507 10508 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 10509 const char *flagsEnd, 10510 const char *conversionPosition) 10511 override; 10512 }; 10513 10514 } // namespace 10515 10516 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 10517 const analyze_printf::PrintfSpecifier &FS, 10518 const char *startSpecifier, 10519 unsigned specifierLen) { 10520 const analyze_printf::PrintfConversionSpecifier &CS = 10521 FS.getConversionSpecifier(); 10522 10523 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 10524 getLocationOfByte(CS.getStart()), 10525 startSpecifier, specifierLen, 10526 CS.getStart(), CS.getLength()); 10527 } 10528 10529 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 10530 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 10531 } 10532 10533 bool CheckPrintfHandler::HandleAmount( 10534 const analyze_format_string::OptionalAmount &Amt, unsigned k, 10535 const char *startSpecifier, unsigned specifierLen) { 10536 if (Amt.hasDataArgument()) { 10537 if (ArgPassingKind != Sema::FAPK_VAList) { 10538 unsigned argIndex = Amt.getArgIndex(); 10539 if (argIndex >= NumDataArgs) { 10540 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 10541 << k, 10542 getLocationOfByte(Amt.getStart()), 10543 /*IsStringLocation*/ true, 10544 getSpecifierRange(startSpecifier, specifierLen)); 10545 // Don't do any more checking. We will just emit 10546 // spurious errors. 10547 return false; 10548 } 10549 10550 // Type check the data argument. It should be an 'int'. 10551 // Although not in conformance with C99, we also allow the argument to be 10552 // an 'unsigned int' as that is a reasonably safe case. 
GCC also 10553 // doesn't emit a warning for that case. 10554 CoveredArgs.set(argIndex); 10555 const Expr *Arg = getDataArg(argIndex); 10556 if (!Arg) 10557 return false; 10558 10559 QualType T = Arg->getType(); 10560 10561 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 10562 assert(AT.isValid()); 10563 10564 if (!AT.matchesType(S.Context, T)) { 10565 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 10566 << k << AT.getRepresentativeTypeName(S.Context) 10567 << T << Arg->getSourceRange(), 10568 getLocationOfByte(Amt.getStart()), 10569 /*IsStringLocation*/true, 10570 getSpecifierRange(startSpecifier, specifierLen)); 10571 // Don't do any more checking. We will just emit 10572 // spurious errors. 10573 return false; 10574 } 10575 } 10576 } 10577 return true; 10578 } 10579 10580 void CheckPrintfHandler::HandleInvalidAmount( 10581 const analyze_printf::PrintfSpecifier &FS, 10582 const analyze_printf::OptionalAmount &Amt, 10583 unsigned type, 10584 const char *startSpecifier, 10585 unsigned specifierLen) { 10586 const analyze_printf::PrintfConversionSpecifier &CS = 10587 FS.getConversionSpecifier(); 10588 10589 FixItHint fixit = 10590 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 10591 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 10592 Amt.getConstantLength())) 10593 : FixItHint(); 10594 10595 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 10596 << type << CS.toString(), 10597 getLocationOfByte(Amt.getStart()), 10598 /*IsStringLocation*/true, 10599 getSpecifierRange(startSpecifier, specifierLen), 10600 fixit); 10601 } 10602 10603 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 10604 const analyze_printf::OptionalFlag &flag, 10605 const char *startSpecifier, 10606 unsigned specifierLen) { 10607 // Warn about pointless flag with a fixit removal. 10608 const analyze_printf::PrintfConversionSpecifier &CS = 10609 FS.getConversionSpecifier(); 10610 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 10611 << flag.toString() << CS.toString(), 10612 getLocationOfByte(flag.getPosition()), 10613 /*IsStringLocation*/true, 10614 getSpecifierRange(startSpecifier, specifierLen), 10615 FixItHint::CreateRemoval( 10616 getSpecifierRange(flag.getPosition(), 1))); 10617 } 10618 10619 void CheckPrintfHandler::HandleIgnoredFlag( 10620 const analyze_printf::PrintfSpecifier &FS, 10621 const analyze_printf::OptionalFlag &ignoredFlag, 10622 const analyze_printf::OptionalFlag &flag, 10623 const char *startSpecifier, 10624 unsigned specifierLen) { 10625 // Warn about ignored flag with a fixit removal. 10626 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 10627 << ignoredFlag.toString() << flag.toString(), 10628 getLocationOfByte(ignoredFlag.getPosition()), 10629 /*IsStringLocation*/true, 10630 getSpecifierRange(startSpecifier, specifierLen), 10631 FixItHint::CreateRemoval( 10632 getSpecifierRange(ignoredFlag.getPosition(), 1))); 10633 } 10634 10635 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 10636 unsigned flagLen) { 10637 // Warn about an empty flag. 10638 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 10639 getLocationOfByte(startFlag), 10640 /*IsStringLocation*/true, 10641 getSpecifierRange(startFlag, flagLen)); 10642 } 10643 10644 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 10645 unsigned flagLen) { 10646 // Warn about an invalid flag. 
10647 auto Range = getSpecifierRange(startFlag, flagLen); 10648 StringRef flag(startFlag, flagLen); 10649 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 10650 getLocationOfByte(startFlag), 10651 /*IsStringLocation*/true, 10652 Range, FixItHint::CreateRemoval(Range)); 10653 } 10654 10655 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 10656 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 10657 // Warn about using '[...]' without a '@' conversion. 10658 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 10659 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 10660 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 10661 getLocationOfByte(conversionPosition), 10662 /*IsStringLocation*/true, 10663 Range, FixItHint::CreateRemoval(Range)); 10664 } 10665 10666 // Determines if the specified is a C++ class or struct containing 10667 // a member with the specified name and kind (e.g. a CXXMethodDecl named 10668 // "c_str()"). 10669 template<typename MemberKind> 10670 static llvm::SmallPtrSet<MemberKind*, 1> 10671 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 10672 const RecordType *RT = Ty->getAs<RecordType>(); 10673 llvm::SmallPtrSet<MemberKind*, 1> Results; 10674 10675 if (!RT) 10676 return Results; 10677 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 10678 if (!RD || !RD->getDefinition()) 10679 return Results; 10680 10681 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 10682 Sema::LookupMemberName); 10683 R.suppressDiagnostics(); 10684 10685 // We just need to include all members of the right kind turned up by the 10686 // filter, at this point. 10687 if (S.LookupQualifiedName(R, RT->getDecl())) 10688 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 10689 NamedDecl *decl = (*I)->getUnderlyingDecl(); 10690 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 10691 Results.insert(FK); 10692 } 10693 return Results; 10694 } 10695 10696 /// Check if we could call '.c_str()' on an object. 10697 /// 10698 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 10699 /// allow the call, or if it would be ambiguous). 10700 bool Sema::hasCStrMethod(const Expr *E) { 10701 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 10702 10703 MethodSet Results = 10704 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 10705 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 10706 MI != ME; ++MI) 10707 if ((*MI)->getMinRequiredArguments() == 0) 10708 return true; 10709 return false; 10710 } 10711 10712 // Check if a (w)string was passed when a (w)char* was needed, and offer a 10713 // better diagnostic if so. AT is assumed to be valid. 10714 // Returns true when a c_str() conversion method is found. 10715 bool CheckPrintfHandler::checkForCStrMembers( 10716 const analyze_printf::ArgType &AT, const Expr *E) { 10717 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 10718 10719 MethodSet Results = 10720 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 10721 10722 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 10723 MI != ME; ++MI) { 10724 const CXXMethodDecl *Method = *MI; 10725 if (Method->getMinRequiredArguments() == 0 && 10726 AT.matchesType(S.Context, Method->getReturnType())) { 10727 // FIXME: Suggest parens if the expression needs them. 
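// For example (illustrative): for printf("%s", S) where S is a std::string,
// the note below suggests calling c_str() and inserts ".c_str()" after S.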
10728 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 10729 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 10730 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 10731 return true; 10732 } 10733 } 10734 10735 return false; 10736 } 10737 10738 bool CheckPrintfHandler::HandlePrintfSpecifier( 10739 const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, 10740 unsigned specifierLen, const TargetInfo &Target) { 10741 using namespace analyze_format_string; 10742 using namespace analyze_printf; 10743 10744 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 10745 10746 if (FS.consumesDataArgument()) { 10747 if (atFirstArg) { 10748 atFirstArg = false; 10749 usesPositionalArgs = FS.usesPositionalArg(); 10750 } 10751 else if (usesPositionalArgs != FS.usesPositionalArg()) { 10752 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 10753 startSpecifier, specifierLen); 10754 return false; 10755 } 10756 } 10757 10758 // First check if the field width, precision, and conversion specifier 10759 // have matching data arguments. 10760 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 10761 startSpecifier, specifierLen)) { 10762 return false; 10763 } 10764 10765 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 10766 startSpecifier, specifierLen)) { 10767 return false; 10768 } 10769 10770 if (!CS.consumesDataArgument()) { 10771 // FIXME: Technically specifying a precision or field width here 10772 // makes no sense. Worth issuing a warning at some point. 10773 return true; 10774 } 10775 10776 // Consume the argument. 10777 unsigned argIndex = FS.getArgIndex(); 10778 if (argIndex < NumDataArgs) { 10779 // The check to see if the argIndex is valid will come later. 10780 // We set the bit here because we may exit early from this 10781 // function if we encounter some other error. 10782 CoveredArgs.set(argIndex); 10783 } 10784 10785 // FreeBSD kernel extensions. 10786 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 10787 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 10788 // We need at least two arguments. 10789 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 10790 return false; 10791 10792 // Claim the second argument. 10793 CoveredArgs.set(argIndex + 1); 10794 10795 // Type check the first argument (int for %b, pointer for %D) 10796 const Expr *Ex = getDataArg(argIndex); 10797 const analyze_printf::ArgType &AT = 10798 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 
10799 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 10800 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 10801 EmitFormatDiagnostic( 10802 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10803 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 10804 << false << Ex->getSourceRange(), 10805 Ex->getBeginLoc(), /*IsStringLocation*/ false, 10806 getSpecifierRange(startSpecifier, specifierLen)); 10807 10808 // Type check the second argument (char * for both %b and %D) 10809 Ex = getDataArg(argIndex + 1); 10810 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 10811 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 10812 EmitFormatDiagnostic( 10813 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10814 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 10815 << false << Ex->getSourceRange(), 10816 Ex->getBeginLoc(), /*IsStringLocation*/ false, 10817 getSpecifierRange(startSpecifier, specifierLen)); 10818 10819 return true; 10820 } 10821 10822 // Check for using an Objective-C specific conversion specifier 10823 // in a non-ObjC literal. 10824 if (!allowsObjCArg() && CS.isObjCArg()) { 10825 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 10826 specifierLen); 10827 } 10828 10829 // %P can only be used with os_log. 10830 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 10831 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 10832 specifierLen); 10833 } 10834 10835 // %n is not allowed with os_log. 10836 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 10837 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 10838 getLocationOfByte(CS.getStart()), 10839 /*IsStringLocation*/ false, 10840 getSpecifierRange(startSpecifier, specifierLen)); 10841 10842 return true; 10843 } 10844 10845 // Only scalars are allowed for os_trace. 10846 if (FSType == Sema::FST_OSTrace && 10847 (CS.getKind() == ConversionSpecifier::PArg || 10848 CS.getKind() == ConversionSpecifier::sArg || 10849 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 10850 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 10851 specifierLen); 10852 } 10853 10854 // Check for use of public/private annotation outside of os_log(). 
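// For example (illustrative): printf("%{private}s", s) uses an os_log-only
// privacy annotation and is flagged by the checks below.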
10855 if (FSType != Sema::FST_OSLog) { 10856 if (FS.isPublic().isSet()) { 10857 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 10858 << "public", 10859 getLocationOfByte(FS.isPublic().getPosition()), 10860 /*IsStringLocation*/ false, 10861 getSpecifierRange(startSpecifier, specifierLen)); 10862 } 10863 if (FS.isPrivate().isSet()) { 10864 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 10865 << "private", 10866 getLocationOfByte(FS.isPrivate().getPosition()), 10867 /*IsStringLocation*/ false, 10868 getSpecifierRange(startSpecifier, specifierLen)); 10869 } 10870 } 10871 10872 const llvm::Triple &Triple = Target.getTriple(); 10873 if (CS.getKind() == ConversionSpecifier::nArg && 10874 (Triple.isAndroid() || Triple.isOSFuchsia())) { 10875 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported), 10876 getLocationOfByte(CS.getStart()), 10877 /*IsStringLocation*/ false, 10878 getSpecifierRange(startSpecifier, specifierLen)); 10879 } 10880 10881 // Check for invalid use of field width 10882 if (!FS.hasValidFieldWidth()) { 10883 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 10884 startSpecifier, specifierLen); 10885 } 10886 10887 // Check for invalid use of precision 10888 if (!FS.hasValidPrecision()) { 10889 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 10890 startSpecifier, specifierLen); 10891 } 10892 10893 // Precision is mandatory for %P specifier. 10894 if (CS.getKind() == ConversionSpecifier::PArg && 10895 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 10896 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 10897 getLocationOfByte(startSpecifier), 10898 /*IsStringLocation*/ false, 10899 getSpecifierRange(startSpecifier, specifierLen)); 10900 } 10901 10902 // Check each flag does not conflict with any other component. 10903 if (!FS.hasValidThousandsGroupingPrefix()) 10904 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 10905 if (!FS.hasValidLeadingZeros()) 10906 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 10907 if (!FS.hasValidPlusPrefix()) 10908 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 10909 if (!FS.hasValidSpacePrefix()) 10910 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 10911 if (!FS.hasValidAlternativeForm()) 10912 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 10913 if (!FS.hasValidLeftJustified()) 10914 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 10915 10916 // Check that flags are not ignored by another flag 10917 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 10918 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 10919 startSpecifier, specifierLen); 10920 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 10921 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 10922 startSpecifier, specifierLen); 10923 10924 // Check the length modifier is valid with the given conversion specifier. 
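// For example (illustrative): "%Ld" pairs 'L' with an integer conversion and is
// reported as nonsensical below, while a valid but non-standard modifier such
// as 'q' in "%qd" gets the non-standard-length warning instead.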
10925 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 10926 S.getLangOpts())) 10927 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10928 diag::warn_format_nonsensical_length); 10929 else if (!FS.hasStandardLengthModifier()) 10930 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 10931 else if (!FS.hasStandardLengthConversionCombination()) 10932 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10933 diag::warn_format_non_standard_conversion_spec); 10934 10935 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 10936 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 10937 10938 // The remaining checks depend on the data arguments. 10939 if (ArgPassingKind == Sema::FAPK_VAList) 10940 return true; 10941 10942 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 10943 return false; 10944 10945 const Expr *Arg = getDataArg(argIndex); 10946 if (!Arg) 10947 return true; 10948 10949 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 10950 } 10951 10952 static bool requiresParensToAddCast(const Expr *E) { 10953 // FIXME: We should have a general way to reason about operator 10954 // precedence and whether parens are actually needed here. 10955 // Take care of a few common cases where they aren't. 10956 const Expr *Inside = E->IgnoreImpCasts(); 10957 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 10958 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 10959 10960 switch (Inside->getStmtClass()) { 10961 case Stmt::ArraySubscriptExprClass: 10962 case Stmt::CallExprClass: 10963 case Stmt::CharacterLiteralClass: 10964 case Stmt::CXXBoolLiteralExprClass: 10965 case Stmt::DeclRefExprClass: 10966 case Stmt::FloatingLiteralClass: 10967 case Stmt::IntegerLiteralClass: 10968 case Stmt::MemberExprClass: 10969 case Stmt::ObjCArrayLiteralClass: 10970 case Stmt::ObjCBoolLiteralExprClass: 10971 case Stmt::ObjCBoxedExprClass: 10972 case Stmt::ObjCDictionaryLiteralClass: 10973 case Stmt::ObjCEncodeExprClass: 10974 case Stmt::ObjCIvarRefExprClass: 10975 case Stmt::ObjCMessageExprClass: 10976 case Stmt::ObjCPropertyRefExprClass: 10977 case Stmt::ObjCStringLiteralClass: 10978 case Stmt::ObjCSubscriptRefExprClass: 10979 case Stmt::ParenExprClass: 10980 case Stmt::StringLiteralClass: 10981 case Stmt::UnaryOperatorClass: 10982 return false; 10983 default: 10984 return true; 10985 } 10986 } 10987 10988 static std::pair<QualType, StringRef> 10989 shouldNotPrintDirectly(const ASTContext &Context, 10990 QualType IntendedTy, 10991 const Expr *E) { 10992 // Use a 'while' to peel off layers of typedefs. 10993 QualType TyTy = IntendedTy; 10994 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 10995 StringRef Name = UserTy->getDecl()->getName(); 10996 QualType CastTy = llvm::StringSwitch<QualType>(Name) 10997 .Case("CFIndex", Context.getNSIntegerType()) 10998 .Case("NSInteger", Context.getNSIntegerType()) 10999 .Case("NSUInteger", Context.getNSUIntegerType()) 11000 .Case("SInt32", Context.IntTy) 11001 .Case("UInt32", Context.UnsignedIntTy) 11002 .Default(QualType()); 11003 11004 if (!CastTy.isNull()) 11005 return std::make_pair(CastTy, Name); 11006 11007 TyTy = UserTy->desugar(); 11008 } 11009 11010 // Strip parens if necessary. 
11011 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 11012 return shouldNotPrintDirectly(Context, 11013 PE->getSubExpr()->getType(), 11014 PE->getSubExpr()); 11015 11016 // If this is a conditional expression, then its result type is constructed 11017 // via usual arithmetic conversions and thus there might be no necessary 11018 // typedef sugar there. Recurse to operands to check for NSInteger & 11019 // Co. usage condition. 11020 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 11021 QualType TrueTy, FalseTy; 11022 StringRef TrueName, FalseName; 11023 11024 std::tie(TrueTy, TrueName) = 11025 shouldNotPrintDirectly(Context, 11026 CO->getTrueExpr()->getType(), 11027 CO->getTrueExpr()); 11028 std::tie(FalseTy, FalseName) = 11029 shouldNotPrintDirectly(Context, 11030 CO->getFalseExpr()->getType(), 11031 CO->getFalseExpr()); 11032 11033 if (TrueTy == FalseTy) 11034 return std::make_pair(TrueTy, TrueName); 11035 else if (TrueTy.isNull()) 11036 return std::make_pair(FalseTy, FalseName); 11037 else if (FalseTy.isNull()) 11038 return std::make_pair(TrueTy, TrueName); 11039 } 11040 11041 return std::make_pair(QualType(), StringRef()); 11042 } 11043 11044 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 11045 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 11046 /// type do not count. 11047 static bool 11048 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 11049 QualType From = ICE->getSubExpr()->getType(); 11050 QualType To = ICE->getType(); 11051 // It's an integer promotion if the destination type is the promoted 11052 // source type. 11053 if (ICE->getCastKind() == CK_IntegralCast && 11054 S.Context.isPromotableIntegerType(From) && 11055 S.Context.getPromotedIntegerType(From) == To) 11056 return true; 11057 // Look through vector types, since we do default argument promotion for 11058 // those in OpenCL. 11059 if (const auto *VecTy = From->getAs<ExtVectorType>()) 11060 From = VecTy->getElementType(); 11061 if (const auto *VecTy = To->getAs<ExtVectorType>()) 11062 To = VecTy->getElementType(); 11063 // It's a floating promotion if the source type is a lower rank. 11064 return ICE->getCastKind() == CK_FloatingCast && 11065 S.Context.getFloatingTypeOrder(From, To) < 0; 11066 } 11067 11068 bool 11069 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 11070 const char *StartSpecifier, 11071 unsigned SpecifierLen, 11072 const Expr *E) { 11073 using namespace analyze_format_string; 11074 using namespace analyze_printf; 11075 11076 // Now type check the data expression that matches the 11077 // format specifier. 11078 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 11079 if (!AT.isValid()) 11080 return true; 11081 11082 QualType ExprTy = E->getType(); 11083 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 11084 ExprTy = TET->getUnderlyingExpr()->getType(); 11085 } 11086 11087 // When using the format attribute in C++, you can receive a function or an 11088 // array that will necessarily decay to a pointer when passed to the final 11089 // format consumer. Apply decay before type comparison. 11090 if (ExprTy->canDecayToPointerType()) 11091 ExprTy = S.Context.getDecayedType(ExprTy); 11092 11093 // Diagnose attempts to print a boolean value as a character. Unlike other 11094 // -Wformat diagnostics, this is fine from a type perspective, but it still 11095 // doesn't make sense. 
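// For example (illustrative): printf("%c", x > 0) passes a boolean value to
// '%c'; the check below flags it even though the promotion to 'int' is valid.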
11096 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 11097 E->isKnownToHaveBooleanValue()) { 11098 const CharSourceRange &CSR = 11099 getSpecifierRange(StartSpecifier, SpecifierLen); 11100 SmallString<4> FSString; 11101 llvm::raw_svector_ostream os(FSString); 11102 FS.toString(os); 11103 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 11104 << FSString, 11105 E->getExprLoc(), false, CSR); 11106 return true; 11107 } 11108 11109 ArgType::MatchKind ImplicitMatch = ArgType::NoMatch; 11110 ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 11111 if (Match == ArgType::Match) 11112 return true; 11113 11114 // NoMatchPromotionTypeConfusion should be only returned in ImplictCastExpr 11115 assert(Match != ArgType::NoMatchPromotionTypeConfusion); 11116 11117 // Look through argument promotions for our error message's reported type. 11118 // This includes the integral and floating promotions, but excludes array 11119 // and function pointer decay (seeing that an argument intended to be a 11120 // string has type 'char [6]' is probably more confusing than 'char *') and 11121 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 11122 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 11123 if (isArithmeticArgumentPromotion(S, ICE)) { 11124 E = ICE->getSubExpr(); 11125 ExprTy = E->getType(); 11126 11127 // Check if we didn't match because of an implicit cast from a 'char' 11128 // or 'short' to an 'int'. This is done because printf is a varargs 11129 // function. 11130 if (ICE->getType() == S.Context.IntTy || 11131 ICE->getType() == S.Context.UnsignedIntTy) { 11132 // All further checking is done on the subexpression 11133 ImplicitMatch = AT.matchesType(S.Context, ExprTy); 11134 if (ImplicitMatch == ArgType::Match) 11135 return true; 11136 } 11137 } 11138 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 11139 // Special case for 'a', which has type 'int' in C. 11140 // Note, however, that we do /not/ want to treat multibyte constants like 11141 // 'MooV' as characters! This form is deprecated but still exists. In 11142 // addition, don't treat expressions as of type 'char' if one byte length 11143 // modifier is provided. 11144 if (ExprTy == S.Context.IntTy && 11145 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 11146 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) { 11147 ExprTy = S.Context.CharTy; 11148 // To improve check results, we consider a character literal in C 11149 // to be a 'char' rather than an 'int'. 'printf("%hd", 'a');' is 11150 // more likely a type confusion situation, so we will suggest to 11151 // use '%hhd' instead by discarding the MatchPromotion. 11152 if (Match == ArgType::MatchPromotion) 11153 Match = ArgType::NoMatch; 11154 } 11155 } 11156 if (Match == ArgType::MatchPromotion) { 11157 // WG14 N2562 only clarified promotions in *printf 11158 // For NSLog in ObjC, just preserve -Wformat behavior 11159 if (!S.getLangOpts().ObjC && 11160 ImplicitMatch != ArgType::NoMatchPromotionTypeConfusion && 11161 ImplicitMatch != ArgType::NoMatchTypeConfusion) 11162 return true; 11163 Match = ArgType::NoMatch; 11164 } 11165 if (ImplicitMatch == ArgType::NoMatchPedantic || 11166 ImplicitMatch == ArgType::NoMatchTypeConfusion) 11167 Match = ImplicitMatch; 11168 assert(Match != ArgType::MatchPromotion); 11169 11170 // Look through unscoped enums to their underlying type. 
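// For example (illustrative): for an unscoped enum whose underlying type is
// 'unsigned int', printf("%d", e) is checked against 'unsigned int'; a scoped
// enum keeps its own type and later receives a cast suggestion instead.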
11171 bool IsEnum = false; 11172 bool IsScopedEnum = false; 11173 QualType IntendedTy = ExprTy; 11174 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 11175 IntendedTy = EnumTy->getDecl()->getIntegerType(); 11176 if (EnumTy->isUnscopedEnumerationType()) { 11177 ExprTy = IntendedTy; 11178 // This controls whether we're talking about the underlying type or not, 11179 // which we only want to do when it's an unscoped enum. 11180 IsEnum = true; 11181 } else { 11182 IsScopedEnum = true; 11183 } 11184 } 11185 11186 // %C in an Objective-C context prints a unichar, not a wchar_t. 11187 // If the argument is an integer of some kind, believe the %C and suggest 11188 // a cast instead of changing the conversion specifier. 11189 if (isObjCContext() && 11190 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 11191 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 11192 !ExprTy->isCharType()) { 11193 // 'unichar' is defined as a typedef of unsigned short, but we should 11194 // prefer using the typedef if it is visible. 11195 IntendedTy = S.Context.UnsignedShortTy; 11196 11197 // While we are here, check if the value is an IntegerLiteral that happens 11198 // to be within the valid range. 11199 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 11200 const llvm::APInt &V = IL->getValue(); 11201 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 11202 return true; 11203 } 11204 11205 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 11206 Sema::LookupOrdinaryName); 11207 if (S.LookupName(Result, S.getCurScope())) { 11208 NamedDecl *ND = Result.getFoundDecl(); 11209 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 11210 if (TD->getUnderlyingType() == IntendedTy) 11211 IntendedTy = S.Context.getTypedefType(TD); 11212 } 11213 } 11214 } 11215 11216 // Special-case some of Darwin's platform-independence types by suggesting 11217 // casts to primitive types that are known to be large enough. 11218 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 11219 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 11220 QualType CastTy; 11221 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 11222 if (!CastTy.isNull()) { 11223 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 11224 // (long in ASTContext). Only complain to pedants or when they're the 11225 // underlying type of a scoped enum (which always needs a cast). 11226 if (!IsScopedEnum && 11227 (CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 11228 (AT.isSizeT() || AT.isPtrdiffT()) && 11229 AT.matchesType(S.Context, CastTy)) 11230 Match = ArgType::NoMatchPedantic; 11231 IntendedTy = CastTy; 11232 ShouldNotPrintDirectly = true; 11233 } 11234 } 11235 11236 // We may be able to offer a FixItHint if it is a supported type. 
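// For example (illustrative): for printf("%d", n) where 'n' is a long, the
// fixed specifier computed below is "%ld" and the fix-it replaces only the
// specifier range.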
11237 PrintfSpecifier fixedFS = FS; 11238 bool Success = 11239 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 11240 11241 if (Success) { 11242 // Get the fix string from the fixed format specifier 11243 SmallString<16> buf; 11244 llvm::raw_svector_ostream os(buf); 11245 fixedFS.toString(os); 11246 11247 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 11248 11249 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly && !IsScopedEnum) { 11250 unsigned Diag; 11251 switch (Match) { 11252 case ArgType::Match: 11253 case ArgType::MatchPromotion: 11254 case ArgType::NoMatchPromotionTypeConfusion: 11255 llvm_unreachable("expected non-matching"); 11256 case ArgType::NoMatchPedantic: 11257 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 11258 break; 11259 case ArgType::NoMatchTypeConfusion: 11260 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 11261 break; 11262 case ArgType::NoMatch: 11263 Diag = diag::warn_format_conversion_argument_type_mismatch; 11264 break; 11265 } 11266 11267 // In this case, the specifier is wrong and should be changed to match 11268 // the argument. 11269 EmitFormatDiagnostic(S.PDiag(Diag) 11270 << AT.getRepresentativeTypeName(S.Context) 11271 << IntendedTy << IsEnum << E->getSourceRange(), 11272 E->getBeginLoc(), 11273 /*IsStringLocation*/ false, SpecRange, 11274 FixItHint::CreateReplacement(SpecRange, os.str())); 11275 } else { 11276 // The canonical type for formatting this value is different from the 11277 // actual type of the expression. (This occurs, for example, with Darwin's 11278 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 11279 // should be printed as 'long' for 64-bit compatibility.) 11280 // Rather than emitting a normal format/argument mismatch, we want to 11281 // add a cast to the recommended type (and correct the format string 11282 // if necessary). We should also do so for scoped enumerations. 11283 SmallString<16> CastBuf; 11284 llvm::raw_svector_ostream CastFix(CastBuf); 11285 CastFix << (S.LangOpts.CPlusPlus ? "static_cast<" : "("); 11286 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 11287 CastFix << (S.LangOpts.CPlusPlus ? ">" : ")"); 11288 11289 SmallVector<FixItHint,4> Hints; 11290 if (AT.matchesType(S.Context, IntendedTy) != ArgType::Match || 11291 ShouldNotPrintDirectly) 11292 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 11293 11294 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 11295 // If there's already a cast present, just replace it. 11296 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 11297 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 11298 11299 } else if (!requiresParensToAddCast(E) && !S.LangOpts.CPlusPlus) { 11300 // If the expression has high enough precedence, 11301 // just write the C-style cast. 11302 Hints.push_back( 11303 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 11304 } else { 11305 // Otherwise, add parens around the expression as well as the cast. 11306 CastFix << "("; 11307 Hints.push_back( 11308 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 11309 11310 // We don't use getLocForEndOfToken because it returns invalid source 11311 // locations for macro expansions (by design). 
11312 SourceLocation EndLoc = S.SourceMgr.getSpellingLoc(E->getEndLoc()); 11313 SourceLocation After = EndLoc.getLocWithOffset( 11314 Lexer::MeasureTokenLength(EndLoc, S.SourceMgr, S.LangOpts)); 11315 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 11316 } 11317 11318 if (ShouldNotPrintDirectly && !IsScopedEnum) { 11319 // The expression has a type that should not be printed directly. 11320 // We extract the name from the typedef because we don't want to show 11321 // the underlying type in the diagnostic. 11322 StringRef Name; 11323 if (const auto *TypedefTy = ExprTy->getAs<TypedefType>()) 11324 Name = TypedefTy->getDecl()->getName(); 11325 else 11326 Name = CastTyName; 11327 unsigned Diag = Match == ArgType::NoMatchPedantic 11328 ? diag::warn_format_argument_needs_cast_pedantic 11329 : diag::warn_format_argument_needs_cast; 11330 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 11331 << E->getSourceRange(), 11332 E->getBeginLoc(), /*IsStringLocation=*/false, 11333 SpecRange, Hints); 11334 } else { 11335 // In this case, the expression could be printed using a different 11336 // specifier, but we've decided that the specifier is probably correct 11337 // and we should cast instead. Just use the normal warning message. 11338 EmitFormatDiagnostic( 11339 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 11340 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 11341 << E->getSourceRange(), 11342 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 11343 } 11344 } 11345 } else { 11346 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 11347 SpecifierLen); 11348 // Since the warning for passing non-POD types to variadic functions 11349 // was deferred until now, we emit a warning for non-POD 11350 // arguments here. 
11351 bool EmitTypeMismatch = false; 11352 switch (S.isValidVarArgType(ExprTy)) { 11353 case Sema::VAK_Valid: 11354 case Sema::VAK_ValidInCXX11: { 11355 unsigned Diag; 11356 switch (Match) { 11357 case ArgType::Match: 11358 case ArgType::MatchPromotion: 11359 case ArgType::NoMatchPromotionTypeConfusion: 11360 llvm_unreachable("expected non-matching"); 11361 case ArgType::NoMatchPedantic: 11362 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 11363 break; 11364 case ArgType::NoMatchTypeConfusion: 11365 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 11366 break; 11367 case ArgType::NoMatch: 11368 Diag = diag::warn_format_conversion_argument_type_mismatch; 11369 break; 11370 } 11371 11372 EmitFormatDiagnostic( 11373 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 11374 << IsEnum << CSR << E->getSourceRange(), 11375 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 11376 break; 11377 } 11378 case Sema::VAK_Undefined: 11379 case Sema::VAK_MSVCUndefined: 11380 if (CallType == Sema::VariadicDoesNotApply) { 11381 EmitTypeMismatch = true; 11382 } else { 11383 EmitFormatDiagnostic( 11384 S.PDiag(diag::warn_non_pod_vararg_with_format_string) 11385 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 11386 << AT.getRepresentativeTypeName(S.Context) << CSR 11387 << E->getSourceRange(), 11388 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 11389 checkForCStrMembers(AT, E); 11390 } 11391 break; 11392 11393 case Sema::VAK_Invalid: 11394 if (CallType == Sema::VariadicDoesNotApply) 11395 EmitTypeMismatch = true; 11396 else if (ExprTy->isObjCObjectType()) 11397 EmitFormatDiagnostic( 11398 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 11399 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 11400 << AT.getRepresentativeTypeName(S.Context) << CSR 11401 << E->getSourceRange(), 11402 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 11403 else 11404 // FIXME: If this is an initializer list, suggest removing the braces 11405 // or inserting a cast to the target type. 11406 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 11407 << isa<InitListExpr>(E) << ExprTy << CallType 11408 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 11409 break; 11410 } 11411 11412 if (EmitTypeMismatch) { 11413 // The function is not variadic, so we do not generate warnings about 11414 // being allowed to pass that object as a variadic argument. Instead, 11415 // since there are inherently no printf specifiers for types which cannot 11416 // be passed as variadic arguments, emit a plain old specifier mismatch 11417 // argument. 
11418 EmitFormatDiagnostic( 11419 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 11420 << AT.getRepresentativeTypeName(S.Context) << ExprTy << false 11421 << E->getSourceRange(), 11422 E->getBeginLoc(), false, CSR); 11423 } 11424 11425 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 11426 "format string specifier index out of range"); 11427 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 11428 } 11429 11430 return true; 11431 } 11432 11433 //===--- CHECK: Scanf format string checking ------------------------------===// 11434 11435 namespace { 11436 11437 class CheckScanfHandler : public CheckFormatHandler { 11438 public: 11439 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 11440 const Expr *origFormatExpr, Sema::FormatStringType type, 11441 unsigned firstDataArg, unsigned numDataArgs, 11442 const char *beg, Sema::FormatArgumentPassingKind APK, 11443 ArrayRef<const Expr *> Args, unsigned formatIdx, 11444 bool inFunctionCall, Sema::VariadicCallType CallType, 11445 llvm::SmallBitVector &CheckedVarArgs, 11446 UncoveredArgHandler &UncoveredArg) 11447 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 11448 numDataArgs, beg, APK, Args, formatIdx, 11449 inFunctionCall, CallType, CheckedVarArgs, 11450 UncoveredArg) {} 11451 11452 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 11453 const char *startSpecifier, 11454 unsigned specifierLen) override; 11455 11456 bool HandleInvalidScanfConversionSpecifier( 11457 const analyze_scanf::ScanfSpecifier &FS, 11458 const char *startSpecifier, 11459 unsigned specifierLen) override; 11460 11461 void HandleIncompleteScanList(const char *start, const char *end) override; 11462 }; 11463 11464 } // namespace 11465 11466 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 11467 const char *end) { 11468 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 11469 getLocationOfByte(end), /*IsStringLocation*/true, 11470 getSpecifierRange(start, end - start)); 11471 } 11472 11473 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 11474 const analyze_scanf::ScanfSpecifier &FS, 11475 const char *startSpecifier, 11476 unsigned specifierLen) { 11477 const analyze_scanf::ScanfConversionSpecifier &CS = 11478 FS.getConversionSpecifier(); 11479 11480 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 11481 getLocationOfByte(CS.getStart()), 11482 startSpecifier, specifierLen, 11483 CS.getStart(), CS.getLength()); 11484 } 11485 11486 bool CheckScanfHandler::HandleScanfSpecifier( 11487 const analyze_scanf::ScanfSpecifier &FS, 11488 const char *startSpecifier, 11489 unsigned specifierLen) { 11490 using namespace analyze_scanf; 11491 using namespace analyze_format_string; 11492 11493 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 11494 11495 // Handle case where '%' and '*' don't consume an argument. These shouldn't 11496 // be used to decide if we are using positional arguments consistently. 11497 if (FS.consumesDataArgument()) { 11498 if (atFirstArg) { 11499 atFirstArg = false; 11500 usesPositionalArgs = FS.usesPositionalArg(); 11501 } 11502 else if (usesPositionalArgs != FS.usesPositionalArg()) { 11503 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 11504 startSpecifier, specifierLen); 11505 return false; 11506 } 11507 } 11508 11509 // Check if the field with is non-zero. 
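// For example (illustrative): scanf("%0d", &n) specifies a zero field width,
// which is diagnosed below with a fix-it that removes the width.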
11510 const OptionalAmount &Amt = FS.getFieldWidth(); 11511 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 11512 if (Amt.getConstantAmount() == 0) { 11513 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 11514 Amt.getConstantLength()); 11515 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 11516 getLocationOfByte(Amt.getStart()), 11517 /*IsStringLocation*/true, R, 11518 FixItHint::CreateRemoval(R)); 11519 } 11520 } 11521 11522 if (!FS.consumesDataArgument()) { 11523 // FIXME: Technically specifying a precision or field width here 11524 // makes no sense. Worth issuing a warning at some point. 11525 return true; 11526 } 11527 11528 // Consume the argument. 11529 unsigned argIndex = FS.getArgIndex(); 11530 if (argIndex < NumDataArgs) { 11531 // The check to see if the argIndex is valid will come later. 11532 // We set the bit here because we may exit early from this 11533 // function if we encounter some other error. 11534 CoveredArgs.set(argIndex); 11535 } 11536 11537 // Check the length modifier is valid with the given conversion specifier. 11538 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 11539 S.getLangOpts())) 11540 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 11541 diag::warn_format_nonsensical_length); 11542 else if (!FS.hasStandardLengthModifier()) 11543 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 11544 else if (!FS.hasStandardLengthConversionCombination()) 11545 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 11546 diag::warn_format_non_standard_conversion_spec); 11547 11548 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 11549 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 11550 11551 // The remaining checks depend on the data arguments. 11552 if (ArgPassingKind == Sema::FAPK_VAList) 11553 return true; 11554 11555 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 11556 return false; 11557 11558 // Check that the argument type matches the format specifier. 11559 const Expr *Ex = getDataArg(argIndex); 11560 if (!Ex) 11561 return true; 11562 11563 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 11564 11565 if (!AT.isValid()) { 11566 return true; 11567 } 11568 11569 analyze_format_string::ArgType::MatchKind Match = 11570 AT.matchesType(S.Context, Ex->getType()); 11571 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 11572 if (Match == analyze_format_string::ArgType::Match) 11573 return true; 11574 11575 ScanfSpecifier fixedFS = FS; 11576 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 11577 S.getLangOpts(), S.Context); 11578 11579 unsigned Diag = 11580 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 11581 : diag::warn_format_conversion_argument_type_mismatch; 11582 11583 if (Success) { 11584 // Get the fix string from the fixed format specifier. 
11585 SmallString<128> buf; 11586 llvm::raw_svector_ostream os(buf); 11587 fixedFS.toString(os); 11588 11589 EmitFormatDiagnostic( 11590 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 11591 << Ex->getType() << false << Ex->getSourceRange(), 11592 Ex->getBeginLoc(), 11593 /*IsStringLocation*/ false, 11594 getSpecifierRange(startSpecifier, specifierLen), 11595 FixItHint::CreateReplacement( 11596 getSpecifierRange(startSpecifier, specifierLen), os.str())); 11597 } else { 11598 EmitFormatDiagnostic(S.PDiag(Diag) 11599 << AT.getRepresentativeTypeName(S.Context) 11600 << Ex->getType() << false << Ex->getSourceRange(), 11601 Ex->getBeginLoc(), 11602 /*IsStringLocation*/ false, 11603 getSpecifierRange(startSpecifier, specifierLen)); 11604 } 11605 11606 return true; 11607 } 11608 11609 static void CheckFormatString( 11610 Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr, 11611 ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK, 11612 unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type, 11613 bool inFunctionCall, Sema::VariadicCallType CallType, 11614 llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg, 11615 bool IgnoreStringsWithoutSpecifiers) { 11616 // CHECK: is the format string a wide literal? 11617 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 11618 CheckFormatHandler::EmitFormatDiagnostic( 11619 S, inFunctionCall, Args[format_idx], 11620 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 11621 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 11622 return; 11623 } 11624 11625 // Str - The format string. NOTE: this is NOT null-terminated! 11626 StringRef StrRef = FExpr->getString(); 11627 const char *Str = StrRef.data(); 11628 // Account for cases where the string literal is truncated in a declaration. 11629 const ConstantArrayType *T = 11630 S.Context.getAsConstantArrayType(FExpr->getType()); 11631 assert(T && "String literal not of constant array type!"); 11632 size_t TypeSize = T->getSize().getZExtValue(); 11633 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 11634 const unsigned numDataArgs = Args.size() - firstDataArg; 11635 11636 if (IgnoreStringsWithoutSpecifiers && 11637 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 11638 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 11639 return; 11640 11641 // Emit a warning if the string literal is truncated and does not contain an 11642 // embedded null character. 11643 if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) { 11644 CheckFormatHandler::EmitFormatDiagnostic( 11645 S, inFunctionCall, Args[format_idx], 11646 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 11647 FExpr->getBeginLoc(), 11648 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 11649 return; 11650 } 11651 11652 // CHECK: empty format string? 
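// For example (illustrative): printf("", x) supplies a data argument with an
// empty format string and is diagnosed below.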
11653 if (StrLen == 0 && numDataArgs > 0) {
11654 CheckFormatHandler::EmitFormatDiagnostic(
11655 S, inFunctionCall, Args[format_idx],
11656 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
11657 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
11658 return;
11659 }
11660
11661 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
11662 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
11663 Type == Sema::FST_OSTrace) {
11664 CheckPrintfHandler H(
11665 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
11666 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, APK,
11667 Args, format_idx, inFunctionCall, CallType, CheckedVarArgs,
11668 UncoveredArg);
11669
11670 if (!analyze_format_string::ParsePrintfString(
11671 H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo(),
11672 Type == Sema::FST_FreeBSDKPrintf))
11673 H.DoneProcessing();
11674 } else if (Type == Sema::FST_Scanf) {
11675 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
11676 numDataArgs, Str, APK, Args, format_idx, inFunctionCall,
11677 CallType, CheckedVarArgs, UncoveredArg);
11678
11679 if (!analyze_format_string::ParseScanfString(
11680 H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
11681 H.DoneProcessing();
11682 } // TODO: handle other formats
11683 }
11684
11685 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
11686 // Str - The format string. NOTE: this is NOT null-terminated!
11687 StringRef StrRef = FExpr->getString();
11688 const char *Str = StrRef.data();
11689 // Account for cases where the string literal is truncated in a declaration.
11690 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
11691 assert(T && "String literal not of constant array type!");
11692 size_t TypeSize = T->getSize().getZExtValue();
11693 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
11694 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
11695 getLangOpts(),
11696 Context.getTargetInfo());
11697 }
11698
11699 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
11700
11701 // Returns the related absolute value function that is larger, or 0 if one
11702 // does not exist.
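// For example: __builtin_abs -> __builtin_labs -> __builtin_llabs -> 0, and
// fabsf -> fabs -> fabsl -> 0.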
11703 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 11704 switch (AbsFunction) { 11705 default: 11706 return 0; 11707 11708 case Builtin::BI__builtin_abs: 11709 return Builtin::BI__builtin_labs; 11710 case Builtin::BI__builtin_labs: 11711 return Builtin::BI__builtin_llabs; 11712 case Builtin::BI__builtin_llabs: 11713 return 0; 11714 11715 case Builtin::BI__builtin_fabsf: 11716 return Builtin::BI__builtin_fabs; 11717 case Builtin::BI__builtin_fabs: 11718 return Builtin::BI__builtin_fabsl; 11719 case Builtin::BI__builtin_fabsl: 11720 return 0; 11721 11722 case Builtin::BI__builtin_cabsf: 11723 return Builtin::BI__builtin_cabs; 11724 case Builtin::BI__builtin_cabs: 11725 return Builtin::BI__builtin_cabsl; 11726 case Builtin::BI__builtin_cabsl: 11727 return 0; 11728 11729 case Builtin::BIabs: 11730 return Builtin::BIlabs; 11731 case Builtin::BIlabs: 11732 return Builtin::BIllabs; 11733 case Builtin::BIllabs: 11734 return 0; 11735 11736 case Builtin::BIfabsf: 11737 return Builtin::BIfabs; 11738 case Builtin::BIfabs: 11739 return Builtin::BIfabsl; 11740 case Builtin::BIfabsl: 11741 return 0; 11742 11743 case Builtin::BIcabsf: 11744 return Builtin::BIcabs; 11745 case Builtin::BIcabs: 11746 return Builtin::BIcabsl; 11747 case Builtin::BIcabsl: 11748 return 0; 11749 } 11750 } 11751 11752 // Returns the argument type of the absolute value function. 11753 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 11754 unsigned AbsType) { 11755 if (AbsType == 0) 11756 return QualType(); 11757 11758 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 11759 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 11760 if (Error != ASTContext::GE_None) 11761 return QualType(); 11762 11763 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 11764 if (!FT) 11765 return QualType(); 11766 11767 if (FT->getNumParams() != 1) 11768 return QualType(); 11769 11770 return FT->getParamType(0); 11771 } 11772 11773 // Returns the best absolute value function, or zero, based on type and 11774 // current absolute value function. 11775 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 11776 unsigned AbsFunctionKind) { 11777 unsigned BestKind = 0; 11778 uint64_t ArgSize = Context.getTypeSize(ArgType); 11779 for (unsigned Kind = AbsFunctionKind; Kind != 0; 11780 Kind = getLargerAbsoluteValueFunction(Kind)) { 11781 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 11782 if (Context.getTypeSize(ParamType) >= ArgSize) { 11783 if (BestKind == 0) 11784 BestKind = Kind; 11785 else if (Context.hasSameType(ParamType, ArgType)) { 11786 BestKind = Kind; 11787 break; 11788 } 11789 } 11790 } 11791 return BestKind; 11792 } 11793 11794 enum AbsoluteValueKind { 11795 AVK_Integer, 11796 AVK_Floating, 11797 AVK_Complex 11798 }; 11799 11800 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 11801 if (T->isIntegralOrEnumerationType()) 11802 return AVK_Integer; 11803 if (T->isRealFloatingType()) 11804 return AVK_Floating; 11805 if (T->isAnyComplexType()) 11806 return AVK_Complex; 11807 11808 llvm_unreachable("Type not integer, floating, or complex"); 11809 } 11810 11811 // Changes the absolute value function to a different type. Preserves whether 11812 // the function is a builtin. 
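// For example (hypothetical): calling abs() on a 'long long' argument walks
// abs -> labs -> llabs and returns llabs as the best-fitting replacement.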
11813 static unsigned changeAbsFunction(unsigned AbsKind, 11814 AbsoluteValueKind ValueKind) { 11815 switch (ValueKind) { 11816 case AVK_Integer: 11817 switch (AbsKind) { 11818 default: 11819 return 0; 11820 case Builtin::BI__builtin_fabsf: 11821 case Builtin::BI__builtin_fabs: 11822 case Builtin::BI__builtin_fabsl: 11823 case Builtin::BI__builtin_cabsf: 11824 case Builtin::BI__builtin_cabs: 11825 case Builtin::BI__builtin_cabsl: 11826 return Builtin::BI__builtin_abs; 11827 case Builtin::BIfabsf: 11828 case Builtin::BIfabs: 11829 case Builtin::BIfabsl: 11830 case Builtin::BIcabsf: 11831 case Builtin::BIcabs: 11832 case Builtin::BIcabsl: 11833 return Builtin::BIabs; 11834 } 11835 case AVK_Floating: 11836 switch (AbsKind) { 11837 default: 11838 return 0; 11839 case Builtin::BI__builtin_abs: 11840 case Builtin::BI__builtin_labs: 11841 case Builtin::BI__builtin_llabs: 11842 case Builtin::BI__builtin_cabsf: 11843 case Builtin::BI__builtin_cabs: 11844 case Builtin::BI__builtin_cabsl: 11845 return Builtin::BI__builtin_fabsf; 11846 case Builtin::BIabs: 11847 case Builtin::BIlabs: 11848 case Builtin::BIllabs: 11849 case Builtin::BIcabsf: 11850 case Builtin::BIcabs: 11851 case Builtin::BIcabsl: 11852 return Builtin::BIfabsf; 11853 } 11854 case AVK_Complex: 11855 switch (AbsKind) { 11856 default: 11857 return 0; 11858 case Builtin::BI__builtin_abs: 11859 case Builtin::BI__builtin_labs: 11860 case Builtin::BI__builtin_llabs: 11861 case Builtin::BI__builtin_fabsf: 11862 case Builtin::BI__builtin_fabs: 11863 case Builtin::BI__builtin_fabsl: 11864 return Builtin::BI__builtin_cabsf; 11865 case Builtin::BIabs: 11866 case Builtin::BIlabs: 11867 case Builtin::BIllabs: 11868 case Builtin::BIfabsf: 11869 case Builtin::BIfabs: 11870 case Builtin::BIfabsl: 11871 return Builtin::BIcabsf; 11872 } 11873 } 11874 llvm_unreachable("Unable to convert function"); 11875 } 11876 11877 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 11878 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 11879 if (!FnInfo) 11880 return 0; 11881 11882 switch (FDecl->getBuiltinID()) { 11883 default: 11884 return 0; 11885 case Builtin::BI__builtin_abs: 11886 case Builtin::BI__builtin_fabs: 11887 case Builtin::BI__builtin_fabsf: 11888 case Builtin::BI__builtin_fabsl: 11889 case Builtin::BI__builtin_labs: 11890 case Builtin::BI__builtin_llabs: 11891 case Builtin::BI__builtin_cabs: 11892 case Builtin::BI__builtin_cabsf: 11893 case Builtin::BI__builtin_cabsl: 11894 case Builtin::BIabs: 11895 case Builtin::BIlabs: 11896 case Builtin::BIllabs: 11897 case Builtin::BIfabs: 11898 case Builtin::BIfabsf: 11899 case Builtin::BIfabsl: 11900 case Builtin::BIcabs: 11901 case Builtin::BIcabsf: 11902 case Builtin::BIcabsl: 11903 return FDecl->getBuiltinID(); 11904 } 11905 llvm_unreachable("Unknown Builtin type"); 11906 } 11907 11908 // If the replacement is valid, emit a note with replacement function. 11909 // Additionally, suggest including the proper header if not already included. 
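// Illustrative (hypothetical) case: in C++, abs() applied to a 'double' gets a
// note suggesting std::abs, plus a hint to include <cmath> if no suitable
// declaration of std::abs is visible at the call site.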
11910 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 11911 unsigned AbsKind, QualType ArgType) { 11912 bool EmitHeaderHint = true; 11913 const char *HeaderName = nullptr; 11914 StringRef FunctionName; 11915 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 11916 FunctionName = "std::abs"; 11917 if (ArgType->isIntegralOrEnumerationType()) { 11918 HeaderName = "cstdlib"; 11919 } else if (ArgType->isRealFloatingType()) { 11920 HeaderName = "cmath"; 11921 } else { 11922 llvm_unreachable("Invalid Type"); 11923 } 11924 11925 // Lookup all std::abs 11926 if (NamespaceDecl *Std = S.getStdNamespace()) { 11927 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 11928 R.suppressDiagnostics(); 11929 S.LookupQualifiedName(R, Std); 11930 11931 for (const auto *I : R) { 11932 const FunctionDecl *FDecl = nullptr; 11933 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 11934 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 11935 } else { 11936 FDecl = dyn_cast<FunctionDecl>(I); 11937 } 11938 if (!FDecl) 11939 continue; 11940 11941 // Found std::abs(), check that they are the right ones. 11942 if (FDecl->getNumParams() != 1) 11943 continue; 11944 11945 // Check that the parameter type can handle the argument. 11946 QualType ParamType = FDecl->getParamDecl(0)->getType(); 11947 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 11948 S.Context.getTypeSize(ArgType) <= 11949 S.Context.getTypeSize(ParamType)) { 11950 // Found a function, don't need the header hint. 11951 EmitHeaderHint = false; 11952 break; 11953 } 11954 } 11955 } 11956 } else { 11957 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 11958 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 11959 11960 if (HeaderName) { 11961 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 11962 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 11963 R.suppressDiagnostics(); 11964 S.LookupName(R, S.getCurScope()); 11965 11966 if (R.isSingleResult()) { 11967 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 11968 if (FD && FD->getBuiltinID() == AbsKind) { 11969 EmitHeaderHint = false; 11970 } else { 11971 return; 11972 } 11973 } else if (!R.empty()) { 11974 return; 11975 } 11976 } 11977 } 11978 11979 S.Diag(Loc, diag::note_replace_abs_function) 11980 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 11981 11982 if (!HeaderName) 11983 return; 11984 11985 if (!EmitHeaderHint) 11986 return; 11987 11988 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 11989 << FunctionName; 11990 } 11991 11992 template <std::size_t StrLen> 11993 static bool IsStdFunction(const FunctionDecl *FDecl, 11994 const char (&Str)[StrLen]) { 11995 if (!FDecl) 11996 return false; 11997 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 11998 return false; 11999 if (!FDecl->isInStdNamespace()) 12000 return false; 12001 12002 return true; 12003 } 12004 12005 // Warn when using the wrong abs() function. 12006 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 12007 const FunctionDecl *FDecl) { 12008 if (Call->getNumArgs() != 1) 12009 return; 12010 12011 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 12012 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 12013 if (AbsKind == 0 && !IsStdAbs) 12014 return; 12015 12016 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 12017 QualType ParamType = Call->getArg(0)->getType(); 12018 12019 // Unsigned types cannot be negative. 
Suggest removing the absolute value
12020 // function call.
12021 if (ArgType->isUnsignedIntegerType()) {
12022 StringRef FunctionName =
12023 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
12024 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
12025 Diag(Call->getExprLoc(), diag::note_remove_abs)
12026 << FunctionName
12027 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
12028 return;
12029 }
12030
12031 // Taking the absolute value of a pointer is very suspicious; the user probably
12032 // wanted to index into an array, dereference a pointer, call a function, etc.
12033 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
12034 unsigned DiagType = 0;
12035 if (ArgType->isFunctionType())
12036 DiagType = 1;
12037 else if (ArgType->isArrayType())
12038 DiagType = 2;
12039
12040 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
12041 return;
12042 }
12043
12044 // std::abs has overloads which prevent most of the absolute value problems
12045 // from occurring.
12046 if (IsStdAbs)
12047 return;
12048
12049 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
12050 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
12051
12052 // The argument and parameter are the same kind. Check if they are the right
12053 // size.
12054 if (ArgValueKind == ParamValueKind) {
12055 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
12056 return;
12057
12058 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
12059 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
12060 << FDecl << ArgType << ParamType;
12061
12062 if (NewAbsKind == 0)
12063 return;
12064
12065 emitReplacement(*this, Call->getExprLoc(),
12066 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
12067 return;
12068 }
12069
12070 // ArgValueKind != ParamValueKind
12071 // The wrong type of absolute value function was used. Attempt to find the
12072 // proper one.
12073 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
12074 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
12075 if (NewAbsKind == 0)
12076 return;
12077
12078 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
12079 << FDecl << ParamValueKind << ArgValueKind;
12080
12081 emitReplacement(*this, Call->getExprLoc(),
12082 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
12083 }
12084
12085 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
12086 void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
12087 const FunctionDecl *FDecl) {
12088 if (!Call || !FDecl) return;
12089
12090 // Ignore template specializations and macros.
12091 if (inTemplateInstantiation()) return;
12092 if (Call->getExprLoc().isMacroID()) return;
12093
12094 // Only care about the one template argument, two function parameter std::max.
12095 if (Call->getNumArgs() != 2) return;
12096 if (!IsStdFunction(FDecl, "max")) return;
12097 const auto * ArgList = FDecl->getTemplateSpecializationArgs();
12098 if (!ArgList) return;
12099 if (ArgList->size() != 1) return;
12100
12101 // Check that template type argument is unsigned integer.
12102 const auto& TA = ArgList->get(0);
12103 if (TA.getKind() != TemplateArgument::Type) return;
12104 QualType ArgType = TA.getAsType();
12105 if (!ArgType->isUnsignedIntegerType()) return;
12106
12107 // See if either argument is a literal zero.
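// Illustrative (hypothetical) case: 'std::max(n, 0u)' is always 'n' for
// unsigned 'n'; the warning below fires only when exactly one argument is a
// literal zero.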
12108 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 12109 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 12110 if (!MTE) return false; 12111 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 12112 if (!Num) return false; 12113 if (Num->getValue() != 0) return false; 12114 return true; 12115 }; 12116 12117 const Expr *FirstArg = Call->getArg(0); 12118 const Expr *SecondArg = Call->getArg(1); 12119 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 12120 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 12121 12122 // Only warn when exactly one argument is zero. 12123 if (IsFirstArgZero == IsSecondArgZero) return; 12124 12125 SourceRange FirstRange = FirstArg->getSourceRange(); 12126 SourceRange SecondRange = SecondArg->getSourceRange(); 12127 12128 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 12129 12130 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 12131 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 12132 12133 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 12134 SourceRange RemovalRange; 12135 if (IsFirstArgZero) { 12136 RemovalRange = SourceRange(FirstRange.getBegin(), 12137 SecondRange.getBegin().getLocWithOffset(-1)); 12138 } else { 12139 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 12140 SecondRange.getEnd()); 12141 } 12142 12143 Diag(Call->getExprLoc(), diag::note_remove_max_call) 12144 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 12145 << FixItHint::CreateRemoval(RemovalRange); 12146 } 12147 12148 //===--- CHECK: Standard memory functions ---------------------------------===// 12149 12150 /// Takes the expression passed to the size_t parameter of functions 12151 /// such as memcmp, strncat, etc and warns if it's a comparison. 12152 /// 12153 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 12154 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 12155 IdentifierInfo *FnName, 12156 SourceLocation FnLoc, 12157 SourceLocation RParenLoc) { 12158 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 12159 if (!Size) 12160 return false; 12161 12162 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 12163 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 12164 return false; 12165 12166 SourceRange SizeRange = Size->getSourceRange(); 12167 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 12168 << SizeRange << FnName; 12169 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 12170 << FnName 12171 << FixItHint::CreateInsertion( 12172 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 12173 << FixItHint::CreateRemoval(RParenLoc); 12174 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 12175 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 12176 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 12177 ")"); 12178 12179 return true; 12180 } 12181 12182 /// Determine whether the given type is or contains a dynamic class type 12183 /// (e.g., whether it has a vtable). 12184 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 12185 bool &IsContained) { 12186 // Look through array types while ignoring qualifiers. 12187 const Type *Ty = T->getBaseElementTypeUnsafe(); 12188 IsContained = false; 12189 12190 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 12191 RD = RD ? 
RD->getDefinition() : nullptr; 12192 if (!RD || RD->isInvalidDecl()) 12193 return nullptr; 12194 12195 if (RD->isDynamicClass()) 12196 return RD; 12197 12198 // Check all the fields. If any bases were dynamic, the class is dynamic. 12199 // It's impossible for a class to transitively contain itself by value, so 12200 // infinite recursion is impossible. 12201 for (auto *FD : RD->fields()) { 12202 bool SubContained; 12203 if (const CXXRecordDecl *ContainedRD = 12204 getContainedDynamicClass(FD->getType(), SubContained)) { 12205 IsContained = true; 12206 return ContainedRD; 12207 } 12208 } 12209 12210 return nullptr; 12211 } 12212 12213 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 12214 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 12215 if (Unary->getKind() == UETT_SizeOf) 12216 return Unary; 12217 return nullptr; 12218 } 12219 12220 /// If E is a sizeof expression, returns its argument expression, 12221 /// otherwise returns NULL. 12222 static const Expr *getSizeOfExprArg(const Expr *E) { 12223 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 12224 if (!SizeOf->isArgumentType()) 12225 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 12226 return nullptr; 12227 } 12228 12229 /// If E is a sizeof expression, returns its argument type. 12230 static QualType getSizeOfArgType(const Expr *E) { 12231 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 12232 return SizeOf->getTypeOfArgument(); 12233 return QualType(); 12234 } 12235 12236 namespace { 12237 12238 struct SearchNonTrivialToInitializeField 12239 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 12240 using Super = 12241 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 12242 12243 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 12244 12245 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 12246 SourceLocation SL) { 12247 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 12248 asDerived().visitArray(PDIK, AT, SL); 12249 return; 12250 } 12251 12252 Super::visitWithKind(PDIK, FT, SL); 12253 } 12254 12255 void visitARCStrong(QualType FT, SourceLocation SL) { 12256 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 12257 } 12258 void visitARCWeak(QualType FT, SourceLocation SL) { 12259 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 12260 } 12261 void visitStruct(QualType FT, SourceLocation SL) { 12262 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 12263 visit(FD->getType(), FD->getLocation()); 12264 } 12265 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 12266 const ArrayType *AT, SourceLocation SL) { 12267 visit(getContext().getBaseElementType(AT), SL); 12268 } 12269 void visitTrivial(QualType FT, SourceLocation SL) {} 12270 12271 static void diag(QualType RT, const Expr *E, Sema &S) { 12272 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 12273 } 12274 12275 ASTContext &getContext() { return S.getASTContext(); } 12276 12277 const Expr *E; 12278 Sema &S; 12279 }; 12280 12281 struct SearchNonTrivialToCopyField 12282 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 12283 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 12284 12285 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 12286 12287 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 12288 SourceLocation SL) { 12289 if 
(const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 12290 asDerived().visitArray(PCK, AT, SL); 12291 return; 12292 } 12293 12294 Super::visitWithKind(PCK, FT, SL); 12295 } 12296 12297 void visitARCStrong(QualType FT, SourceLocation SL) { 12298 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 12299 } 12300 void visitARCWeak(QualType FT, SourceLocation SL) { 12301 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 12302 } 12303 void visitStruct(QualType FT, SourceLocation SL) { 12304 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 12305 visit(FD->getType(), FD->getLocation()); 12306 } 12307 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 12308 SourceLocation SL) { 12309 visit(getContext().getBaseElementType(AT), SL); 12310 } 12311 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 12312 SourceLocation SL) {} 12313 void visitTrivial(QualType FT, SourceLocation SL) {} 12314 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 12315 12316 static void diag(QualType RT, const Expr *E, Sema &S) { 12317 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 12318 } 12319 12320 ASTContext &getContext() { return S.getASTContext(); } 12321 12322 const Expr *E; 12323 Sema &S; 12324 }; 12325 12326 } 12327 12328 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 12329 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 12330 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 12331 12332 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 12333 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 12334 return false; 12335 12336 return doesExprLikelyComputeSize(BO->getLHS()) || 12337 doesExprLikelyComputeSize(BO->getRHS()); 12338 } 12339 12340 return getAsSizeOfExpr(SizeofExpr) != nullptr; 12341 } 12342 12343 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 12344 /// 12345 /// \code 12346 /// #define MACRO 0 12347 /// foo(MACRO); 12348 /// foo(0); 12349 /// \endcode 12350 /// 12351 /// This should return true for the first call to foo, but not for the second 12352 /// (regardless of whether foo is a macro or function). 12353 static bool isArgumentExpandedFromMacro(SourceManager &SM, 12354 SourceLocation CallLoc, 12355 SourceLocation ArgLoc) { 12356 if (!CallLoc.isMacroID()) 12357 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 12358 12359 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 12360 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 12361 } 12362 12363 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 12364 /// last two arguments transposed. 12365 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 12366 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 12367 return; 12368 12369 const Expr *SizeArg = 12370 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 12371 12372 auto isLiteralZero = [](const Expr *E) { 12373 return (isa<IntegerLiteral>(E) && 12374 cast<IntegerLiteral>(E)->getValue() == 0) || 12375 (isa<CharacterLiteral>(E) && 12376 cast<CharacterLiteral>(E)->getValue() == 0); 12377 }; 12378 12379 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 
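// Illustrative (hypothetical) case: 'memset(buf, sizeof(buf), 0)' fills zero
// bytes; the diagnostics below ask whether the last two arguments were
// transposed.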
12380 SourceLocation CallLoc = Call->getRParenLoc(); 12381 SourceManager &SM = S.getSourceManager(); 12382 if (isLiteralZero(SizeArg) && 12383 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 12384 12385 SourceLocation DiagLoc = SizeArg->getExprLoc(); 12386 12387 // Some platforms #define bzero to __builtin_memset. See if this is the 12388 // case, and if so, emit a better diagnostic. 12389 if (BId == Builtin::BIbzero || 12390 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 12391 CallLoc, SM, S.getLangOpts()) == "bzero")) { 12392 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 12393 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 12394 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 12395 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 12396 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 12397 } 12398 return; 12399 } 12400 12401 // If the second argument to a memset is a sizeof expression and the third 12402 // isn't, this is also likely an error. This should catch 12403 // 'memset(buf, sizeof(buf), 0xff)'. 12404 if (BId == Builtin::BImemset && 12405 doesExprLikelyComputeSize(Call->getArg(1)) && 12406 !doesExprLikelyComputeSize(Call->getArg(2))) { 12407 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 12408 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 12409 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 12410 return; 12411 } 12412 } 12413 12414 /// Check for dangerous or invalid arguments to memset(). 12415 /// 12416 /// This issues warnings on known problematic, dangerous or unspecified 12417 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 12418 /// function calls. 12419 /// 12420 /// \param Call The call expression to diagnose. 12421 void Sema::CheckMemaccessArguments(const CallExpr *Call, 12422 unsigned BId, 12423 IdentifierInfo *FnName) { 12424 assert(BId != 0); 12425 12426 // It is possible to have a non-standard definition of memset. Validate 12427 // we have enough arguments, and if not, abort further checking. 12428 unsigned ExpectedNumArgs = 12429 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 12430 if (Call->getNumArgs() < ExpectedNumArgs) 12431 return; 12432 12433 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 12434 BId == Builtin::BIstrndup ? 1 : 2); 12435 unsigned LenArg = 12436 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 12437 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 12438 12439 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 12440 Call->getBeginLoc(), Call->getRParenLoc())) 12441 return; 12442 12443 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 12444 CheckMemaccessSize(*this, BId, Call); 12445 12446 // We have special checking when the length is a sizeof expression. 12447 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 12448 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 12449 llvm::FoldingSetNodeID SizeOfArgID; 12450 12451 // Although widely used, 'bzero' is not a standard function. Be more strict 12452 // with the argument types before allowing diagnostics and only allow the 12453 // form bzero(ptr, sizeof(...)). 
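// Illustrative (hypothetical) case: 'bzero(&s, sizeof(s))' is analyzed below,
// while a bzero call whose first argument is not of pointer type is skipped.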
12454 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 12455 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 12456 return; 12457 12458 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 12459 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 12460 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 12461 12462 QualType DestTy = Dest->getType(); 12463 QualType PointeeTy; 12464 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 12465 PointeeTy = DestPtrTy->getPointeeType(); 12466 12467 // Never warn about void type pointers. This can be used to suppress 12468 // false positives. 12469 if (PointeeTy->isVoidType()) 12470 continue; 12471 12472 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 12473 // actually comparing the expressions for equality. Because computing the 12474 // expression IDs can be expensive, we only do this if the diagnostic is 12475 // enabled. 12476 if (SizeOfArg && 12477 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 12478 SizeOfArg->getExprLoc())) { 12479 // We only compute IDs for expressions if the warning is enabled, and 12480 // cache the sizeof arg's ID. 12481 if (SizeOfArgID == llvm::FoldingSetNodeID()) 12482 SizeOfArg->Profile(SizeOfArgID, Context, true); 12483 llvm::FoldingSetNodeID DestID; 12484 Dest->Profile(DestID, Context, true); 12485 if (DestID == SizeOfArgID) { 12486 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 12487 // over sizeof(src) as well. 12488 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 12489 StringRef ReadableName = FnName->getName(); 12490 12491 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 12492 if (UnaryOp->getOpcode() == UO_AddrOf) 12493 ActionIdx = 1; // If its an address-of operator, just remove it. 12494 if (!PointeeTy->isIncompleteType() && 12495 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 12496 ActionIdx = 2; // If the pointee's size is sizeof(char), 12497 // suggest an explicit length. 12498 12499 // If the function is defined as a builtin macro, do not show macro 12500 // expansion. 12501 SourceLocation SL = SizeOfArg->getExprLoc(); 12502 SourceRange DSR = Dest->getSourceRange(); 12503 SourceRange SSR = SizeOfArg->getSourceRange(); 12504 SourceManager &SM = getSourceManager(); 12505 12506 if (SM.isMacroArgExpansion(SL)) { 12507 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 12508 SL = SM.getSpellingLoc(SL); 12509 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 12510 SM.getSpellingLoc(DSR.getEnd())); 12511 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 12512 SM.getSpellingLoc(SSR.getEnd())); 12513 } 12514 12515 DiagRuntimeBehavior(SL, SizeOfArg, 12516 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 12517 << ReadableName 12518 << PointeeTy 12519 << DestTy 12520 << DSR 12521 << SSR); 12522 DiagRuntimeBehavior(SL, SizeOfArg, 12523 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 12524 << ActionIdx 12525 << SSR); 12526 12527 break; 12528 } 12529 } 12530 12531 // Also check for cases where the sizeof argument is the exact same 12532 // type as the memory argument, and where it points to a user-defined 12533 // record type. 
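// Illustrative (hypothetical) case: given 'struct S *p;',
//   memset(p, 0, sizeof(p));          // caught by the expression check above
//   memset(p, 0, sizeof(struct S *)); // caught below: sizeof type matches the
//                                     // destination pointer type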
12534 if (SizeOfArgTy != QualType()) { 12535 if (PointeeTy->isRecordType() && 12536 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 12537 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 12538 PDiag(diag::warn_sizeof_pointer_type_memaccess) 12539 << FnName << SizeOfArgTy << ArgIdx 12540 << PointeeTy << Dest->getSourceRange() 12541 << LenExpr->getSourceRange()); 12542 break; 12543 } 12544 } 12545 } else if (DestTy->isArrayType()) { 12546 PointeeTy = DestTy; 12547 } 12548 12549 if (PointeeTy == QualType()) 12550 continue; 12551 12552 // Always complain about dynamic classes. 12553 bool IsContained; 12554 if (const CXXRecordDecl *ContainedRD = 12555 getContainedDynamicClass(PointeeTy, IsContained)) { 12556 12557 unsigned OperationType = 0; 12558 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 12559 // "overwritten" if we're warning about the destination for any call 12560 // but memcmp; otherwise a verb appropriate to the call. 12561 if (ArgIdx != 0 || IsCmp) { 12562 if (BId == Builtin::BImemcpy) 12563 OperationType = 1; 12564 else if(BId == Builtin::BImemmove) 12565 OperationType = 2; 12566 else if (IsCmp) 12567 OperationType = 3; 12568 } 12569 12570 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 12571 PDiag(diag::warn_dyn_class_memaccess) 12572 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 12573 << IsContained << ContainedRD << OperationType 12574 << Call->getCallee()->getSourceRange()); 12575 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 12576 BId != Builtin::BImemset) 12577 DiagRuntimeBehavior( 12578 Dest->getExprLoc(), Dest, 12579 PDiag(diag::warn_arc_object_memaccess) 12580 << ArgIdx << FnName << PointeeTy 12581 << Call->getCallee()->getSourceRange()); 12582 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 12583 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 12584 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 12585 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 12586 PDiag(diag::warn_cstruct_memaccess) 12587 << ArgIdx << FnName << PointeeTy << 0); 12588 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 12589 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 12590 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 12591 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 12592 PDiag(diag::warn_cstruct_memaccess) 12593 << ArgIdx << FnName << PointeeTy << 1); 12594 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 12595 } else { 12596 continue; 12597 } 12598 } else 12599 continue; 12600 12601 DiagRuntimeBehavior( 12602 Dest->getExprLoc(), Dest, 12603 PDiag(diag::note_bad_memaccess_silence) 12604 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 12605 break; 12606 } 12607 } 12608 12609 // A little helper routine: ignore addition and subtraction of integer literals. 12610 // This intentionally does not ignore all integer constant expressions because 12611 // we don't want to remove sizeof(). 
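// For example: 'src + 2' and '(char *)src - 1' both reduce to 'src', while
// 'sizeof(src) - 1' reduces to 'sizeof(src)' rather than vanishing entirely.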
12612 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 12613 Ex = Ex->IgnoreParenCasts(); 12614 12615 while (true) { 12616 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 12617 if (!BO || !BO->isAdditiveOp()) 12618 break; 12619 12620 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 12621 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 12622 12623 if (isa<IntegerLiteral>(RHS)) 12624 Ex = LHS; 12625 else if (isa<IntegerLiteral>(LHS)) 12626 Ex = RHS; 12627 else 12628 break; 12629 } 12630 12631 return Ex; 12632 } 12633 12634 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 12635 ASTContext &Context) { 12636 // Only handle constant-sized or VLAs, but not flexible members. 12637 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 12638 // Only issue the FIXIT for arrays of size > 1. 12639 if (CAT->getSize().getSExtValue() <= 1) 12640 return false; 12641 } else if (!Ty->isVariableArrayType()) { 12642 return false; 12643 } 12644 return true; 12645 } 12646 12647 // Warn if the user has made the 'size' argument to strlcpy or strlcat 12648 // be the size of the source, instead of the destination. 12649 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 12650 IdentifierInfo *FnName) { 12651 12652 // Don't crash if the user has the wrong number of arguments 12653 unsigned NumArgs = Call->getNumArgs(); 12654 if ((NumArgs != 3) && (NumArgs != 4)) 12655 return; 12656 12657 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 12658 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 12659 const Expr *CompareWithSrc = nullptr; 12660 12661 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 12662 Call->getBeginLoc(), Call->getRParenLoc())) 12663 return; 12664 12665 // Look for 'strlcpy(dst, x, sizeof(x))' 12666 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 12667 CompareWithSrc = Ex; 12668 else { 12669 // Look for 'strlcpy(dst, x, strlen(x))' 12670 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 12671 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 12672 SizeCall->getNumArgs() == 1) 12673 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 12674 } 12675 } 12676 12677 if (!CompareWithSrc) 12678 return; 12679 12680 // Determine if the argument to sizeof/strlen is equal to the source 12681 // argument. In principle there's all kinds of things you could do 12682 // here, for instance creating an == expression and evaluating it with 12683 // EvaluateAsBooleanCondition, but this uses a more direct technique: 12684 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 12685 if (!SrcArgDRE) 12686 return; 12687 12688 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 12689 if (!CompareWithSrcDRE || 12690 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 12691 return; 12692 12693 const Expr *OriginalSizeArg = Call->getArg(2); 12694 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 12695 << OriginalSizeArg->getSourceRange() << FnName; 12696 12697 // Output a FIXIT hint if the destination is an array (rather than a 12698 // pointer to an array). This could be enhanced to handle some 12699 // pointers if we know the actual size, like if DstArg is 'array+2' 12700 // we could say 'sizeof(array)-2'. 
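// Illustrative (hypothetical) case: for 'char dst[32];' the call
//   strlcpy(dst, src, sizeof(src));
// is warned about above, and the note emitted below offers 'sizeof(dst)'.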
12701 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 12702 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 12703 return; 12704 12705 SmallString<128> sizeString; 12706 llvm::raw_svector_ostream OS(sizeString); 12707 OS << "sizeof("; 12708 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 12709 OS << ")"; 12710 12711 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 12712 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 12713 OS.str()); 12714 } 12715 12716 /// Check if two expressions refer to the same declaration. 12717 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 12718 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 12719 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 12720 return D1->getDecl() == D2->getDecl(); 12721 return false; 12722 } 12723 12724 static const Expr *getStrlenExprArg(const Expr *E) { 12725 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 12726 const FunctionDecl *FD = CE->getDirectCallee(); 12727 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 12728 return nullptr; 12729 return CE->getArg(0)->IgnoreParenCasts(); 12730 } 12731 return nullptr; 12732 } 12733 12734 // Warn on anti-patterns as the 'size' argument to strncat. 12735 // The correct size argument should look like following: 12736 // strncat(dst, src, sizeof(dst) - strlen(dest) - 1); 12737 void Sema::CheckStrncatArguments(const CallExpr *CE, 12738 IdentifierInfo *FnName) { 12739 // Don't crash if the user has the wrong number of arguments. 12740 if (CE->getNumArgs() < 3) 12741 return; 12742 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 12743 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 12744 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 12745 12746 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 12747 CE->getRParenLoc())) 12748 return; 12749 12750 // Identify common expressions, which are wrongly used as the size argument 12751 // to strncat and may lead to buffer overflows. 12752 unsigned PatternType = 0; 12753 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 12754 // - sizeof(dst) 12755 if (referToTheSameDecl(SizeOfArg, DstArg)) 12756 PatternType = 1; 12757 // - sizeof(src) 12758 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 12759 PatternType = 2; 12760 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 12761 if (BE->getOpcode() == BO_Sub) { 12762 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 12763 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 12764 // - sizeof(dst) - strlen(dst) 12765 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 12766 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 12767 PatternType = 1; 12768 // - sizeof(src) - (anything) 12769 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 12770 PatternType = 2; 12771 } 12772 } 12773 12774 if (PatternType == 0) 12775 return; 12776 12777 // Generate the diagnostic. 12778 SourceLocation SL = LenArg->getBeginLoc(); 12779 SourceRange SR = LenArg->getSourceRange(); 12780 SourceManager &SM = getSourceManager(); 12781 12782 // If the function is defined as a builtin macro, do not show macro expansion. 12783 if (SM.isMacroArgExpansion(SL)) { 12784 SL = SM.getSpellingLoc(SL); 12785 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 12786 SM.getSpellingLoc(SR.getEnd())); 12787 } 12788 12789 // Check if the destination is an array (rather than a pointer to an array). 
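// Illustrative (hypothetical) case: for 'char dst[16];' the call
//   strncat(dst, src, sizeof(dst));
// warns below, and the note suggests 'sizeof(dst) - strlen(dst) - 1' instead.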
12790 QualType DstTy = DstArg->getType(); 12791 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 12792 Context); 12793 if (!isKnownSizeArray) { 12794 if (PatternType == 1) 12795 Diag(SL, diag::warn_strncat_wrong_size) << SR; 12796 else 12797 Diag(SL, diag::warn_strncat_src_size) << SR; 12798 return; 12799 } 12800 12801 if (PatternType == 1) 12802 Diag(SL, diag::warn_strncat_large_size) << SR; 12803 else 12804 Diag(SL, diag::warn_strncat_src_size) << SR; 12805 12806 SmallString<128> sizeString; 12807 llvm::raw_svector_ostream OS(sizeString); 12808 OS << "sizeof("; 12809 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 12810 OS << ") - "; 12811 OS << "strlen("; 12812 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 12813 OS << ") - 1"; 12814 12815 Diag(SL, diag::note_strncat_wrong_size) 12816 << FixItHint::CreateReplacement(SR, OS.str()); 12817 } 12818 12819 namespace { 12820 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 12821 const UnaryOperator *UnaryExpr, const Decl *D) { 12822 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 12823 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 12824 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 12825 return; 12826 } 12827 } 12828 12829 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 12830 const UnaryOperator *UnaryExpr) { 12831 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 12832 const Decl *D = Lvalue->getDecl(); 12833 if (isa<DeclaratorDecl>(D)) 12834 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 12835 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 12836 } 12837 12838 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 12839 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 12840 Lvalue->getMemberDecl()); 12841 } 12842 12843 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 12844 const UnaryOperator *UnaryExpr) { 12845 const auto *Lambda = dyn_cast<LambdaExpr>( 12846 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 12847 if (!Lambda) 12848 return; 12849 12850 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 12851 << CalleeName << 2 /*object: lambda expression*/; 12852 } 12853 12854 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 12855 const DeclRefExpr *Lvalue) { 12856 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 12857 if (Var == nullptr) 12858 return; 12859 12860 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 12861 << CalleeName << 0 /*object: */ << Var; 12862 } 12863 12864 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 12865 const CastExpr *Cast) { 12866 SmallString<128> SizeString; 12867 llvm::raw_svector_ostream OS(SizeString); 12868 12869 clang::CastKind Kind = Cast->getCastKind(); 12870 if (Kind == clang::CK_BitCast && 12871 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 12872 return; 12873 if (Kind == clang::CK_IntegralToPointer && 12874 !isa<IntegerLiteral>( 12875 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 12876 return; 12877 12878 switch (Cast->getCastKind()) { 12879 case clang::CK_BitCast: 12880 case clang::CK_IntegralToPointer: 12881 case clang::CK_FunctionToPointerDecay: 12882 OS << '\''; 12883 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 12884 OS << '\''; 12885 break; 12886 default: 12887 return; 12888 } 12889 12890 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 
12891 << CalleeName << 0 /*object: */ << OS.str(); 12892 } 12893 } // namespace 12894 12895 /// Alerts the user that they are attempting to free a non-malloc'd object. 12896 void Sema::CheckFreeArguments(const CallExpr *E) { 12897 const std::string CalleeName = 12898 cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 12899 12900 { // Prefer something that doesn't involve a cast to make things simpler. 12901 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 12902 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 12903 switch (UnaryExpr->getOpcode()) { 12904 case UnaryOperator::Opcode::UO_AddrOf: 12905 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 12906 case UnaryOperator::Opcode::UO_Plus: 12907 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 12908 default: 12909 break; 12910 } 12911 12912 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 12913 if (Lvalue->getType()->isArrayType()) 12914 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 12915 12916 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 12917 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 12918 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 12919 return; 12920 } 12921 12922 if (isa<BlockExpr>(Arg)) { 12923 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 12924 << CalleeName << 1 /*object: block*/; 12925 return; 12926 } 12927 } 12928 // Maybe the cast was important, check after the other cases. 12929 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 12930 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 12931 } 12932 12933 void 12934 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 12935 SourceLocation ReturnLoc, 12936 bool isObjCMethod, 12937 const AttrVec *Attrs, 12938 const FunctionDecl *FD) { 12939 // Check if the return value is null but should not be. 12940 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 12941 (!isObjCMethod && isNonNullType(lhsType))) && 12942 CheckNonNullExpr(*this, RetValExp)) 12943 Diag(ReturnLoc, diag::warn_null_ret) 12944 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 12945 12946 // C++11 [basic.stc.dynamic.allocation]p4: 12947 // If an allocation function declared with a non-throwing 12948 // exception-specification fails to allocate storage, it shall return 12949 // a null pointer. Any other allocation function that fails to allocate 12950 // storage shall indicate failure only by throwing an exception [...] 12951 if (FD) { 12952 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 12953 if (Op == OO_New || Op == OO_Array_New) { 12954 const FunctionProtoType *Proto 12955 = FD->getType()->castAs<FunctionProtoType>(); 12956 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 12957 CheckNonNullExpr(*this, RetValExp)) 12958 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 12959 << FD << getLangOpts().CPlusPlus11; 12960 } 12961 } 12962 12963 if (RetValExp && RetValExp->getType()->isWebAssemblyTableType()) { 12964 Diag(ReturnLoc, diag::err_wasm_table_art) << 1; 12965 } 12966 12967 // PPC MMA non-pointer types are not allowed as return type. Checking the type 12968 // here prevent the user from using a PPC MMA type as trailing return type. 12969 if (Context.getTargetInfo().getTriple().isPPC64()) 12970 CheckPPCMMAType(RetValExp->getType(), ReturnLoc); 12971 } 12972 12973 /// Check for comparisons of floating-point values using == and !=. 
Issue a 12974 /// warning if the comparison is not likely to do what the programmer intended. 12975 void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS, 12976 BinaryOperatorKind Opcode) { 12977 if (!BinaryOperator::isEqualityOp(Opcode)) 12978 return; 12979 12980 // Match and capture subexpressions such as "(float) X == 0.1". 12981 FloatingLiteral *FPLiteral; 12982 CastExpr *FPCast; 12983 auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) { 12984 FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens()); 12985 FPCast = dyn_cast<CastExpr>(R->IgnoreParens()); 12986 return FPLiteral && FPCast; 12987 }; 12988 12989 if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) { 12990 auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>(); 12991 auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>(); 12992 if (SourceTy && TargetTy && SourceTy->isFloatingPoint() && 12993 TargetTy->isFloatingPoint()) { 12994 bool Lossy; 12995 llvm::APFloat TargetC = FPLiteral->getValue(); 12996 TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)), 12997 llvm::APFloat::rmNearestTiesToEven, &Lossy); 12998 if (Lossy) { 12999 // If the literal cannot be represented in the source type, then a 13000 // check for == is always false and check for != is always true. 13001 Diag(Loc, diag::warn_float_compare_literal) 13002 << (Opcode == BO_EQ) << QualType(SourceTy, 0) 13003 << LHS->getSourceRange() << RHS->getSourceRange(); 13004 return; 13005 } 13006 } 13007 } 13008 13009 // Match a more general floating-point equality comparison (-Wfloat-equal). 13010 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 13011 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 13012 13013 // Special case: check for x == x (which is OK). 13014 // Do not emit warnings for such cases. 13015 if (auto *DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 13016 if (auto *DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 13017 if (DRL->getDecl() == DRR->getDecl()) 13018 return; 13019 13020 // Special case: check for comparisons against literals that can be exactly 13021 // represented by APFloat. In such cases, do not emit a warning. This 13022 // is a heuristic: often comparison against such literals are used to 13023 // detect if a value in a variable has not changed. This clearly can 13024 // lead to false negatives. 13025 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 13026 if (FLL->isExact()) 13027 return; 13028 } else 13029 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 13030 if (FLR->isExact()) 13031 return; 13032 13033 // Check for comparisons with builtin types. 13034 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 13035 if (CL->getBuiltinCallee()) 13036 return; 13037 13038 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 13039 if (CR->getBuiltinCallee()) 13040 return; 13041 13042 // Emit the diagnostic. 13043 Diag(Loc, diag::warn_floatingpoint_eq) 13044 << LHS->getSourceRange() << RHS->getSourceRange(); 13045 } 13046 13047 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 13048 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 13049 13050 namespace { 13051 13052 /// Structure recording the 'active' range of an integer-valued 13053 /// expression. 13054 struct IntRange { 13055 /// The number of bits active in the int. Note that this includes exactly one 13056 /// sign bit if !NonNegative. 
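/// For example, an 'unsigned char' value has Width 8 with NonNegative true,
/// while a 'signed char' value has Width 8 with NonNegative false (seven value
/// bits plus the sign bit).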
13057 unsigned Width; 13058 13059 /// True if the int is known not to have negative values. If so, all leading 13060 /// bits before Width are known zero, otherwise they are known to be the 13061 /// same as the MSB within Width. 13062 bool NonNegative; 13063 13064 IntRange(unsigned Width, bool NonNegative) 13065 : Width(Width), NonNegative(NonNegative) {} 13066 13067 /// Number of bits excluding the sign bit. 13068 unsigned valueBits() const { 13069 return NonNegative ? Width : Width - 1; 13070 } 13071 13072 /// Returns the range of the bool type. 13073 static IntRange forBoolType() { 13074 return IntRange(1, true); 13075 } 13076 13077 /// Returns the range of an opaque value of the given integral type. 13078 static IntRange forValueOfType(ASTContext &C, QualType T) { 13079 return forValueOfCanonicalType(C, 13080 T->getCanonicalTypeInternal().getTypePtr()); 13081 } 13082 13083 /// Returns the range of an opaque value of a canonical integral type. 13084 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 13085 assert(T->isCanonicalUnqualified()); 13086 13087 if (const VectorType *VT = dyn_cast<VectorType>(T)) 13088 T = VT->getElementType().getTypePtr(); 13089 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 13090 T = CT->getElementType().getTypePtr(); 13091 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 13092 T = AT->getValueType().getTypePtr(); 13093 13094 if (!C.getLangOpts().CPlusPlus) { 13095 // For enum types in C code, use the underlying datatype. 13096 if (const EnumType *ET = dyn_cast<EnumType>(T)) 13097 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 13098 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 13099 // For enum types in C++, use the known bit width of the enumerators. 13100 EnumDecl *Enum = ET->getDecl(); 13101 // In C++11, enums can have a fixed underlying type. Use this type to 13102 // compute the range. 13103 if (Enum->isFixed()) { 13104 return IntRange(C.getIntWidth(QualType(T, 0)), 13105 !ET->isSignedIntegerOrEnumerationType()); 13106 } 13107 13108 unsigned NumPositive = Enum->getNumPositiveBits(); 13109 unsigned NumNegative = Enum->getNumNegativeBits(); 13110 13111 if (NumNegative == 0) 13112 return IntRange(NumPositive, true/*NonNegative*/); 13113 else 13114 return IntRange(std::max(NumPositive + 1, NumNegative), 13115 false/*NonNegative*/); 13116 } 13117 13118 if (const auto *EIT = dyn_cast<BitIntType>(T)) 13119 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 13120 13121 const BuiltinType *BT = cast<BuiltinType>(T); 13122 assert(BT->isInteger()); 13123 13124 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 13125 } 13126 13127 /// Returns the "target" range of a canonical integral type, i.e. 13128 /// the range of values expressible in the type. 13129 /// 13130 /// This matches forValueOfCanonicalType except that enums have the 13131 /// full range of their type, not the range of their enumerators. 
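/// For example, in C++ 'enum E { A, B }' gets a 1-bit non-negative range from
/// forValueOfCanonicalType, but this function returns the full width of E's
/// underlying integer type.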
13132 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 13133 assert(T->isCanonicalUnqualified()); 13134 13135 if (const VectorType *VT = dyn_cast<VectorType>(T)) 13136 T = VT->getElementType().getTypePtr(); 13137 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 13138 T = CT->getElementType().getTypePtr(); 13139 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 13140 T = AT->getValueType().getTypePtr(); 13141 if (const EnumType *ET = dyn_cast<EnumType>(T)) 13142 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 13143 13144 if (const auto *EIT = dyn_cast<BitIntType>(T)) 13145 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 13146 13147 const BuiltinType *BT = cast<BuiltinType>(T); 13148 assert(BT->isInteger()); 13149 13150 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 13151 } 13152 13153 /// Returns the supremum of two ranges: i.e. their conservative merge. 13154 static IntRange join(IntRange L, IntRange R) { 13155 bool Unsigned = L.NonNegative && R.NonNegative; 13156 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 13157 L.NonNegative && R.NonNegative); 13158 } 13159 13160 /// Return the range of a bitwise-AND of the two ranges. 13161 static IntRange bit_and(IntRange L, IntRange R) { 13162 unsigned Bits = std::max(L.Width, R.Width); 13163 bool NonNegative = false; 13164 if (L.NonNegative) { 13165 Bits = std::min(Bits, L.Width); 13166 NonNegative = true; 13167 } 13168 if (R.NonNegative) { 13169 Bits = std::min(Bits, R.Width); 13170 NonNegative = true; 13171 } 13172 return IntRange(Bits, NonNegative); 13173 } 13174 13175 /// Return the range of a sum of the two ranges. 13176 static IntRange sum(IntRange L, IntRange R) { 13177 bool Unsigned = L.NonNegative && R.NonNegative; 13178 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 13179 Unsigned); 13180 } 13181 13182 /// Return the range of a difference of the two ranges. 13183 static IntRange difference(IntRange L, IntRange R) { 13184 // We need a 1-bit-wider range if: 13185 // 1) LHS can be negative: least value can be reduced. 13186 // 2) RHS can be negative: greatest value can be increased. 13187 bool CanWiden = !L.NonNegative || !R.NonNegative; 13188 bool Unsigned = L.NonNegative && R.Width == 0; 13189 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 13190 !Unsigned, 13191 Unsigned); 13192 } 13193 13194 /// Return the range of a product of the two ranges. 13195 static IntRange product(IntRange L, IntRange R) { 13196 // If both LHS and RHS can be negative, we can form 13197 // -2^L * -2^R = 2^(L + R) 13198 // which requires L + R + 1 value bits to represent. 13199 bool CanWiden = !L.NonNegative && !R.NonNegative; 13200 bool Unsigned = L.NonNegative && R.NonNegative; 13201 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 13202 Unsigned); 13203 } 13204 13205 /// Return the range of a remainder operation between the two ranges. 13206 static IntRange rem(IntRange L, IntRange R) { 13207 // The result of a remainder can't be larger than the result of 13208 // either side. The sign of the result is the sign of the LHS. 
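// For example, combining a signed 'int' range with an 'unsigned char' range
// yields min(31, 8) = 8 value bits plus a sign bit, i.e. a 9-bit signed range.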
13209 bool Unsigned = L.NonNegative; 13210 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 13211 Unsigned); 13212 } 13213 }; 13214 13215 } // namespace 13216 13217 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 13218 unsigned MaxWidth) { 13219 if (value.isSigned() && value.isNegative()) 13220 return IntRange(value.getSignificantBits(), false); 13221 13222 if (value.getBitWidth() > MaxWidth) 13223 value = value.trunc(MaxWidth); 13224 13225 // isNonNegative() just checks the sign bit without considering 13226 // signedness. 13227 return IntRange(value.getActiveBits(), true); 13228 } 13229 13230 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 13231 unsigned MaxWidth) { 13232 if (result.isInt()) 13233 return GetValueRange(C, result.getInt(), MaxWidth); 13234 13235 if (result.isVector()) { 13236 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 13237 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 13238 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 13239 R = IntRange::join(R, El); 13240 } 13241 return R; 13242 } 13243 13244 if (result.isComplexInt()) { 13245 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 13246 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 13247 return IntRange::join(R, I); 13248 } 13249 13250 // This can happen with lossless casts to intptr_t of "based" lvalues. 13251 // Assume it might use arbitrary bits. 13252 // FIXME: The only reason we need to pass the type in here is to get 13253 // the sign right on this one case. It would be nice if APValue 13254 // preserved this. 13255 assert(result.isLValue() || result.isAddrLabelDiff()); 13256 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 13257 } 13258 13259 static QualType GetExprType(const Expr *E) { 13260 QualType Ty = E->getType(); 13261 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 13262 Ty = AtomicRHS->getValueType(); 13263 return Ty; 13264 } 13265 13266 /// Pseudo-evaluate the given integer expression, estimating the 13267 /// range of values it might take. 13268 /// 13269 /// \param MaxWidth The width to which the value will be truncated. 13270 /// \param Approximate If \c true, return a likely range for the result: in 13271 /// particular, assume that arithmetic on narrower types doesn't leave 13272 /// those types. If \c false, return a range including all possible 13273 /// result values. 13274 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 13275 bool InConstantContext, bool Approximate) { 13276 E = E->IgnoreParens(); 13277 13278 // Try a full evaluation first. 13279 Expr::EvalResult result; 13280 if (E->EvaluateAsRValue(result, C, InConstantContext)) 13281 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 13282 13283 // I think we only want to look through implicit casts here; if the 13284 // user has an explicit widening cast, we should treat the value as 13285 // being of the new, wider type. 
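// For example (illustrative, assuming 16-bit 'short' and 32-bit 'int'): an
// implicit promotion of a 'short' operand is narrowed back to a 16-bit range
// below, whereas an explicit '(int)s' is not an ImplicitCastExpr and falls
// through to the full 32-bit range of the cast type.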
13286 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
13287 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
13288 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
13289 Approximate);
13290
13291 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));
13292
13293 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
13294 CE->getCastKind() == CK_BooleanToSignedIntegral;
13295
13296 // Assume that non-integer casts can span the full range of the type.
13297 if (!isIntegerCast)
13298 return OutputTypeRange;
13299
13300 IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
13301 std::min(MaxWidth, OutputTypeRange.Width),
13302 InConstantContext, Approximate);
13303
13304 // Bail out if the subexpr's range is as wide as the cast type.
13305 if (SubRange.Width >= OutputTypeRange.Width)
13306 return OutputTypeRange;
13307
13308 // Otherwise, we take the smaller width, and we're non-negative if
13309 // either the output type or the subexpr is.
13310 return IntRange(SubRange.Width,
13311 SubRange.NonNegative || OutputTypeRange.NonNegative);
13312 }
13313
13314 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
13315 // If we can fold the condition, just take that operand.
13316 bool CondResult;
13317 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
13318 return GetExprRange(C,
13319 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
13320 MaxWidth, InConstantContext, Approximate);
13321
13322 // Otherwise, conservatively merge.
13323 // GetExprRange requires an integer expression, but a throw expression
13324 // results in a void type.
13325 Expr *E = CO->getTrueExpr();
13326 IntRange L = E->getType()->isVoidType()
13327 ? IntRange{0, true}
13328 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
13329 E = CO->getFalseExpr();
13330 IntRange R = E->getType()->isVoidType()
13331 ? IntRange{0, true}
13332 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
13333 return IntRange::join(L, R);
13334 }
13335
13336 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
13337 IntRange (*Combine)(IntRange, IntRange) = IntRange::join;
13338
13339 switch (BO->getOpcode()) {
13340 case BO_Cmp:
13341 llvm_unreachable("builtin <=> should have class type");
13342
13343 // Boolean-valued operations are single-bit and positive.
13344 case BO_LAnd:
13345 case BO_LOr:
13346 case BO_LT:
13347 case BO_GT:
13348 case BO_LE:
13349 case BO_GE:
13350 case BO_EQ:
13351 case BO_NE:
13352 return IntRange::forBoolType();
13353
13354 // The type of the assignments is the type of the LHS, so the RHS
13355 // is not necessarily the same type.
13356 case BO_MulAssign:
13357 case BO_DivAssign:
13358 case BO_RemAssign:
13359 case BO_AddAssign:
13360 case BO_SubAssign:
13361 case BO_XorAssign:
13362 case BO_OrAssign:
13363 // TODO: bitfields?
13364 return IntRange::forValueOfType(C, GetExprType(E));
13365
13366 // Simple assignments just pass through the RHS, which will have
13367 // been coerced to the LHS type.
13368 case BO_Assign:
13369 // TODO: bitfields?
13370 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
13371 Approximate);
13372
13373 // Operations with opaque sources are black-listed.
13374 case BO_PtrMemD:
13375 case BO_PtrMemI:
13376 return IntRange::forValueOfType(C, GetExprType(E));
13377
13378 // Bitwise-and uses the *infimum* of the two source ranges.
13379 case BO_And: 13380 case BO_AndAssign: 13381 Combine = IntRange::bit_and; 13382 break; 13383 13384 // Left shift gets black-listed based on a judgement call. 13385 case BO_Shl: 13386 // ...except that we want to treat '1 << (blah)' as logically 13387 // positive. It's an important idiom. 13388 if (IntegerLiteral *I 13389 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 13390 if (I->getValue() == 1) { 13391 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 13392 return IntRange(R.Width, /*NonNegative*/ true); 13393 } 13394 } 13395 [[fallthrough]]; 13396 13397 case BO_ShlAssign: 13398 return IntRange::forValueOfType(C, GetExprType(E)); 13399 13400 // Right shift by a constant can narrow its left argument. 13401 case BO_Shr: 13402 case BO_ShrAssign: { 13403 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 13404 Approximate); 13405 13406 // If the shift amount is a positive constant, drop the width by 13407 // that much. 13408 if (std::optional<llvm::APSInt> shift = 13409 BO->getRHS()->getIntegerConstantExpr(C)) { 13410 if (shift->isNonNegative()) { 13411 unsigned zext = shift->getZExtValue(); 13412 if (zext >= L.Width) 13413 L.Width = (L.NonNegative ? 0 : 1); 13414 else 13415 L.Width -= zext; 13416 } 13417 } 13418 13419 return L; 13420 } 13421 13422 // Comma acts as its right operand. 13423 case BO_Comma: 13424 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 13425 Approximate); 13426 13427 case BO_Add: 13428 if (!Approximate) 13429 Combine = IntRange::sum; 13430 break; 13431 13432 case BO_Sub: 13433 if (BO->getLHS()->getType()->isPointerType()) 13434 return IntRange::forValueOfType(C, GetExprType(E)); 13435 if (!Approximate) 13436 Combine = IntRange::difference; 13437 break; 13438 13439 case BO_Mul: 13440 if (!Approximate) 13441 Combine = IntRange::product; 13442 break; 13443 13444 // The width of a division result is mostly determined by the size 13445 // of the LHS. 13446 case BO_Div: { 13447 // Don't 'pre-truncate' the operands. 13448 unsigned opWidth = C.getIntWidth(GetExprType(E)); 13449 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 13450 Approximate); 13451 13452 // If the divisor is constant, use that. 13453 if (std::optional<llvm::APSInt> divisor = 13454 BO->getRHS()->getIntegerConstantExpr(C)) { 13455 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 13456 if (log2 >= L.Width) 13457 L.Width = (L.NonNegative ? 0 : 1); 13458 else 13459 L.Width = std::min(L.Width - log2, MaxWidth); 13460 return L; 13461 } 13462 13463 // Otherwise, just use the LHS's width. 13464 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 13465 // could be -1. 13466 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 13467 Approximate); 13468 return IntRange(L.Width, L.NonNegative && R.NonNegative); 13469 } 13470 13471 case BO_Rem: 13472 Combine = IntRange::rem; 13473 break; 13474 13475 // The default behavior is okay for these. 13476 case BO_Xor: 13477 case BO_Or: 13478 break; 13479 } 13480 13481 // Combine the two ranges, but limit the result to the type in which we 13482 // performed the computation. 
13483 QualType T = GetExprType(E); 13484 unsigned opWidth = C.getIntWidth(T); 13485 IntRange L = 13486 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 13487 IntRange R = 13488 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 13489 IntRange C = Combine(L, R); 13490 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 13491 C.Width = std::min(C.Width, MaxWidth); 13492 return C; 13493 } 13494 13495 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 13496 switch (UO->getOpcode()) { 13497 // Boolean-valued operations are white-listed. 13498 case UO_LNot: 13499 return IntRange::forBoolType(); 13500 13501 // Operations with opaque sources are black-listed. 13502 case UO_Deref: 13503 case UO_AddrOf: // should be impossible 13504 return IntRange::forValueOfType(C, GetExprType(E)); 13505 13506 default: 13507 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 13508 Approximate); 13509 } 13510 } 13511 13512 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 13513 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 13514 Approximate); 13515 13516 if (const auto *BitField = E->getSourceBitField()) 13517 return IntRange(BitField->getBitWidthValue(C), 13518 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 13519 13520 return IntRange::forValueOfType(C, GetExprType(E)); 13521 } 13522 13523 static IntRange GetExprRange(ASTContext &C, const Expr *E, 13524 bool InConstantContext, bool Approximate) { 13525 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 13526 Approximate); 13527 } 13528 13529 /// Checks whether the given value, which currently has the given 13530 /// source semantics, has the same value when coerced through the 13531 /// target semantics. 13532 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 13533 const llvm::fltSemantics &Src, 13534 const llvm::fltSemantics &Tgt) { 13535 llvm::APFloat truncated = value; 13536 13537 bool ignored; 13538 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 13539 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 13540 13541 return truncated.bitwiseIsEqual(value); 13542 } 13543 13544 /// Checks whether the given value, which currently has the given 13545 /// source semantics, has the same value when coerced through the 13546 /// target semantics. 13547 /// 13548 /// The value might be a vector of floats (or a complex number). 13549 static bool IsSameFloatAfterCast(const APValue &value, 13550 const llvm::fltSemantics &Src, 13551 const llvm::fltSemantics &Tgt) { 13552 if (value.isFloat()) 13553 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 13554 13555 if (value.isVector()) { 13556 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 13557 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 13558 return false; 13559 return true; 13560 } 13561 13562 assert(value.isComplexFloat()); 13563 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 13564 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 13565 } 13566 13567 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 13568 bool IsListInit = false); 13569 13570 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 13571 // Suppress cases where we are comparing against an enum constant. 
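// (Illustrative) e.g. 'x <= kMaxThings' where kMaxThings is an enumerator,
// or a constant spelled through a macro other than the boolean-literal
// macros handled below.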
13572 if (const DeclRefExpr *DR = 13573 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 13574 if (isa<EnumConstantDecl>(DR->getDecl())) 13575 return true; 13576 13577 // Suppress cases where the value is expanded from a macro, unless that macro 13578 // is how a language represents a boolean literal. This is the case in both C 13579 // and Objective-C. 13580 SourceLocation BeginLoc = E->getBeginLoc(); 13581 if (BeginLoc.isMacroID()) { 13582 StringRef MacroName = Lexer::getImmediateMacroName( 13583 BeginLoc, S.getSourceManager(), S.getLangOpts()); 13584 return MacroName != "YES" && MacroName != "NO" && 13585 MacroName != "true" && MacroName != "false"; 13586 } 13587 13588 return false; 13589 } 13590 13591 static bool isKnownToHaveUnsignedValue(Expr *E) { 13592 return E->getType()->isIntegerType() && 13593 (!E->getType()->isSignedIntegerType() || 13594 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 13595 } 13596 13597 namespace { 13598 /// The promoted range of values of a type. In general this has the 13599 /// following structure: 13600 /// 13601 /// |-----------| . . . |-----------| 13602 /// ^ ^ ^ ^ 13603 /// Min HoleMin HoleMax Max 13604 /// 13605 /// ... where there is only a hole if a signed type is promoted to unsigned 13606 /// (in which case Min and Max are the smallest and largest representable 13607 /// values). 13608 struct PromotedRange { 13609 // Min, or HoleMax if there is a hole. 13610 llvm::APSInt PromotedMin; 13611 // Max, or HoleMin if there is a hole. 13612 llvm::APSInt PromotedMax; 13613 13614 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 13615 if (R.Width == 0) 13616 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 13617 else if (R.Width >= BitWidth && !Unsigned) { 13618 // Promotion made the type *narrower*. This happens when promoting 13619 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 13620 // Treat all values of 'signed int' as being in range for now. 13621 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 13622 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 13623 } else { 13624 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 13625 .extOrTrunc(BitWidth); 13626 PromotedMin.setIsUnsigned(Unsigned); 13627 13628 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 13629 .extOrTrunc(BitWidth); 13630 PromotedMax.setIsUnsigned(Unsigned); 13631 } 13632 } 13633 13634 // Determine whether this range is contiguous (has no hole). 13635 bool isContiguous() const { return PromotedMin <= PromotedMax; } 13636 13637 // Where a constant value is within the range. 
13638 enum ComparisonResult { 13639 LT = 0x1, 13640 LE = 0x2, 13641 GT = 0x4, 13642 GE = 0x8, 13643 EQ = 0x10, 13644 NE = 0x20, 13645 InRangeFlag = 0x40, 13646 13647 Less = LE | LT | NE, 13648 Min = LE | InRangeFlag, 13649 InRange = InRangeFlag, 13650 Max = GE | InRangeFlag, 13651 Greater = GE | GT | NE, 13652 13653 OnlyValue = LE | GE | EQ | InRangeFlag, 13654 InHole = NE 13655 }; 13656 13657 ComparisonResult compare(const llvm::APSInt &Value) const { 13658 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 13659 Value.isUnsigned() == PromotedMin.isUnsigned()); 13660 if (!isContiguous()) { 13661 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 13662 if (Value.isMinValue()) return Min; 13663 if (Value.isMaxValue()) return Max; 13664 if (Value >= PromotedMin) return InRange; 13665 if (Value <= PromotedMax) return InRange; 13666 return InHole; 13667 } 13668 13669 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 13670 case -1: return Less; 13671 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 13672 case 1: 13673 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 13674 case -1: return InRange; 13675 case 0: return Max; 13676 case 1: return Greater; 13677 } 13678 } 13679 13680 llvm_unreachable("impossible compare result"); 13681 } 13682 13683 static std::optional<StringRef> 13684 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 13685 if (Op == BO_Cmp) { 13686 ComparisonResult LTFlag = LT, GTFlag = GT; 13687 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 13688 13689 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 13690 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 13691 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 13692 return std::nullopt; 13693 } 13694 13695 ComparisonResult TrueFlag, FalseFlag; 13696 if (Op == BO_EQ) { 13697 TrueFlag = EQ; 13698 FalseFlag = NE; 13699 } else if (Op == BO_NE) { 13700 TrueFlag = NE; 13701 FalseFlag = EQ; 13702 } else { 13703 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 13704 TrueFlag = LT; 13705 FalseFlag = GE; 13706 } else { 13707 TrueFlag = GT; 13708 FalseFlag = LE; 13709 } 13710 if (Op == BO_GE || Op == BO_LE) 13711 std::swap(TrueFlag, FalseFlag); 13712 } 13713 if (R & TrueFlag) 13714 return StringRef("true"); 13715 if (R & FalseFlag) 13716 return StringRef("false"); 13717 return std::nullopt; 13718 } 13719 }; 13720 } 13721 13722 static bool HasEnumType(Expr *E) { 13723 // Strip off implicit integral promotions. 13724 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 13725 if (ICE->getCastKind() != CK_IntegralCast && 13726 ICE->getCastKind() != CK_NoOp) 13727 break; 13728 E = ICE->getSubExpr(); 13729 } 13730 13731 return E->getType()->isEnumeralType(); 13732 } 13733 13734 static int classifyConstantValue(Expr *Constant) { 13735 // The values of this enumeration are used in the diagnostics 13736 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 13737 enum ConstantValueKind { 13738 Miscellaneous = 0, 13739 LiteralTrue, 13740 LiteralFalse 13741 }; 13742 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 13743 return BL->getValue() ? 
ConstantValueKind::LiteralTrue
13744 : ConstantValueKind::LiteralFalse;
13745 return ConstantValueKind::Miscellaneous;
13746 }
13747
13748 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
13749 Expr *Constant, Expr *Other,
13750 const llvm::APSInt &Value,
13751 bool RhsConstant) {
13752 if (S.inTemplateInstantiation())
13753 return false;
13754
13755 Expr *OriginalOther = Other;
13756
13757 Constant = Constant->IgnoreParenImpCasts();
13758 Other = Other->IgnoreParenImpCasts();
13759
13760 // Suppress warnings on tautological comparisons between values of the same
13761 // enumeration type. There are only two ways we could warn on this:
13762 // - If the constant is outside the range of representable values of
13763 // the enumeration. In such a case, we should warn about the cast
13764 // to enumeration type, not about the comparison.
13765 // - If the constant is the maximum / minimum in-range value. For an
13766 // enumeration type, such comparisons can be meaningful and useful.
13767 if (Constant->getType()->isEnumeralType() &&
13768 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
13769 return false;
13770
13771 IntRange OtherValueRange = GetExprRange(
13772 S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);
13773
13774 QualType OtherT = Other->getType();
13775 if (const auto *AT = OtherT->getAs<AtomicType>())
13776 OtherT = AT->getValueType();
13777 IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);
13778
13779 // Special case for ObjC BOOL on targets where it's a typedef for a signed char
13780 // (namely, macOS). FIXME: IntRange::forValueOfType should do this.
13781 bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
13782 S.NSAPIObj->isObjCBOOLType(OtherT) &&
13783 OtherT->isSpecificBuiltinType(BuiltinType::SChar);
13784
13785 // Whether we're treating Other as being a bool because of the form of
13786 // expression despite it having another type (typically 'int' in C).
13787 bool OtherIsBooleanDespiteType =
13788 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
13789 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
13790 OtherTypeRange = OtherValueRange = IntRange::forBoolType();
13791
13792 // Check if all values in the range of possible values of this expression
13793 // lead to the same comparison outcome.
13794 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
13795 Value.isUnsigned());
13796 auto Cmp = OtherPromotedValueRange.compare(Value);
13797 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
13798 if (!Result)
13799 return false;
13800
13801 // Also consider the range determined by the type alone. This allows us to
13802 // classify the warning under the proper diagnostic group.
13803 bool TautologicalTypeCompare = false;
13804 {
13805 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
13806 Value.isUnsigned());
13807 auto TypeCmp = OtherPromotedTypeRange.compare(Value);
13808 if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
13809 RhsConstant)) {
13810 TautologicalTypeCompare = true;
13811 Cmp = TypeCmp;
13812 Result = TypeResult;
13813 }
13814 }
13815
13816 // Don't warn if the non-constant operand actually always evaluates to the
13817 // same value.
13818 if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
13819 return false;
13820
13821 // Suppress the diagnostic for an in-range comparison if the constant comes
13822 // from a macro or enumerator.
We don't want to diagnose 13823 // 13824 // some_long_value <= INT_MAX 13825 // 13826 // when sizeof(int) == sizeof(long). 13827 bool InRange = Cmp & PromotedRange::InRangeFlag; 13828 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 13829 return false; 13830 13831 // A comparison of an unsigned bit-field against 0 is really a type problem, 13832 // even though at the type level the bit-field might promote to 'signed int'. 13833 if (Other->refersToBitField() && InRange && Value == 0 && 13834 Other->getType()->isUnsignedIntegerOrEnumerationType()) 13835 TautologicalTypeCompare = true; 13836 13837 // If this is a comparison to an enum constant, include that 13838 // constant in the diagnostic. 13839 const EnumConstantDecl *ED = nullptr; 13840 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 13841 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 13842 13843 // Should be enough for uint128 (39 decimal digits) 13844 SmallString<64> PrettySourceValue; 13845 llvm::raw_svector_ostream OS(PrettySourceValue); 13846 if (ED) { 13847 OS << '\'' << *ED << "' (" << Value << ")"; 13848 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 13849 Constant->IgnoreParenImpCasts())) { 13850 OS << (BL->getValue() ? "YES" : "NO"); 13851 } else { 13852 OS << Value; 13853 } 13854 13855 if (!TautologicalTypeCompare) { 13856 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 13857 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 13858 << E->getOpcodeStr() << OS.str() << *Result 13859 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 13860 return true; 13861 } 13862 13863 if (IsObjCSignedCharBool) { 13864 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 13865 S.PDiag(diag::warn_tautological_compare_objc_bool) 13866 << OS.str() << *Result); 13867 return true; 13868 } 13869 13870 // FIXME: We use a somewhat different formatting for the in-range cases and 13871 // cases involving boolean values for historical reasons. We should pick a 13872 // consistent way of presenting these diagnostics. 13873 if (!InRange || Other->isKnownToHaveBooleanValue()) { 13874 13875 S.DiagRuntimeBehavior( 13876 E->getOperatorLoc(), E, 13877 S.PDiag(!InRange ? diag::warn_out_of_range_compare 13878 : diag::warn_tautological_bool_compare) 13879 << OS.str() << classifyConstantValue(Constant) << OtherT 13880 << OtherIsBooleanDespiteType << *Result 13881 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 13882 } else { 13883 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 13884 unsigned Diag = 13885 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 13886 ? (HasEnumType(OriginalOther) 13887 ? diag::warn_unsigned_enum_always_true_comparison 13888 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 13889 : diag::warn_unsigned_always_true_comparison) 13890 : diag::warn_tautological_constant_compare; 13891 13892 S.Diag(E->getOperatorLoc(), Diag) 13893 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 13894 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 13895 } 13896 13897 return true; 13898 } 13899 13900 /// Analyze the operands of the given comparison. Implements the 13901 /// fallback case from AnalyzeComparison. 13902 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 13903 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 13904 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 13905 } 13906 13907 /// Implements -Wsign-compare. 
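/// A typical case (illustrative): comparing 'int i' against 'unsigned u' with
/// 'i < u' converts 'i' to unsigned, so a negative 'i' compares as a very
/// large value; no warning is emitted when the signed operand is provably
/// non-negative.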
13908 /// 13909 /// \param E the binary operator to check for warnings 13910 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 13911 // The type the comparison is being performed in. 13912 QualType T = E->getLHS()->getType(); 13913 13914 // Only analyze comparison operators where both sides have been converted to 13915 // the same type. 13916 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 13917 return AnalyzeImpConvsInComparison(S, E); 13918 13919 // Don't analyze value-dependent comparisons directly. 13920 if (E->isValueDependent()) 13921 return AnalyzeImpConvsInComparison(S, E); 13922 13923 Expr *LHS = E->getLHS(); 13924 Expr *RHS = E->getRHS(); 13925 13926 if (T->isIntegralType(S.Context)) { 13927 std::optional<llvm::APSInt> RHSValue = 13928 RHS->getIntegerConstantExpr(S.Context); 13929 std::optional<llvm::APSInt> LHSValue = 13930 LHS->getIntegerConstantExpr(S.Context); 13931 13932 // We don't care about expressions whose result is a constant. 13933 if (RHSValue && LHSValue) 13934 return AnalyzeImpConvsInComparison(S, E); 13935 13936 // We only care about expressions where just one side is literal 13937 if ((bool)RHSValue ^ (bool)LHSValue) { 13938 // Is the constant on the RHS or LHS? 13939 const bool RhsConstant = (bool)RHSValue; 13940 Expr *Const = RhsConstant ? RHS : LHS; 13941 Expr *Other = RhsConstant ? LHS : RHS; 13942 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 13943 13944 // Check whether an integer constant comparison results in a value 13945 // of 'true' or 'false'. 13946 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 13947 return AnalyzeImpConvsInComparison(S, E); 13948 } 13949 } 13950 13951 if (!T->hasUnsignedIntegerRepresentation()) { 13952 // We don't do anything special if this isn't an unsigned integral 13953 // comparison: we're only interested in integral comparisons, and 13954 // signed comparisons only happen in cases we don't care to warn about. 13955 return AnalyzeImpConvsInComparison(S, E); 13956 } 13957 13958 LHS = LHS->IgnoreParenImpCasts(); 13959 RHS = RHS->IgnoreParenImpCasts(); 13960 13961 if (!S.getLangOpts().CPlusPlus) { 13962 // Avoid warning about comparison of integers with different signs when 13963 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 13964 // the type of `E`. 13965 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 13966 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 13967 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 13968 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 13969 } 13970 13971 // Check to see if one of the (unmodified) operands is of different 13972 // signedness. 13973 Expr *signedOperand, *unsignedOperand; 13974 if (LHS->getType()->hasSignedIntegerRepresentation()) { 13975 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 13976 "unsigned comparison between two signed integer expressions?"); 13977 signedOperand = LHS; 13978 unsignedOperand = RHS; 13979 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 13980 signedOperand = RHS; 13981 unsignedOperand = LHS; 13982 } else { 13983 return AnalyzeImpConvsInComparison(S, E); 13984 } 13985 13986 // Otherwise, calculate the effective range of the signed operand. 13987 IntRange signedRange = GetExprRange( 13988 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 13989 13990 // Go ahead and analyze implicit conversions in the operands. 
Note
13991 // that we skip the implicit conversions on both sides.
13992 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
13993 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
13994
13995 // If the signed range is non-negative, -Wsign-compare won't fire.
13996 if (signedRange.NonNegative)
13997 return;
13998
13999 // For (in)equality comparisons, if the unsigned operand is a
14000 // constant which cannot collide with an overflowed signed operand,
14001 // then reinterpreting the signed operand as unsigned will not
14002 // change the result of the comparison.
14003 if (E->isEqualityOp()) {
14004 unsigned comparisonWidth = S.Context.getIntWidth(T);
14005 IntRange unsignedRange =
14006 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
14007 /*Approximate*/ true);
14008
14009 // We should never be unable to prove that the unsigned operand is
14010 // non-negative.
14011 assert(unsignedRange.NonNegative && "unsigned range includes negative?");
14012
14013 if (unsignedRange.Width < comparisonWidth)
14014 return;
14015 }
14016
14017 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
14018 S.PDiag(diag::warn_mixed_sign_comparison)
14019 << LHS->getType() << RHS->getType()
14020 << LHS->getSourceRange() << RHS->getSourceRange());
14021 }
14022
14023 /// Analyzes an attempt to assign the given value to a bitfield.
14024 ///
14025 /// Returns true if there was something fishy about the attempt.
14026 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
14027 SourceLocation InitLoc) {
14028 assert(Bitfield->isBitField());
14029 if (Bitfield->isInvalidDecl())
14030 return false;
14031
14032 // White-list bool bitfields.
14033 QualType BitfieldType = Bitfield->getType();
14034 if (BitfieldType->isBooleanType())
14035 return false;
14036
14037 if (BitfieldType->isEnumeralType()) {
14038 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
14039 // If the underlying enum type was not explicitly specified as an unsigned
14040 // type and the enum contains only positive values, MSVC++ will cause an
14041 // inconsistency by storing this as a signed type.
14042 if (S.getLangOpts().CPlusPlus11 &&
14043 !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
14044 BitfieldEnumDecl->getNumPositiveBits() > 0 &&
14045 BitfieldEnumDecl->getNumNegativeBits() == 0) {
14046 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
14047 << BitfieldEnumDecl;
14048 }
14049 }
14050
14051 // Ignore value- or type-dependent expressions.
14052 if (Bitfield->getBitWidth()->isValueDependent() ||
14053 Bitfield->getBitWidth()->isTypeDependent() ||
14054 Init->isValueDependent() ||
14055 Init->isTypeDependent())
14056 return false;
14057
14058 Expr *OriginalInit = Init->IgnoreParenImpCasts();
14059 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
14060
14061 Expr::EvalResult Result;
14062 if (!OriginalInit->EvaluateAsInt(Result, S.Context,
14063 Expr::SE_AllowSideEffects)) {
14064 // The RHS is not constant. If the RHS has an enum type, make sure the
14065 // bitfield is wide enough to hold all the values of the enum without
14066 // truncation.
14067 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
14068 EnumDecl *ED = EnumTy->getDecl();
14069 bool SignedBitfield = BitfieldType->isSignedIntegerType();
14070
14071 // Enum types are implicitly signed on Windows, so check if there are any
14072 // negative enumerators to see if the enum was intended to be signed or
14073 // not.
14074 bool SignedEnum = ED->getNumNegativeBits() > 0; 14075 14076 // Check for surprising sign changes when assigning enum values to a 14077 // bitfield of different signedness. If the bitfield is signed and we 14078 // have exactly the right number of bits to store this unsigned enum, 14079 // suggest changing the enum to an unsigned type. This typically happens 14080 // on Windows where unfixed enums always use an underlying type of 'int'. 14081 unsigned DiagID = 0; 14082 if (SignedEnum && !SignedBitfield) { 14083 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 14084 } else if (SignedBitfield && !SignedEnum && 14085 ED->getNumPositiveBits() == FieldWidth) { 14086 DiagID = diag::warn_signed_bitfield_enum_conversion; 14087 } 14088 14089 if (DiagID) { 14090 S.Diag(InitLoc, DiagID) << Bitfield << ED; 14091 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 14092 SourceRange TypeRange = 14093 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 14094 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 14095 << SignedEnum << TypeRange; 14096 } 14097 14098 // Compute the required bitwidth. If the enum has negative values, we need 14099 // one more bit than the normal number of positive bits to represent the 14100 // sign bit. 14101 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 14102 ED->getNumNegativeBits()) 14103 : ED->getNumPositiveBits(); 14104 14105 // Check the bitwidth. 14106 if (BitsNeeded > FieldWidth) { 14107 Expr *WidthExpr = Bitfield->getBitWidth(); 14108 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 14109 << Bitfield << ED; 14110 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 14111 << BitsNeeded << ED << WidthExpr->getSourceRange(); 14112 } 14113 } 14114 14115 return false; 14116 } 14117 14118 llvm::APSInt Value = Result.Val.getInt(); 14119 14120 unsigned OriginalWidth = Value.getBitWidth(); 14121 14122 // In C, the macro 'true' from stdbool.h will evaluate to '1'; To reduce 14123 // false positives where the user is demonstrating they intend to use the 14124 // bit-field as a Boolean, check to see if the value is 1 and we're assigning 14125 // to a one-bit bit-field to see if the value came from a macro named 'true'. 14126 bool OneAssignedToOneBitBitfield = FieldWidth == 1 && Value == 1; 14127 if (OneAssignedToOneBitBitfield && !S.LangOpts.CPlusPlus) { 14128 SourceLocation MaybeMacroLoc = OriginalInit->getBeginLoc(); 14129 if (S.SourceMgr.isInSystemMacro(MaybeMacroLoc) && 14130 S.findMacroSpelling(MaybeMacroLoc, "true")) 14131 return false; 14132 } 14133 14134 if (!Value.isSigned() || Value.isNegative()) 14135 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 14136 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 14137 OriginalWidth = Value.getSignificantBits(); 14138 14139 if (OriginalWidth <= FieldWidth) 14140 return false; 14141 14142 // Compute the value which the bitfield will contain. 14143 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 14144 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 14145 14146 // Check whether the stored value is equal to the original value. 14147 TruncatedValue = TruncatedValue.extend(OriginalWidth); 14148 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 14149 return false; 14150 14151 std::string PrettyValue = toString(Value, 10); 14152 std::string PrettyTrunc = toString(TruncatedValue, 10); 14153 14154 S.Diag(InitLoc, OneAssignedToOneBitBitfield 14155 ? 
diag::warn_impcast_single_bit_bitield_precision_constant 14156 : diag::warn_impcast_bitfield_precision_constant) 14157 << PrettyValue << PrettyTrunc << OriginalInit->getType() 14158 << Init->getSourceRange(); 14159 14160 return true; 14161 } 14162 14163 /// Analyze the given simple or compound assignment for warning-worthy 14164 /// operations. 14165 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 14166 // Just recurse on the LHS. 14167 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 14168 14169 // We want to recurse on the RHS as normal unless we're assigning to 14170 // a bitfield. 14171 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 14172 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 14173 E->getOperatorLoc())) { 14174 // Recurse, ignoring any implicit conversions on the RHS. 14175 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 14176 E->getOperatorLoc()); 14177 } 14178 } 14179 14180 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 14181 14182 // Diagnose implicitly sequentially-consistent atomic assignment. 14183 if (E->getLHS()->getType()->isAtomicType()) 14184 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 14185 } 14186 14187 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 14188 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 14189 SourceLocation CContext, unsigned diag, 14190 bool pruneControlFlow = false) { 14191 if (pruneControlFlow) { 14192 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14193 S.PDiag(diag) 14194 << SourceType << T << E->getSourceRange() 14195 << SourceRange(CContext)); 14196 return; 14197 } 14198 S.Diag(E->getExprLoc(), diag) 14199 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 14200 } 14201 14202 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 14203 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 14204 SourceLocation CContext, 14205 unsigned diag, bool pruneControlFlow = false) { 14206 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 14207 } 14208 14209 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 14210 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 14211 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 14212 } 14213 14214 static void adornObjCBoolConversionDiagWithTernaryFixit( 14215 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 14216 Expr *Ignored = SourceExpr->IgnoreImplicit(); 14217 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 14218 Ignored = OVE->getSourceExpr(); 14219 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 14220 isa<BinaryOperator>(Ignored) || 14221 isa<CXXOperatorCallExpr>(Ignored); 14222 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 14223 if (NeedsParens) 14224 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 14225 << FixItHint::CreateInsertion(EndLoc, ")"); 14226 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 14227 } 14228 14229 /// Diagnose an implicit cast from a floating point value to an integer value. 
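/// For instance (illustrative), 'int i = 2.5;' truncates to 2 and is
/// diagnosed with both the source and target values; a non-constant operand
/// falls back to the generic float-to-integer warning.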
14230 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 14231 SourceLocation CContext) { 14232 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 14233 const bool PruneWarnings = S.inTemplateInstantiation(); 14234 14235 Expr *InnerE = E->IgnoreParenImpCasts(); 14236 // We also want to warn on, e.g., "int i = -1.234" 14237 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 14238 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 14239 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 14240 14241 const bool IsLiteral = 14242 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 14243 14244 llvm::APFloat Value(0.0); 14245 bool IsConstant = 14246 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 14247 if (!IsConstant) { 14248 if (isObjCSignedCharBool(S, T)) { 14249 return adornObjCBoolConversionDiagWithTernaryFixit( 14250 S, E, 14251 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 14252 << E->getType()); 14253 } 14254 14255 return DiagnoseImpCast(S, E, T, CContext, 14256 diag::warn_impcast_float_integer, PruneWarnings); 14257 } 14258 14259 bool isExact = false; 14260 14261 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 14262 T->hasUnsignedIntegerRepresentation()); 14263 llvm::APFloat::opStatus Result = Value.convertToInteger( 14264 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 14265 14266 // FIXME: Force the precision of the source value down so we don't print 14267 // digits which are usually useless (we don't really care here if we 14268 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 14269 // would automatically print the shortest representation, but it's a bit 14270 // tricky to implement. 14271 SmallString<16> PrettySourceValue; 14272 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 14273 precision = (precision * 59 + 195) / 196; 14274 Value.toString(PrettySourceValue, precision); 14275 14276 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 14277 return adornObjCBoolConversionDiagWithTernaryFixit( 14278 S, E, 14279 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 14280 << PrettySourceValue); 14281 } 14282 14283 if (Result == llvm::APFloat::opOK && isExact) { 14284 if (IsLiteral) return; 14285 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 14286 PruneWarnings); 14287 } 14288 14289 // Conversion of a floating-point value to a non-bool integer where the 14290 // integral part cannot be represented by the integer type is undefined. 14291 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 14292 return DiagnoseImpCast( 14293 S, E, T, CContext, 14294 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 14295 : diag::warn_impcast_float_to_integer_out_of_range, 14296 PruneWarnings); 14297 14298 unsigned DiagID = 0; 14299 if (IsLiteral) { 14300 // Warn on floating point literal to integer. 14301 DiagID = diag::warn_impcast_literal_float_to_integer; 14302 } else if (IntegerValue == 0) { 14303 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 14304 return DiagnoseImpCast(S, E, T, CContext, 14305 diag::warn_impcast_float_integer, PruneWarnings); 14306 } 14307 // Warn on non-zero to zero conversion. 
14308 DiagID = diag::warn_impcast_float_to_integer_zero; 14309 } else { 14310 if (IntegerValue.isUnsigned()) { 14311 if (!IntegerValue.isMaxValue()) { 14312 return DiagnoseImpCast(S, E, T, CContext, 14313 diag::warn_impcast_float_integer, PruneWarnings); 14314 } 14315 } else { // IntegerValue.isSigned() 14316 if (!IntegerValue.isMaxSignedValue() && 14317 !IntegerValue.isMinSignedValue()) { 14318 return DiagnoseImpCast(S, E, T, CContext, 14319 diag::warn_impcast_float_integer, PruneWarnings); 14320 } 14321 } 14322 // Warn on evaluatable floating point expression to integer conversion. 14323 DiagID = diag::warn_impcast_float_to_integer; 14324 } 14325 14326 SmallString<16> PrettyTargetValue; 14327 if (IsBool) 14328 PrettyTargetValue = Value.isZero() ? "false" : "true"; 14329 else 14330 IntegerValue.toString(PrettyTargetValue); 14331 14332 if (PruneWarnings) { 14333 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14334 S.PDiag(DiagID) 14335 << E->getType() << T.getUnqualifiedType() 14336 << PrettySourceValue << PrettyTargetValue 14337 << E->getSourceRange() << SourceRange(CContext)); 14338 } else { 14339 S.Diag(E->getExprLoc(), DiagID) 14340 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 14341 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 14342 } 14343 } 14344 14345 /// Analyze the given compound assignment for the possible losing of 14346 /// floating-point precision. 14347 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 14348 assert(isa<CompoundAssignOperator>(E) && 14349 "Must be compound assignment operation"); 14350 // Recurse on the LHS and RHS in here 14351 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 14352 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 14353 14354 if (E->getLHS()->getType()->isAtomicType()) 14355 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 14356 14357 // Now check the outermost expression 14358 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 14359 const auto *RBT = cast<CompoundAssignOperator>(E) 14360 ->getComputationResultType() 14361 ->getAs<BuiltinType>(); 14362 14363 // The below checks assume source is floating point. 14364 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 14365 14366 // If source is floating point but target is an integer. 14367 if (ResultBT->isInteger()) 14368 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 14369 E->getExprLoc(), diag::warn_impcast_float_integer); 14370 14371 if (!ResultBT->isFloatingPoint()) 14372 return; 14373 14374 // If both source and target are floating points, warn about losing precision. 14375 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 14376 QualType(ResultBT, 0), QualType(RBT, 0)); 14377 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 14378 // warn about dropping FP rank. 
14379 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 14380 diag::warn_impcast_float_result_precision); 14381 } 14382 14383 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 14384 IntRange Range) { 14385 if (!Range.Width) return "0"; 14386 14387 llvm::APSInt ValueInRange = Value; 14388 ValueInRange.setIsSigned(!Range.NonNegative); 14389 ValueInRange = ValueInRange.trunc(Range.Width); 14390 return toString(ValueInRange, 10); 14391 } 14392 14393 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 14394 if (!isa<ImplicitCastExpr>(Ex)) 14395 return false; 14396 14397 Expr *InnerE = Ex->IgnoreParenImpCasts(); 14398 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 14399 const Type *Source = 14400 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 14401 if (Target->isDependentType()) 14402 return false; 14403 14404 const BuiltinType *FloatCandidateBT = 14405 dyn_cast<BuiltinType>(ToBool ? Source : Target); 14406 const Type *BoolCandidateType = ToBool ? Target : Source; 14407 14408 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 14409 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 14410 } 14411 14412 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 14413 SourceLocation CC) { 14414 unsigned NumArgs = TheCall->getNumArgs(); 14415 for (unsigned i = 0; i < NumArgs; ++i) { 14416 Expr *CurrA = TheCall->getArg(i); 14417 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 14418 continue; 14419 14420 bool IsSwapped = ((i > 0) && 14421 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 14422 IsSwapped |= ((i < (NumArgs - 1)) && 14423 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 14424 if (IsSwapped) { 14425 // Warn on this floating-point to bool conversion. 14426 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 14427 CurrA->getType(), CC, 14428 diag::warn_impcast_floating_point_to_bool); 14429 } 14430 } 14431 } 14432 14433 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 14434 SourceLocation CC) { 14435 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 14436 E->getExprLoc())) 14437 return; 14438 14439 // Don't warn on functions which have return type nullptr_t. 14440 if (isa<CallExpr>(E)) 14441 return; 14442 14443 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 14444 const Expr *NewE = E->IgnoreParenImpCasts(); 14445 bool IsGNUNullExpr = isa<GNUNullExpr>(NewE); 14446 bool HasNullPtrType = NewE->getType()->isNullPtrType(); 14447 if (!IsGNUNullExpr && !HasNullPtrType) 14448 return; 14449 14450 // Return if target type is a safe conversion. 14451 if (T->isAnyPointerType() || T->isBlockPointerType() || 14452 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 14453 return; 14454 14455 SourceLocation Loc = E->getSourceRange().getBegin(); 14456 14457 // Venture through the macro stacks to get to the source of macro arguments. 14458 // The new location is a better location than the complete location that was 14459 // passed in. 14460 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 14461 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 14462 14463 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
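// (Illustrative) e.g. 'int x = NULL;' in C++, where NULL commonly expands to
// __null.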
14464 if (IsGNUNullExpr && Loc.isMacroID()) { 14465 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 14466 Loc, S.SourceMgr, S.getLangOpts()); 14467 if (MacroName == "NULL") 14468 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 14469 } 14470 14471 // Only warn if the null and context location are in the same macro expansion. 14472 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 14473 return; 14474 14475 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 14476 << HasNullPtrType << T << SourceRange(CC) 14477 << FixItHint::CreateReplacement(Loc, 14478 S.getFixItZeroLiteralForType(T, Loc)); 14479 } 14480 14481 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 14482 ObjCArrayLiteral *ArrayLiteral); 14483 14484 static void 14485 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 14486 ObjCDictionaryLiteral *DictionaryLiteral); 14487 14488 /// Check a single element within a collection literal against the 14489 /// target element type. 14490 static void checkObjCCollectionLiteralElement(Sema &S, 14491 QualType TargetElementType, 14492 Expr *Element, 14493 unsigned ElementKind) { 14494 // Skip a bitcast to 'id' or qualified 'id'. 14495 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 14496 if (ICE->getCastKind() == CK_BitCast && 14497 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 14498 Element = ICE->getSubExpr(); 14499 } 14500 14501 QualType ElementType = Element->getType(); 14502 ExprResult ElementResult(Element); 14503 if (ElementType->getAs<ObjCObjectPointerType>() && 14504 S.CheckSingleAssignmentConstraints(TargetElementType, 14505 ElementResult, 14506 false, false) 14507 != Sema::Compatible) { 14508 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 14509 << ElementType << ElementKind << TargetElementType 14510 << Element->getSourceRange(); 14511 } 14512 14513 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 14514 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 14515 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 14516 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 14517 } 14518 14519 /// Check an Objective-C array literal being converted to the given 14520 /// target type. 14521 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 14522 ObjCArrayLiteral *ArrayLiteral) { 14523 if (!S.NSArrayDecl) 14524 return; 14525 14526 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 14527 if (!TargetObjCPtr) 14528 return; 14529 14530 if (TargetObjCPtr->isUnspecialized() || 14531 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 14532 != S.NSArrayDecl->getCanonicalDecl()) 14533 return; 14534 14535 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 14536 if (TypeArgs.size() != 1) 14537 return; 14538 14539 QualType TargetElementType = TypeArgs[0]; 14540 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 14541 checkObjCCollectionLiteralElement(S, TargetElementType, 14542 ArrayLiteral->getElement(I), 14543 0); 14544 } 14545 } 14546 14547 /// Check an Objective-C dictionary literal being converted to the given 14548 /// target type. 
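/// For example (illustrative): initializing an
/// 'NSDictionary<NSString *, NSNumber *> *' from '@{@"key" : @"value"}'
/// should diagnose the value element, since the string literal is not an
/// NSNumber.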
14549 static void
14550 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
14551 ObjCDictionaryLiteral *DictionaryLiteral) {
14552 if (!S.NSDictionaryDecl)
14553 return;
14554
14555 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
14556 if (!TargetObjCPtr)
14557 return;
14558
14559 if (TargetObjCPtr->isUnspecialized() ||
14560 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
14561 != S.NSDictionaryDecl->getCanonicalDecl())
14562 return;
14563
14564 auto TypeArgs = TargetObjCPtr->getTypeArgs();
14565 if (TypeArgs.size() != 2)
14566 return;
14567
14568 QualType TargetKeyType = TypeArgs[0];
14569 QualType TargetObjectType = TypeArgs[1];
14570 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
14571 auto Element = DictionaryLiteral->getKeyValueElement(I);
14572 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
14573 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
14574 }
14575 }
14576
14577 // Helper function to filter out cases for constant width constant conversion.
14578 // Don't warn on char array initialization or for non-decimal values.
14579 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
14580 SourceLocation CC) {
14581 // If initializing from a constant, and the constant starts with '0',
14582 // then it is a binary, octal, or hexadecimal. Allow these constants
14583 // to fill all the bits, even if there is a sign change.
14584 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
14585 const char FirstLiteralCharacter =
14586 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
14587 if (FirstLiteralCharacter == '0')
14588 return false;
14589 }
14590
14591 // If the CC location points to a '{', and the type is char, then assume
14592 // it is an array initialization.
14593 if (CC.isValid() && T->isCharType()) {
14594 const char FirstContextCharacter =
14595 S.getSourceManager().getCharacterData(CC)[0];
14596 if (FirstContextCharacter == '{')
14597 return false;
14598 }
14599
14600 return true;
14601 }
14602
14603 static const IntegerLiteral *getIntegerLiteral(Expr *E) {
14604 const auto *IL = dyn_cast<IntegerLiteral>(E);
14605 if (!IL) {
14606 if (auto *UO = dyn_cast<UnaryOperator>(E)) {
14607 if (UO->getOpcode() == UO_Minus)
14608 return dyn_cast<IntegerLiteral>(UO->getSubExpr());
14609 }
14610 }
14611
14612 return IL;
14613 }
14614
14615 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
14616 E = E->IgnoreParenImpCasts();
14617 SourceLocation ExprLoc = E->getExprLoc();
14618
14619 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
14620 BinaryOperator::Opcode Opc = BO->getOpcode();
14621 Expr::EvalResult Result;
14622 // Do not diagnose unsigned shifts.
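// (Illustrative) in a boolean context, '0 << n' is always false and a
// constant-foldable shift such as '1 << 4' is always true; both are
// diagnosed below.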
14623 if (Opc == BO_Shl) {
14624 const auto *LHS = getIntegerLiteral(BO->getLHS());
14625 const auto *RHS = getIntegerLiteral(BO->getRHS());
14626 if (LHS && LHS->getValue() == 0)
14627 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
14628 else if (!E->isValueDependent() && LHS && RHS &&
14629 RHS->getValue().isNonNegative() &&
14630 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
14631 S.Diag(ExprLoc, diag::warn_left_shift_always)
14632 << (Result.Val.getInt() != 0);
14633 else if (E->getType()->isSignedIntegerType())
14634 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
14635 }
14636 }
14637
14638 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
14639 const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
14640 const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
14641 if (!LHS || !RHS)
14642 return;
14643 if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
14644 (RHS->getValue() == 0 || RHS->getValue() == 1))
14645 // Do not diagnose common idioms.
14646 return;
14647 if (LHS->getValue() != 0 && RHS->getValue() != 0)
14648 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
14649 }
14650 }
14651
14652 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
14653 SourceLocation CC,
14654 bool *ICContext = nullptr,
14655 bool IsListInit = false) {
14656 if (E->isTypeDependent() || E->isValueDependent()) return;
14657
14658 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
14659 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
14660 if (Source == Target) return;
14661 if (Target->isDependentType()) return;
14662
14663 // If the conversion context location is invalid, don't complain. We also
14664 // don't want to emit a warning if the issue occurs from the expansion of
14665 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
14666 // delay this check as long as possible. Once we detect we are in that
14667 // scenario, we just return.
14668 if (CC.isInvalid())
14669 return;
14670
14671 if (Source->isAtomicType())
14672 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
14673
14674 // Diagnose implicit casts to bool.
14675 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
14676 if (isa<StringLiteral>(E))
14677 // Warn on string literal to bool. Checks for string literals in logical
14678 // AND expressions, for instance, assert(0 && "error here"), are
14679 // prevented by a check in AnalyzeImplicitConversions().
14680 return DiagnoseImpCast(S, E, T, CC,
14681 diag::warn_impcast_string_literal_to_bool);
14682 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
14683 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
14684 // This covers the literal expressions that evaluate to Objective-C
14685 // objects.
14686 return DiagnoseImpCast(S, E, T, CC,
14687 diag::warn_impcast_objective_c_literal_to_bool);
14688 }
14689 if (Source->isPointerType() || Source->canDecayToPointerType()) {
14690 // Warn on pointer to bool conversion that is always true.
14691 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
14692 SourceRange(CC));
14693 }
14694 }
14695
14696 // If we're converting a constant to an ObjC BOOL on a platform where BOOL
14697 // is a typedef for signed char (macOS), then that constant value has to be 1
14698 // or 0.
14699 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 14700 Expr::EvalResult Result; 14701 if (E->EvaluateAsInt(Result, S.getASTContext(), 14702 Expr::SE_AllowSideEffects)) { 14703 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 14704 adornObjCBoolConversionDiagWithTernaryFixit( 14705 S, E, 14706 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 14707 << toString(Result.Val.getInt(), 10)); 14708 } 14709 return; 14710 } 14711 } 14712 14713 // Check implicit casts from Objective-C collection literals to specialized 14714 // collection types, e.g., NSArray<NSString *> *. 14715 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 14716 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 14717 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 14718 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 14719 14720 // Strip vector types. 14721 if (isa<VectorType>(Source)) { 14722 if (Target->isVLSTBuiltinType() && 14723 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 14724 QualType(Source, 0)) || 14725 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 14726 QualType(Source, 0)))) 14727 return; 14728 14729 if (Target->isRVVVLSBuiltinType() && 14730 (S.Context.areCompatibleRVVTypes(QualType(Target, 0), 14731 QualType(Source, 0)) || 14732 S.Context.areLaxCompatibleRVVTypes(QualType(Target, 0), 14733 QualType(Source, 0)))) 14734 return; 14735 14736 if (!isa<VectorType>(Target)) { 14737 if (S.SourceMgr.isInSystemMacro(CC)) 14738 return; 14739 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 14740 } 14741 14742 // If the vector cast is cast between two vectors of the same size, it is 14743 // a bitcast, not a conversion. 14744 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 14745 return; 14746 14747 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 14748 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 14749 } 14750 if (auto VecTy = dyn_cast<VectorType>(Target)) 14751 Target = VecTy->getElementType().getTypePtr(); 14752 14753 // Strip complex types. 14754 if (isa<ComplexType>(Source)) { 14755 if (!isa<ComplexType>(Target)) { 14756 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 14757 return; 14758 14759 return DiagnoseImpCast(S, E, T, CC, 14760 S.getLangOpts().CPlusPlus 14761 ? diag::err_impcast_complex_scalar 14762 : diag::warn_impcast_complex_scalar); 14763 } 14764 14765 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 14766 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 14767 } 14768 14769 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 14770 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 14771 14772 // Strip SVE vector types 14773 if (SourceBT && SourceBT->isVLSTBuiltinType()) { 14774 // Need the original target type for vector type checks 14775 const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr(); 14776 // Handle conversion from scalable to fixed when msve-vector-bits is 14777 // specified 14778 if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0), 14779 QualType(Source, 0)) || 14780 S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0), 14781 QualType(Source, 0))) 14782 return; 14783 14784 // If the vector cast is cast between two vectors of the same size, it is 14785 // a bitcast, not a conversion. 
14786 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 14787 return; 14788 14789 Source = SourceBT->getSveEltType(S.Context).getTypePtr(); 14790 } 14791 14792 if (TargetBT && TargetBT->isVLSTBuiltinType()) 14793 Target = TargetBT->getSveEltType(S.Context).getTypePtr(); 14794 14795 // If the source is floating point... 14796 if (SourceBT && SourceBT->isFloatingPoint()) { 14797 // ...and the target is floating point... 14798 if (TargetBT && TargetBT->isFloatingPoint()) { 14799 // ...then warn if we're dropping FP rank. 14800 14801 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 14802 QualType(SourceBT, 0), QualType(TargetBT, 0)); 14803 if (Order > 0) { 14804 // Don't warn about float constants that are precisely 14805 // representable in the target type. 14806 Expr::EvalResult result; 14807 if (E->EvaluateAsRValue(result, S.Context)) { 14808 // Value might be a float, a float vector, or a float complex. 14809 if (IsSameFloatAfterCast(result.Val, 14810 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 14811 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 14812 return; 14813 } 14814 14815 if (S.SourceMgr.isInSystemMacro(CC)) 14816 return; 14817 14818 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 14819 } 14820 // ... or possibly if we're increasing rank, too 14821 else if (Order < 0) { 14822 if (S.SourceMgr.isInSystemMacro(CC)) 14823 return; 14824 14825 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 14826 } 14827 return; 14828 } 14829 14830 // If the target is integral, always warn. 14831 if (TargetBT && TargetBT->isInteger()) { 14832 if (S.SourceMgr.isInSystemMacro(CC)) 14833 return; 14834 14835 DiagnoseFloatingImpCast(S, E, T, CC); 14836 } 14837 14838 // Detect the case where a call result is converted from floating-point to 14839 // to bool, and the final argument to the call is converted from bool, to 14840 // discover this typo: 14841 // 14842 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 14843 // 14844 // FIXME: This is an incredibly special case; is there some more general 14845 // way to detect this class of misplaced-parentheses bug? 14846 if (Target->isBooleanType() && isa<CallExpr>(E)) { 14847 // Check last argument of function call to see if it is an 14848 // implicit cast from a type matching the type the result 14849 // is being cast to. 14850 CallExpr *CEx = cast<CallExpr>(E); 14851 if (unsigned NumArgs = CEx->getNumArgs()) { 14852 Expr *LastA = CEx->getArg(NumArgs - 1); 14853 Expr *InnerE = LastA->IgnoreParenImpCasts(); 14854 if (isa<ImplicitCastExpr>(LastA) && 14855 InnerE->getType()->isBooleanType()) { 14856 // Warn on this floating-point to bool conversion 14857 DiagnoseImpCast(S, E, T, CC, 14858 diag::warn_impcast_floating_point_to_bool); 14859 } 14860 } 14861 } 14862 return; 14863 } 14864 14865 // Valid casts involving fixed point types should be accounted for here. 
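// Illustrative example (a sketch; requires fixed-point support, e.g.
// -ffixed-point): `short _Fract f = 2;` converts the integer constant 2 to
// a type whose range is [-1, 1), which overflows and is reported through
// warn_impcast_fixed_point_range below.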
14866 if (Source->isFixedPointType()) { 14867 if (Target->isUnsaturatedFixedPointType()) { 14868 Expr::EvalResult Result; 14869 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 14870 S.isConstantEvaluated())) { 14871 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 14872 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 14873 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 14874 if (Value > MaxVal || Value < MinVal) { 14875 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14876 S.PDiag(diag::warn_impcast_fixed_point_range) 14877 << Value.toString() << T 14878 << E->getSourceRange() 14879 << clang::SourceRange(CC)); 14880 return; 14881 } 14882 } 14883 } else if (Target->isIntegerType()) { 14884 Expr::EvalResult Result; 14885 if (!S.isConstantEvaluated() && 14886 E->EvaluateAsFixedPoint(Result, S.Context, 14887 Expr::SE_AllowSideEffects)) { 14888 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 14889 14890 bool Overflowed; 14891 llvm::APSInt IntResult = FXResult.convertToInt( 14892 S.Context.getIntWidth(T), 14893 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 14894 14895 if (Overflowed) { 14896 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14897 S.PDiag(diag::warn_impcast_fixed_point_range) 14898 << FXResult.toString() << T 14899 << E->getSourceRange() 14900 << clang::SourceRange(CC)); 14901 return; 14902 } 14903 } 14904 } 14905 } else if (Target->isUnsaturatedFixedPointType()) { 14906 if (Source->isIntegerType()) { 14907 Expr::EvalResult Result; 14908 if (!S.isConstantEvaluated() && 14909 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 14910 llvm::APSInt Value = Result.Val.getInt(); 14911 14912 bool Overflowed; 14913 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 14914 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 14915 14916 if (Overflowed) { 14917 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14918 S.PDiag(diag::warn_impcast_fixed_point_range) 14919 << toString(Value, /*Radix=*/10) << T 14920 << E->getSourceRange() 14921 << clang::SourceRange(CC)); 14922 return; 14923 } 14924 } 14925 } 14926 } 14927 14928 // If we are casting an integer type to a floating point type without 14929 // initialization-list syntax, we might lose accuracy if the floating 14930 // point type has a narrower significand than the integer type. 14931 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 14932 TargetBT->isFloatingType() && !IsListInit) { 14933 // Determine the number of precision bits in the source integer type. 14934 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 14935 /*Approximate*/ true); 14936 unsigned int SourcePrecision = SourceRange.Width; 14937 14938 // Determine the number of precision bits in the 14939 // target floating point type. 14940 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 14941 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 14942 14943 if (SourcePrecision > 0 && TargetPrecision > 0 && 14944 SourcePrecision > TargetPrecision) { 14945 14946 if (std::optional<llvm::APSInt> SourceInt = 14947 E->getIntegerConstantExpr(S.Context)) { 14948 // If the source integer is a constant, convert it to the target 14949 // floating point type. Issue a warning if the value changes 14950 // during the whole conversion. 
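// For instance (illustrative), `float f = 16777217;` cannot be represented
// exactly in 'float', whose significand holds only 24 bits, so the
// conversion below is inexact and the constant form of the precision
// warning fires.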
14951 llvm::APFloat TargetFloatValue( 14952 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 14953 llvm::APFloat::opStatus ConversionStatus = 14954 TargetFloatValue.convertFromAPInt( 14955 *SourceInt, SourceBT->isSignedInteger(), 14956 llvm::APFloat::rmNearestTiesToEven); 14957 14958 if (ConversionStatus != llvm::APFloat::opOK) { 14959 SmallString<32> PrettySourceValue; 14960 SourceInt->toString(PrettySourceValue, 10); 14961 SmallString<32> PrettyTargetValue; 14962 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 14963 14964 S.DiagRuntimeBehavior( 14965 E->getExprLoc(), E, 14966 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 14967 << PrettySourceValue << PrettyTargetValue << E->getType() << T 14968 << E->getSourceRange() << clang::SourceRange(CC)); 14969 } 14970 } else { 14971 // Otherwise, the implicit conversion may lose precision. 14972 DiagnoseImpCast(S, E, T, CC, 14973 diag::warn_impcast_integer_float_precision); 14974 } 14975 } 14976 } 14977 14978 DiagnoseNullConversion(S, E, T, CC); 14979 14980 S.DiscardMisalignedMemberAddress(Target, E); 14981 14982 if (Target->isBooleanType()) 14983 DiagnoseIntInBoolContext(S, E); 14984 14985 if (!Source->isIntegerType() || !Target->isIntegerType()) 14986 return; 14987 14988 // TODO: remove this early return once the false positives for constant->bool 14989 // in templates, macros, etc, are reduced or removed. 14990 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 14991 return; 14992 14993 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 14994 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 14995 return adornObjCBoolConversionDiagWithTernaryFixit( 14996 S, E, 14997 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 14998 << E->getType()); 14999 } 15000 15001 IntRange SourceTypeRange = 15002 IntRange::forTargetOfCanonicalType(S.Context, Source); 15003 IntRange LikelySourceRange = 15004 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 15005 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 15006 15007 if (LikelySourceRange.Width > TargetRange.Width) { 15008 // If the source is a constant, use a default-on diagnostic. 15009 // TODO: this should happen for bitfield stores, too. 15010 Expr::EvalResult Result; 15011 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 15012 S.isConstantEvaluated())) { 15013 llvm::APSInt Value(32); 15014 Value = Result.Val.getInt(); 15015 15016 if (S.SourceMgr.isInSystemMacro(CC)) 15017 return; 15018 15019 std::string PrettySourceValue = toString(Value, 10); 15020 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 15021 15022 S.DiagRuntimeBehavior( 15023 E->getExprLoc(), E, 15024 S.PDiag(diag::warn_impcast_integer_precision_constant) 15025 << PrettySourceValue << PrettyTargetValue << E->getType() << T 15026 << E->getSourceRange() << SourceRange(CC)); 15027 return; 15028 } 15029 15030 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
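// Illustrative example: assigning a non-constant 64-bit integer to a 32-bit
// variable gets the dedicated 64-to-32 diagnostic below; other narrowing
// integer conversions fall back to the generic precision warning.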
15031 if (S.SourceMgr.isInSystemMacro(CC))
15032 return;
15033
15034 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
15035 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
15036 /* pruneControlFlow */ true);
15037 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
15038 }
15039
15040 if (TargetRange.Width > SourceTypeRange.Width) {
15041 if (auto *UO = dyn_cast<UnaryOperator>(E))
15042 if (UO->getOpcode() == UO_Minus)
15043 if (Source->isUnsignedIntegerType()) {
15044 if (Target->isUnsignedIntegerType())
15045 return DiagnoseImpCast(S, E, T, CC,
15046 diag::warn_impcast_high_order_zero_bits);
15047 if (Target->isSignedIntegerType())
15048 return DiagnoseImpCast(S, E, T, CC,
15049 diag::warn_impcast_nonnegative_result);
15050 }
15051 }
15052
15053 if (TargetRange.Width == LikelySourceRange.Width &&
15054 !TargetRange.NonNegative && LikelySourceRange.NonNegative &&
15055 Source->isSignedIntegerType()) {
15056 // When doing a signed to signed conversion, warn if the positive source
15057 // value requires exactly the width of the target type, which will cause a
15058 // negative value to be stored.
15059
15060 Expr::EvalResult Result;
15061 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
15062 !S.SourceMgr.isInSystemMacro(CC)) {
15063 llvm::APSInt Value = Result.Val.getInt();
15064 if (isSameWidthConstantConversion(S, E, T, CC)) {
15065 std::string PrettySourceValue = toString(Value, 10);
15066 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
15067
15068 S.DiagRuntimeBehavior(
15069 E->getExprLoc(), E,
15070 S.PDiag(diag::warn_impcast_integer_precision_constant)
15071 << PrettySourceValue << PrettyTargetValue << E->getType() << T
15072 << E->getSourceRange() << SourceRange(CC));
15073 return;
15074 }
15075 }
15076
15077 // Fall through for non-constants to give a sign conversion warning.
15078 }
15079
15080 if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) &&
15081 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
15082 (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
15083 LikelySourceRange.Width == TargetRange.Width))) {
15084 if (S.SourceMgr.isInSystemMacro(CC))
15085 return;
15086
15087 if (SourceBT && SourceBT->isInteger() && TargetBT &&
15088 TargetBT->isInteger() &&
15089 Source->isSignedIntegerType() == Target->isSignedIntegerType()) {
15090 return;
15091 }
15092
15093 unsigned DiagID = diag::warn_impcast_integer_sign;
15094
15095 // Traditionally, gcc has warned about this under -Wsign-compare.
15096 // We also want to warn about it in -Wconversion.
15097 // So if -Wconversion is off, use a completely identical diagnostic
15098 // in the sign-compare group.
15099 // The conditional-checking code will turn off -Wsign-compare anyway if the problem is in a condition.
15100 if (ICContext) {
15101 DiagID = diag::warn_impcast_integer_sign_conditional;
15102 *ICContext = true;
15103 }
15104
15105 return DiagnoseImpCast(S, E, T, CC, DiagID);
15106 }
15107
15108 // Diagnose conversions between different enumeration types.
15109 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
15110 // type, to give us better diagnostics.
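// Illustrative example: given `enum Fruit { Apple };` and `enum Pet { Dog };`,
// the C initialization `enum Pet p = Apple;` is diagnosed as an implicit
// conversion between different enumeration types.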
15111 QualType SourceType = E->getType(); 15112 if (!S.getLangOpts().CPlusPlus) { 15113 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 15114 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 15115 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 15116 SourceType = S.Context.getTypeDeclType(Enum); 15117 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 15118 } 15119 } 15120 15121 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 15122 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 15123 if (SourceEnum->getDecl()->hasNameForLinkage() && 15124 TargetEnum->getDecl()->hasNameForLinkage() && 15125 SourceEnum != TargetEnum) { 15126 if (S.SourceMgr.isInSystemMacro(CC)) 15127 return; 15128 15129 return DiagnoseImpCast(S, E, SourceType, T, CC, 15130 diag::warn_impcast_different_enum_types); 15131 } 15132 } 15133 15134 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 15135 SourceLocation CC, QualType T); 15136 15137 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 15138 SourceLocation CC, bool &ICContext) { 15139 E = E->IgnoreParenImpCasts(); 15140 // Diagnose incomplete type for second or third operand in C. 15141 if (!S.getLangOpts().CPlusPlus && E->getType()->isRecordType()) 15142 S.RequireCompleteExprType(E, diag::err_incomplete_type); 15143 15144 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 15145 return CheckConditionalOperator(S, CO, CC, T); 15146 15147 AnalyzeImplicitConversions(S, E, CC); 15148 if (E->getType() != T) 15149 return CheckImplicitConversion(S, E, T, CC, &ICContext); 15150 } 15151 15152 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 15153 SourceLocation CC, QualType T) { 15154 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 15155 15156 Expr *TrueExpr = E->getTrueExpr(); 15157 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 15158 TrueExpr = BCO->getCommon(); 15159 15160 bool Suspicious = false; 15161 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 15162 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 15163 15164 if (T->isBooleanType()) 15165 DiagnoseIntInBoolContext(S, E); 15166 15167 // If -Wconversion would have warned about either of the candidates 15168 // for a signedness conversion to the context type... 15169 if (!Suspicious) return; 15170 15171 // ...but it's currently ignored... 15172 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 15173 return; 15174 15175 // ...then check whether it would have warned about either of the 15176 // candidates for a signedness conversion to the condition type. 15177 if (E->getType() == T) return; 15178 15179 Suspicious = false; 15180 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 15181 E->getType(), CC, &Suspicious); 15182 if (!Suspicious) 15183 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 15184 E->getType(), CC, &Suspicious); 15185 } 15186 15187 /// Check conversion of given expression to boolean. 15188 /// Input argument E is a logical expression. 
15189 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 15190 if (S.getLangOpts().Bool) 15191 return; 15192 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 15193 return; 15194 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 15195 } 15196 15197 namespace { 15198 struct AnalyzeImplicitConversionsWorkItem { 15199 Expr *E; 15200 SourceLocation CC; 15201 bool IsListInit; 15202 }; 15203 } 15204 15205 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 15206 /// that should be visited are added to WorkList. 15207 static void AnalyzeImplicitConversions( 15208 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 15209 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 15210 Expr *OrigE = Item.E; 15211 SourceLocation CC = Item.CC; 15212 15213 QualType T = OrigE->getType(); 15214 Expr *E = OrigE->IgnoreParenImpCasts(); 15215 15216 // Propagate whether we are in a C++ list initialization expression. 15217 // If so, we do not issue warnings for implicit int-float conversion 15218 // precision loss, because C++11 narrowing already handles it. 15219 bool IsListInit = Item.IsListInit || 15220 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 15221 15222 if (E->isTypeDependent() || E->isValueDependent()) 15223 return; 15224 15225 Expr *SourceExpr = E; 15226 // Examine, but don't traverse into the source expression of an 15227 // OpaqueValueExpr, since it may have multiple parents and we don't want to 15228 // emit duplicate diagnostics. Its fine to examine the form or attempt to 15229 // evaluate it in the context of checking the specific conversion to T though. 15230 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 15231 if (auto *Src = OVE->getSourceExpr()) 15232 SourceExpr = Src; 15233 15234 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 15235 if (UO->getOpcode() == UO_Not && 15236 UO->getSubExpr()->isKnownToHaveBooleanValue()) 15237 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 15238 << OrigE->getSourceRange() << T->isBooleanType() 15239 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 15240 15241 if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr)) 15242 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) && 15243 BO->getLHS()->isKnownToHaveBooleanValue() && 15244 BO->getRHS()->isKnownToHaveBooleanValue() && 15245 BO->getLHS()->HasSideEffects(S.Context) && 15246 BO->getRHS()->HasSideEffects(S.Context)) { 15247 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical) 15248 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange() 15249 << FixItHint::CreateReplacement( 15250 BO->getOperatorLoc(), 15251 (BO->getOpcode() == BO_And ? "&&" : "||")); 15252 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int); 15253 } 15254 15255 // For conditional operators, we analyze the arguments as if they 15256 // were being fed directly into the output. 15257 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 15258 CheckConditionalOperator(S, CO, CC, T); 15259 return; 15260 } 15261 15262 // Check implicit argument conversions for function calls. 15263 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 15264 CheckImplicitArgumentConversions(S, Call, CC); 15265 15266 // Go ahead and check any implicit conversions we might have skipped. 15267 // The non-canonical typecheck is just an optimization; 15268 // CheckImplicitConversion will filter out dead implicit conversions. 
15269 if (SourceExpr->getType() != T) 15270 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 15271 15272 // Now continue drilling into this expression. 15273 15274 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 15275 // The bound subexpressions in a PseudoObjectExpr are not reachable 15276 // as transitive children. 15277 // FIXME: Use a more uniform representation for this. 15278 for (auto *SE : POE->semantics()) 15279 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 15280 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 15281 } 15282 15283 // Skip past explicit casts. 15284 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 15285 E = CE->getSubExpr()->IgnoreParenImpCasts(); 15286 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 15287 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 15288 WorkList.push_back({E, CC, IsListInit}); 15289 return; 15290 } 15291 15292 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 15293 // Do a somewhat different check with comparison operators. 15294 if (BO->isComparisonOp()) 15295 return AnalyzeComparison(S, BO); 15296 15297 // And with simple assignments. 15298 if (BO->getOpcode() == BO_Assign) 15299 return AnalyzeAssignment(S, BO); 15300 // And with compound assignments. 15301 if (BO->isAssignmentOp()) 15302 return AnalyzeCompoundAssignment(S, BO); 15303 } 15304 15305 // These break the otherwise-useful invariant below. Fortunately, 15306 // we don't really need to recurse into them, because any internal 15307 // expressions should have been analyzed already when they were 15308 // built into statements. 15309 if (isa<StmtExpr>(E)) return; 15310 15311 // Don't descend into unevaluated contexts. 15312 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 15313 15314 // Now just recurse over the expression's children. 15315 CC = E->getExprLoc(); 15316 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 15317 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 15318 for (Stmt *SubStmt : E->children()) { 15319 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 15320 if (!ChildExpr) 15321 continue; 15322 15323 if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E)) 15324 if (ChildExpr == CSE->getOperand()) 15325 // Do not recurse over a CoroutineSuspendExpr's operand. 15326 // The operand is also a subexpression of getCommonExpr(), and 15327 // recursing into it directly would produce duplicate diagnostics. 15328 continue; 15329 15330 if (IsLogicalAndOperator && 15331 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 15332 // Ignore checking string literals that are in logical and operators. 15333 // This is a common pattern for asserts. 
15334 continue;
15335 WorkList.push_back({ChildExpr, CC, IsListInit});
15336 }
15337
15338 if (BO && BO->isLogicalOp()) {
15339 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
15340 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
15341 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
15342
15343 SubExpr = BO->getRHS()->IgnoreParenImpCasts();
15344 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
15345 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
15346 }
15347
15348 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
15349 if (U->getOpcode() == UO_LNot) {
15350 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
15351 } else if (U->getOpcode() != UO_AddrOf) {
15352 if (U->getSubExpr()->getType()->isAtomicType())
15353 S.Diag(U->getSubExpr()->getBeginLoc(),
15354 diag::warn_atomic_implicit_seq_cst);
15355 }
15356 }
15357 }
15358
15359 /// AnalyzeImplicitConversions - Find and report any interesting
15360 /// implicit conversions in the given expression. There are a couple
15361 /// of competing diagnostics here, -Wconversion and -Wsign-compare.
15362 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
15363 bool IsListInit/*= false*/) {
15364 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
15365 WorkList.push_back({OrigE, CC, IsListInit});
15366 while (!WorkList.empty())
15367 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
15368 }
15369
15370 /// Diagnose integer type and any valid implicit conversion to it.
15371 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
15372 // Taking into account implicit conversions,
15373 // allow any integer.
15374 if (!E->getType()->isIntegerType()) {
15375 S.Diag(E->getBeginLoc(),
15376 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
15377 return true;
15378 }
15379 // Potentially emit standard warnings for implicit conversions if enabled
15380 // using -Wconversion.
15381 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
15382 return false;
15383 }
15384
15385 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
15386 // Returns true when emitting a warning about taking the address of a reference.
15387 static bool CheckForReference(Sema &SemaRef, const Expr *E,
15388 const PartialDiagnostic &PD) {
15389 E = E->IgnoreParenImpCasts();
15390
15391 const FunctionDecl *FD = nullptr;
15392
15393 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
15394 if (!DRE->getDecl()->getType()->isReferenceType())
15395 return false;
15396 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
15397 if (!M->getMemberDecl()->getType()->isReferenceType())
15398 return false;
15399 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
15400 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
15401 return false;
15402 FD = Call->getDirectCallee();
15403 } else {
15404 return false;
15405 }
15406
15407 SemaRef.Diag(E->getExprLoc(), PD);
15408
15409 // If possible, point to location of function.
15410 if (FD) {
15411 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
15412 }
15413
15414 return true;
15415 }
15416
15417 // Returns true if the SourceLocation is expanded from any macro body.
15418 // Returns false if the SourceLocation is invalid, is not in a macro
15419 // expansion, or is expanded from a top-level macro argument.
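// For example (illustrative), with `#define IS_SET(x) ((x) != 0)`, a location
// inside the macro body returns true, whereas the location of an argument
// spelled at the call site of a top-level macro expansion returns false.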
15420 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 15421 if (Loc.isInvalid()) 15422 return false; 15423 15424 while (Loc.isMacroID()) { 15425 if (SM.isMacroBodyExpansion(Loc)) 15426 return true; 15427 Loc = SM.getImmediateMacroCallerLoc(Loc); 15428 } 15429 15430 return false; 15431 } 15432 15433 /// Diagnose pointers that are always non-null. 15434 /// \param E the expression containing the pointer 15435 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is 15436 /// compared to a null pointer 15437 /// \param IsEqual True when the comparison is equal to a null pointer 15438 /// \param Range Extra SourceRange to highlight in the diagnostic 15439 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, 15440 Expr::NullPointerConstantKind NullKind, 15441 bool IsEqual, SourceRange Range) { 15442 if (!E) 15443 return; 15444 15445 // Don't warn inside macros. 15446 if (E->getExprLoc().isMacroID()) { 15447 const SourceManager &SM = getSourceManager(); 15448 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 15449 IsInAnyMacroBody(SM, Range.getBegin())) 15450 return; 15451 } 15452 E = E->IgnoreImpCasts(); 15453 15454 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 15455 15456 if (isa<CXXThisExpr>(E)) { 15457 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 15458 : diag::warn_this_bool_conversion; 15459 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 15460 return; 15461 } 15462 15463 bool IsAddressOf = false; 15464 15465 if (auto *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) { 15466 if (UO->getOpcode() != UO_AddrOf) 15467 return; 15468 IsAddressOf = true; 15469 E = UO->getSubExpr(); 15470 } 15471 15472 if (IsAddressOf) { 15473 unsigned DiagID = IsCompare 15474 ? diag::warn_address_of_reference_null_compare 15475 : diag::warn_address_of_reference_bool_conversion; 15476 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 15477 << IsEqual; 15478 if (CheckForReference(*this, E, PD)) { 15479 return; 15480 } 15481 } 15482 15483 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 15484 bool IsParam = isa<NonNullAttr>(NonnullAttr); 15485 std::string Str; 15486 llvm::raw_string_ostream S(Str); 15487 E->printPretty(S, nullptr, getPrintingPolicy()); 15488 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 15489 : diag::warn_cast_nonnull_to_bool; 15490 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 15491 << E->getSourceRange() << Range << IsEqual; 15492 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 15493 }; 15494 15495 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 15496 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 15497 if (auto *Callee = Call->getDirectCallee()) { 15498 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 15499 ComplainAboutNonnullParamOrCall(A); 15500 return; 15501 } 15502 } 15503 } 15504 15505 // Expect to find a single Decl. Skip anything more complicated. 15506 ValueDecl *D = nullptr; 15507 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 15508 D = R->getDecl(); 15509 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 15510 D = M->getMemberDecl(); 15511 } 15512 15513 // Weak Decls can be null. 
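// (A declaration marked __attribute__((weak)) may legitimately resolve to a
// null address at runtime, so converting or comparing it is not "always
// true" and is not diagnosed.)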
15514 if (!D || D->isWeak())
15515 return;
15516
15517 // Check for parameter decl with nonnull attribute
15518 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
15519 if (getCurFunction() &&
15520 !getCurFunction()->ModifiedNonNullParams.count(PV)) {
15521 if (const Attr *A = PV->getAttr<NonNullAttr>()) {
15522 ComplainAboutNonnullParamOrCall(A);
15523 return;
15524 }
15525
15526 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
15527 // Skip function template not specialized yet.
15528 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
15529 return;
15530 auto ParamIter = llvm::find(FD->parameters(), PV);
15531 assert(ParamIter != FD->param_end());
15532 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
15533
15534 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
15535 if (!NonNull->args_size()) {
15536 ComplainAboutNonnullParamOrCall(NonNull);
15537 return;
15538 }
15539
15540 for (const ParamIdx &ArgNo : NonNull->args()) {
15541 if (ArgNo.getASTIndex() == ParamNo) {
15542 ComplainAboutNonnullParamOrCall(NonNull);
15543 return;
15544 }
15545 }
15546 }
15547 }
15548 }
15549 }
15550
15551 QualType T = D->getType();
15552 const bool IsArray = T->isArrayType();
15553 const bool IsFunction = T->isFunctionType();
15554
15555 // Address of function is used to silence the function warning.
15556 if (IsAddressOf && IsFunction) {
15557 return;
15558 }
15559
15560 // Found nothing.
15561 if (!IsAddressOf && !IsFunction && !IsArray)
15562 return;
15563
15564 // Pretty print the expression for the diagnostic.
15565 std::string Str;
15566 llvm::raw_string_ostream S(Str);
15567 E->printPretty(S, nullptr, getPrintingPolicy());
15568
15569 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
15570 : diag::warn_impcast_pointer_to_bool;
15571 enum {
15572 AddressOf,
15573 FunctionPointer,
15574 ArrayPointer
15575 } DiagType;
15576 if (IsAddressOf)
15577 DiagType = AddressOf;
15578 else if (IsFunction)
15579 DiagType = FunctionPointer;
15580 else if (IsArray)
15581 DiagType = ArrayPointer;
15582 else
15583 llvm_unreachable("Could not determine diagnostic.");
15584 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
15585 << Range << IsEqual;
15586
15587 if (!IsFunction)
15588 return;
15589
15590 // Suggest '&' to silence the function warning.
15591 Diag(E->getExprLoc(), diag::note_function_warning_silence)
15592 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
15593
15594 // Check to see if '()' fixit should be emitted.
15595 QualType ReturnType;
15596 UnresolvedSet<4> NonTemplateOverloads;
15597 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
15598 if (ReturnType.isNull())
15599 return;
15600
15601 if (IsCompare) {
15602 // There are two cases here. If there is a null constant, only suggest the
15603 // fixit for a pointer return type. If the null is 0, then suggest it if the
15604 // return type is a pointer or an integer type.
15605 if (!ReturnType->isPointerType()) {
15606 if (NullKind == Expr::NPCK_ZeroExpression ||
15607 NullKind == Expr::NPCK_ZeroLiteral) {
15608 if (!ReturnType->isIntegerType())
15609 return;
15610 } else {
15611 return;
15612 }
15613 }
15614 } else { // !IsCompare
15615 // For function to bool, only suggest if the function pointer has bool
15616 // return type.
15617 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
15618 return;
15619 }
15620 Diag(E->getExprLoc(), diag::note_function_to_function_call)
15621 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
15622 }
15623
15624 /// Diagnoses "dangerous" implicit conversions within the given
15625 /// expression (which is a full expression). Implements -Wconversion
15626 /// and -Wsign-compare.
15627 ///
15628 /// \param CC the "context" location of the implicit conversion, i.e.
15629 /// the location of the syntactic entity requiring the implicit
15630 /// conversion
15631 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
15632 // Don't diagnose in unevaluated contexts.
15633 if (isUnevaluatedContext())
15634 return;
15635
15636 // Don't diagnose for value- or type-dependent expressions.
15637 if (E->isTypeDependent() || E->isValueDependent())
15638 return;
15639
15640 // Check for array bounds violations in cases where the check isn't triggered
15641 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
15642 // ArraySubscriptExpr is on the RHS of a variable initialization.
15643 CheckArrayAccess(E);
15644
15645 // This is not the right CC for (e.g.) a variable initialization.
15646 AnalyzeImplicitConversions(*this, E, CC);
15647 }
15648
15649 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
15650 /// Input argument E is a logical expression.
15651 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
15652 ::CheckBoolLikeConversion(*this, E, CC);
15653 }
15654
15655 /// Diagnose when an expression is an integer constant expression and its
15656 /// evaluation results in integer overflow.
15657 void Sema::CheckForIntOverflow (const Expr *E) {
15658 // Use a work list to deal with nested struct initializers.
15659 SmallVector<const Expr *, 2> Exprs(1, E);
15660
15661 do {
15662 const Expr *OriginalE = Exprs.pop_back_val();
15663 const Expr *E = OriginalE->IgnoreParenCasts();
15664
15665 if (isa<BinaryOperator, UnaryOperator>(E)) {
15666 E->EvaluateForOverflow(Context);
15667 continue;
15668 }
15669
15670 if (const auto *InitList = dyn_cast<InitListExpr>(OriginalE))
15671 Exprs.append(InitList->inits().begin(), InitList->inits().end());
15672 else if (isa<ObjCBoxedExpr>(OriginalE))
15673 E->EvaluateForOverflow(Context);
15674 else if (const auto *Call = dyn_cast<CallExpr>(E))
15675 Exprs.append(Call->arg_begin(), Call->arg_end());
15676 else if (const auto *Message = dyn_cast<ObjCMessageExpr>(E))
15677 Exprs.append(Message->arg_begin(), Message->arg_end());
15678 else if (const auto *Construct = dyn_cast<CXXConstructExpr>(E))
15679 Exprs.append(Construct->arg_begin(), Construct->arg_end());
15680 else if (const auto *Temporary = dyn_cast<CXXBindTemporaryExpr>(E))
15681 Exprs.push_back(Temporary->getSubExpr());
15682 else if (const auto *Array = dyn_cast<ArraySubscriptExpr>(E))
15683 Exprs.push_back(Array->getIdx());
15684 else if (const auto *Compound = dyn_cast<CompoundLiteralExpr>(E))
15685 Exprs.push_back(Compound->getInitializer());
15686 else if (const auto *New = dyn_cast<CXXNewExpr>(E);
15687 New && New->isArray()) {
15688 if (auto ArraySize = New->getArraySize())
15689 Exprs.push_back(*ArraySize);
15690 }
15691 } while (!Exprs.empty());
15692 }
15693
15694 namespace {
15695
15696 /// Visitor for expressions which looks for unsequenced operations on the
15697 /// same object.
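/// For example (illustrative), `i = i++ + 1;` modifies `i` twice without
/// sequencing before C++17 and is reported under -Wunsequenced.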
15698 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 15699 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 15700 15701 /// A tree of sequenced regions within an expression. Two regions are 15702 /// unsequenced if one is an ancestor or a descendent of the other. When we 15703 /// finish processing an expression with sequencing, such as a comma 15704 /// expression, we fold its tree nodes into its parent, since they are 15705 /// unsequenced with respect to nodes we will visit later. 15706 class SequenceTree { 15707 struct Value { 15708 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 15709 unsigned Parent : 31; 15710 unsigned Merged : 1; 15711 }; 15712 SmallVector<Value, 8> Values; 15713 15714 public: 15715 /// A region within an expression which may be sequenced with respect 15716 /// to some other region. 15717 class Seq { 15718 friend class SequenceTree; 15719 15720 unsigned Index; 15721 15722 explicit Seq(unsigned N) : Index(N) {} 15723 15724 public: 15725 Seq() : Index(0) {} 15726 }; 15727 15728 SequenceTree() { Values.push_back(Value(0)); } 15729 Seq root() const { return Seq(0); } 15730 15731 /// Create a new sequence of operations, which is an unsequenced 15732 /// subset of \p Parent. This sequence of operations is sequenced with 15733 /// respect to other children of \p Parent. 15734 Seq allocate(Seq Parent) { 15735 Values.push_back(Value(Parent.Index)); 15736 return Seq(Values.size() - 1); 15737 } 15738 15739 /// Merge a sequence of operations into its parent. 15740 void merge(Seq S) { 15741 Values[S.Index].Merged = true; 15742 } 15743 15744 /// Determine whether two operations are unsequenced. This operation 15745 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 15746 /// should have been merged into its parent as appropriate. 15747 bool isUnsequenced(Seq Cur, Seq Old) { 15748 unsigned C = representative(Cur.Index); 15749 unsigned Target = representative(Old.Index); 15750 while (C >= Target) { 15751 if (C == Target) 15752 return true; 15753 C = Values[C].Parent; 15754 } 15755 return false; 15756 } 15757 15758 private: 15759 /// Pick a representative for a sequence. 15760 unsigned representative(unsigned K) { 15761 if (Values[K].Merged) 15762 // Perform path compression as we go. 15763 return Values[K].Parent = representative(Values[K].Parent); 15764 return K; 15765 } 15766 }; 15767 15768 /// An object for which we can track unsequenced uses. 15769 using Object = const NamedDecl *; 15770 15771 /// Different flavors of object usage which we track. We only track the 15772 /// least-sequenced usage of each kind. 15773 enum UsageKind { 15774 /// A read of an object. Multiple unsequenced reads are OK. 15775 UK_Use, 15776 15777 /// A modification of an object which is sequenced before the value 15778 /// computation of the expression, such as ++n in C++. 15779 UK_ModAsValue, 15780 15781 /// A modification of an object which is not sequenced before the value 15782 /// computation of the expression, such as n++. 15783 UK_ModAsSideEffect, 15784 15785 UK_Count = UK_ModAsSideEffect + 1 15786 }; 15787 15788 /// Bundle together a sequencing region and the expression corresponding 15789 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 
15790 struct Usage { 15791 const Expr *UsageExpr; 15792 SequenceTree::Seq Seq; 15793 15794 Usage() : UsageExpr(nullptr) {} 15795 }; 15796 15797 struct UsageInfo { 15798 Usage Uses[UK_Count]; 15799 15800 /// Have we issued a diagnostic for this object already? 15801 bool Diagnosed; 15802 15803 UsageInfo() : Diagnosed(false) {} 15804 }; 15805 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 15806 15807 Sema &SemaRef; 15808 15809 /// Sequenced regions within the expression. 15810 SequenceTree Tree; 15811 15812 /// Declaration modifications and references which we have seen. 15813 UsageInfoMap UsageMap; 15814 15815 /// The region we are currently within. 15816 SequenceTree::Seq Region; 15817 15818 /// Filled in with declarations which were modified as a side-effect 15819 /// (that is, post-increment operations). 15820 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 15821 15822 /// Expressions to check later. We defer checking these to reduce 15823 /// stack usage. 15824 SmallVectorImpl<const Expr *> &WorkList; 15825 15826 /// RAII object wrapping the visitation of a sequenced subexpression of an 15827 /// expression. At the end of this process, the side-effects of the evaluation 15828 /// become sequenced with respect to the value computation of the result, so 15829 /// we downgrade any UK_ModAsSideEffect within the evaluation to 15830 /// UK_ModAsValue. 15831 struct SequencedSubexpression { 15832 SequencedSubexpression(SequenceChecker &Self) 15833 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 15834 Self.ModAsSideEffect = &ModAsSideEffect; 15835 } 15836 15837 ~SequencedSubexpression() { 15838 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 15839 // Add a new usage with usage kind UK_ModAsValue, and then restore 15840 // the previous usage with UK_ModAsSideEffect (thus clearing it if 15841 // the previous one was empty). 15842 UsageInfo &UI = Self.UsageMap[M.first]; 15843 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 15844 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 15845 SideEffectUsage = M.second; 15846 } 15847 Self.ModAsSideEffect = OldModAsSideEffect; 15848 } 15849 15850 SequenceChecker &Self; 15851 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 15852 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 15853 }; 15854 15855 /// RAII object wrapping the visitation of a subexpression which we might 15856 /// choose to evaluate as a constant. If any subexpression is evaluated and 15857 /// found to be non-constant, this allows us to suppress the evaluation of 15858 /// the outer expression. 15859 class EvaluationTracker { 15860 public: 15861 EvaluationTracker(SequenceChecker &Self) 15862 : Self(Self), Prev(Self.EvalTracker) { 15863 Self.EvalTracker = this; 15864 } 15865 15866 ~EvaluationTracker() { 15867 Self.EvalTracker = Prev; 15868 if (Prev) 15869 Prev->EvalOK &= EvalOK; 15870 } 15871 15872 bool evaluate(const Expr *E, bool &Result) { 15873 if (!EvalOK || E->isValueDependent()) 15874 return false; 15875 EvalOK = E->EvaluateAsBooleanCondition( 15876 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 15877 return EvalOK; 15878 } 15879 15880 private: 15881 SequenceChecker &Self; 15882 EvaluationTracker *Prev; 15883 bool EvalOK = true; 15884 } *EvalTracker = nullptr; 15885 15886 /// Find the object which is produced by the specified expression, 15887 /// if any. 
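/// For example (illustrative), for a DeclRefExpr `n`, or for `++n` and
/// `n = x` when \p Mod is true, the tracked object is the declaration of `n`.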
15888 Object getObject(const Expr *E, bool Mod) const {
15889 E = E->IgnoreParenCasts();
15890 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
15891 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
15892 return getObject(UO->getSubExpr(), Mod);
15893 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
15894 if (BO->getOpcode() == BO_Comma)
15895 return getObject(BO->getRHS(), Mod);
15896 if (Mod && BO->isAssignmentOp())
15897 return getObject(BO->getLHS(), Mod);
15898 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
15899 // FIXME: Check for more interesting cases, like "x.n = ++x.n".
15900 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
15901 return ME->getMemberDecl();
15902 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
15903 // FIXME: If this is a reference, map through to its value.
15904 return DRE->getDecl();
15905 return nullptr;
15906 }
15907
15908 /// Note that an object \p O was modified or used by an expression
15909 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
15910 /// the object \p O as obtained via the \p UsageMap.
15911 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
15912 // Get the old usage for the given object and usage kind.
15913 Usage &U = UI.Uses[UK];
15914 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
15915 // If we have a modification as side effect and are in a sequenced
15916 // subexpression, save the old Usage so that we can restore it later
15917 // in SequencedSubexpression::~SequencedSubexpression.
15918 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
15919 ModAsSideEffect->push_back(std::make_pair(O, U));
15920 // Then record the new usage with the current sequencing region.
15921 U.UsageExpr = UsageExpr;
15922 U.Seq = Region;
15923 }
15924 }
15925
15926 /// Check whether a modification or use of an object \p O in an expression
15927 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
15928 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
15929 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
15930 /// usage and false when we are checking for a mod-use unsequenced usage.
15931 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
15932 UsageKind OtherKind, bool IsModMod) {
15933 if (UI.Diagnosed)
15934 return;
15935
15936 const Usage &U = UI.Uses[OtherKind];
15937 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
15938 return;
15939
15940 const Expr *Mod = U.UsageExpr;
15941 const Expr *ModOrUse = UsageExpr;
15942 if (OtherKind == UK_Use)
15943 std::swap(Mod, ModOrUse);
15944
15945 SemaRef.DiagRuntimeBehavior(
15946 Mod->getExprLoc(), {Mod, ModOrUse},
15947 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
15948 : diag::warn_unsequenced_mod_use)
15949 << O << SourceRange(ModOrUse->getExprLoc()));
15950 UI.Diagnosed = true;
15951 }
15952
15953 // A note on note{Pre, Post}{Use, Mod}:
15954 //
15955 // (It helps to follow the algorithm with an expression such as
15956 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
15957 // operations before C++17 and both are well-defined in C++17.)
15958 //
15959 // When visiting a node which uses/modifies an object we first call notePreUse
15960 // or notePreMod before visiting its sub-expression(s).
At this point the
15961 // children of the current node have not yet been visited and so the eventual
15962 // uses/modifications resulting from the children of the current node have not
15963 // been recorded yet.
15964 //
15965 // We then visit the children of the current node. After that notePostUse or
15966 // notePostMod is called. These will 1) detect an unsequenced modification
15967 // as side effect (as in "k++ + k") and 2) add a new usage with the
15968 // appropriate usage kind.
15969 //
15970 // We also have to be careful because some operators also sequence
15971 // modifications made as side effects (for example: || or ,). To account for
15972 // this we wrap the visitation of such a sub-expression (for example: the LHS
15973 // of || or ,) with SequencedSubexpression. SequencedSubexpression is an RAII
15974 // object which records usages that are modifications as side effects, and
15975 // then downgrades them (or more accurately restores the previous usage which
15976 // was a modification as side effect) when exiting the scope of the sequenced
15977 // subexpression.
15978
15979 void notePreUse(Object O, const Expr *UseExpr) {
15980 UsageInfo &UI = UsageMap[O];
15981 // Uses conflict with other modifications.
15982 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
15983 }
15984
15985 void notePostUse(Object O, const Expr *UseExpr) {
15986 UsageInfo &UI = UsageMap[O];
15987 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
15988 /*IsModMod=*/false);
15989 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
15990 }
15991
15992 void notePreMod(Object O, const Expr *ModExpr) {
15993 UsageInfo &UI = UsageMap[O];
15994 // Modifications conflict with other modifications and with uses.
15995 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
15996 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
15997 }
15998
15999 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
16000 UsageInfo &UI = UsageMap[O];
16001 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
16002 /*IsModMod=*/true);
16003 addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
16004 }
16005
16006 public:
16007 SequenceChecker(Sema &S, const Expr *E,
16008 SmallVectorImpl<const Expr *> &WorkList)
16009 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
16010 Visit(E);
16011 // Silence a -Wunused-private-field since WorkList is now unused.
16012 // TODO: Evaluate if it can be used, and if not remove it.
16013 (void)this->WorkList;
16014 }
16015
16016 void VisitStmt(const Stmt *S) {
16017 // Skip all statements which aren't expressions for now.
16018 }
16019
16020 void VisitExpr(const Expr *E) {
16021 // By default, just recurse to evaluated subexpressions.
16022 Base::VisitStmt(E);
16023 }
16024
16025 void VisitCoroutineSuspendExpr(const CoroutineSuspendExpr *CSE) {
16026 for (auto *Sub : CSE->children()) {
16027 const Expr *ChildExpr = dyn_cast_or_null<Expr>(Sub);
16028 if (!ChildExpr)
16029 continue;
16030
16031 if (ChildExpr == CSE->getOperand())
16032 // Do not recurse over a CoroutineSuspendExpr's operand.
16033 // The operand is also a subexpression of getCommonExpr(), and
16034 // recursing into it directly could confuse object management
16035 // for the sake of sequence tracking.
16036 continue; 16037 16038 Visit(Sub); 16039 } 16040 } 16041 16042 void VisitCastExpr(const CastExpr *E) { 16043 Object O = Object(); 16044 if (E->getCastKind() == CK_LValueToRValue) 16045 O = getObject(E->getSubExpr(), false); 16046 16047 if (O) 16048 notePreUse(O, E); 16049 VisitExpr(E); 16050 if (O) 16051 notePostUse(O, E); 16052 } 16053 16054 void VisitSequencedExpressions(const Expr *SequencedBefore, 16055 const Expr *SequencedAfter) { 16056 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 16057 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 16058 SequenceTree::Seq OldRegion = Region; 16059 16060 { 16061 SequencedSubexpression SeqBefore(*this); 16062 Region = BeforeRegion; 16063 Visit(SequencedBefore); 16064 } 16065 16066 Region = AfterRegion; 16067 Visit(SequencedAfter); 16068 16069 Region = OldRegion; 16070 16071 Tree.merge(BeforeRegion); 16072 Tree.merge(AfterRegion); 16073 } 16074 16075 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 16076 // C++17 [expr.sub]p1: 16077 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 16078 // expression E1 is sequenced before the expression E2. 16079 if (SemaRef.getLangOpts().CPlusPlus17) 16080 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 16081 else { 16082 Visit(ASE->getLHS()); 16083 Visit(ASE->getRHS()); 16084 } 16085 } 16086 16087 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 16088 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 16089 void VisitBinPtrMem(const BinaryOperator *BO) { 16090 // C++17 [expr.mptr.oper]p4: 16091 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 16092 // the expression E1 is sequenced before the expression E2. 16093 if (SemaRef.getLangOpts().CPlusPlus17) 16094 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 16095 else { 16096 Visit(BO->getLHS()); 16097 Visit(BO->getRHS()); 16098 } 16099 } 16100 16101 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 16102 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 16103 void VisitBinShlShr(const BinaryOperator *BO) { 16104 // C++17 [expr.shift]p4: 16105 // The expression E1 is sequenced before the expression E2. 16106 if (SemaRef.getLangOpts().CPlusPlus17) 16107 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 16108 else { 16109 Visit(BO->getLHS()); 16110 Visit(BO->getRHS()); 16111 } 16112 } 16113 16114 void VisitBinComma(const BinaryOperator *BO) { 16115 // C++11 [expr.comma]p1: 16116 // Every value computation and side effect associated with the left 16117 // expression is sequenced before every value computation and side 16118 // effect associated with the right expression. 16119 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 16120 } 16121 16122 void VisitBinAssign(const BinaryOperator *BO) { 16123 SequenceTree::Seq RHSRegion; 16124 SequenceTree::Seq LHSRegion; 16125 if (SemaRef.getLangOpts().CPlusPlus17) { 16126 RHSRegion = Tree.allocate(Region); 16127 LHSRegion = Tree.allocate(Region); 16128 } else { 16129 RHSRegion = Region; 16130 LHSRegion = Region; 16131 } 16132 SequenceTree::Seq OldRegion = Region; 16133 16134 // C++11 [expr.ass]p1: 16135 // [...] the assignment is sequenced after the value computation 16136 // of the right and left operands, [...] 16137 // 16138 // so check it before inspecting the operands and update the 16139 // map afterwards. 
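// Illustrative examples: `i = i + 1` is fine (the read of `i` on the RHS is
// sequenced before the assignment's side effect), whereas `i = i++` contains
// unsequenced modifications of `i` before C++17 and is diagnosed.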
16140 Object O = getObject(BO->getLHS(), /*Mod=*/true); 16141 if (O) 16142 notePreMod(O, BO); 16143 16144 if (SemaRef.getLangOpts().CPlusPlus17) { 16145 // C++17 [expr.ass]p1: 16146 // [...] The right operand is sequenced before the left operand. [...] 16147 { 16148 SequencedSubexpression SeqBefore(*this); 16149 Region = RHSRegion; 16150 Visit(BO->getRHS()); 16151 } 16152 16153 Region = LHSRegion; 16154 Visit(BO->getLHS()); 16155 16156 if (O && isa<CompoundAssignOperator>(BO)) 16157 notePostUse(O, BO); 16158 16159 } else { 16160 // C++11 does not specify any sequencing between the LHS and RHS. 16161 Region = LHSRegion; 16162 Visit(BO->getLHS()); 16163 16164 if (O && isa<CompoundAssignOperator>(BO)) 16165 notePostUse(O, BO); 16166 16167 Region = RHSRegion; 16168 Visit(BO->getRHS()); 16169 } 16170 16171 // C++11 [expr.ass]p1: 16172 // the assignment is sequenced [...] before the value computation of the 16173 // assignment expression. 16174 // C11 6.5.16/3 has no such rule. 16175 Region = OldRegion; 16176 if (O) 16177 notePostMod(O, BO, 16178 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 16179 : UK_ModAsSideEffect); 16180 if (SemaRef.getLangOpts().CPlusPlus17) { 16181 Tree.merge(RHSRegion); 16182 Tree.merge(LHSRegion); 16183 } 16184 } 16185 16186 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 16187 VisitBinAssign(CAO); 16188 } 16189 16190 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 16191 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 16192 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 16193 Object O = getObject(UO->getSubExpr(), true); 16194 if (!O) 16195 return VisitExpr(UO); 16196 16197 notePreMod(O, UO); 16198 Visit(UO->getSubExpr()); 16199 // C++11 [expr.pre.incr]p1: 16200 // the expression ++x is equivalent to x+=1 16201 notePostMod(O, UO, 16202 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 16203 : UK_ModAsSideEffect); 16204 } 16205 16206 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 16207 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 16208 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 16209 Object O = getObject(UO->getSubExpr(), true); 16210 if (!O) 16211 return VisitExpr(UO); 16212 16213 notePreMod(O, UO); 16214 Visit(UO->getSubExpr()); 16215 notePostMod(O, UO, UK_ModAsSideEffect); 16216 } 16217 16218 void VisitBinLOr(const BinaryOperator *BO) { 16219 // C++11 [expr.log.or]p2: 16220 // If the second expression is evaluated, every value computation and 16221 // side effect associated with the first expression is sequenced before 16222 // every value computation and side effect associated with the 16223 // second expression. 16224 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 16225 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 16226 SequenceTree::Seq OldRegion = Region; 16227 16228 EvaluationTracker Eval(*this); 16229 { 16230 SequencedSubexpression Sequenced(*this); 16231 Region = LHSRegion; 16232 Visit(BO->getLHS()); 16233 } 16234 16235 // C++11 [expr.log.or]p1: 16236 // [...] the second operand is not evaluated if the first operand 16237 // evaluates to true. 
16238 bool EvalResult = false; 16239 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 16240 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 16241 if (ShouldVisitRHS) { 16242 Region = RHSRegion; 16243 Visit(BO->getRHS()); 16244 } 16245 16246 Region = OldRegion; 16247 Tree.merge(LHSRegion); 16248 Tree.merge(RHSRegion); 16249 } 16250 16251 void VisitBinLAnd(const BinaryOperator *BO) { 16252 // C++11 [expr.log.and]p2: 16253 // If the second expression is evaluated, every value computation and 16254 // side effect associated with the first expression is sequenced before 16255 // every value computation and side effect associated with the 16256 // second expression. 16257 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 16258 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 16259 SequenceTree::Seq OldRegion = Region; 16260 16261 EvaluationTracker Eval(*this); 16262 { 16263 SequencedSubexpression Sequenced(*this); 16264 Region = LHSRegion; 16265 Visit(BO->getLHS()); 16266 } 16267 16268 // C++11 [expr.log.and]p1: 16269 // [...] the second operand is not evaluated if the first operand is false. 16270 bool EvalResult = false; 16271 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 16272 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult); 16273 if (ShouldVisitRHS) { 16274 Region = RHSRegion; 16275 Visit(BO->getRHS()); 16276 } 16277 16278 Region = OldRegion; 16279 Tree.merge(LHSRegion); 16280 Tree.merge(RHSRegion); 16281 } 16282 16283 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) { 16284 // C++11 [expr.cond]p1: 16285 // [...] Every value computation and side effect associated with the first 16286 // expression is sequenced before every value computation and side effect 16287 // associated with the second or third expression. 16288 SequenceTree::Seq ConditionRegion = Tree.allocate(Region); 16289 16290 // No sequencing is specified between the true and false expression. 16291 // However since exactly one of both is going to be evaluated we can 16292 // consider them to be sequenced. This is needed to avoid warning on 16293 // something like "x ? y+= 1 : y += 2;" in the case where we will visit 16294 // both the true and false expressions because we can't evaluate x. 16295 // This will still allow us to detect an expression like (pre C++17) 16296 // "(x ? y += 1 : y += 2) = y". 16297 // 16298 // We don't wrap the visitation of the true and false expression with 16299 // SequencedSubexpression because we don't want to downgrade modifications 16300 // as side effect in the true and false expressions after the visition 16301 // is done. (for example in the expression "(x ? y++ : y++) + y" we should 16302 // not warn between the two "y++", but we should warn between the "y++" 16303 // and the "y". 16304 SequenceTree::Seq TrueRegion = Tree.allocate(Region); 16305 SequenceTree::Seq FalseRegion = Tree.allocate(Region); 16306 SequenceTree::Seq OldRegion = Region; 16307 16308 EvaluationTracker Eval(*this); 16309 { 16310 SequencedSubexpression Sequenced(*this); 16311 Region = ConditionRegion; 16312 Visit(CO->getCond()); 16313 } 16314 16315 // C++11 [expr.cond]p1: 16316 // [...] The first expression is contextually converted to bool (Clause 4). 16317 // It is evaluated and if it is true, the result of the conditional 16318 // expression is the value of the second expression, otherwise that of the 16319 // third expression. Only one of the second and third expressions is 16320 // evaluated. [...] 
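    // As in VisitBinLOr/VisitBinLAnd: when the condition folds to a constant,
    // only the arm that can actually execute is visited, so modifications in
    // a dead arm cannot produce spurious reports.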
16321 bool EvalResult = false; 16322 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult); 16323 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult); 16324 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult); 16325 if (ShouldVisitTrueExpr) { 16326 Region = TrueRegion; 16327 Visit(CO->getTrueExpr()); 16328 } 16329 if (ShouldVisitFalseExpr) { 16330 Region = FalseRegion; 16331 Visit(CO->getFalseExpr()); 16332 } 16333 16334 Region = OldRegion; 16335 Tree.merge(ConditionRegion); 16336 Tree.merge(TrueRegion); 16337 Tree.merge(FalseRegion); 16338 } 16339 16340 void VisitCallExpr(const CallExpr *CE) { 16341 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 16342 16343 if (CE->isUnevaluatedBuiltinCall(Context)) 16344 return; 16345 16346 // C++11 [intro.execution]p15: 16347 // When calling a function [...], every value computation and side effect 16348 // associated with any argument expression, or with the postfix expression 16349 // designating the called function, is sequenced before execution of every 16350 // expression or statement in the body of the function [and thus before 16351 // the value computation of its result]. 16352 SequencedSubexpression Sequenced(*this); 16353 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 16354 // C++17 [expr.call]p5 16355 // The postfix-expression is sequenced before each expression in the 16356 // expression-list and any default argument. [...] 16357 SequenceTree::Seq CalleeRegion; 16358 SequenceTree::Seq OtherRegion; 16359 if (SemaRef.getLangOpts().CPlusPlus17) { 16360 CalleeRegion = Tree.allocate(Region); 16361 OtherRegion = Tree.allocate(Region); 16362 } else { 16363 CalleeRegion = Region; 16364 OtherRegion = Region; 16365 } 16366 SequenceTree::Seq OldRegion = Region; 16367 16368 // Visit the callee expression first. 16369 Region = CalleeRegion; 16370 if (SemaRef.getLangOpts().CPlusPlus17) { 16371 SequencedSubexpression Sequenced(*this); 16372 Visit(CE->getCallee()); 16373 } else { 16374 Visit(CE->getCallee()); 16375 } 16376 16377 // Then visit the argument expressions. 16378 Region = OtherRegion; 16379 for (const Expr *Argument : CE->arguments()) 16380 Visit(Argument); 16381 16382 Region = OldRegion; 16383 if (SemaRef.getLangOpts().CPlusPlus17) { 16384 Tree.merge(CalleeRegion); 16385 Tree.merge(OtherRegion); 16386 } 16387 }); 16388 } 16389 16390 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 16391 // C++17 [over.match.oper]p2: 16392 // [...] the operator notation is first transformed to the equivalent 16393 // function-call notation as summarized in Table 12 (where @ denotes one 16394 // of the operators covered in the specified subclause). However, the 16395 // operands are sequenced in the order prescribed for the built-in 16396 // operator (Clause 8). 16397 // 16398 // From the above only overloaded binary operators and overloaded call 16399 // operators have sequencing rules in C++17 that we need to handle 16400 // separately. 
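    // Illustrative examples of what the C++17 handling below gives us:
    //   s << f() << g();  // overloaded <<: LHS sequenced before RHS, as for
    //                     // the built-in shift operators
    //   a = g();          // overloaded =: RHS sequenced before LHS
    //   fn(i++, i);       // overloaded call operator: the postfix-expression
    //                     // 'fn' is sequenced before the arguments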
16401 if (!SemaRef.getLangOpts().CPlusPlus17 || 16402 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 16403 return VisitCallExpr(CXXOCE); 16404 16405 enum { 16406 NoSequencing, 16407 LHSBeforeRHS, 16408 RHSBeforeLHS, 16409 LHSBeforeRest 16410 } SequencingKind; 16411 switch (CXXOCE->getOperator()) { 16412 case OO_Equal: 16413 case OO_PlusEqual: 16414 case OO_MinusEqual: 16415 case OO_StarEqual: 16416 case OO_SlashEqual: 16417 case OO_PercentEqual: 16418 case OO_CaretEqual: 16419 case OO_AmpEqual: 16420 case OO_PipeEqual: 16421 case OO_LessLessEqual: 16422 case OO_GreaterGreaterEqual: 16423 SequencingKind = RHSBeforeLHS; 16424 break; 16425 16426 case OO_LessLess: 16427 case OO_GreaterGreater: 16428 case OO_AmpAmp: 16429 case OO_PipePipe: 16430 case OO_Comma: 16431 case OO_ArrowStar: 16432 case OO_Subscript: 16433 SequencingKind = LHSBeforeRHS; 16434 break; 16435 16436 case OO_Call: 16437 SequencingKind = LHSBeforeRest; 16438 break; 16439 16440 default: 16441 SequencingKind = NoSequencing; 16442 break; 16443 } 16444 16445 if (SequencingKind == NoSequencing) 16446 return VisitCallExpr(CXXOCE); 16447 16448 // This is a call, so all subexpressions are sequenced before the result. 16449 SequencedSubexpression Sequenced(*this); 16450 16451 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 16452 assert(SemaRef.getLangOpts().CPlusPlus17 && 16453 "Should only get there with C++17 and above!"); 16454 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 16455 "Should only get there with an overloaded binary operator" 16456 " or an overloaded call operator!"); 16457 16458 if (SequencingKind == LHSBeforeRest) { 16459 assert(CXXOCE->getOperator() == OO_Call && 16460 "We should only have an overloaded call operator here!"); 16461 16462 // This is very similar to VisitCallExpr, except that we only have the 16463 // C++17 case. The postfix-expression is the first argument of the 16464 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 16465 // are in the following arguments. 16466 // 16467 // Note that we intentionally do not visit the callee expression since 16468 // it is just a decayed reference to a function. 16469 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 16470 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 16471 SequenceTree::Seq OldRegion = Region; 16472 16473 assert(CXXOCE->getNumArgs() >= 1 && 16474 "An overloaded call operator must have at least one argument" 16475 " for the postfix-expression!"); 16476 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 16477 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 16478 CXXOCE->getNumArgs() - 1); 16479 16480 // Visit the postfix-expression first. 16481 { 16482 Region = PostfixExprRegion; 16483 SequencedSubexpression Sequenced(*this); 16484 Visit(PostfixExpr); 16485 } 16486 16487 // Then visit the argument expressions. 16488 Region = ArgsRegion; 16489 for (const Expr *Arg : Args) 16490 Visit(Arg); 16491 16492 Region = OldRegion; 16493 Tree.merge(PostfixExprRegion); 16494 Tree.merge(ArgsRegion); 16495 } else { 16496 assert(CXXOCE->getNumArgs() == 2 && 16497 "Should only have two arguments here!"); 16498 assert((SequencingKind == LHSBeforeRHS || 16499 SequencingKind == RHSBeforeLHS) && 16500 "Unexpected sequencing kind!"); 16501 16502 // We do not visit the callee expression since it is just a decayed 16503 // reference to a function. 
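      // For a binary overloaded operator, argument 0 is the written LHS and
      // argument 1 is the written RHS; after the swap below, E1 is always the
      // operand that must be sequenced first.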
16504 const Expr *E1 = CXXOCE->getArg(0); 16505 const Expr *E2 = CXXOCE->getArg(1); 16506 if (SequencingKind == RHSBeforeLHS) 16507 std::swap(E1, E2); 16508 16509 return VisitSequencedExpressions(E1, E2); 16510 } 16511 }); 16512 } 16513 16514 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 16515 // This is a call, so all subexpressions are sequenced before the result. 16516 SequencedSubexpression Sequenced(*this); 16517 16518 if (!CCE->isListInitialization()) 16519 return VisitExpr(CCE); 16520 16521 // In C++11, list initializations are sequenced. 16522 SmallVector<SequenceTree::Seq, 32> Elts; 16523 SequenceTree::Seq Parent = Region; 16524 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 16525 E = CCE->arg_end(); 16526 I != E; ++I) { 16527 Region = Tree.allocate(Parent); 16528 Elts.push_back(Region); 16529 Visit(*I); 16530 } 16531 16532 // Forget that the initializers are sequenced. 16533 Region = Parent; 16534 for (unsigned I = 0; I < Elts.size(); ++I) 16535 Tree.merge(Elts[I]); 16536 } 16537 16538 void VisitInitListExpr(const InitListExpr *ILE) { 16539 if (!SemaRef.getLangOpts().CPlusPlus11) 16540 return VisitExpr(ILE); 16541 16542 // In C++11, list initializations are sequenced. 16543 SmallVector<SequenceTree::Seq, 32> Elts; 16544 SequenceTree::Seq Parent = Region; 16545 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 16546 const Expr *E = ILE->getInit(I); 16547 if (!E) 16548 continue; 16549 Region = Tree.allocate(Parent); 16550 Elts.push_back(Region); 16551 Visit(E); 16552 } 16553 16554 // Forget that the initializers are sequenced. 16555 Region = Parent; 16556 for (unsigned I = 0; I < Elts.size(); ++I) 16557 Tree.merge(Elts[I]); 16558 } 16559 }; 16560 16561 } // namespace 16562 16563 void Sema::CheckUnsequencedOperations(const Expr *E) { 16564 SmallVector<const Expr *, 8> WorkList; 16565 WorkList.push_back(E); 16566 while (!WorkList.empty()) { 16567 const Expr *Item = WorkList.pop_back_val(); 16568 SequenceChecker(*this, Item, WorkList); 16569 } 16570 } 16571 16572 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 16573 bool IsConstexpr) { 16574 llvm::SaveAndRestore ConstantContext(isConstantEvaluatedOverride, 16575 IsConstexpr || isa<ConstantExpr>(E)); 16576 CheckImplicitConversions(E, CheckLoc); 16577 if (!E->isInstantiationDependent()) 16578 CheckUnsequencedOperations(E); 16579 if (!IsConstexpr && !E->isValueDependent()) 16580 CheckForIntOverflow(E); 16581 DiagnoseMisalignedMembers(); 16582 } 16583 16584 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 16585 FieldDecl *BitField, 16586 Expr *Init) { 16587 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 16588 } 16589 16590 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 16591 SourceLocation Loc) { 16592 if (!PType->isVariablyModifiedType()) 16593 return; 16594 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 16595 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 16596 return; 16597 } 16598 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 16599 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 16600 return; 16601 } 16602 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 16603 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 16604 return; 16605 } 16606 16607 const ArrayType *AT = S.Context.getAsArrayType(PType); 16608 if (!AT) 16609 return; 16610 16611 if (AT->getSizeModifier() != ArrayType::Star) { 16612 diagnoseArrayStarInParamType(S, AT->getElementType(), 
Loc); 16613 return; 16614 } 16615 16616 S.Diag(Loc, diag::err_array_star_in_function_definition); 16617 } 16618 16619 /// CheckParmsForFunctionDef - Check that the parameters of the given 16620 /// function are appropriate for the definition of a function. This 16621 /// takes care of any checks that cannot be performed on the 16622 /// declaration itself, e.g., that the types of each of the function 16623 /// parameters are complete. 16624 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 16625 bool CheckParameterNames) { 16626 bool HasInvalidParm = false; 16627 for (ParmVarDecl *Param : Parameters) { 16628 assert(Param && "null in a parameter list"); 16629 // C99 6.7.5.3p4: the parameters in a parameter type list in a 16630 // function declarator that is part of a function definition of 16631 // that function shall not have incomplete type. 16632 // 16633 // C++23 [dcl.fct.def.general]/p2 16634 // The type of a parameter [...] for a function definition 16635 // shall not be a (possibly cv-qualified) class type that is incomplete 16636 // or abstract within the function body unless the function is deleted. 16637 if (!Param->isInvalidDecl() && 16638 (RequireCompleteType(Param->getLocation(), Param->getType(), 16639 diag::err_typecheck_decl_incomplete_type) || 16640 RequireNonAbstractType(Param->getBeginLoc(), Param->getOriginalType(), 16641 diag::err_abstract_type_in_decl, 16642 AbstractParamType))) { 16643 Param->setInvalidDecl(); 16644 HasInvalidParm = true; 16645 } 16646 16647 // C99 6.9.1p5: If the declarator includes a parameter type list, the 16648 // declaration of each parameter shall include an identifier. 16649 if (CheckParameterNames && Param->getIdentifier() == nullptr && 16650 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 16651 // Diagnose this as an extension in C17 and earlier. 16652 if (!getLangOpts().C2x) 16653 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 16654 } 16655 16656 // C99 6.7.5.3p12: 16657 // If the function declarator is not part of a definition of that 16658 // function, parameters may have incomplete type and may use the [*] 16659 // notation in their sequences of declarator specifiers to specify 16660 // variable length array types. 16661 QualType PType = Param->getOriginalType(); 16662 // FIXME: This diagnostic should point the '[*]' if source-location 16663 // information is added for it. 16664 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 16665 16666 // If the parameter is a c++ class type and it has to be destructed in the 16667 // callee function, declare the destructor so that it can be called by the 16668 // callee function. Do not perform any direct access check on the dtor here. 16669 if (!Param->isInvalidDecl()) { 16670 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 16671 if (!ClassDecl->isInvalidDecl() && 16672 !ClassDecl->hasIrrelevantDestructor() && 16673 !ClassDecl->isDependentContext() && 16674 ClassDecl->isParamDestroyedInCallee()) { 16675 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 16676 MarkFunctionReferenced(Param->getLocation(), Destructor); 16677 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 16678 } 16679 } 16680 } 16681 16682 // Parameters with the pass_object_size attribute only need to be marked 16683 // constant at function definitions. Because we lack information about 16684 // whether we're on a declaration or definition when we're instantiating the 16685 // attribute, we need to check for constness here. 
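    // Illustrative: a definition such as
    //   void fill(char *const buf __attribute__((pass_object_size(0)))) {...}
    // is fine, while omitting the 'const' on 'buf' is diagnosed below.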
16686 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 16687 if (!Param->getType().isConstQualified()) 16688 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 16689 << Attr->getSpelling() << 1; 16690 16691 // Check for parameter names shadowing fields from the class. 16692 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 16693 // The owning context for the parameter should be the function, but we 16694 // want to see if this function's declaration context is a record. 16695 DeclContext *DC = Param->getDeclContext(); 16696 if (DC && DC->isFunctionOrMethod()) { 16697 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 16698 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 16699 RD, /*DeclIsField*/ false); 16700 } 16701 } 16702 16703 if (!Param->isInvalidDecl() && 16704 Param->getOriginalType()->isWebAssemblyTableType()) { 16705 Param->setInvalidDecl(); 16706 HasInvalidParm = true; 16707 Diag(Param->getLocation(), diag::err_wasm_table_as_function_parameter); 16708 } 16709 } 16710 16711 return HasInvalidParm; 16712 } 16713 16714 std::optional<std::pair< 16715 CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr 16716 *E, 16717 ASTContext 16718 &Ctx); 16719 16720 /// Compute the alignment and offset of the base class object given the 16721 /// derived-to-base cast expression and the alignment and offset of the derived 16722 /// class object. 16723 static std::pair<CharUnits, CharUnits> 16724 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 16725 CharUnits BaseAlignment, CharUnits Offset, 16726 ASTContext &Ctx) { 16727 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 16728 ++PathI) { 16729 const CXXBaseSpecifier *Base = *PathI; 16730 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 16731 if (Base->isVirtual()) { 16732 // The complete object may have a lower alignment than the non-virtual 16733 // alignment of the base, in which case the base may be misaligned. Choose 16734 // the smaller of the non-virtual alignment and BaseAlignment, which is a 16735 // conservative lower bound of the complete object alignment. 16736 CharUnits NonVirtualAlignment = 16737 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 16738 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 16739 Offset = CharUnits::Zero(); 16740 } else { 16741 const ASTRecordLayout &RL = 16742 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 16743 Offset += RL.getBaseClassOffset(BaseDecl); 16744 } 16745 DerivedType = Base->getType(); 16746 } 16747 16748 return std::make_pair(BaseAlignment, Offset); 16749 } 16750 16751 /// Compute the alignment and offset of a binary additive operator. 
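/// Illustrative: given "char buf[64] __attribute__((aligned(16)));", the
/// expression "buf + 5" yields (alignment 16, offset 5); when the index is
/// not a constant expression, a conservative lower bound on the alignment is
/// returned with offset zero.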
16752 static std::optional<std::pair<CharUnits, CharUnits>> 16753 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 16754 bool IsSub, ASTContext &Ctx) { 16755 QualType PointeeType = PtrE->getType()->getPointeeType(); 16756 16757 if (!PointeeType->isConstantSizeType()) 16758 return std::nullopt; 16759 16760 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 16761 16762 if (!P) 16763 return std::nullopt; 16764 16765 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 16766 if (std::optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 16767 CharUnits Offset = EltSize * IdxRes->getExtValue(); 16768 if (IsSub) 16769 Offset = -Offset; 16770 return std::make_pair(P->first, P->second + Offset); 16771 } 16772 16773 // If the integer expression isn't a constant expression, compute the lower 16774 // bound of the alignment using the alignment and offset of the pointer 16775 // expression and the element size. 16776 return std::make_pair( 16777 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 16778 CharUnits::Zero()); 16779 } 16780 16781 /// This helper function takes an lvalue expression and returns the alignment of 16782 /// a VarDecl and a constant offset from the VarDecl. 16783 std::optional<std::pair< 16784 CharUnits, 16785 CharUnits>> static getBaseAlignmentAndOffsetFromLValue(const Expr *E, 16786 ASTContext &Ctx) { 16787 E = E->IgnoreParens(); 16788 switch (E->getStmtClass()) { 16789 default: 16790 break; 16791 case Stmt::CStyleCastExprClass: 16792 case Stmt::CXXStaticCastExprClass: 16793 case Stmt::ImplicitCastExprClass: { 16794 auto *CE = cast<CastExpr>(E); 16795 const Expr *From = CE->getSubExpr(); 16796 switch (CE->getCastKind()) { 16797 default: 16798 break; 16799 case CK_NoOp: 16800 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 16801 case CK_UncheckedDerivedToBase: 16802 case CK_DerivedToBase: { 16803 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 16804 if (!P) 16805 break; 16806 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 16807 P->second, Ctx); 16808 } 16809 } 16810 break; 16811 } 16812 case Stmt::ArraySubscriptExprClass: { 16813 auto *ASE = cast<ArraySubscriptExpr>(E); 16814 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 16815 false, Ctx); 16816 } 16817 case Stmt::DeclRefExprClass: { 16818 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 16819 // FIXME: If VD is captured by copy or is an escaping __block variable, 16820 // use the alignment of VD's type. 16821 if (!VD->getType()->isReferenceType()) { 16822 // Dependent alignment cannot be resolved -> bail out. 
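        // (Illustrative: "alignas(alignof(T)) int X;" inside a template has
        // a dependent alignment.)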
16823 if (VD->hasDependentAlignment()) 16824 break; 16825 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 16826 } 16827 if (VD->hasInit()) 16828 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 16829 } 16830 break; 16831 } 16832 case Stmt::MemberExprClass: { 16833 auto *ME = cast<MemberExpr>(E); 16834 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 16835 if (!FD || FD->getType()->isReferenceType() || 16836 FD->getParent()->isInvalidDecl()) 16837 break; 16838 std::optional<std::pair<CharUnits, CharUnits>> P; 16839 if (ME->isArrow()) 16840 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 16841 else 16842 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 16843 if (!P) 16844 break; 16845 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 16846 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 16847 return std::make_pair(P->first, 16848 P->second + CharUnits::fromQuantity(Offset)); 16849 } 16850 case Stmt::UnaryOperatorClass: { 16851 auto *UO = cast<UnaryOperator>(E); 16852 switch (UO->getOpcode()) { 16853 default: 16854 break; 16855 case UO_Deref: 16856 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 16857 } 16858 break; 16859 } 16860 case Stmt::BinaryOperatorClass: { 16861 auto *BO = cast<BinaryOperator>(E); 16862 auto Opcode = BO->getOpcode(); 16863 switch (Opcode) { 16864 default: 16865 break; 16866 case BO_Comma: 16867 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 16868 } 16869 break; 16870 } 16871 } 16872 return std::nullopt; 16873 } 16874 16875 /// This helper function takes a pointer expression and returns the alignment of 16876 /// a VarDecl and a constant offset from the VarDecl. 16877 std::optional<std::pair< 16878 CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr 16879 *E, 16880 ASTContext 16881 &Ctx) { 16882 E = E->IgnoreParens(); 16883 switch (E->getStmtClass()) { 16884 default: 16885 break; 16886 case Stmt::CStyleCastExprClass: 16887 case Stmt::CXXStaticCastExprClass: 16888 case Stmt::ImplicitCastExprClass: { 16889 auto *CE = cast<CastExpr>(E); 16890 const Expr *From = CE->getSubExpr(); 16891 switch (CE->getCastKind()) { 16892 default: 16893 break; 16894 case CK_NoOp: 16895 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 16896 case CK_ArrayToPointerDecay: 16897 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 16898 case CK_UncheckedDerivedToBase: 16899 case CK_DerivedToBase: { 16900 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 16901 if (!P) 16902 break; 16903 return getDerivedToBaseAlignmentAndOffset( 16904 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 16905 } 16906 } 16907 break; 16908 } 16909 case Stmt::CXXThisExprClass: { 16910 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 16911 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 16912 return std::make_pair(Alignment, CharUnits::Zero()); 16913 } 16914 case Stmt::UnaryOperatorClass: { 16915 auto *UO = cast<UnaryOperator>(E); 16916 if (UO->getOpcode() == UO_AddrOf) 16917 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 16918 break; 16919 } 16920 case Stmt::BinaryOperatorClass: { 16921 auto *BO = cast<BinaryOperator>(E); 16922 auto Opcode = BO->getOpcode(); 16923 switch (Opcode) { 16924 default: 16925 break; 16926 case BO_Add: 16927 case BO_Sub: { 16928 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 16929 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 16930 
std::swap(LHS, RHS); 16931 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 16932 Ctx); 16933 } 16934 case BO_Comma: 16935 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 16936 } 16937 break; 16938 } 16939 } 16940 return std::nullopt; 16941 } 16942 16943 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 16944 // See if we can compute the alignment of a VarDecl and an offset from it. 16945 std::optional<std::pair<CharUnits, CharUnits>> P = 16946 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 16947 16948 if (P) 16949 return P->first.alignmentAtOffset(P->second); 16950 16951 // If that failed, return the type's alignment. 16952 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 16953 } 16954 16955 /// CheckCastAlign - Implements -Wcast-align, which warns when a 16956 /// pointer cast increases the alignment requirements. 16957 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 16958 // This is actually a lot of work to potentially be doing on every 16959 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 16960 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 16961 return; 16962 16963 // Ignore dependent types. 16964 if (T->isDependentType() || Op->getType()->isDependentType()) 16965 return; 16966 16967 // Require that the destination be a pointer type. 16968 const PointerType *DestPtr = T->getAs<PointerType>(); 16969 if (!DestPtr) return; 16970 16971 // If the destination has alignment 1, we're done. 16972 QualType DestPointee = DestPtr->getPointeeType(); 16973 if (DestPointee->isIncompleteType()) return; 16974 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 16975 if (DestAlign.isOne()) return; 16976 16977 // Require that the source be a pointer type. 16978 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 16979 if (!SrcPtr) return; 16980 QualType SrcPointee = SrcPtr->getPointeeType(); 16981 16982 // Explicitly allow casts from cv void*. We already implicitly 16983 // allowed casts to cv void*, since they have alignment 1. 16984 // Also allow casts involving incomplete types, which implicitly 16985 // includes 'void'. 16986 if (SrcPointee->isIncompleteType()) return; 16987 16988 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 16989 16990 if (SrcAlign >= DestAlign) return; 16991 16992 Diag(TRange.getBegin(), diag::warn_cast_align) 16993 << Op->getType() << T 16994 << static_cast<unsigned>(SrcAlign.getQuantity()) 16995 << static_cast<unsigned>(DestAlign.getQuantity()) 16996 << TRange << Op->getSourceRange(); 16997 } 16998 16999 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 17000 const ArraySubscriptExpr *ASE, 17001 bool AllowOnePastEnd, bool IndexNegated) { 17002 // Already diagnosed by the constant evaluator. 17003 if (isConstantEvaluated()) 17004 return; 17005 17006 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 17007 if (IndexExpr->isValueDependent()) 17008 return; 17009 17010 const Type *EffectiveType = 17011 BaseExpr->getType()->getPointeeOrArrayElementType(); 17012 BaseExpr = BaseExpr->IgnoreParenCasts(); 17013 const ConstantArrayType *ArrayTy = 17014 Context.getAsConstantArrayType(BaseExpr->getType()); 17015 17016 LangOptions::StrictFlexArraysLevelKind 17017 StrictFlexArraysLevel = getLangOpts().getStrictFlexArraysLevel(); 17018 17019 const Type *BaseType = 17020 ArrayTy == nullptr ? 
nullptr : ArrayTy->getElementType().getTypePtr(); 17021 bool IsUnboundedArray = 17022 BaseType == nullptr || BaseExpr->isFlexibleArrayMemberLike( 17023 Context, StrictFlexArraysLevel, 17024 /*IgnoreTemplateOrMacroSubstitution=*/true); 17025 if (EffectiveType->isDependentType() || 17026 (!IsUnboundedArray && BaseType->isDependentType())) 17027 return; 17028 17029 Expr::EvalResult Result; 17030 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 17031 return; 17032 17033 llvm::APSInt index = Result.Val.getInt(); 17034 if (IndexNegated) { 17035 index.setIsUnsigned(false); 17036 index = -index; 17037 } 17038 17039 if (IsUnboundedArray) { 17040 if (EffectiveType->isFunctionType()) 17041 return; 17042 if (index.isUnsigned() || !index.isNegative()) { 17043 const auto &ASTC = getASTContext(); 17044 unsigned AddrBits = ASTC.getTargetInfo().getPointerWidth( 17045 EffectiveType->getCanonicalTypeInternal().getAddressSpace()); 17046 if (index.getBitWidth() < AddrBits) 17047 index = index.zext(AddrBits); 17048 std::optional<CharUnits> ElemCharUnits = 17049 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 17050 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 17051 // pointer) bounds-checking isn't meaningful. 17052 if (!ElemCharUnits) 17053 return; 17054 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 17055 // If index has more active bits than address space, we already know 17056 // we have a bounds violation to warn about. Otherwise, compute 17057 // address of (index + 1)th element, and warn about bounds violation 17058 // only if that address exceeds address space. 17059 if (index.getActiveBits() <= AddrBits) { 17060 bool Overflow; 17061 llvm::APInt Product(index); 17062 Product += 1; 17063 Product = Product.umul_ov(ElemBytes, Overflow); 17064 if (!Overflow && Product.getActiveBits() <= AddrBits) 17065 return; 17066 } 17067 17068 // Need to compute max possible elements in address space, since that 17069 // is included in diag message. 17070 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 17071 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 17072 MaxElems += 1; 17073 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 17074 MaxElems = MaxElems.udiv(ElemBytes); 17075 17076 unsigned DiagID = 17077 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds 17078 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 17079 17080 // Diag message shows element size in bits and in "bytes" (platform- 17081 // dependent CharUnits) 17082 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 17083 PDiag(DiagID) 17084 << toString(index, 10, true) << AddrBits 17085 << (unsigned)ASTC.toBits(*ElemCharUnits) 17086 << toString(ElemBytes, 10, false) 17087 << toString(MaxElems, 10, false) 17088 << (unsigned)MaxElems.getLimitedValue(~0U) 17089 << IndexExpr->getSourceRange()); 17090 17091 const NamedDecl *ND = nullptr; 17092 // Try harder to find a NamedDecl to point at in the note. 
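      // (e.g. for "buf[i][j]" walk down to the DeclRefExpr for 'buf'.)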
17093 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 17094 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 17095 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 17096 ND = DRE->getDecl(); 17097 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 17098 ND = ME->getMemberDecl(); 17099 17100 if (ND) 17101 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 17102 PDiag(diag::note_array_declared_here) << ND); 17103 } 17104 return; 17105 } 17106 17107 if (index.isUnsigned() || !index.isNegative()) { 17108 // It is possible that the type of the base expression after 17109 // IgnoreParenCasts is incomplete, even though the type of the base 17110 // expression before IgnoreParenCasts is complete (see PR39746 for an 17111 // example). In this case we have no information about whether the array 17112 // access exceeds the array bounds. However we can still diagnose an array 17113 // access which precedes the array bounds. 17114 if (BaseType->isIncompleteType()) 17115 return; 17116 17117 llvm::APInt size = ArrayTy->getSize(); 17118 17119 if (BaseType != EffectiveType) { 17120 // Make sure we're comparing apples to apples when comparing index to 17121 // size. 17122 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 17123 uint64_t array_typesize = Context.getTypeSize(BaseType); 17124 17125 // Handle ptrarith_typesize being zero, such as when casting to void*. 17126 // Use the size in bits (what "getTypeSize()" returns) rather than bytes. 17127 if (!ptrarith_typesize) 17128 ptrarith_typesize = Context.getCharWidth(); 17129 17130 if (ptrarith_typesize != array_typesize) { 17131 // There's a cast to a different size type involved. 17132 uint64_t ratio = array_typesize / ptrarith_typesize; 17133 17134 // TODO: Be smarter about handling cases where array_typesize is not a 17135 // multiple of ptrarith_typesize. 17136 if (ptrarith_typesize * ratio == array_typesize) 17137 size *= llvm::APInt(size.getBitWidth(), ratio); 17138 } 17139 } 17140 17141 if (size.getBitWidth() > index.getBitWidth()) 17142 index = index.zext(size.getBitWidth()); 17143 else if (size.getBitWidth() < index.getBitWidth()) 17144 size = size.zext(index.getBitWidth()); 17145 17146 // For array subscripting the index must be less than size, but for pointer 17147 // arithmetic also allow the index (offset) to be equal to size since 17148 // computing the next address after the end of the array is legal and 17149 // commonly done e.g. in C++ iterators and range-based for loops. 17150 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 17151 return; 17152 17153 // Suppress the warning if the subscript expression (as identified by the 17154 // ']' location) and the index expression are both from macro expansions 17155 // within a system header. 17156 if (ASE) { 17157 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 17158 ASE->getRBracketLoc()); 17159 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 17160 SourceLocation IndexLoc = 17161 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 17162 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 17163 return; 17164 } 17165 } 17166 17167 unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds 17168 : diag::warn_ptr_arith_exceeds_bounds; 17169 unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1; 17170 QualType CastMsgTy = ASE ? 
ASE->getLHS()->getType() : QualType(); 17171 17172 DiagRuntimeBehavior( 17173 BaseExpr->getBeginLoc(), BaseExpr, 17174 PDiag(DiagID) << toString(index, 10, true) << ArrayTy->desugar() 17175 << CastMsg << CastMsgTy << IndexExpr->getSourceRange()); 17176 } else { 17177 unsigned DiagID = diag::warn_array_index_precedes_bounds; 17178 if (!ASE) { 17179 DiagID = diag::warn_ptr_arith_precedes_bounds; 17180 if (index.isNegative()) index = -index; 17181 } 17182 17183 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 17184 PDiag(DiagID) << toString(index, 10, true) 17185 << IndexExpr->getSourceRange()); 17186 } 17187 17188 const NamedDecl *ND = nullptr; 17189 // Try harder to find a NamedDecl to point at in the note. 17190 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 17191 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 17192 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 17193 ND = DRE->getDecl(); 17194 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 17195 ND = ME->getMemberDecl(); 17196 17197 if (ND) 17198 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 17199 PDiag(diag::note_array_declared_here) << ND); 17200 } 17201 17202 void Sema::CheckArrayAccess(const Expr *expr) { 17203 int AllowOnePastEnd = 0; 17204 while (expr) { 17205 expr = expr->IgnoreParenImpCasts(); 17206 switch (expr->getStmtClass()) { 17207 case Stmt::ArraySubscriptExprClass: { 17208 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 17209 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 17210 AllowOnePastEnd > 0); 17211 expr = ASE->getBase(); 17212 break; 17213 } 17214 case Stmt::MemberExprClass: { 17215 expr = cast<MemberExpr>(expr)->getBase(); 17216 break; 17217 } 17218 case Stmt::OMPArraySectionExprClass: { 17219 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 17220 if (ASE->getLowerBound()) 17221 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 17222 /*ASE=*/nullptr, AllowOnePastEnd > 0); 17223 return; 17224 } 17225 case Stmt::UnaryOperatorClass: { 17226 // Only unwrap the * and & unary operators 17227 const UnaryOperator *UO = cast<UnaryOperator>(expr); 17228 expr = UO->getSubExpr(); 17229 switch (UO->getOpcode()) { 17230 case UO_AddrOf: 17231 AllowOnePastEnd++; 17232 break; 17233 case UO_Deref: 17234 AllowOnePastEnd--; 17235 break; 17236 default: 17237 return; 17238 } 17239 break; 17240 } 17241 case Stmt::ConditionalOperatorClass: { 17242 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 17243 if (const Expr *lhs = cond->getLHS()) 17244 CheckArrayAccess(lhs); 17245 if (const Expr *rhs = cond->getRHS()) 17246 CheckArrayAccess(rhs); 17247 return; 17248 } 17249 case Stmt::CXXOperatorCallExprClass: { 17250 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 17251 for (const auto *Arg : OCE->arguments()) 17252 CheckArrayAccess(Arg); 17253 return; 17254 } 17255 default: 17256 return; 17257 } 17258 } 17259 } 17260 17261 //===--- CHECK: Objective-C retain cycles ----------------------------------// 17262 17263 namespace { 17264 17265 struct RetainCycleOwner { 17266 VarDecl *Variable = nullptr; 17267 SourceRange Range; 17268 SourceLocation Loc; 17269 bool Indirect = false; 17270 17271 RetainCycleOwner() = default; 17272 17273 void setLocsFrom(Expr *e) { 17274 Loc = e->getExprLoc(); 17275 Range = e->getSourceRange(); 17276 } 17277 }; 17278 17279 } // namespace 17280 17281 /// Consider whether capturing the given variable can possibly lead to 17282 /// a retain cycle. 
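/// Illustrative: under ARC,
///   self.handler = ^{ [self doSomething]; };   // 'handler' a copy property
/// captures 'self' (a __strong variable) in a block that 'self' itself
/// retains, which is reported as a likely retain cycle.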
17283 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 17284 // In ARC, it's captured strongly iff the variable has __strong 17285 // lifetime. In MRR, it's captured strongly if the variable is 17286 // __block and has an appropriate type. 17287 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 17288 return false; 17289 17290 owner.Variable = var; 17291 if (ref) 17292 owner.setLocsFrom(ref); 17293 return true; 17294 } 17295 17296 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 17297 while (true) { 17298 e = e->IgnoreParens(); 17299 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 17300 switch (cast->getCastKind()) { 17301 case CK_BitCast: 17302 case CK_LValueBitCast: 17303 case CK_LValueToRValue: 17304 case CK_ARCReclaimReturnedObject: 17305 e = cast->getSubExpr(); 17306 continue; 17307 17308 default: 17309 return false; 17310 } 17311 } 17312 17313 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 17314 ObjCIvarDecl *ivar = ref->getDecl(); 17315 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 17316 return false; 17317 17318 // Try to find a retain cycle in the base. 17319 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 17320 return false; 17321 17322 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 17323 owner.Indirect = true; 17324 return true; 17325 } 17326 17327 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 17328 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 17329 if (!var) return false; 17330 return considerVariable(var, ref, owner); 17331 } 17332 17333 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 17334 if (member->isArrow()) return false; 17335 17336 // Don't count this as an indirect ownership. 17337 e = member->getBase(); 17338 continue; 17339 } 17340 17341 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 17342 // Only pay attention to pseudo-objects on property references. 17343 ObjCPropertyRefExpr *pre 17344 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 17345 ->IgnoreParens()); 17346 if (!pre) return false; 17347 if (pre->isImplicitProperty()) return false; 17348 ObjCPropertyDecl *property = pre->getExplicitProperty(); 17349 if (!property->isRetaining() && 17350 !(property->getPropertyIvarDecl() && 17351 property->getPropertyIvarDecl()->getType() 17352 .getObjCLifetime() == Qualifiers::OCL_Strong)) 17353 return false; 17354 17355 owner.Indirect = true; 17356 if (pre->isSuperReceiver()) { 17357 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 17358 if (!owner.Variable) 17359 return false; 17360 owner.Loc = pre->getLocation(); 17361 owner.Range = pre->getSourceRange(); 17362 return true; 17363 } 17364 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 17365 ->getSourceExpr()); 17366 continue; 17367 } 17368 17369 // Array ivars? 
17370 17371 return false; 17372 } 17373 } 17374 17375 namespace { 17376 17377 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 17378 VarDecl *Variable; 17379 Expr *Capturer = nullptr; 17380 bool VarWillBeReased = false; 17381 17382 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 17383 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 17384 Variable(variable) {} 17385 17386 void VisitDeclRefExpr(DeclRefExpr *ref) { 17387 if (ref->getDecl() == Variable && !Capturer) 17388 Capturer = ref; 17389 } 17390 17391 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 17392 if (Capturer) return; 17393 Visit(ref->getBase()); 17394 if (Capturer && ref->isFreeIvar()) 17395 Capturer = ref; 17396 } 17397 17398 void VisitBlockExpr(BlockExpr *block) { 17399 // Look inside nested blocks 17400 if (block->getBlockDecl()->capturesVariable(Variable)) 17401 Visit(block->getBlockDecl()->getBody()); 17402 } 17403 17404 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 17405 if (Capturer) return; 17406 if (OVE->getSourceExpr()) 17407 Visit(OVE->getSourceExpr()); 17408 } 17409 17410 void VisitBinaryOperator(BinaryOperator *BinOp) { 17411 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 17412 return; 17413 Expr *LHS = BinOp->getLHS(); 17414 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 17415 if (DRE->getDecl() != Variable) 17416 return; 17417 if (Expr *RHS = BinOp->getRHS()) { 17418 RHS = RHS->IgnoreParenCasts(); 17419 std::optional<llvm::APSInt> Value; 17420 VarWillBeReased = 17421 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 17422 *Value == 0); 17423 } 17424 } 17425 } 17426 }; 17427 17428 } // namespace 17429 17430 /// Check whether the given argument is a block which captures a 17431 /// variable. 17432 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 17433 assert(owner.Variable && owner.Loc.isValid()); 17434 17435 e = e->IgnoreParenCasts(); 17436 17437 // Look through [^{...} copy] and Block_copy(^{...}). 17438 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 17439 Selector Cmd = ME->getSelector(); 17440 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 17441 e = ME->getInstanceReceiver(); 17442 if (!e) 17443 return nullptr; 17444 e = e->IgnoreParenCasts(); 17445 } 17446 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 17447 if (CE->getNumArgs() == 1) { 17448 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 17449 if (Fn) { 17450 const IdentifierInfo *FnI = Fn->getIdentifier(); 17451 if (FnI && FnI->isStr("_Block_copy")) { 17452 e = CE->getArg(0)->IgnoreParenCasts(); 17453 } 17454 } 17455 } 17456 } 17457 17458 BlockExpr *block = dyn_cast<BlockExpr>(e); 17459 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 17460 return nullptr; 17461 17462 FindCaptureVisitor visitor(S.Context, owner.Variable); 17463 visitor.Visit(block->getBlockDecl()->getBody()); 17464 return visitor.VarWillBeReased ? nullptr : visitor.Capturer; 17465 } 17466 17467 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 17468 RetainCycleOwner &owner) { 17469 assert(capturer); 17470 assert(owner.Variable && owner.Loc.isValid()); 17471 17472 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 17473 << owner.Variable << capturer->getSourceRange(); 17474 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 17475 << owner.Indirect << owner.Range; 17476 } 17477 17478 /// Check for a keyword selector that starts with the word 'add' or 17479 /// 'set'. 
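/// Illustrative: matches -setDelegate: and -addObject:, but not -settings:
/// (lowercase letter right after the prefix); -addOperationWithBlock: is
/// explicitly exempted.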
17480 static bool isSetterLikeSelector(Selector sel) { 17481 if (sel.isUnarySelector()) return false; 17482 17483 StringRef str = sel.getNameForSlot(0); 17484 while (!str.empty() && str.front() == '_') str = str.substr(1); 17485 if (str.startswith("set")) 17486 str = str.substr(3); 17487 else if (str.startswith("add")) { 17488 // Specially allow 'addOperationWithBlock:'. 17489 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 17490 return false; 17491 str = str.substr(3); 17492 } 17493 else 17494 return false; 17495 17496 if (str.empty()) return true; 17497 return !isLowercase(str.front()); 17498 } 17499 17500 static std::optional<int> 17501 GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 17502 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 17503 Message->getReceiverInterface(), 17504 NSAPI::ClassId_NSMutableArray); 17505 if (!IsMutableArray) { 17506 return std::nullopt; 17507 } 17508 17509 Selector Sel = Message->getSelector(); 17510 17511 std::optional<NSAPI::NSArrayMethodKind> MKOpt = 17512 S.NSAPIObj->getNSArrayMethodKind(Sel); 17513 if (!MKOpt) { 17514 return std::nullopt; 17515 } 17516 17517 NSAPI::NSArrayMethodKind MK = *MKOpt; 17518 17519 switch (MK) { 17520 case NSAPI::NSMutableArr_addObject: 17521 case NSAPI::NSMutableArr_insertObjectAtIndex: 17522 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 17523 return 0; 17524 case NSAPI::NSMutableArr_replaceObjectAtIndex: 17525 return 1; 17526 17527 default: 17528 return std::nullopt; 17529 } 17530 17531 return std::nullopt; 17532 } 17533 17534 static std::optional<int> 17535 GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 17536 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 17537 Message->getReceiverInterface(), 17538 NSAPI::ClassId_NSMutableDictionary); 17539 if (!IsMutableDictionary) { 17540 return std::nullopt; 17541 } 17542 17543 Selector Sel = Message->getSelector(); 17544 17545 std::optional<NSAPI::NSDictionaryMethodKind> MKOpt = 17546 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 17547 if (!MKOpt) { 17548 return std::nullopt; 17549 } 17550 17551 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 17552 17553 switch (MK) { 17554 case NSAPI::NSMutableDict_setObjectForKey: 17555 case NSAPI::NSMutableDict_setValueForKey: 17556 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 17557 return 0; 17558 17559 default: 17560 return std::nullopt; 17561 } 17562 17563 return std::nullopt; 17564 } 17565 17566 static std::optional<int> GetNSSetArgumentIndex(Sema &S, 17567 ObjCMessageExpr *Message) { 17568 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 17569 Message->getReceiverInterface(), 17570 NSAPI::ClassId_NSMutableSet); 17571 17572 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 17573 Message->getReceiverInterface(), 17574 NSAPI::ClassId_NSMutableOrderedSet); 17575 if (!IsMutableSet && !IsMutableOrderedSet) { 17576 return std::nullopt; 17577 } 17578 17579 Selector Sel = Message->getSelector(); 17580 17581 std::optional<NSAPI::NSSetMethodKind> MKOpt = 17582 S.NSAPIObj->getNSSetMethodKind(Sel); 17583 if (!MKOpt) { 17584 return std::nullopt; 17585 } 17586 17587 NSAPI::NSSetMethodKind MK = *MKOpt; 17588 17589 switch (MK) { 17590 case NSAPI::NSMutableSet_addObject: 17591 case NSAPI::NSOrderedSet_setObjectAtIndex: 17592 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 17593 case NSAPI::NSOrderedSet_insertObjectAtIndex: 17594 return 0; 17595 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 17596 return 1; 17597 } 17598 17599 return 
std::nullopt; 17600 } 17601 17602 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 17603 if (!Message->isInstanceMessage()) { 17604 return; 17605 } 17606 17607 std::optional<int> ArgOpt; 17608 17609 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 17610 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 17611 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 17612 return; 17613 } 17614 17615 int ArgIndex = *ArgOpt; 17616 17617 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 17618 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 17619 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 17620 } 17621 17622 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 17623 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 17624 if (ArgRE->isObjCSelfExpr()) { 17625 Diag(Message->getSourceRange().getBegin(), 17626 diag::warn_objc_circular_container) 17627 << ArgRE->getDecl() << StringRef("'super'"); 17628 } 17629 } 17630 } else { 17631 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 17632 17633 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 17634 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 17635 } 17636 17637 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 17638 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 17639 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 17640 ValueDecl *Decl = ReceiverRE->getDecl(); 17641 Diag(Message->getSourceRange().getBegin(), 17642 diag::warn_objc_circular_container) 17643 << Decl << Decl; 17644 if (!ArgRE->isObjCSelfExpr()) { 17645 Diag(Decl->getLocation(), 17646 diag::note_objc_circular_container_declared_here) 17647 << Decl; 17648 } 17649 } 17650 } 17651 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 17652 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 17653 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 17654 ObjCIvarDecl *Decl = IvarRE->getDecl(); 17655 Diag(Message->getSourceRange().getBegin(), 17656 diag::warn_objc_circular_container) 17657 << Decl << Decl; 17658 Diag(Decl->getLocation(), 17659 diag::note_objc_circular_container_declared_here) 17660 << Decl; 17661 } 17662 } 17663 } 17664 } 17665 } 17666 17667 /// Check a message send to see if it's likely to cause a retain cycle. 17668 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 17669 // Only check instance methods whose selector looks like a setter. 17670 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 17671 return; 17672 17673 // Try to find a variable that the receiver is strongly owned by. 17674 RetainCycleOwner owner; 17675 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 17676 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 17677 return; 17678 } else { 17679 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 17680 owner.Variable = getCurMethodDecl()->getSelfDecl(); 17681 owner.Loc = msg->getSuperLoc(); 17682 owner.Range = msg->getSuperLoc(); 17683 } 17684 17685 // Check whether the receiver is captured by any of the arguments. 17686 const ObjCMethodDecl *MD = msg->getMethodDecl(); 17687 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 17688 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 17689 // noescape blocks should not be retained by the method. 
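      // A block passed to a noescape parameter is not retained past the
      // call, so it cannot keep the receiver alive in a cycle; skip it.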
17690 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 17691 continue; 17692 return diagnoseRetainCycle(*this, capturer, owner); 17693 } 17694 } 17695 } 17696 17697 /// Check a property assign to see if it's likely to cause a retain cycle. 17698 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 17699 RetainCycleOwner owner; 17700 if (!findRetainCycleOwner(*this, receiver, owner)) 17701 return; 17702 17703 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 17704 diagnoseRetainCycle(*this, capturer, owner); 17705 } 17706 17707 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 17708 RetainCycleOwner Owner; 17709 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 17710 return; 17711 17712 // Because we don't have an expression for the variable, we have to set the 17713 // location explicitly here. 17714 Owner.Loc = Var->getLocation(); 17715 Owner.Range = Var->getSourceRange(); 17716 17717 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 17718 diagnoseRetainCycle(*this, Capturer, Owner); 17719 } 17720 17721 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 17722 Expr *RHS, bool isProperty) { 17723 // Check if RHS is an Objective-C object literal, which also can get 17724 // immediately zapped in a weak reference. Note that we explicitly 17725 // allow ObjCStringLiterals, since those are designed to never really die. 17726 RHS = RHS->IgnoreParenImpCasts(); 17727 17728 // This enum needs to match with the 'select' in 17729 // warn_objc_arc_literal_assign (off-by-1). 17730 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 17731 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 17732 return false; 17733 17734 S.Diag(Loc, diag::warn_arc_literal_assign) 17735 << (unsigned) Kind 17736 << (isProperty ? 0 : 1) 17737 << RHS->getSourceRange(); 17738 17739 return true; 17740 } 17741 17742 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 17743 Qualifiers::ObjCLifetime LT, 17744 Expr *RHS, bool isProperty) { 17745 // Strip off any implicit cast added to get to the one ARC-specific. 17746 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 17747 if (cast->getCastKind() == CK_ARCConsumeObject) { 17748 S.Diag(Loc, diag::warn_arc_retained_assign) 17749 << (LT == Qualifiers::OCL_ExplicitNone) 17750 << (isProperty ? 0 : 1) 17751 << RHS->getSourceRange(); 17752 return true; 17753 } 17754 RHS = cast->getSubExpr(); 17755 } 17756 17757 if (LT == Qualifiers::OCL_Weak && 17758 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 17759 return true; 17760 17761 return false; 17762 } 17763 17764 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 17765 QualType LHS, Expr *RHS) { 17766 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 17767 17768 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 17769 return false; 17770 17771 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 17772 return true; 17773 17774 return false; 17775 } 17776 17777 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 17778 Expr *LHS, Expr *RHS) { 17779 QualType LHSType; 17780 // PropertyRef on LHS type need be directly obtained from 17781 // its declaration as it has a PseudoType. 
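  // (An ObjCPropertyRefExpr has the placeholder pseudo-object type, so the
  // real property type has to come from the ObjCPropertyDecl.)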
17782 ObjCPropertyRefExpr *PRE 17783 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 17784 if (PRE && !PRE->isImplicitProperty()) { 17785 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 17786 if (PD) 17787 LHSType = PD->getType(); 17788 } 17789 17790 if (LHSType.isNull()) 17791 LHSType = LHS->getType(); 17792 17793 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 17794 17795 if (LT == Qualifiers::OCL_Weak) { 17796 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 17797 getCurFunction()->markSafeWeakUse(LHS); 17798 } 17799 17800 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 17801 return; 17802 17803 // FIXME. Check for other life times. 17804 if (LT != Qualifiers::OCL_None) 17805 return; 17806 17807 if (PRE) { 17808 if (PRE->isImplicitProperty()) 17809 return; 17810 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 17811 if (!PD) 17812 return; 17813 17814 unsigned Attributes = PD->getPropertyAttributes(); 17815 if (Attributes & ObjCPropertyAttribute::kind_assign) { 17816 // when 'assign' attribute was not explicitly specified 17817 // by user, ignore it and rely on property type itself 17818 // for lifetime info. 17819 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 17820 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 17821 LHSType->isObjCRetainableType()) 17822 return; 17823 17824 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 17825 if (cast->getCastKind() == CK_ARCConsumeObject) { 17826 Diag(Loc, diag::warn_arc_retained_property_assign) 17827 << RHS->getSourceRange(); 17828 return; 17829 } 17830 RHS = cast->getSubExpr(); 17831 } 17832 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 17833 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 17834 return; 17835 } 17836 } 17837 } 17838 17839 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 17840 17841 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 17842 SourceLocation StmtLoc, 17843 const NullStmt *Body) { 17844 // Do not warn if the body is a macro that expands to nothing, e.g: 17845 // 17846 // #define CALL(x) 17847 // if (condition) 17848 // CALL(0); 17849 if (Body->hasLeadingEmptyMacro()) 17850 return false; 17851 17852 // Get line numbers of statement and body. 17853 bool StmtLineInvalid; 17854 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 17855 &StmtLineInvalid); 17856 if (StmtLineInvalid) 17857 return false; 17858 17859 bool BodyLineInvalid; 17860 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 17861 &BodyLineInvalid); 17862 if (BodyLineInvalid) 17863 return false; 17864 17865 // Warn if null statement and body are on the same line. 17866 if (StmtLine != BodyLine) 17867 return false; 17868 17869 return true; 17870 } 17871 17872 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 17873 const Stmt *Body, 17874 unsigned DiagID) { 17875 // Since this is a syntactic check, don't emit diagnostic for template 17876 // instantiations, this just adds noise. 17877 if (CurrentInstantiationScope) 17878 return; 17879 17880 // The body should be a null statement. 17881 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 17882 if (!NBody) 17883 return; 17884 17885 // Do the usual checks. 
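  // Illustrative: "if (x);" with the ';' on the same line as the 'if' is
  // reported; a null statement produced by an empty macro expansion is not.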
  if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
    return;

  Diag(NBody->getSemiLoc(), DiagID);
  Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
}

void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
                                 const Stmt *PossibleBody) {
  assert(!CurrentInstantiationScope); // Ensured by caller

  SourceLocation StmtLoc;
  const Stmt *Body;
  unsigned DiagID;
  if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
    StmtLoc = FS->getRParenLoc();
    Body = FS->getBody();
    DiagID = diag::warn_empty_for_body;
  } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
    StmtLoc = WS->getRParenLoc();
    Body = WS->getBody();
    DiagID = diag::warn_empty_while_body;
  } else
    return; // Neither `for' nor `while'.

  // The body should be a null statement.
  const NullStmt *NBody = dyn_cast<NullStmt>(Body);
  if (!NBody)
    return;

  // Skip expensive checks if diagnostic is disabled.
  if (Diags.isIgnored(DiagID, NBody->getSemiLoc()))
    return;

  // Do the usual checks.
  if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
    return;

  // `for(...);' and `while(...);' are popular idioms, so in order to keep
  // noise level low, emit diagnostics only if for/while is followed by a
  // CompoundStmt, e.g.:
  //    for (int i = 0; i < n; i++);
  //    {
  //      a(i);
  //    }
  // or if for/while is followed by a statement with more indentation
  // than for/while itself:
  //    for (int i = 0; i < n; i++);
  //      a(i);
  bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
  if (!ProbableTypo) {
    bool BodyColInvalid;
    unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
        PossibleBody->getBeginLoc(), &BodyColInvalid);
    if (BodyColInvalid)
      return;

    bool StmtColInvalid;
    unsigned StmtCol =
        SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid);
    if (StmtColInvalid)
      return;

    if (BodyCol > StmtCol)
      ProbableTypo = true;
  }

  if (ProbableTypo) {
    Diag(NBody->getSemiLoc(), DiagID);
    Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
  }
}

//===--- CHECK: Warn on self move with std::move. -------------------------===//

/// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                            SourceLocation OpLoc) {
  if (Diags.isIgnored(diag::warn_self_move, OpLoc))
    return;

  if (inTemplateInstantiation())
    return;

  // Strip parens and casts away.
  LHSExpr = LHSExpr->IgnoreParenImpCasts();
  RHSExpr = RHSExpr->IgnoreParenImpCasts();

  // Check for a call expression.
  const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
  if (!CE || CE->getNumArgs() != 1)
    return;

  // Check for a call to std::move.
  if (!CE->isCallToStdMove())
    return;

  // Get the argument from std::move.
  RHSExpr = CE->getArg(0);

  const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
  const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);

  // Two DeclRefExpr's, check that the decls are the same.
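  // (Illustrative:) this is the plain-variable case, e.g. `x = std::move(x);`,
  // where both sides resolve to the same VarDecl.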
17990 if (LHSDeclRef && RHSDeclRef) { 17991 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 17992 return; 17993 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 17994 RHSDeclRef->getDecl()->getCanonicalDecl()) 17995 return; 17996 17997 auto D = Diag(OpLoc, diag::warn_self_move) 17998 << LHSExpr->getType() << LHSExpr->getSourceRange() 17999 << RHSExpr->getSourceRange(); 18000 if (const FieldDecl *F = 18001 getSelfAssignmentClassMemberCandidate(RHSDeclRef->getDecl())) 18002 D << 1 << F 18003 << FixItHint::CreateInsertion(LHSDeclRef->getBeginLoc(), "this->"); 18004 else 18005 D << 0; 18006 return; 18007 } 18008 18009 // Member variables require a different approach to check for self moves. 18010 // MemberExpr's are the same if every nested MemberExpr refers to the same 18011 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 18012 // the base Expr's are CXXThisExpr's. 18013 const Expr *LHSBase = LHSExpr; 18014 const Expr *RHSBase = RHSExpr; 18015 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 18016 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 18017 if (!LHSME || !RHSME) 18018 return; 18019 18020 while (LHSME && RHSME) { 18021 if (LHSME->getMemberDecl()->getCanonicalDecl() != 18022 RHSME->getMemberDecl()->getCanonicalDecl()) 18023 return; 18024 18025 LHSBase = LHSME->getBase(); 18026 RHSBase = RHSME->getBase(); 18027 LHSME = dyn_cast<MemberExpr>(LHSBase); 18028 RHSME = dyn_cast<MemberExpr>(RHSBase); 18029 } 18030 18031 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 18032 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 18033 if (LHSDeclRef && RHSDeclRef) { 18034 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 18035 return; 18036 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 18037 RHSDeclRef->getDecl()->getCanonicalDecl()) 18038 return; 18039 18040 Diag(OpLoc, diag::warn_self_move) 18041 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange() 18042 << RHSExpr->getSourceRange(); 18043 return; 18044 } 18045 18046 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 18047 Diag(OpLoc, diag::warn_self_move) 18048 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange() 18049 << RHSExpr->getSourceRange(); 18050 } 18051 18052 //===--- Layout compatibility ----------------------------------------------// 18053 18054 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 18055 18056 /// Check if two enumeration types are layout-compatible. 18057 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 18058 // C++11 [dcl.enum] p8: 18059 // Two enumeration types are layout-compatible if they have the same 18060 // underlying type. 18061 return ED1->isComplete() && ED2->isComplete() && 18062 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 18063 } 18064 18065 /// Check if two fields are layout-compatible. 18066 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 18067 FieldDecl *Field2) { 18068 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 18069 return false; 18070 18071 if (Field1->isBitField() != Field2->isBitField()) 18072 return false; 18073 18074 if (Field1->isBitField()) { 18075 // Make sure that the bit-fields are the same length. 18076 unsigned Bits1 = Field1->getBitWidthValue(C); 18077 unsigned Bits2 = Field2->getBitWidthValue(C); 18078 18079 if (Bits1 != Bits2) 18080 return false; 18081 } 18082 18083 return true; 18084 } 18085 18086 /// Check if two standard-layout structs are layout-compatible. 
18087 /// (C++11 [class.mem] p17) 18088 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 18089 RecordDecl *RD2) { 18090 // If both records are C++ classes, check that base classes match. 18091 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 18092 // If one of records is a CXXRecordDecl we are in C++ mode, 18093 // thus the other one is a CXXRecordDecl, too. 18094 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 18095 // Check number of base classes. 18096 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 18097 return false; 18098 18099 // Check the base classes. 18100 for (CXXRecordDecl::base_class_const_iterator 18101 Base1 = D1CXX->bases_begin(), 18102 BaseEnd1 = D1CXX->bases_end(), 18103 Base2 = D2CXX->bases_begin(); 18104 Base1 != BaseEnd1; 18105 ++Base1, ++Base2) { 18106 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 18107 return false; 18108 } 18109 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 18110 // If only RD2 is a C++ class, it should have zero base classes. 18111 if (D2CXX->getNumBases() > 0) 18112 return false; 18113 } 18114 18115 // Check the fields. 18116 RecordDecl::field_iterator Field2 = RD2->field_begin(), 18117 Field2End = RD2->field_end(), 18118 Field1 = RD1->field_begin(), 18119 Field1End = RD1->field_end(); 18120 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 18121 if (!isLayoutCompatible(C, *Field1, *Field2)) 18122 return false; 18123 } 18124 if (Field1 != Field1End || Field2 != Field2End) 18125 return false; 18126 18127 return true; 18128 } 18129 18130 /// Check if two standard-layout unions are layout-compatible. 18131 /// (C++11 [class.mem] p18) 18132 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 18133 RecordDecl *RD2) { 18134 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 18135 for (auto *Field2 : RD2->fields()) 18136 UnmatchedFields.insert(Field2); 18137 18138 for (auto *Field1 : RD1->fields()) { 18139 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 18140 I = UnmatchedFields.begin(), 18141 E = UnmatchedFields.end(); 18142 18143 for ( ; I != E; ++I) { 18144 if (isLayoutCompatible(C, Field1, *I)) { 18145 bool Result = UnmatchedFields.erase(*I); 18146 (void) Result; 18147 assert(Result); 18148 break; 18149 } 18150 } 18151 if (I == E) 18152 return false; 18153 } 18154 18155 return UnmatchedFields.empty(); 18156 } 18157 18158 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 18159 RecordDecl *RD2) { 18160 if (RD1->isUnion() != RD2->isUnion()) 18161 return false; 18162 18163 if (RD1->isUnion()) 18164 return isLayoutCompatibleUnion(C, RD1, RD2); 18165 else 18166 return isLayoutCompatibleStruct(C, RD1, RD2); 18167 } 18168 18169 /// Check if two types are layout-compatible in C++11 sense. 18170 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 18171 if (T1.isNull() || T2.isNull()) 18172 return false; 18173 18174 // C++11 [basic.types] p11: 18175 // If two types T1 and T2 are the same type, then T1 and T2 are 18176 // layout-compatible types. 
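  //
  // (Illustrative:) beyond the same-type case, standard-layout structs such as
  //   struct A { int x; unsigned y; };
  //   struct B { int a; unsigned b; };
  // are accepted by the record check below, while differing field types,
  // bit-field widths, or base classes would make them layout-incompatible.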
  if (C.hasSameType(T1, T2))
    return true;

  T1 = T1.getCanonicalType().getUnqualifiedType();
  T2 = T2.getCanonicalType().getUnqualifiedType();

  const Type::TypeClass TC1 = T1->getTypeClass();
  const Type::TypeClass TC2 = T2->getTypeClass();

  if (TC1 != TC2)
    return false;

  if (TC1 == Type::Enum) {
    return isLayoutCompatible(C,
                              cast<EnumType>(T1)->getDecl(),
                              cast<EnumType>(T2)->getDecl());
  } else if (TC1 == Type::Record) {
    if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
      return false;

    return isLayoutCompatible(C,
                              cast<RecordType>(T1)->getDecl(),
                              cast<RecordType>(T2)->getDecl());
  }

  return false;
}

//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//

/// Given a type tag expression, find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in the user's code.
///
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
                            const ValueDecl **VD, uint64_t *MagicValue,
                            bool isConstantEvaluated) {
  while (true) {
    if (!TypeExpr)
      return false;

    TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();

    switch (TypeExpr->getStmtClass()) {
    case Stmt::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
      if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
        TypeExpr = UO->getSubExpr();
        continue;
      }
      return false;
    }

    case Stmt::DeclRefExprClass: {
      const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
      *VD = DRE->getDecl();
      return true;
    }

    case Stmt::IntegerLiteralClass: {
      const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
      llvm::APInt MagicValueAPInt = IL->getValue();
      if (MagicValueAPInt.getActiveBits() <= 64) {
        *MagicValue = MagicValueAPInt.getZExtValue();
        return true;
      } else
        return false;
    }

    case Stmt::BinaryConditionalOperatorClass:
    case Stmt::ConditionalOperatorClass: {
      const AbstractConditionalOperator *ACO =
          cast<AbstractConditionalOperator>(TypeExpr);
      bool Result;
      if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
                                                     isConstantEvaluated)) {
        if (Result)
          TypeExpr = ACO->getTrueExpr();
        else
          TypeExpr = ACO->getFalseExpr();
        continue;
      }
      return false;
    }

    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
      if (BO->getOpcode() == BO_Comma) {
        TypeExpr = BO->getRHS();
        continue;
      }
      return false;
    }

    default:
      return false;
    }
  }
}

/// Retrieve the C type corresponding to type tag TypeExpr.
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
/// kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
    const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
        *MagicValues,
    bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
    bool isConstantEvaluated) {
  FoundWrongKind = false;

  // Variable declaration that has type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
    return false;

  if (VD) {
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}

void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                      uint64_t MagicValue, QualType Type,
                                      bool LayoutCompatible,
                                      bool MustBeNull) {
  if (!TypeTagForDatatypeMagicValues)
    TypeTagForDatatypeMagicValues.reset(
        new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);

  TypeTagMagicValue Magic(ArgumentKind, MagicValue);
  (*TypeTagForDatatypeMagicValues)[Magic] =
      TypeTagData(Type, LayoutCompatible, MustBeNull);
}

static bool IsSameCharType(QualType T1, QualType T2) {
  const BuiltinType *BT1 = T1->getAs<BuiltinType>();
  if (!BT1)
    return false;

  const BuiltinType *BT2 = T2->getAs<BuiltinType>();
  if (!BT2)
    return false;

  BuiltinType::Kind T1Kind = BT1->getKind();
  BuiltinType::Kind T2Kind = BT2->getKind();

  return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
         (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
         (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
         (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
}

void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
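  // (Illustrative, hypothetical MPI-style declaration:)
  //   int MPI_Send(void *buf, int count, MPI_Datatype type)
  //       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
  // Here argument 3 (the MPI_Datatype) carries the type tag and argument 1 is
  // the pointer it describes.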
18381 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 18382 if (TypeTagIdxAST >= ExprArgs.size()) { 18383 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 18384 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 18385 return; 18386 } 18387 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 18388 bool FoundWrongKind; 18389 TypeTagData TypeInfo; 18390 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 18391 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 18392 TypeInfo, isConstantEvaluated())) { 18393 if (FoundWrongKind) 18394 Diag(TypeTagExpr->getExprLoc(), 18395 diag::warn_type_tag_for_datatype_wrong_kind) 18396 << TypeTagExpr->getSourceRange(); 18397 return; 18398 } 18399 18400 // Retrieve the argument representing the 'arg_idx'. 18401 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 18402 if (ArgumentIdxAST >= ExprArgs.size()) { 18403 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 18404 << 1 << Attr->getArgumentIdx().getSourceIndex(); 18405 return; 18406 } 18407 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 18408 if (IsPointerAttr) { 18409 // Skip implicit cast of pointer to `void *' (as a function argument). 18410 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 18411 if (ICE->getType()->isVoidPointerType() && 18412 ICE->getCastKind() == CK_BitCast) 18413 ArgumentExpr = ICE->getSubExpr(); 18414 } 18415 QualType ArgumentType = ArgumentExpr->getType(); 18416 18417 // Passing a `void*' pointer shouldn't trigger a warning. 18418 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 18419 return; 18420 18421 if (TypeInfo.MustBeNull) { 18422 // Type tag with matching void type requires a null pointer. 18423 if (!ArgumentExpr->isNullPointerConstant(Context, 18424 Expr::NPC_ValueDependentIsNotNull)) { 18425 Diag(ArgumentExpr->getExprLoc(), 18426 diag::warn_type_safety_null_pointer_required) 18427 << ArgumentKind->getName() 18428 << ArgumentExpr->getSourceRange() 18429 << TypeTagExpr->getSourceRange(); 18430 } 18431 return; 18432 } 18433 18434 QualType RequiredType = TypeInfo.Type; 18435 if (IsPointerAttr) 18436 RequiredType = Context.getPointerType(RequiredType); 18437 18438 bool mismatch = false; 18439 if (!TypeInfo.LayoutCompatible) { 18440 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 18441 18442 // C++11 [basic.fundamental] p1: 18443 // Plain char, signed char, and unsigned char are three distinct types. 18444 // 18445 // But we treat plain `char' as equivalent to `signed char' or `unsigned 18446 // char' depending on the current char signedness mode. 
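    // (Illustrative:) on a target where plain char is signed, passing a
    // `char *` argument where the registered type is `signed char *` is
    // therefore not reported as a mismatch.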
18447 if (mismatch) 18448 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 18449 RequiredType->getPointeeType())) || 18450 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 18451 mismatch = false; 18452 } else 18453 if (IsPointerAttr) 18454 mismatch = !isLayoutCompatible(Context, 18455 ArgumentType->getPointeeType(), 18456 RequiredType->getPointeeType()); 18457 else 18458 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 18459 18460 if (mismatch) 18461 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 18462 << ArgumentType << ArgumentKind 18463 << TypeInfo.LayoutCompatible << RequiredType 18464 << ArgumentExpr->getSourceRange() 18465 << TypeTagExpr->getSourceRange(); 18466 } 18467 18468 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 18469 CharUnits Alignment) { 18470 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 18471 } 18472 18473 void Sema::DiagnoseMisalignedMembers() { 18474 for (MisalignedMember &m : MisalignedMembers) { 18475 const NamedDecl *ND = m.RD; 18476 if (ND->getName().empty()) { 18477 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 18478 ND = TD; 18479 } 18480 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 18481 << m.MD << ND << m.E->getSourceRange(); 18482 } 18483 MisalignedMembers.clear(); 18484 } 18485 18486 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 18487 E = E->IgnoreParens(); 18488 if (!T->isPointerType() && !T->isIntegerType() && !T->isDependentType()) 18489 return; 18490 if (isa<UnaryOperator>(E) && 18491 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 18492 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 18493 if (isa<MemberExpr>(Op)) { 18494 auto *MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 18495 if (MA != MisalignedMembers.end() && 18496 (T->isDependentType() || T->isIntegerType() || 18497 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 18498 Context.getTypeAlignInChars( 18499 T->getPointeeType()) <= MA->Alignment)))) 18500 MisalignedMembers.erase(MA); 18501 } 18502 } 18503 } 18504 18505 void Sema::RefersToMemberWithReducedAlignment( 18506 Expr *E, 18507 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 18508 Action) { 18509 const auto *ME = dyn_cast<MemberExpr>(E); 18510 if (!ME) 18511 return; 18512 18513 // No need to check expressions with an __unaligned-qualified type. 18514 if (E->getType().getQualifiers().hasUnaligned()) 18515 return; 18516 18517 // For a chain of MemberExpr like "a.b.c.d" this list 18518 // will keep FieldDecl's like [d, c, b]. 18519 SmallVector<FieldDecl *, 4> ReverseMemberChain; 18520 const MemberExpr *TopME = nullptr; 18521 bool AnyIsPacked = false; 18522 do { 18523 QualType BaseType = ME->getBase()->getType(); 18524 if (BaseType->isDependentType()) 18525 return; 18526 if (ME->isArrow()) 18527 BaseType = BaseType->getPointeeType(); 18528 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 18529 if (RD->isInvalidDecl()) 18530 return; 18531 18532 ValueDecl *MD = ME->getMemberDecl(); 18533 auto *FD = dyn_cast<FieldDecl>(MD); 18534 // We do not care about non-data members. 
18535 if (!FD || FD->isInvalidDecl()) 18536 return; 18537 18538 AnyIsPacked = 18539 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>()); 18540 ReverseMemberChain.push_back(FD); 18541 18542 TopME = ME; 18543 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens()); 18544 } while (ME); 18545 assert(TopME && "We did not compute a topmost MemberExpr!"); 18546 18547 // Not the scope of this diagnostic. 18548 if (!AnyIsPacked) 18549 return; 18550 18551 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts(); 18552 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase); 18553 // TODO: The innermost base of the member expression may be too complicated. 18554 // For now, just disregard these cases. This is left for future 18555 // improvement. 18556 if (!DRE && !isa<CXXThisExpr>(TopBase)) 18557 return; 18558 18559 // Alignment expected by the whole expression. 18560 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType()); 18561 18562 // No need to do anything else with this case. 18563 if (ExpectedAlignment.isOne()) 18564 return; 18565 18566 // Synthesize offset of the whole access. 18567 CharUnits Offset; 18568 for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain)) 18569 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD)); 18570 18571 // Compute the CompleteObjectAlignment as the alignment of the whole chain. 18572 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars( 18573 ReverseMemberChain.back()->getParent()->getTypeForDecl()); 18574 18575 // The base expression of the innermost MemberExpr may give 18576 // stronger guarantees than the class containing the member. 18577 if (DRE && !TopME->isArrow()) { 18578 const ValueDecl *VD = DRE->getDecl(); 18579 if (!VD->getType()->isReferenceType()) 18580 CompleteObjectAlignment = 18581 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD)); 18582 } 18583 18584 // Check if the synthesized offset fulfills the alignment. 18585 if (Offset % ExpectedAlignment != 0 || 18586 // It may fulfill the offset it but the effective alignment may still be 18587 // lower than the expected expression alignment. 18588 CompleteObjectAlignment < ExpectedAlignment) { 18589 // If this happens, we want to determine a sensible culprit of this. 18590 // Intuitively, watching the chain of member expressions from right to 18591 // left, we start with the required alignment (as required by the field 18592 // type) but some packed attribute in that chain has reduced the alignment. 18593 // It may happen that another packed structure increases it again. But if 18594 // we are here such increase has not been enough. So pointing the first 18595 // FieldDecl that either is packed or else its RecordDecl is, 18596 // seems reasonable. 
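    // (Illustrative:) for
    //   struct __attribute__((packed)) Inner { char c; int i; };
    //   struct Outer { char pad; Inner in; } o;
    // taking `&o.in.i` picks `i` as the culprit (its parent `Inner` is
    // packed) and reports an alignment of 1.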
18597 FieldDecl *FD = nullptr; 18598 CharUnits Alignment; 18599 for (FieldDecl *FDI : ReverseMemberChain) { 18600 if (FDI->hasAttr<PackedAttr>() || 18601 FDI->getParent()->hasAttr<PackedAttr>()) { 18602 FD = FDI; 18603 Alignment = std::min( 18604 Context.getTypeAlignInChars(FD->getType()), 18605 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 18606 break; 18607 } 18608 } 18609 assert(FD && "We did not find a packed FieldDecl!"); 18610 Action(E, FD->getParent(), FD, Alignment); 18611 } 18612 } 18613 18614 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 18615 using namespace std::placeholders; 18616 18617 RefersToMemberWithReducedAlignment( 18618 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 18619 _2, _3, _4)); 18620 } 18621 18622 bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) { 18623 if (checkArgCount(*this, TheCall, 1)) 18624 return true; 18625 18626 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 18627 if (A.isInvalid()) 18628 return true; 18629 18630 TheCall->setArg(0, A.get()); 18631 QualType TyA = A.get()->getType(); 18632 18633 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 18634 return true; 18635 18636 TheCall->setType(TyA); 18637 return false; 18638 } 18639 18640 bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) { 18641 if (checkArgCount(*this, TheCall, 2)) 18642 return true; 18643 18644 ExprResult A = TheCall->getArg(0); 18645 ExprResult B = TheCall->getArg(1); 18646 // Do standard promotions between the two arguments, returning their common 18647 // type. 18648 QualType Res = 18649 UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison); 18650 if (A.isInvalid() || B.isInvalid()) 18651 return true; 18652 18653 QualType TyA = A.get()->getType(); 18654 QualType TyB = B.get()->getType(); 18655 18656 if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType()) 18657 return Diag(A.get()->getBeginLoc(), 18658 diag::err_typecheck_call_different_arg_types) 18659 << TyA << TyB; 18660 18661 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 18662 return true; 18663 18664 TheCall->setArg(0, A.get()); 18665 TheCall->setArg(1, B.get()); 18666 TheCall->setType(Res); 18667 return false; 18668 } 18669 18670 bool Sema::SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall) { 18671 if (checkArgCount(*this, TheCall, 3)) 18672 return true; 18673 18674 Expr *Args[3]; 18675 for (int I = 0; I < 3; ++I) { 18676 ExprResult Converted = UsualUnaryConversions(TheCall->getArg(I)); 18677 if (Converted.isInvalid()) 18678 return true; 18679 Args[I] = Converted.get(); 18680 } 18681 18682 int ArgOrdinal = 1; 18683 for (Expr *Arg : Args) { 18684 if (checkFPMathBuiltinElementType(*this, Arg->getBeginLoc(), Arg->getType(), 18685 ArgOrdinal++)) 18686 return true; 18687 } 18688 18689 for (int I = 1; I < 3; ++I) { 18690 if (Args[0]->getType().getCanonicalType() != 18691 Args[I]->getType().getCanonicalType()) { 18692 return Diag(Args[0]->getBeginLoc(), 18693 diag::err_typecheck_call_different_arg_types) 18694 << Args[0]->getType() << Args[I]->getType(); 18695 } 18696 18697 TheCall->setArg(I, Args[I]); 18698 } 18699 18700 TheCall->setType(Args[0]->getType()); 18701 return false; 18702 } 18703 18704 bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) { 18705 if (checkArgCount(*this, TheCall, 1)) 18706 return true; 18707 18708 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 18709 if (A.isInvalid()) 18710 return true; 18711 18712 TheCall->setArg(0, 
A.get()); 18713 return false; 18714 } 18715 18716 bool Sema::SemaBuiltinNonDeterministicValue(CallExpr *TheCall) { 18717 if (checkArgCount(*this, TheCall, 1)) 18718 return true; 18719 18720 ExprResult Arg = TheCall->getArg(0); 18721 QualType TyArg = Arg.get()->getType(); 18722 18723 if (!TyArg->isBuiltinType() && !TyArg->isVectorType()) 18724 return Diag(TheCall->getArg(0)->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18725 << 1 << /*vector, integer or floating point ty*/ 0 << TyArg; 18726 18727 TheCall->setType(TyArg); 18728 return false; 18729 } 18730 18731 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 18732 ExprResult CallResult) { 18733 if (checkArgCount(*this, TheCall, 1)) 18734 return ExprError(); 18735 18736 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 18737 if (MatrixArg.isInvalid()) 18738 return MatrixArg; 18739 Expr *Matrix = MatrixArg.get(); 18740 18741 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 18742 if (!MType) { 18743 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18744 << 1 << /* matrix ty*/ 1 << Matrix->getType(); 18745 return ExprError(); 18746 } 18747 18748 // Create returned matrix type by swapping rows and columns of the argument 18749 // matrix type. 18750 QualType ResultType = Context.getConstantMatrixType( 18751 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 18752 18753 // Change the return type to the type of the returned matrix. 18754 TheCall->setType(ResultType); 18755 18756 // Update call argument to use the possibly converted matrix argument. 18757 TheCall->setArg(0, Matrix); 18758 return CallResult; 18759 } 18760 18761 // Get and verify the matrix dimensions. 18762 static std::optional<unsigned> 18763 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 18764 SourceLocation ErrorPos; 18765 std::optional<llvm::APSInt> Value = 18766 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 18767 if (!Value) { 18768 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 18769 << Name; 18770 return {}; 18771 } 18772 uint64_t Dim = Value->getZExtValue(); 18773 if (!ConstantMatrixType::isDimensionValid(Dim)) { 18774 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 18775 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 18776 return {}; 18777 } 18778 return Dim; 18779 } 18780 18781 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 18782 ExprResult CallResult) { 18783 if (!getLangOpts().MatrixTypes) { 18784 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 18785 return ExprError(); 18786 } 18787 18788 if (checkArgCount(*this, TheCall, 4)) 18789 return ExprError(); 18790 18791 unsigned PtrArgIdx = 0; 18792 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 18793 Expr *RowsExpr = TheCall->getArg(1); 18794 Expr *ColumnsExpr = TheCall->getArg(2); 18795 Expr *StrideExpr = TheCall->getArg(3); 18796 18797 bool ArgError = false; 18798 18799 // Check pointer argument. 
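  // (Illustrative use of the builtin being checked, assuming the matrix
  // extension is enabled:)
  //   typedef double m4x4_t __attribute__((matrix_type(4, 4)));
  //   m4x4_t M = __builtin_matrix_column_major_load(Ptr, 4, 4, /*Stride=*/4);
  // The pointer must reference a valid matrix element type, the dimensions
  // must be integer constant expressions, and the stride must be at least the
  // number of rows.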
18800 { 18801 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 18802 if (PtrConv.isInvalid()) 18803 return PtrConv; 18804 PtrExpr = PtrConv.get(); 18805 TheCall->setArg(0, PtrExpr); 18806 if (PtrExpr->isTypeDependent()) { 18807 TheCall->setType(Context.DependentTy); 18808 return TheCall; 18809 } 18810 } 18811 18812 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 18813 QualType ElementTy; 18814 if (!PtrTy) { 18815 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18816 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 18817 ArgError = true; 18818 } else { 18819 ElementTy = PtrTy->getPointeeType().getUnqualifiedType(); 18820 18821 if (!ConstantMatrixType::isValidElementType(ElementTy)) { 18822 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18823 << PtrArgIdx + 1 << /* pointer to element ty*/ 2 18824 << PtrExpr->getType(); 18825 ArgError = true; 18826 } 18827 } 18828 18829 // Apply default Lvalue conversions and convert the expression to size_t. 18830 auto ApplyArgumentConversions = [this](Expr *E) { 18831 ExprResult Conv = DefaultLvalueConversion(E); 18832 if (Conv.isInvalid()) 18833 return Conv; 18834 18835 return tryConvertExprToType(Conv.get(), Context.getSizeType()); 18836 }; 18837 18838 // Apply conversion to row and column expressions. 18839 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr); 18840 if (!RowsConv.isInvalid()) { 18841 RowsExpr = RowsConv.get(); 18842 TheCall->setArg(1, RowsExpr); 18843 } else 18844 RowsExpr = nullptr; 18845 18846 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr); 18847 if (!ColumnsConv.isInvalid()) { 18848 ColumnsExpr = ColumnsConv.get(); 18849 TheCall->setArg(2, ColumnsExpr); 18850 } else 18851 ColumnsExpr = nullptr; 18852 18853 // If any part of the result matrix type is still pending, just use 18854 // Context.DependentTy, until all parts are resolved. 18855 if ((RowsExpr && RowsExpr->isTypeDependent()) || 18856 (ColumnsExpr && ColumnsExpr->isTypeDependent())) { 18857 TheCall->setType(Context.DependentTy); 18858 return CallResult; 18859 } 18860 18861 // Check row and column dimensions. 18862 std::optional<unsigned> MaybeRows; 18863 if (RowsExpr) 18864 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this); 18865 18866 std::optional<unsigned> MaybeColumns; 18867 if (ColumnsExpr) 18868 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this); 18869 18870 // Check stride argument. 
18871 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 18872 if (StrideConv.isInvalid()) 18873 return ExprError(); 18874 StrideExpr = StrideConv.get(); 18875 TheCall->setArg(3, StrideExpr); 18876 18877 if (MaybeRows) { 18878 if (std::optional<llvm::APSInt> Value = 18879 StrideExpr->getIntegerConstantExpr(Context)) { 18880 uint64_t Stride = Value->getZExtValue(); 18881 if (Stride < *MaybeRows) { 18882 Diag(StrideExpr->getBeginLoc(), 18883 diag::err_builtin_matrix_stride_too_small); 18884 ArgError = true; 18885 } 18886 } 18887 } 18888 18889 if (ArgError || !MaybeRows || !MaybeColumns) 18890 return ExprError(); 18891 18892 TheCall->setType( 18893 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 18894 return CallResult; 18895 } 18896 18897 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 18898 ExprResult CallResult) { 18899 if (checkArgCount(*this, TheCall, 3)) 18900 return ExprError(); 18901 18902 unsigned PtrArgIdx = 1; 18903 Expr *MatrixExpr = TheCall->getArg(0); 18904 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 18905 Expr *StrideExpr = TheCall->getArg(2); 18906 18907 bool ArgError = false; 18908 18909 { 18910 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 18911 if (MatrixConv.isInvalid()) 18912 return MatrixConv; 18913 MatrixExpr = MatrixConv.get(); 18914 TheCall->setArg(0, MatrixExpr); 18915 } 18916 if (MatrixExpr->isTypeDependent()) { 18917 TheCall->setType(Context.DependentTy); 18918 return TheCall; 18919 } 18920 18921 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 18922 if (!MatrixTy) { 18923 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18924 << 1 << /*matrix ty */ 1 << MatrixExpr->getType(); 18925 ArgError = true; 18926 } 18927 18928 { 18929 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 18930 if (PtrConv.isInvalid()) 18931 return PtrConv; 18932 PtrExpr = PtrConv.get(); 18933 TheCall->setArg(1, PtrExpr); 18934 if (PtrExpr->isTypeDependent()) { 18935 TheCall->setType(Context.DependentTy); 18936 return TheCall; 18937 } 18938 } 18939 18940 // Check pointer argument. 18941 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 18942 if (!PtrTy) { 18943 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18944 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 18945 ArgError = true; 18946 } else { 18947 QualType ElementTy = PtrTy->getPointeeType(); 18948 if (ElementTy.isConstQualified()) { 18949 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 18950 ArgError = true; 18951 } 18952 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 18953 if (MatrixTy && 18954 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 18955 Diag(PtrExpr->getBeginLoc(), 18956 diag::err_builtin_matrix_pointer_arg_mismatch) 18957 << ElementTy << MatrixTy->getElementType(); 18958 ArgError = true; 18959 } 18960 } 18961 18962 // Apply default Lvalue conversions and convert the stride expression to 18963 // size_t. 18964 { 18965 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 18966 if (StrideConv.isInvalid()) 18967 return StrideConv; 18968 18969 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 18970 if (StrideConv.isInvalid()) 18971 return StrideConv; 18972 StrideExpr = StrideConv.get(); 18973 TheCall->setArg(2, StrideExpr); 18974 } 18975 18976 // Check stride argument. 
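  // (Illustrative:) for `__builtin_matrix_column_major_store(M, Ptr, Stride)`,
  // a constant Stride smaller than the number of rows of M is rejected below.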
  if (MatrixTy) {
    if (std::optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}

/// Checks that the argument at the given index is a WebAssembly table and,
/// if it is, sets ElTy to the element type.
static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
                                       QualType &ElTy) {
  Expr *ArgExpr = E->getArg(ArgIndex);
  const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
  if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
    return S.Diag(ArgExpr->getBeginLoc(),
                  diag::err_wasm_builtin_arg_must_be_table_type)
           << ArgIndex + 1 << ArgExpr->getSourceRange();
  }
  ElTy = ATy->getElementType();
  return false;
}

/// Checks that the argument at the given index is an integer.
static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
                                         unsigned ArgIndex) {
  Expr *ArgExpr = E->getArg(ArgIndex);
  if (!ArgExpr->getType()->isIntegerType()) {
    return S.Diag(ArgExpr->getBeginLoc(),
                  diag::err_wasm_builtin_arg_must_be_integer_type)
           << ArgIndex + 1 << ArgExpr->getSourceRange();
  }
  return false;
}

/// Check that the first argument is a WebAssembly table, and the second
/// is an index into the table.
bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
    return true;

  // If all is well, we set the type of TheCall to be the type of the
  // element of the table.
  // i.e. a table.get on an externref table has type externref,
  // or whatever the type of the table element is.
  TheCall->setType(ElTy);

  return false;
}

/// Check that the first argument is a WebAssembly table, the second is
/// an index into the table, and the third is the reference type to set
/// into the table.
bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 3))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
    return true;

  if (!Context.hasSameType(ElTy, TheCall->getArg(2)->getType()))
    return true;

  return false;
}

/// Check that the argument is a WebAssembly table.
bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  return false;
}

/// Check that the first argument is a WebAssembly table, the second is the
/// value to use for new elements (of a type matching the table type), the
/// third value is an integer.
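/// (Illustrative call shape, assuming the usual __builtin_wasm_table_grow
/// spelling:) `__builtin_wasm_table_grow(table, init_value, delta)`.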
19079 bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) { 19080 if (checkArgCount(*this, TheCall, 3)) 19081 return true; 19082 19083 QualType ElTy; 19084 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 19085 return true; 19086 19087 Expr *NewElemArg = TheCall->getArg(1); 19088 if (!Context.hasSameType(ElTy, NewElemArg->getType())) { 19089 return Diag(NewElemArg->getBeginLoc(), 19090 diag::err_wasm_builtin_arg_must_match_table_element_type) 19091 << 2 << 1 << NewElemArg->getSourceRange(); 19092 } 19093 19094 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 2)) 19095 return true; 19096 19097 return false; 19098 } 19099 19100 /// Check that the first argument is a WebAssembly table, the second is an 19101 /// integer, the third is the value to use to fill the table (of a type 19102 /// matching the table type), and the fourth is an integer. 19103 bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) { 19104 if (checkArgCount(*this, TheCall, 4)) 19105 return true; 19106 19107 QualType ElTy; 19108 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy)) 19109 return true; 19110 19111 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1)) 19112 return true; 19113 19114 Expr *NewElemArg = TheCall->getArg(2); 19115 if (!Context.hasSameType(ElTy, NewElemArg->getType())) { 19116 return Diag(NewElemArg->getBeginLoc(), 19117 diag::err_wasm_builtin_arg_must_match_table_element_type) 19118 << 3 << 1 << NewElemArg->getSourceRange(); 19119 } 19120 19121 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 3)) 19122 return true; 19123 19124 return false; 19125 } 19126 19127 /// Check that the first argument is a WebAssembly table, the second is also a 19128 /// WebAssembly table (of the same element type), and the third to fifth 19129 /// arguments are integers. 19130 bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) { 19131 if (checkArgCount(*this, TheCall, 5)) 19132 return true; 19133 19134 QualType XElTy; 19135 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, XElTy)) 19136 return true; 19137 19138 QualType YElTy; 19139 if (CheckWasmBuiltinArgIsTable(*this, TheCall, 1, YElTy)) 19140 return true; 19141 19142 Expr *TableYArg = TheCall->getArg(1); 19143 if (!Context.hasSameType(XElTy, YElTy)) { 19144 return Diag(TableYArg->getBeginLoc(), 19145 diag::err_wasm_builtin_arg_must_match_table_element_type) 19146 << 2 << 1 << TableYArg->getSourceRange(); 19147 } 19148 19149 for (int I = 2; I <= 4; I++) { 19150 if (CheckWasmBuiltinArgIsInteger(*this, TheCall, I)) 19151 return true; 19152 } 19153 19154 return false; 19155 } 19156 19157 /// \brief Enforce the bounds of a TCB 19158 /// CheckTCBEnforcement - Enforces that every function in a named TCB only 19159 /// directly calls other functions in the same TCB as marked by the enforce_tcb 19160 /// and enforce_tcb_leaf attributes. 19161 void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc, 19162 const NamedDecl *Callee) { 19163 // This warning does not make sense in code that has no runtime behavior. 19164 if (isUnevaluatedContext()) 19165 return; 19166 19167 const NamedDecl *Caller = getCurFunctionOrMethodDecl(); 19168 19169 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>()) 19170 return; 19171 19172 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 19173 // all TCBs the callee is a part of. 
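  // (Illustrative:) given
  //   void inside(void)  __attribute__((enforce_tcb("core")));
  //   void leaf(void)    __attribute__((enforce_tcb_leaf("core")));
  //   void outside(void);
  // a call from inside() to outside() is diagnosed below, while a call from
  // inside() to leaf() is not.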
19174 llvm::StringSet<> CalleeTCBs; 19175 for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>()) 19176 CalleeTCBs.insert(A->getTCBName()); 19177 for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>()) 19178 CalleeTCBs.insert(A->getTCBName()); 19179 19180 // Go through the TCBs the caller is a part of and emit warnings if Caller 19181 // is in a TCB that the Callee is not. 19182 for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) { 19183 StringRef CallerTCB = A->getTCBName(); 19184 if (CalleeTCBs.count(CallerTCB) == 0) { 19185 this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation) 19186 << Callee << CallerTCB; 19187 } 19188 } 19189 } 19190