//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
                                            Sema::FormatArgumentPassingKind B) {
  return (A << 8) | B;
}

/// Checks that a call expression's argument count is at least the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
                                 unsigned MinArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount >= MinArgCount)
    return false;

  return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
         << 0 /*function call*/ << MinArgCount << ArgCount
         << Call->getSourceRange();
}

/// Checks that a call expression's argument count is at most the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount <= MaxArgCount)
    return false;
  return S.Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
         << 0 /*function call*/ << MaxArgCount << ArgCount
         << Call->getSourceRange();
}

/// Checks that a call expression's argument count is in the desired range.
/// This is useful when doing custom type-checking on a variadic function.
/// Returns true on error.
static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount,
                               unsigned MaxArgCount) {
  return checkArgCountAtLeast(S, Call, MinArgCount) ||
         checkArgCountAtMost(S, Call, MaxArgCount);
}

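// For illustration: the per-builtin checks below lean on these helpers, e.g.
// checkArgCount(S, TheCall, 2) for a fixed two-argument builtin. Each helper
// emits the matching err_typecheck_call_too_few/too_many_args diagnostic and
// returns true so the caller can bail out early.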
/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount == DesiredArgCount)
    return false;

  if (checkArgCountAtLeast(S, Call, DesiredArgCount))
    return true;
  assert(ArgCount > DesiredArgCount && "should have diagnosed this");

  // Highlight all the excess arguments.
  SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
                    Call->getArg(ArgCount - 1)->getEndLoc());

  return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << DesiredArgCount << ArgCount
         << Call->getArg(1)->getSourceRange();
}

static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) {
  if (Value->isTypeDependent())
    return false;

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, Ty, false);
  ExprResult Result =
      S.PerformCopyInitialization(Entity, SourceLocation(), Value);
  if (Result.isInvalid())
    return true;
  Value = Result.get();
  return false;
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isOrdinary()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

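// For example, __builtin_addressof(obj) forms &obj even when the class of obj
// overloads operator&; the check above only requires a glvalue operand and
// gives the call the corresponding pointer type.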
/// Check that the argument to __builtin_function_start is a function.
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment)
/// and __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
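  // Note: for a source type of width N bits, the largest alignment accepted
  // below is 1 << (N - 1); e.g. with a 32-bit int argument the limit is 2^31,
  // and anything larger is diagnosed with err_alignment_too_big.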
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

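  // For example, __builtin_add_overflow(a, b, &res) is accepted when a and b
  // have integer type and res is a modifiable integer lvalue; a const or
  // non-integer pointee is rejected by the check above.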
  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}

namespace {
struct BuiltinDumpStructGenerator {
  Sema &S;
  CallExpr *TheCall;
  SourceLocation Loc = TheCall->getBeginLoc();
  SmallVector<Expr *, 32> Actions;
  DiagnosticErrorTrap ErrorTracker;
  PrintingPolicy Policy;

  BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
      : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
        Policy(S.Context.getPrintingPolicy()) {
    Policy.AnonymousTagLocations = false;
  }

  Expr *makeOpaqueValueExpr(Expr *Inner) {
    auto *OVE = new (S.Context)
        OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
                        Inner->getObjectKind(), Inner);
    Actions.push_back(OVE);
    return OVE;
  }

  Expr *getStringLiteral(llvm::StringRef Str) {
    Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str);
    // Wrap the literal in parentheses to attach a source location.
    return new (S.Context) ParenExpr(Loc, Loc, Lit);
  }

  bool callPrintFunction(llvm::StringRef Format,
                         llvm::ArrayRef<Expr *> Exprs = {}) {
    SmallVector<Expr *, 8> Args;
    assert(TheCall->getNumArgs() >= 2);
    Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
    Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
    Args.push_back(getStringLiteral(Format));
    Args.insert(Args.end(), Exprs.begin(), Exprs.end());

    // Register a note to explain why we're performing the call.
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
    Ctx.PointOfInstantiation = Loc;
    Ctx.CallArgs = Args.data();
    Ctx.NumCallArgs = Args.size();
    S.pushCodeSynthesisContext(Ctx);

    ExprResult RealCall =
        S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
                        TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());

    S.popCodeSynthesisContext();
    if (!RealCall.isInvalid())
      Actions.push_back(RealCall.get());
    // Bail out if we've hit any errors, even if we managed to build the
    // call. We don't want to produce more than one error.
    return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
  }

  Expr *getIndentString(unsigned Depth) {
    if (!Depth)
      return nullptr;

    llvm::SmallString<32> Indent;
    Indent.resize(Depth * Policy.Indentation, ' ');
    return getStringLiteral(Indent);
  }

  Expr *getTypeString(QualType T) {
    return getStringLiteral(T.getAsString(Policy));
  }

  bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
    llvm::raw_svector_ostream OS(Str);

    // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
    // than trying to print a single character.
    if (auto *BT = T->getAs<BuiltinType>()) {
      switch (BT->getKind()) {
      case BuiltinType::Bool:
        OS << "%d";
        return true;
      case BuiltinType::Char_U:
      case BuiltinType::UChar:
        OS << "%hhu";
        return true;
      case BuiltinType::Char_S:
      case BuiltinType::SChar:
        OS << "%hhd";
        return true;
      default:
        break;
      }
    }

    analyze_printf::PrintfSpecifier Specifier;
    if (Specifier.fixType(T, S.getLangOpts(), S.Context,
                          /*IsObjCLiteral=*/false)) {
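      // At this point fixType() has adjusted the conversion to match T, so a
      // 'long' member would come out roughly as "%ld"; the string case below
      // additionally gets surrounding quotes and a length limit.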
      // We were able to guess how to format this.
      if (Specifier.getConversionSpecifier().getKind() ==
          analyze_printf::PrintfConversionSpecifier::sArg) {
        // Wrap double-quotes around a '%s' specifier and limit its maximum
        // length. Ideally we'd also somehow escape special characters in the
        // contents but printf doesn't support that.
        // FIXME: '%s' formatting is not safe in general.
        OS << '"';
        Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
        Specifier.toString(OS);
        OS << '"';
        // FIXME: It would be nice to include a '...' if the string doesn't fit
        // in the length limit.
      } else {
        Specifier.toString(OS);
      }
      return true;
    }

    if (T->isPointerType()) {
      // Format all pointers with '%p'.
      OS << "%p";
      return true;
    }

    return false;
  }

  bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
    Expr *IndentLit = getIndentString(Depth);
    Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
    if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
                  : callPrintFunction("%s", {TypeLit}))
      return true;

    return dumpRecordValue(RD, E, IndentLit, Depth);
  }

  // Dump a record value. E should be a pointer or lvalue referring to an RD.
  bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
                       unsigned Depth) {
    // FIXME: Decide what to do if RD is a union. At least we should probably
    // turn off printing `const char*` members with `%s`, because that is very
    // likely to crash if that's not the active member. Whatever we decide, we
    // should document it.

    // Build an OpaqueValueExpr so we can refer to E more than once without
    // triggering re-evaluation.
    Expr *RecordArg = makeOpaqueValueExpr(E);
    bool RecordArgIsPtr = RecordArg->getType()->isPointerType();

    if (callPrintFunction(" {\n"))
      return true;

    // Dump each base class, regardless of whether they're aggregates.
    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &Base : CXXRD->bases()) {
        QualType BaseType =
            RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
                           : S.Context.getLValueReferenceType(Base.getType());
        ExprResult BasePtr = S.BuildCStyleCastExpr(
            Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
            RecordArg);
        if (BasePtr.isInvalid() ||
            dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
                              Depth + 1))
          return true;
      }
    }

    Expr *FieldIndentArg = getIndentString(Depth + 1);

    // Dump each field.
    for (auto *D : RD->decls()) {
      auto *IFD = dyn_cast<IndirectFieldDecl>(D);
      auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
      if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
        continue;

      llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
      llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
                                           getTypeString(FD->getType()),
                                           getStringLiteral(FD->getName())};

      if (FD->isBitField()) {
        Format += ": %zu ";
        QualType SizeT = S.Context.getSizeType();
        llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
                             FD->getBitWidthValue(S.Context));
        Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
      }

      Format += "=";

      ExprResult Field =
          IFD ? S.BuildAnonymousStructUnionMemberReference(
                    CXXScopeSpec(), Loc, IFD,
                    DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
              : S.BuildFieldReferenceExpr(
                    RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
                    DeclAccessPair::make(FD, AS_public),
                    DeclarationNameInfo(FD->getDeclName(), Loc));
      if (Field.isInvalid())
        return true;

      auto *InnerRD = FD->getType()->getAsRecordDecl();
      auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
      if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
        // Recursively print the values of members of aggregate record type.
        if (callPrintFunction(Format, Args) ||
            dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
          return true;
      } else {
        Format += " ";
        if (appendFormatSpecifier(FD->getType(), Format)) {
          // We know how to print this field.
          Args.push_back(Field.get());
        } else {
          // We don't know how to print this field. Print out its address
          // with a format specifier that a smart tool will be able to
          // recognize and treat specially.
          Format += "*%p";
          ExprResult FieldAddr =
              S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
          if (FieldAddr.isInvalid())
            return true;
          Args.push_back(FieldAddr.get());
        }
        Format += "\n";
        if (callPrintFunction(Format, Args))
          return true;
      }
    }

    return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
                        : callPrintFunction("}\n");
  }

  Expr *buildWrapper() {
    auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
                                             PseudoObjectExpr::NoResult);
    TheCall->setType(Wrapper->getType());
    TheCall->setValueKind(Wrapper->getValueKind());
    return Wrapper;
  }
};
} // namespace

static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
  if (checkArgCountAtLeast(S, TheCall, 2))
    return ExprError();

  ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
  if (PtrArgResult.isInvalid())
    return ExprError();
  TheCall->setArg(0, PtrArgResult.get());

  // First argument should be a pointer to a struct.
  QualType PtrArgType = PtrArgResult.get()->getType();
  if (!PtrArgType->isPointerType() ||
      !PtrArgType->getPointeeType()->isRecordType()) {
    S.Diag(PtrArgResult.get()->getBeginLoc(),
           diag::err_expected_struct_pointer_argument)
        << 1 << TheCall->getDirectCallee() << PtrArgType;
    return ExprError();
  }
  const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl();

  // Second argument is a callable, but we can't fully validate it until we try
  // calling it.
  QualType FnArgType = TheCall->getArg(1)->getType();
  if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
      !FnArgType->isBlockPointerType() &&
      !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
    auto *BT = FnArgType->getAs<BuiltinType>();
    switch (BT ? BT->getKind() : BuiltinType::Void) {
    case BuiltinType::Dependent:
    case BuiltinType::Overload:
    case BuiltinType::BoundMember:
    case BuiltinType::PseudoObject:
    case BuiltinType::UnknownAny:
    case BuiltinType::BuiltinFn:
      // This might be a callable.
      break;

    default:
      S.Diag(TheCall->getArg(1)->getBeginLoc(),
             diag::err_expected_callable_argument)
          << 2 << TheCall->getDirectCallee() << FnArgType;
      return ExprError();
    }
  }

  BuiltinDumpStructGenerator Generator(S, TheCall);

  // Wrap parentheses around the given pointer. This is not necessary for
  // correct code generation, but it means that when we pretty-print the call
  // arguments in our diagnostics we will produce '(&s)->n' instead of the
  // incorrect '&s->n'.
  Expr *PtrArg = PtrArgResult.get();
  PtrArg = new (S.Context)
      ParenExpr(PtrArg->getBeginLoc(),
                S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
  if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
    return ExprError();

  return Generator.buildWrapper();
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of
  // the argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size.
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    std::optional<llvm::APSInt> DestSizeAPS =
        ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

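    // Worked example for the case above: "%8.2f" contributes
    // max(8, 1 + (1 + 2)) = 8 bytes, and plain "%f" contributes
    // 1 + (1 + 6) = 8 bytes once the default precision of 6 is applied.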
    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ? DABAttr->argIndices_begin()[Index]
                            : Index - DABIndices + FD->getNumParams();
    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;
    return NewIndex;
  };

  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(NewIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return std::nullopt;
    llvm::APSInt Integer = Result.Val.getInt();
    Integer.setIsUnsigned(true);
    return Integer;
  };

  auto ComputeSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use
    // its (potentially) more strict checking mode. Otherwise, conservatively
    // assume type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return std::nullopt;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  auto ComputeStrLenArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return std::nullopt;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  std::optional<llvm::APSInt> SourceSize;
  std::optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
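    // e.g. "__builtin___memcpy_chk" is reported to the user as "memcpy", and
    // "__builtin_strcpy" as "strcpy".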
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else if (FunctionName.startswith("__builtin_")) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
    }
    return FunctionName;
  };

  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
    if (!Format)
      return;

    if (!Format->isOrdinary() && !Format->isUTF8())
      return;

    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    StringRef FormatStrRef = Format->getString();
    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    const ConstantArrayType *T =
        Context.getAsConstantArrayType(Format->getType());
    assert(T && "String literal not of constant array type!");
    size_t TypeSize = T->getSize().getZExtValue();

    // In case there's a null byte somewhere.
    size_t StrLen =
        std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));

    analyze_format_string::ParseScanfString(H, FormatBytes,
                                            FormatBytes + StrLen, getLangOpts(),
                                            Context.getTargetInfo());

    // Unlike the other cases, in this one we have already issued the
    // diagnostic here, so no need to continue (because unlike the other cases,
    // here the diagnostic refers to the argument number).
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ?
        1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isOrdinary() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  }

  if (!SourceSize || !DestinationSize ||
      llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0)
    return;

  StringRef FunctionName = GetFunctionName();

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through
  // template instantiation. Therefore checking once during the parse is
  // sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error.
      // If a block literal has been passed (BlockExpr) then we can point
      // straight to the offending argument, else we just point to the variable
      // reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  // OpenCL device can support extension but not the feature as extension
  // requires subgroup independent forward progress, but subgroup independent
  // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
      !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
                                        S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee()
        << "cl_khr_subgroups or __opencl_c_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size and
/// get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

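// For illustration: in the variadic form
//   enqueue_kernel(q, flags, ndrange, ^(local void *a, local void *b){ ... },
//                  size_a, size_b)
// every 'local void*' block parameter must be matched by one trailing size
// argument of integer type; checkOpenCLEnqueueVariadicArgs below enforces
// that correspondence.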
/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
1569 if (!isBlockPointer(Arg3)) { 1570 S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1571 << TheCall->getDirectCallee() << "block"; 1572 return true; 1573 } 1574 // we have a block type, check the prototype 1575 const BlockPointerType *BPT = 1576 cast<BlockPointerType>(Arg3->getType().getCanonicalType()); 1577 if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) { 1578 S.Diag(Arg3->getBeginLoc(), 1579 diag::err_opencl_enqueue_kernel_blocks_no_args); 1580 return true; 1581 } 1582 return false; 1583 } 1584 // we can have block + varargs. 1585 if (isBlockPointer(Arg3)) 1586 return (checkOpenCLBlockArgs(S, Arg3) || 1587 checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4)); 1588 // last two cases with either exactly 7 args or 7 args and varargs. 1589 if (NumArgs >= 7) { 1590 // check common block argument. 1591 Expr *Arg6 = TheCall->getArg(6); 1592 if (!isBlockPointer(Arg6)) { 1593 S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1594 << TheCall->getDirectCallee() << "block"; 1595 return true; 1596 } 1597 if (checkOpenCLBlockArgs(S, Arg6)) 1598 return true; 1599 1600 // Forth argument has to be any integer type. 1601 if (!Arg3->getType()->isIntegerType()) { 1602 S.Diag(TheCall->getArg(3)->getBeginLoc(), 1603 diag::err_opencl_builtin_expected_type) 1604 << TheCall->getDirectCallee() << "integer"; 1605 return true; 1606 } 1607 // check remaining common arguments. 1608 Expr *Arg4 = TheCall->getArg(4); 1609 Expr *Arg5 = TheCall->getArg(5); 1610 1611 // Fifth argument is always passed as a pointer to clk_event_t. 1612 if (!Arg4->isNullPointerConstant(S.Context, 1613 Expr::NPC_ValueDependentIsNotNull) && 1614 !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) { 1615 S.Diag(TheCall->getArg(4)->getBeginLoc(), 1616 diag::err_opencl_builtin_expected_type) 1617 << TheCall->getDirectCallee() 1618 << S.Context.getPointerType(S.Context.OCLClkEventTy); 1619 return true; 1620 } 1621 1622 // Sixth argument is always passed as a pointer to clk_event_t. 1623 if (!Arg5->isNullPointerConstant(S.Context, 1624 Expr::NPC_ValueDependentIsNotNull) && 1625 !(Arg5->getType()->isPointerType() && 1626 Arg5->getType()->getPointeeType()->isClkEventT())) { 1627 S.Diag(TheCall->getArg(5)->getBeginLoc(), 1628 diag::err_opencl_builtin_expected_type) 1629 << TheCall->getDirectCallee() 1630 << S.Context.getPointerType(S.Context.OCLClkEventTy); 1631 return true; 1632 } 1633 1634 if (NumArgs == 7) 1635 return false; 1636 1637 return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7); 1638 } 1639 1640 // None of the specific case has been detected, give generic error 1641 S.Diag(TheCall->getBeginLoc(), 1642 diag::err_opencl_enqueue_kernel_incorrect_args); 1643 return true; 1644 } 1645 1646 /// Returns OpenCL access qual. 1647 static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) { 1648 return D->getAttr<OpenCLAccessAttr>(); 1649 } 1650 1651 /// Returns true if pipe element type is different from the pointer. 1652 static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) { 1653 const Expr *Arg0 = Call->getArg(0); 1654 // First argument type should always be pipe. 1655 if (!Arg0->getType()->isPipeType()) { 1656 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg) 1657 << Call->getDirectCallee() << Arg0->getSourceRange(); 1658 return true; 1659 } 1660 OpenCLAccessAttr *AccessQual = 1661 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl()); 1662 // Validates the access qualifier is compatible with the call. 
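  // Illustrative mismatch rejected below (assumed usage): passing a pipe
  // declared as 'write_only pipe int p' to read_pipe(p, &val) is diagnosed,
  // since the read variants require a read_only (or unqualified) pipe.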
1663 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be 1664 // read_only and write_only, and assumed to be read_only if no qualifier is 1665 // specified. 1666 switch (Call->getDirectCallee()->getBuiltinID()) { 1667 case Builtin::BIread_pipe: 1668 case Builtin::BIreserve_read_pipe: 1669 case Builtin::BIcommit_read_pipe: 1670 case Builtin::BIwork_group_reserve_read_pipe: 1671 case Builtin::BIsub_group_reserve_read_pipe: 1672 case Builtin::BIwork_group_commit_read_pipe: 1673 case Builtin::BIsub_group_commit_read_pipe: 1674 if (!(!AccessQual || AccessQual->isReadOnly())) { 1675 S.Diag(Arg0->getBeginLoc(), 1676 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1677 << "read_only" << Arg0->getSourceRange(); 1678 return true; 1679 } 1680 break; 1681 case Builtin::BIwrite_pipe: 1682 case Builtin::BIreserve_write_pipe: 1683 case Builtin::BIcommit_write_pipe: 1684 case Builtin::BIwork_group_reserve_write_pipe: 1685 case Builtin::BIsub_group_reserve_write_pipe: 1686 case Builtin::BIwork_group_commit_write_pipe: 1687 case Builtin::BIsub_group_commit_write_pipe: 1688 if (!(AccessQual && AccessQual->isWriteOnly())) { 1689 S.Diag(Arg0->getBeginLoc(), 1690 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1691 << "write_only" << Arg0->getSourceRange(); 1692 return true; 1693 } 1694 break; 1695 default: 1696 break; 1697 } 1698 return false; 1699 } 1700 1701 /// Returns true if pipe element type is different from the pointer. 1702 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) { 1703 const Expr *Arg0 = Call->getArg(0); 1704 const Expr *ArgIdx = Call->getArg(Idx); 1705 const PipeType *PipeTy = cast<PipeType>(Arg0->getType()); 1706 const QualType EltTy = PipeTy->getElementType(); 1707 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>(); 1708 // The Idx argument should be a pointer and the type of the pointer and 1709 // the type of pipe element should also be the same. 1710 if (!ArgTy || 1711 !S.Context.hasSameType( 1712 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) { 1713 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1714 << Call->getDirectCallee() << S.Context.getPointerType(EltTy) 1715 << ArgIdx->getType() << ArgIdx->getSourceRange(); 1716 return true; 1717 } 1718 return false; 1719 } 1720 1721 // Performs semantic analysis for the read/write_pipe call. 1722 // \param S Reference to the semantic analyzer. 1723 // \param Call A pointer to the builtin call. 1724 // \return True if a semantic error has been found, false otherwise. 1725 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) { 1726 // OpenCL v2.0 s6.13.16.2 - The built-in read/write 1727 // functions have two forms. 1728 switch (Call->getNumArgs()) { 1729 case 2: 1730 if (checkOpenCLPipeArg(S, Call)) 1731 return true; 1732 // The call with 2 arguments should be 1733 // read/write_pipe(pipe T, T*). 1734 // Check packet type T. 1735 if (checkOpenCLPipePacketType(S, Call, 1)) 1736 return true; 1737 break; 1738 1739 case 4: { 1740 if (checkOpenCLPipeArg(S, Call)) 1741 return true; 1742 // The call with 4 arguments should be 1743 // read/write_pipe(pipe T, reserve_id_t, uint, T*). 1744 // Check reserve_id_t. 1745 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1746 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1747 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1748 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1749 return true; 1750 } 1751 1752 // Check the index. 
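    // Illustrative 4-argument form being checked (assumed usage):
    //   read_pipe(p, rid, idx, &val);
    // where 'rid' is a reserve_id_t and 'idx' must be of integer type.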
1753 const Expr *Arg2 = Call->getArg(2); 1754 if (!Arg2->getType()->isIntegerType() && 1755 !Arg2->getType()->isUnsignedIntegerType()) { 1756 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1757 << Call->getDirectCallee() << S.Context.UnsignedIntTy 1758 << Arg2->getType() << Arg2->getSourceRange(); 1759 return true; 1760 } 1761 1762 // Check packet type T. 1763 if (checkOpenCLPipePacketType(S, Call, 3)) 1764 return true; 1765 } break; 1766 default: 1767 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num) 1768 << Call->getDirectCallee() << Call->getSourceRange(); 1769 return true; 1770 } 1771 1772 return false; 1773 } 1774 1775 // Performs a semantic analysis on the {work_group_/sub_group_ 1776 // /_}reserve_{read/write}_pipe 1777 // \param S Reference to the semantic analyzer. 1778 // \param Call The call to the builtin function to be analyzed. 1779 // \return True if a semantic error was found, false otherwise. 1780 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) { 1781 if (checkArgCount(S, Call, 2)) 1782 return true; 1783 1784 if (checkOpenCLPipeArg(S, Call)) 1785 return true; 1786 1787 // Check the reserve size. 1788 if (!Call->getArg(1)->getType()->isIntegerType() && 1789 !Call->getArg(1)->getType()->isUnsignedIntegerType()) { 1790 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1791 << Call->getDirectCallee() << S.Context.UnsignedIntTy 1792 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1793 return true; 1794 } 1795 1796 // Since return type of reserve_read/write_pipe built-in function is 1797 // reserve_id_t, which is not defined in the builtin def file , we used int 1798 // as return type and need to override the return type of these functions. 1799 Call->setType(S.Context.OCLReserveIDTy); 1800 1801 return false; 1802 } 1803 1804 // Performs a semantic analysis on {work_group_/sub_group_ 1805 // /_}commit_{read/write}_pipe 1806 // \param S Reference to the semantic analyzer. 1807 // \param Call The call to the builtin function to be analyzed. 1808 // \return True if a semantic error was found, false otherwise. 1809 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) { 1810 if (checkArgCount(S, Call, 2)) 1811 return true; 1812 1813 if (checkOpenCLPipeArg(S, Call)) 1814 return true; 1815 1816 // Check reserve_id_t. 1817 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1818 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1819 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1820 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1821 return true; 1822 } 1823 1824 return false; 1825 } 1826 1827 // Performs a semantic analysis on the call to built-in Pipe 1828 // Query Functions. 1829 // \param S Reference to the semantic analyzer. 1830 // \param Call The call to the builtin function to be analyzed. 1831 // \return True if a semantic error was found, false otherwise. 1832 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) { 1833 if (checkArgCount(S, Call, 1)) 1834 return true; 1835 1836 if (!Call->getArg(0)->getType()->isPipeType()) { 1837 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg) 1838 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange(); 1839 return true; 1840 } 1841 1842 return false; 1843 } 1844 1845 // OpenCL v2.0 s6.13.9 - Address space qualifier functions. 1846 // Performs semantic analysis for the to_global/local/private call. 1847 // \param S Reference to the semantic analyzer. 
1848 // \param BuiltinID ID of the builtin function. 1849 // \param Call A pointer to the builtin call. 1850 // \return True if a semantic error has been found, false otherwise. 1851 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, 1852 CallExpr *Call) { 1853 if (checkArgCount(S, Call, 1)) 1854 return true; 1855 1856 auto RT = Call->getArg(0)->getType(); 1857 if (!RT->isPointerType() || RT->getPointeeType() 1858 .getAddressSpace() == LangAS::opencl_constant) { 1859 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg) 1860 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); 1861 return true; 1862 } 1863 1864 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) { 1865 S.Diag(Call->getArg(0)->getBeginLoc(), 1866 diag::warn_opencl_generic_address_space_arg) 1867 << Call->getDirectCallee()->getNameInfo().getAsString() 1868 << Call->getArg(0)->getSourceRange(); 1869 } 1870 1871 RT = RT->getPointeeType(); 1872 auto Qual = RT.getQualifiers(); 1873 switch (BuiltinID) { 1874 case Builtin::BIto_global: 1875 Qual.setAddressSpace(LangAS::opencl_global); 1876 break; 1877 case Builtin::BIto_local: 1878 Qual.setAddressSpace(LangAS::opencl_local); 1879 break; 1880 case Builtin::BIto_private: 1881 Qual.setAddressSpace(LangAS::opencl_private); 1882 break; 1883 default: 1884 llvm_unreachable("Invalid builtin function"); 1885 } 1886 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( 1887 RT.getUnqualifiedType(), Qual))); 1888 1889 return false; 1890 } 1891 1892 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) { 1893 if (checkArgCount(S, TheCall, 1)) 1894 return ExprError(); 1895 1896 // Compute __builtin_launder's parameter type from the argument. 1897 // The parameter type is: 1898 // * The type of the argument if it's not an array or function type, 1899 // Otherwise, 1900 // * The decayed argument type. 1901 QualType ParamTy = [&]() { 1902 QualType ArgTy = TheCall->getArg(0)->getType(); 1903 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe()) 1904 return S.Context.getPointerType(Ty->getElementType()); 1905 if (ArgTy->isFunctionType()) { 1906 return S.Context.getPointerType(ArgTy); 1907 } 1908 return ArgTy; 1909 }(); 1910 1911 TheCall->setType(ParamTy); 1912 1913 auto DiagSelect = [&]() -> std::optional<unsigned> { 1914 if (!ParamTy->isPointerType()) 1915 return 0; 1916 if (ParamTy->isFunctionPointerType()) 1917 return 1; 1918 if (ParamTy->isVoidPointerType()) 1919 return 2; 1920 return std::optional<unsigned>{}; 1921 }(); 1922 if (DiagSelect) { 1923 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg) 1924 << *DiagSelect << TheCall->getSourceRange(); 1925 return ExprError(); 1926 } 1927 1928 // We either have an incomplete class type, or we have a class template 1929 // whose instantiation has not been forced. 
Example: 1930 // 1931 // template <class T> struct Foo { T value; }; 1932 // Foo<int> *p = nullptr; 1933 // auto *d = __builtin_launder(p); 1934 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(), 1935 diag::err_incomplete_type)) 1936 return ExprError(); 1937 1938 assert(ParamTy->getPointeeType()->isObjectType() && 1939 "Unhandled non-object pointer case"); 1940 1941 InitializedEntity Entity = 1942 InitializedEntity::InitializeParameter(S.Context, ParamTy, false); 1943 ExprResult Arg = 1944 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0)); 1945 if (Arg.isInvalid()) 1946 return ExprError(); 1947 TheCall->setArg(0, Arg.get()); 1948 1949 return TheCall; 1950 } 1951 1952 // Emit an error and return true if the current object format type is in the 1953 // list of unsupported types. 1954 static bool CheckBuiltinTargetNotInUnsupported( 1955 Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1956 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) { 1957 llvm::Triple::ObjectFormatType CurObjFormat = 1958 S.getASTContext().getTargetInfo().getTriple().getObjectFormat(); 1959 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) { 1960 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1961 << TheCall->getSourceRange(); 1962 return true; 1963 } 1964 return false; 1965 } 1966 1967 // Emit an error and return true if the current architecture is not in the list 1968 // of supported architectures. 1969 static bool 1970 CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1971 ArrayRef<llvm::Triple::ArchType> SupportedArchs) { 1972 llvm::Triple::ArchType CurArch = 1973 S.getASTContext().getTargetInfo().getTriple().getArch(); 1974 if (llvm::is_contained(SupportedArchs, CurArch)) 1975 return false; 1976 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1977 << TheCall->getSourceRange(); 1978 return true; 1979 } 1980 1981 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, 1982 SourceLocation CallSiteLoc); 1983 1984 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 1985 CallExpr *TheCall) { 1986 switch (TI.getTriple().getArch()) { 1987 default: 1988 // Some builtins don't require additional checking, so just consider these 1989 // acceptable. 
1990 return false; 1991 case llvm::Triple::arm: 1992 case llvm::Triple::armeb: 1993 case llvm::Triple::thumb: 1994 case llvm::Triple::thumbeb: 1995 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 1996 case llvm::Triple::aarch64: 1997 case llvm::Triple::aarch64_32: 1998 case llvm::Triple::aarch64_be: 1999 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 2000 case llvm::Triple::bpfeb: 2001 case llvm::Triple::bpfel: 2002 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 2003 case llvm::Triple::hexagon: 2004 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 2005 case llvm::Triple::mips: 2006 case llvm::Triple::mipsel: 2007 case llvm::Triple::mips64: 2008 case llvm::Triple::mips64el: 2009 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 2010 case llvm::Triple::systemz: 2011 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 2012 case llvm::Triple::x86: 2013 case llvm::Triple::x86_64: 2014 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 2015 case llvm::Triple::ppc: 2016 case llvm::Triple::ppcle: 2017 case llvm::Triple::ppc64: 2018 case llvm::Triple::ppc64le: 2019 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 2020 case llvm::Triple::amdgcn: 2021 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 2022 case llvm::Triple::riscv32: 2023 case llvm::Triple::riscv64: 2024 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall); 2025 case llvm::Triple::loongarch32: 2026 case llvm::Triple::loongarch64: 2027 return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall); 2028 } 2029 } 2030 2031 // Check if \p Ty is a valid type for the elementwise math builtins. If it is 2032 // not a valid type, emit an error message and return true. Otherwise return 2033 // false. 2034 static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc, 2035 QualType Ty) { 2036 if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) { 2037 return S.Diag(Loc, diag::err_builtin_invalid_arg_type) 2038 << 1 << /* vector, integer or float ty*/ 0 << Ty; 2039 } 2040 2041 return false; 2042 } 2043 2044 static bool checkFPMathBuiltinElementType(Sema &S, SourceLocation Loc, 2045 QualType ArgTy, int ArgIndex) { 2046 QualType EltTy = ArgTy; 2047 if (auto *VecTy = EltTy->getAs<VectorType>()) 2048 EltTy = VecTy->getElementType(); 2049 2050 if (!EltTy->isRealFloatingType()) { 2051 return S.Diag(Loc, diag::err_builtin_invalid_arg_type) 2052 << ArgIndex << /* vector or float ty*/ 5 << ArgTy; 2053 } 2054 2055 return false; 2056 } 2057 2058 ExprResult 2059 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, 2060 CallExpr *TheCall) { 2061 ExprResult TheCallResult(TheCall); 2062 2063 // Find out if any arguments are required to be integer constant expressions. 2064 unsigned ICEArguments = 0; 2065 ASTContext::GetBuiltinTypeError Error; 2066 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); 2067 if (Error != ASTContext::GE_None) 2068 ICEArguments = 0; // Don't diagnose previously diagnosed errors. 2069 2070 // If any arguments are required to be ICE's, check and diagnose. 2071 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { 2072 // Skip arguments not required to be ICE's. 2073 if ((ICEArguments & (1 << ArgNo)) == 0) continue; 2074 2075 llvm::APSInt Result; 2076 // If we don't have enough arguments, continue so we can issue better 2077 // diagnostic in checkArgCount(...) 
2078 if (ArgNo < TheCall->getNumArgs() && 2079 SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 2080 return true; 2081 ICEArguments &= ~(1 << ArgNo); 2082 } 2083 2084 switch (BuiltinID) { 2085 case Builtin::BI__builtin___CFStringMakeConstantString: 2086 // CFStringMakeConstantString is currently not implemented for GOFF (i.e., 2087 // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported 2088 if (CheckBuiltinTargetNotInUnsupported( 2089 *this, BuiltinID, TheCall, 2090 {llvm::Triple::GOFF, llvm::Triple::XCOFF})) 2091 return ExprError(); 2092 assert(TheCall->getNumArgs() == 1 && 2093 "Wrong # arguments to builtin CFStringMakeConstantString"); 2094 if (CheckObjCString(TheCall->getArg(0))) 2095 return ExprError(); 2096 break; 2097 case Builtin::BI__builtin_ms_va_start: 2098 case Builtin::BI__builtin_stdarg_start: 2099 case Builtin::BI__builtin_va_start: 2100 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2101 return ExprError(); 2102 break; 2103 case Builtin::BI__va_start: { 2104 switch (Context.getTargetInfo().getTriple().getArch()) { 2105 case llvm::Triple::aarch64: 2106 case llvm::Triple::arm: 2107 case llvm::Triple::thumb: 2108 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 2109 return ExprError(); 2110 break; 2111 default: 2112 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2113 return ExprError(); 2114 break; 2115 } 2116 break; 2117 } 2118 2119 // The acquire, release, and no fence variants are ARM and AArch64 only. 2120 case Builtin::BI_interlockedbittestandset_acq: 2121 case Builtin::BI_interlockedbittestandset_rel: 2122 case Builtin::BI_interlockedbittestandset_nf: 2123 case Builtin::BI_interlockedbittestandreset_acq: 2124 case Builtin::BI_interlockedbittestandreset_rel: 2125 case Builtin::BI_interlockedbittestandreset_nf: 2126 if (CheckBuiltinTargetInSupported( 2127 *this, BuiltinID, TheCall, 2128 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 2129 return ExprError(); 2130 break; 2131 2132 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 
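  // For illustration (assumed behavior): a call such as
  //   _bittest64(&Bits, 17)
  // is accepted when targeting x86_64, ARM or AArch64 and rejected with the
  // generic "unsupported" diagnostic elsewhere (e.g. 32-bit x86).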
2133 case Builtin::BI_bittest64: 2134 case Builtin::BI_bittestandcomplement64: 2135 case Builtin::BI_bittestandreset64: 2136 case Builtin::BI_bittestandset64: 2137 case Builtin::BI_interlockedbittestandreset64: 2138 case Builtin::BI_interlockedbittestandset64: 2139 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 2140 {llvm::Triple::x86_64, llvm::Triple::arm, 2141 llvm::Triple::thumb, 2142 llvm::Triple::aarch64})) 2143 return ExprError(); 2144 break; 2145 2146 case Builtin::BI__builtin_isgreater: 2147 case Builtin::BI__builtin_isgreaterequal: 2148 case Builtin::BI__builtin_isless: 2149 case Builtin::BI__builtin_islessequal: 2150 case Builtin::BI__builtin_islessgreater: 2151 case Builtin::BI__builtin_isunordered: 2152 if (SemaBuiltinUnorderedCompare(TheCall)) 2153 return ExprError(); 2154 break; 2155 case Builtin::BI__builtin_fpclassify: 2156 if (SemaBuiltinFPClassification(TheCall, 6)) 2157 return ExprError(); 2158 break; 2159 case Builtin::BI__builtin_isfinite: 2160 case Builtin::BI__builtin_isinf: 2161 case Builtin::BI__builtin_isinf_sign: 2162 case Builtin::BI__builtin_isnan: 2163 case Builtin::BI__builtin_isnormal: 2164 case Builtin::BI__builtin_signbit: 2165 case Builtin::BI__builtin_signbitf: 2166 case Builtin::BI__builtin_signbitl: 2167 if (SemaBuiltinFPClassification(TheCall, 1)) 2168 return ExprError(); 2169 break; 2170 case Builtin::BI__builtin_shufflevector: 2171 return SemaBuiltinShuffleVector(TheCall); 2172 // TheCall will be freed by the smart pointer here, but that's fine, since 2173 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 2174 case Builtin::BI__builtin_prefetch: 2175 if (SemaBuiltinPrefetch(TheCall)) 2176 return ExprError(); 2177 break; 2178 case Builtin::BI__builtin_alloca_with_align: 2179 case Builtin::BI__builtin_alloca_with_align_uninitialized: 2180 if (SemaBuiltinAllocaWithAlign(TheCall)) 2181 return ExprError(); 2182 [[fallthrough]]; 2183 case Builtin::BI__builtin_alloca: 2184 case Builtin::BI__builtin_alloca_uninitialized: 2185 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 2186 << TheCall->getDirectCallee(); 2187 break; 2188 case Builtin::BI__arithmetic_fence: 2189 if (SemaBuiltinArithmeticFence(TheCall)) 2190 return ExprError(); 2191 break; 2192 case Builtin::BI__assume: 2193 case Builtin::BI__builtin_assume: 2194 if (SemaBuiltinAssume(TheCall)) 2195 return ExprError(); 2196 break; 2197 case Builtin::BI__builtin_assume_aligned: 2198 if (SemaBuiltinAssumeAligned(TheCall)) 2199 return ExprError(); 2200 break; 2201 case Builtin::BI__builtin_dynamic_object_size: 2202 case Builtin::BI__builtin_object_size: 2203 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 2204 return ExprError(); 2205 break; 2206 case Builtin::BI__builtin_longjmp: 2207 if (SemaBuiltinLongjmp(TheCall)) 2208 return ExprError(); 2209 break; 2210 case Builtin::BI__builtin_setjmp: 2211 if (SemaBuiltinSetjmp(TheCall)) 2212 return ExprError(); 2213 break; 2214 case Builtin::BI__builtin_classify_type: 2215 if (checkArgCount(*this, TheCall, 1)) return true; 2216 TheCall->setType(Context.IntTy); 2217 break; 2218 case Builtin::BI__builtin_complex: 2219 if (SemaBuiltinComplex(TheCall)) 2220 return ExprError(); 2221 break; 2222 case Builtin::BI__builtin_constant_p: { 2223 if (checkArgCount(*this, TheCall, 1)) return true; 2224 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 2225 if (Arg.isInvalid()) return true; 2226 TheCall->setArg(0, Arg.get()); 2227 TheCall->setType(Context.IntTy); 2228 break; 2229 } 2230 case Builtin::BI__builtin_launder: 
2231 return SemaBuiltinLaunder(*this, TheCall); 2232 case Builtin::BI__sync_fetch_and_add: 2233 case Builtin::BI__sync_fetch_and_add_1: 2234 case Builtin::BI__sync_fetch_and_add_2: 2235 case Builtin::BI__sync_fetch_and_add_4: 2236 case Builtin::BI__sync_fetch_and_add_8: 2237 case Builtin::BI__sync_fetch_and_add_16: 2238 case Builtin::BI__sync_fetch_and_sub: 2239 case Builtin::BI__sync_fetch_and_sub_1: 2240 case Builtin::BI__sync_fetch_and_sub_2: 2241 case Builtin::BI__sync_fetch_and_sub_4: 2242 case Builtin::BI__sync_fetch_and_sub_8: 2243 case Builtin::BI__sync_fetch_and_sub_16: 2244 case Builtin::BI__sync_fetch_and_or: 2245 case Builtin::BI__sync_fetch_and_or_1: 2246 case Builtin::BI__sync_fetch_and_or_2: 2247 case Builtin::BI__sync_fetch_and_or_4: 2248 case Builtin::BI__sync_fetch_and_or_8: 2249 case Builtin::BI__sync_fetch_and_or_16: 2250 case Builtin::BI__sync_fetch_and_and: 2251 case Builtin::BI__sync_fetch_and_and_1: 2252 case Builtin::BI__sync_fetch_and_and_2: 2253 case Builtin::BI__sync_fetch_and_and_4: 2254 case Builtin::BI__sync_fetch_and_and_8: 2255 case Builtin::BI__sync_fetch_and_and_16: 2256 case Builtin::BI__sync_fetch_and_xor: 2257 case Builtin::BI__sync_fetch_and_xor_1: 2258 case Builtin::BI__sync_fetch_and_xor_2: 2259 case Builtin::BI__sync_fetch_and_xor_4: 2260 case Builtin::BI__sync_fetch_and_xor_8: 2261 case Builtin::BI__sync_fetch_and_xor_16: 2262 case Builtin::BI__sync_fetch_and_nand: 2263 case Builtin::BI__sync_fetch_and_nand_1: 2264 case Builtin::BI__sync_fetch_and_nand_2: 2265 case Builtin::BI__sync_fetch_and_nand_4: 2266 case Builtin::BI__sync_fetch_and_nand_8: 2267 case Builtin::BI__sync_fetch_and_nand_16: 2268 case Builtin::BI__sync_add_and_fetch: 2269 case Builtin::BI__sync_add_and_fetch_1: 2270 case Builtin::BI__sync_add_and_fetch_2: 2271 case Builtin::BI__sync_add_and_fetch_4: 2272 case Builtin::BI__sync_add_and_fetch_8: 2273 case Builtin::BI__sync_add_and_fetch_16: 2274 case Builtin::BI__sync_sub_and_fetch: 2275 case Builtin::BI__sync_sub_and_fetch_1: 2276 case Builtin::BI__sync_sub_and_fetch_2: 2277 case Builtin::BI__sync_sub_and_fetch_4: 2278 case Builtin::BI__sync_sub_and_fetch_8: 2279 case Builtin::BI__sync_sub_and_fetch_16: 2280 case Builtin::BI__sync_and_and_fetch: 2281 case Builtin::BI__sync_and_and_fetch_1: 2282 case Builtin::BI__sync_and_and_fetch_2: 2283 case Builtin::BI__sync_and_and_fetch_4: 2284 case Builtin::BI__sync_and_and_fetch_8: 2285 case Builtin::BI__sync_and_and_fetch_16: 2286 case Builtin::BI__sync_or_and_fetch: 2287 case Builtin::BI__sync_or_and_fetch_1: 2288 case Builtin::BI__sync_or_and_fetch_2: 2289 case Builtin::BI__sync_or_and_fetch_4: 2290 case Builtin::BI__sync_or_and_fetch_8: 2291 case Builtin::BI__sync_or_and_fetch_16: 2292 case Builtin::BI__sync_xor_and_fetch: 2293 case Builtin::BI__sync_xor_and_fetch_1: 2294 case Builtin::BI__sync_xor_and_fetch_2: 2295 case Builtin::BI__sync_xor_and_fetch_4: 2296 case Builtin::BI__sync_xor_and_fetch_8: 2297 case Builtin::BI__sync_xor_and_fetch_16: 2298 case Builtin::BI__sync_nand_and_fetch: 2299 case Builtin::BI__sync_nand_and_fetch_1: 2300 case Builtin::BI__sync_nand_and_fetch_2: 2301 case Builtin::BI__sync_nand_and_fetch_4: 2302 case Builtin::BI__sync_nand_and_fetch_8: 2303 case Builtin::BI__sync_nand_and_fetch_16: 2304 case Builtin::BI__sync_val_compare_and_swap: 2305 case Builtin::BI__sync_val_compare_and_swap_1: 2306 case Builtin::BI__sync_val_compare_and_swap_2: 2307 case Builtin::BI__sync_val_compare_and_swap_4: 2308 case Builtin::BI__sync_val_compare_and_swap_8: 2309 case 
Builtin::BI__sync_val_compare_and_swap_16: 2310 case Builtin::BI__sync_bool_compare_and_swap: 2311 case Builtin::BI__sync_bool_compare_and_swap_1: 2312 case Builtin::BI__sync_bool_compare_and_swap_2: 2313 case Builtin::BI__sync_bool_compare_and_swap_4: 2314 case Builtin::BI__sync_bool_compare_and_swap_8: 2315 case Builtin::BI__sync_bool_compare_and_swap_16: 2316 case Builtin::BI__sync_lock_test_and_set: 2317 case Builtin::BI__sync_lock_test_and_set_1: 2318 case Builtin::BI__sync_lock_test_and_set_2: 2319 case Builtin::BI__sync_lock_test_and_set_4: 2320 case Builtin::BI__sync_lock_test_and_set_8: 2321 case Builtin::BI__sync_lock_test_and_set_16: 2322 case Builtin::BI__sync_lock_release: 2323 case Builtin::BI__sync_lock_release_1: 2324 case Builtin::BI__sync_lock_release_2: 2325 case Builtin::BI__sync_lock_release_4: 2326 case Builtin::BI__sync_lock_release_8: 2327 case Builtin::BI__sync_lock_release_16: 2328 case Builtin::BI__sync_swap: 2329 case Builtin::BI__sync_swap_1: 2330 case Builtin::BI__sync_swap_2: 2331 case Builtin::BI__sync_swap_4: 2332 case Builtin::BI__sync_swap_8: 2333 case Builtin::BI__sync_swap_16: 2334 return SemaBuiltinAtomicOverloaded(TheCallResult); 2335 case Builtin::BI__sync_synchronize: 2336 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 2337 << TheCall->getCallee()->getSourceRange(); 2338 break; 2339 case Builtin::BI__builtin_nontemporal_load: 2340 case Builtin::BI__builtin_nontemporal_store: 2341 return SemaBuiltinNontemporalOverloaded(TheCallResult); 2342 case Builtin::BI__builtin_memcpy_inline: { 2343 clang::Expr *SizeOp = TheCall->getArg(2); 2344 // We warn about copying to or from `nullptr` pointers when `size` is 2345 // greater than 0. When `size` is value dependent we cannot evaluate its 2346 // value so we bail out. 2347 if (SizeOp->isValueDependent()) 2348 break; 2349 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 2350 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2351 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 2352 } 2353 break; 2354 } 2355 case Builtin::BI__builtin_memset_inline: { 2356 clang::Expr *SizeOp = TheCall->getArg(2); 2357 // We warn about filling to `nullptr` pointers when `size` is greater than 2358 // 0. When `size` is value dependent we cannot evaluate its value so we bail 2359 // out. 
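    // Illustrative cases for the checks below (assumed usage):
    //   __builtin_memset_inline(dst, 0, 0);      // size 0: no warning
    //   __builtin_memset_inline(nullptr, 0, 8);  // non-zero size: warns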
2360 if (SizeOp->isValueDependent()) 2361 break; 2362 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) 2363 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2364 break; 2365 } 2366 #define BUILTIN(ID, TYPE, ATTRS) 2367 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 2368 case Builtin::BI##ID: \ 2369 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 2370 #include "clang/Basic/Builtins.def" 2371 case Builtin::BI__annotation: 2372 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 2373 return ExprError(); 2374 break; 2375 case Builtin::BI__builtin_annotation: 2376 if (SemaBuiltinAnnotation(*this, TheCall)) 2377 return ExprError(); 2378 break; 2379 case Builtin::BI__builtin_addressof: 2380 if (SemaBuiltinAddressof(*this, TheCall)) 2381 return ExprError(); 2382 break; 2383 case Builtin::BI__builtin_function_start: 2384 if (SemaBuiltinFunctionStart(*this, TheCall)) 2385 return ExprError(); 2386 break; 2387 case Builtin::BI__builtin_is_aligned: 2388 case Builtin::BI__builtin_align_up: 2389 case Builtin::BI__builtin_align_down: 2390 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 2391 return ExprError(); 2392 break; 2393 case Builtin::BI__builtin_add_overflow: 2394 case Builtin::BI__builtin_sub_overflow: 2395 case Builtin::BI__builtin_mul_overflow: 2396 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 2397 return ExprError(); 2398 break; 2399 case Builtin::BI__builtin_operator_new: 2400 case Builtin::BI__builtin_operator_delete: { 2401 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 2402 ExprResult Res = 2403 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 2404 if (Res.isInvalid()) 2405 CorrectDelayedTyposInExpr(TheCallResult.get()); 2406 return Res; 2407 } 2408 case Builtin::BI__builtin_dump_struct: 2409 return SemaBuiltinDumpStruct(*this, TheCall); 2410 case Builtin::BI__builtin_expect_with_probability: { 2411 // We first want to ensure we are called with 3 arguments 2412 if (checkArgCount(*this, TheCall, 3)) 2413 return ExprError(); 2414 // then check probability is constant float in range [0.0, 1.0] 2415 const Expr *ProbArg = TheCall->getArg(2); 2416 SmallVector<PartialDiagnosticAt, 8> Notes; 2417 Expr::EvalResult Eval; 2418 Eval.Diag = &Notes; 2419 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 2420 !Eval.Val.isFloat()) { 2421 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 2422 << ProbArg->getSourceRange(); 2423 for (const PartialDiagnosticAt &PDiag : Notes) 2424 Diag(PDiag.first, PDiag.second); 2425 return ExprError(); 2426 } 2427 llvm::APFloat Probability = Eval.Val.getFloat(); 2428 bool LoseInfo = false; 2429 Probability.convert(llvm::APFloat::IEEEdouble(), 2430 llvm::RoundingMode::Dynamic, &LoseInfo); 2431 if (!(Probability >= llvm::APFloat(0.0) && 2432 Probability <= llvm::APFloat(1.0))) { 2433 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 2434 << ProbArg->getSourceRange(); 2435 return ExprError(); 2436 } 2437 break; 2438 } 2439 case Builtin::BI__builtin_preserve_access_index: 2440 if (SemaBuiltinPreserveAI(*this, TheCall)) 2441 return ExprError(); 2442 break; 2443 case Builtin::BI__builtin_call_with_static_chain: 2444 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 2445 return ExprError(); 2446 break; 2447 case Builtin::BI__exception_code: 2448 case Builtin::BI_exception_code: 2449 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 2450 diag::err_seh___except_block)) 2451 return ExprError(); 2452 break; 2453 case 
Builtin::BI__exception_info: 2454 case Builtin::BI_exception_info: 2455 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2456 diag::err_seh___except_filter)) 2457 return ExprError(); 2458 break; 2459 case Builtin::BI__GetExceptionInfo: 2460 if (checkArgCount(*this, TheCall, 1)) 2461 return ExprError(); 2462 2463 if (CheckCXXThrowOperand( 2464 TheCall->getBeginLoc(), 2465 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2466 TheCall)) 2467 return ExprError(); 2468 2469 TheCall->setType(Context.VoidPtrTy); 2470 break; 2471 case Builtin::BIaddressof: 2472 case Builtin::BI__addressof: 2473 case Builtin::BIforward: 2474 case Builtin::BImove: 2475 case Builtin::BImove_if_noexcept: 2476 case Builtin::BIas_const: { 2477 // These are all expected to be of the form 2478 // T &/&&/* f(U &/&&) 2479 // where T and U only differ in qualification. 2480 if (checkArgCount(*this, TheCall, 1)) 2481 return ExprError(); 2482 QualType Param = FDecl->getParamDecl(0)->getType(); 2483 QualType Result = FDecl->getReturnType(); 2484 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof || 2485 BuiltinID == Builtin::BI__addressof; 2486 if (!(Param->isReferenceType() && 2487 (ReturnsPointer ? Result->isAnyPointerType() 2488 : Result->isReferenceType()) && 2489 Context.hasSameUnqualifiedType(Param->getPointeeType(), 2490 Result->getPointeeType()))) { 2491 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported) 2492 << FDecl; 2493 return ExprError(); 2494 } 2495 break; 2496 } 2497 // OpenCL v2.0, s6.13.16 - Pipe functions 2498 case Builtin::BIread_pipe: 2499 case Builtin::BIwrite_pipe: 2500 // Since those two functions are declared with var args, we need a semantic 2501 // check for the argument. 2502 if (SemaBuiltinRWPipe(*this, TheCall)) 2503 return ExprError(); 2504 break; 2505 case Builtin::BIreserve_read_pipe: 2506 case Builtin::BIreserve_write_pipe: 2507 case Builtin::BIwork_group_reserve_read_pipe: 2508 case Builtin::BIwork_group_reserve_write_pipe: 2509 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2510 return ExprError(); 2511 break; 2512 case Builtin::BIsub_group_reserve_read_pipe: 2513 case Builtin::BIsub_group_reserve_write_pipe: 2514 if (checkOpenCLSubgroupExt(*this, TheCall) || 2515 SemaBuiltinReserveRWPipe(*this, TheCall)) 2516 return ExprError(); 2517 break; 2518 case Builtin::BIcommit_read_pipe: 2519 case Builtin::BIcommit_write_pipe: 2520 case Builtin::BIwork_group_commit_read_pipe: 2521 case Builtin::BIwork_group_commit_write_pipe: 2522 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2523 return ExprError(); 2524 break; 2525 case Builtin::BIsub_group_commit_read_pipe: 2526 case Builtin::BIsub_group_commit_write_pipe: 2527 if (checkOpenCLSubgroupExt(*this, TheCall) || 2528 SemaBuiltinCommitRWPipe(*this, TheCall)) 2529 return ExprError(); 2530 break; 2531 case Builtin::BIget_pipe_num_packets: 2532 case Builtin::BIget_pipe_max_packets: 2533 if (SemaBuiltinPipePackets(*this, TheCall)) 2534 return ExprError(); 2535 break; 2536 case Builtin::BIto_global: 2537 case Builtin::BIto_local: 2538 case Builtin::BIto_private: 2539 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2540 return ExprError(); 2541 break; 2542 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 
2543 case Builtin::BIenqueue_kernel: 2544 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 2545 return ExprError(); 2546 break; 2547 case Builtin::BIget_kernel_work_group_size: 2548 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 2549 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 2550 return ExprError(); 2551 break; 2552 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 2553 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 2554 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 2555 return ExprError(); 2556 break; 2557 case Builtin::BI__builtin_os_log_format: 2558 Cleanup.setExprNeedsCleanups(true); 2559 [[fallthrough]]; 2560 case Builtin::BI__builtin_os_log_format_buffer_size: 2561 if (SemaBuiltinOSLogFormat(TheCall)) 2562 return ExprError(); 2563 break; 2564 case Builtin::BI__builtin_frame_address: 2565 case Builtin::BI__builtin_return_address: { 2566 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 2567 return ExprError(); 2568 2569 // -Wframe-address warning if non-zero passed to builtin 2570 // return/frame address. 2571 Expr::EvalResult Result; 2572 if (!TheCall->getArg(0)->isValueDependent() && 2573 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 2574 Result.Val.getInt() != 0) 2575 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 2576 << ((BuiltinID == Builtin::BI__builtin_return_address) 2577 ? "__builtin_return_address" 2578 : "__builtin_frame_address") 2579 << TheCall->getSourceRange(); 2580 break; 2581 } 2582 2583 // __builtin_elementwise_abs restricts the element type to signed integers or 2584 // floating point types only. 2585 case Builtin::BI__builtin_elementwise_abs: { 2586 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2587 return ExprError(); 2588 2589 QualType ArgTy = TheCall->getArg(0)->getType(); 2590 QualType EltTy = ArgTy; 2591 2592 if (auto *VecTy = EltTy->getAs<VectorType>()) 2593 EltTy = VecTy->getElementType(); 2594 if (EltTy->isUnsignedIntegerType()) { 2595 Diag(TheCall->getArg(0)->getBeginLoc(), 2596 diag::err_builtin_invalid_arg_type) 2597 << 1 << /* signed integer or float ty*/ 3 << ArgTy; 2598 return ExprError(); 2599 } 2600 break; 2601 } 2602 2603 // These builtins restrict the element type to floating point 2604 // types only. 2605 case Builtin::BI__builtin_elementwise_ceil: 2606 case Builtin::BI__builtin_elementwise_cos: 2607 case Builtin::BI__builtin_elementwise_floor: 2608 case Builtin::BI__builtin_elementwise_roundeven: 2609 case Builtin::BI__builtin_elementwise_sin: 2610 case Builtin::BI__builtin_elementwise_trunc: 2611 case Builtin::BI__builtin_elementwise_canonicalize: { 2612 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2613 return ExprError(); 2614 2615 QualType ArgTy = TheCall->getArg(0)->getType(); 2616 QualType EltTy = ArgTy; 2617 2618 if (auto *VecTy = EltTy->getAs<VectorType>()) 2619 EltTy = VecTy->getElementType(); 2620 if (!EltTy->isFloatingType()) { 2621 Diag(TheCall->getArg(0)->getBeginLoc(), 2622 diag::err_builtin_invalid_arg_type) 2623 << 1 << /* float ty*/ 5 << ArgTy; 2624 2625 return ExprError(); 2626 } 2627 break; 2628 } 2629 2630 // These builtins restrict the element type to integer 2631 // types only. 
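  // For illustration (assumed usage): __builtin_elementwise_add_sat(a, b)
  // is accepted for matching integer scalars or integer vectors, while a
  // floating-point element type is rejected by the check below.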
2632 case Builtin::BI__builtin_elementwise_add_sat: 2633 case Builtin::BI__builtin_elementwise_sub_sat: { 2634 if (SemaBuiltinElementwiseMath(TheCall)) 2635 return ExprError(); 2636 2637 const Expr *Arg = TheCall->getArg(0); 2638 QualType ArgTy = Arg->getType(); 2639 QualType EltTy = ArgTy; 2640 2641 if (auto *VecTy = EltTy->getAs<VectorType>()) 2642 EltTy = VecTy->getElementType(); 2643 2644 if (!EltTy->isIntegerType()) { 2645 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2646 << 1 << /* integer ty */ 6 << ArgTy; 2647 return ExprError(); 2648 } 2649 break; 2650 } 2651 2652 case Builtin::BI__builtin_elementwise_min: 2653 case Builtin::BI__builtin_elementwise_max: 2654 if (SemaBuiltinElementwiseMath(TheCall)) 2655 return ExprError(); 2656 break; 2657 case Builtin::BI__builtin_elementwise_copysign: { 2658 if (checkArgCount(*this, TheCall, 2)) 2659 return ExprError(); 2660 2661 ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0)); 2662 ExprResult Sign = UsualUnaryConversions(TheCall->getArg(1)); 2663 if (Magnitude.isInvalid() || Sign.isInvalid()) 2664 return ExprError(); 2665 2666 QualType MagnitudeTy = Magnitude.get()->getType(); 2667 QualType SignTy = Sign.get()->getType(); 2668 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(), 2669 MagnitudeTy, 1) || 2670 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(), 2671 SignTy, 2)) { 2672 return ExprError(); 2673 } 2674 2675 if (MagnitudeTy.getCanonicalType() != SignTy.getCanonicalType()) { 2676 return Diag(Sign.get()->getBeginLoc(), 2677 diag::err_typecheck_call_different_arg_types) 2678 << MagnitudeTy << SignTy; 2679 } 2680 2681 TheCall->setArg(0, Magnitude.get()); 2682 TheCall->setArg(1, Sign.get()); 2683 TheCall->setType(Magnitude.get()->getType()); 2684 break; 2685 } 2686 case Builtin::BI__builtin_reduce_max: 2687 case Builtin::BI__builtin_reduce_min: { 2688 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2689 return ExprError(); 2690 2691 const Expr *Arg = TheCall->getArg(0); 2692 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2693 if (!TyA) { 2694 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2695 << 1 << /* vector ty*/ 4 << Arg->getType(); 2696 return ExprError(); 2697 } 2698 2699 TheCall->setType(TyA->getElementType()); 2700 break; 2701 } 2702 2703 // These builtins support vectors of integers only. 2704 // TODO: ADD/MUL should support floating-point types. 
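  // Illustrative usage (assumed):
  //   typedef int int4 __attribute__((ext_vector_type(4)));
  //   int4 v = ...;
  //   int sum = __builtin_reduce_add(v); // result has the element type (int)
  // A vector of floats is rejected by the check below.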
2705 case Builtin::BI__builtin_reduce_add: 2706 case Builtin::BI__builtin_reduce_mul: 2707 case Builtin::BI__builtin_reduce_xor: 2708 case Builtin::BI__builtin_reduce_or: 2709 case Builtin::BI__builtin_reduce_and: { 2710 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2711 return ExprError(); 2712 2713 const Expr *Arg = TheCall->getArg(0); 2714 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2715 if (!TyA || !TyA->getElementType()->isIntegerType()) { 2716 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2717 << 1 << /* vector of integers */ 6 << Arg->getType(); 2718 return ExprError(); 2719 } 2720 TheCall->setType(TyA->getElementType()); 2721 break; 2722 } 2723 2724 case Builtin::BI__builtin_matrix_transpose: 2725 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 2726 2727 case Builtin::BI__builtin_matrix_column_major_load: 2728 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 2729 2730 case Builtin::BI__builtin_matrix_column_major_store: 2731 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 2732 2733 case Builtin::BI__builtin_get_device_side_mangled_name: { 2734 auto Check = [](CallExpr *TheCall) { 2735 if (TheCall->getNumArgs() != 1) 2736 return false; 2737 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 2738 if (!DRE) 2739 return false; 2740 auto *D = DRE->getDecl(); 2741 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 2742 return false; 2743 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 2744 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2745 }; 2746 if (!Check(TheCall)) { 2747 Diag(TheCall->getBeginLoc(), 2748 diag::err_hip_invalid_args_builtin_mangled_name); 2749 return ExprError(); 2750 } 2751 } 2752 } 2753 2754 // Since the target specific builtins for each arch overlap, only check those 2755 // of the arch we are compiling for. 2756 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 2757 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 2758 assert(Context.getAuxTargetInfo() && 2759 "Aux Target Builtin, but not an aux target?"); 2760 2761 if (CheckTSBuiltinFunctionCall( 2762 *Context.getAuxTargetInfo(), 2763 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2764 return ExprError(); 2765 } else { 2766 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2767 TheCall)) 2768 return ExprError(); 2769 } 2770 } 2771 2772 return TheCallResult; 2773 } 2774 2775 // Get the valid immediate range for the specified NEON type code. 2776 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2777 NeonTypeFlags Type(t); 2778 int IsQuad = ForceQuad ? true : Type.isQuad(); 2779 switch (Type.getEltType()) { 2780 case NeonTypeFlags::Int8: 2781 case NeonTypeFlags::Poly8: 2782 return shift ? 7 : (8 << IsQuad) - 1; 2783 case NeonTypeFlags::Int16: 2784 case NeonTypeFlags::Poly16: 2785 return shift ? 15 : (4 << IsQuad) - 1; 2786 case NeonTypeFlags::Int32: 2787 return shift ? 31 : (2 << IsQuad) - 1; 2788 case NeonTypeFlags::Int64: 2789 case NeonTypeFlags::Poly64: 2790 return shift ? 63 : (1 << IsQuad) - 1; 2791 case NeonTypeFlags::Poly128: 2792 return shift ? 
127 : (1 << IsQuad) - 1; 2793 case NeonTypeFlags::Float16: 2794 assert(!shift && "cannot shift float types!"); 2795 return (4 << IsQuad) - 1; 2796 case NeonTypeFlags::Float32: 2797 assert(!shift && "cannot shift float types!"); 2798 return (2 << IsQuad) - 1; 2799 case NeonTypeFlags::Float64: 2800 assert(!shift && "cannot shift float types!"); 2801 return (1 << IsQuad) - 1; 2802 case NeonTypeFlags::BFloat16: 2803 assert(!shift && "cannot shift float types!"); 2804 return (4 << IsQuad) - 1; 2805 } 2806 llvm_unreachable("Invalid NeonTypeFlag!"); 2807 } 2808 2809 /// getNeonEltType - Return the QualType corresponding to the elements of 2810 /// the vector type specified by the NeonTypeFlags. This is used to check 2811 /// the pointer arguments for Neon load/store intrinsics. 2812 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2813 bool IsPolyUnsigned, bool IsInt64Long) { 2814 switch (Flags.getEltType()) { 2815 case NeonTypeFlags::Int8: 2816 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2817 case NeonTypeFlags::Int16: 2818 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2819 case NeonTypeFlags::Int32: 2820 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2821 case NeonTypeFlags::Int64: 2822 if (IsInt64Long) 2823 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2824 else 2825 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2826 : Context.LongLongTy; 2827 case NeonTypeFlags::Poly8: 2828 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2829 case NeonTypeFlags::Poly16: 2830 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 2831 case NeonTypeFlags::Poly64: 2832 if (IsInt64Long) 2833 return Context.UnsignedLongTy; 2834 else 2835 return Context.UnsignedLongLongTy; 2836 case NeonTypeFlags::Poly128: 2837 break; 2838 case NeonTypeFlags::Float16: 2839 return Context.HalfTy; 2840 case NeonTypeFlags::Float32: 2841 return Context.FloatTy; 2842 case NeonTypeFlags::Float64: 2843 return Context.DoubleTy; 2844 case NeonTypeFlags::BFloat16: 2845 return Context.BFloat16Ty; 2846 } 2847 llvm_unreachable("Invalid NeonTypeFlag!"); 2848 } 2849 2850 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2851 // Range check SVE intrinsics that take immediate values. 2852 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2853 2854 switch (BuiltinID) { 2855 default: 2856 return false; 2857 #define GET_SVE_IMMEDIATE_CHECK 2858 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2859 #undef GET_SVE_IMMEDIATE_CHECK 2860 } 2861 2862 // Perform all the immediate checks for this builtin call. 2863 bool HasError = false; 2864 for (auto &I : ImmChecks) { 2865 int ArgNum, CheckTy, ElementSizeInBits; 2866 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2867 2868 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2869 2870 // Function that checks whether the operand (ArgNum) is an immediate 2871 // that is one of the predefined values. 2872 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2873 int ErrDiag) -> bool { 2874 // We can't check the value of a dependent argument. 2875 Expr *Arg = TheCall->getArg(ArgNum); 2876 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2877 return false; 2878 2879 // Check constant-ness first. 
2880 llvm::APSInt Imm; 2881 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2882 return true; 2883 2884 if (!CheckImm(Imm.getSExtValue())) 2885 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2886 return false; 2887 }; 2888 2889 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2890 case SVETypeFlags::ImmCheck0_31: 2891 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2892 HasError = true; 2893 break; 2894 case SVETypeFlags::ImmCheck0_13: 2895 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2896 HasError = true; 2897 break; 2898 case SVETypeFlags::ImmCheck1_16: 2899 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2900 HasError = true; 2901 break; 2902 case SVETypeFlags::ImmCheck0_7: 2903 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2904 HasError = true; 2905 break; 2906 case SVETypeFlags::ImmCheckExtract: 2907 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2908 (2048 / ElementSizeInBits) - 1)) 2909 HasError = true; 2910 break; 2911 case SVETypeFlags::ImmCheckShiftRight: 2912 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2913 HasError = true; 2914 break; 2915 case SVETypeFlags::ImmCheckShiftRightNarrow: 2916 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2917 ElementSizeInBits / 2)) 2918 HasError = true; 2919 break; 2920 case SVETypeFlags::ImmCheckShiftLeft: 2921 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2922 ElementSizeInBits - 1)) 2923 HasError = true; 2924 break; 2925 case SVETypeFlags::ImmCheckLaneIndex: 2926 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2927 (128 / (1 * ElementSizeInBits)) - 1)) 2928 HasError = true; 2929 break; 2930 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2931 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2932 (128 / (2 * ElementSizeInBits)) - 1)) 2933 HasError = true; 2934 break; 2935 case SVETypeFlags::ImmCheckLaneIndexDot: 2936 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2937 (128 / (4 * ElementSizeInBits)) - 1)) 2938 HasError = true; 2939 break; 2940 case SVETypeFlags::ImmCheckComplexRot90_270: 2941 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2942 diag::err_rotation_argument_to_cadd)) 2943 HasError = true; 2944 break; 2945 case SVETypeFlags::ImmCheckComplexRotAll90: 2946 if (CheckImmediateInSet( 2947 [](int64_t V) { 2948 return V == 0 || V == 90 || V == 180 || V == 270; 2949 }, 2950 diag::err_rotation_argument_to_cmla)) 2951 HasError = true; 2952 break; 2953 case SVETypeFlags::ImmCheck0_1: 2954 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2955 HasError = true; 2956 break; 2957 case SVETypeFlags::ImmCheck0_2: 2958 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2959 HasError = true; 2960 break; 2961 case SVETypeFlags::ImmCheck0_3: 2962 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2963 HasError = true; 2964 break; 2965 } 2966 } 2967 2968 return HasError; 2969 } 2970 2971 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2972 unsigned BuiltinID, CallExpr *TheCall) { 2973 llvm::APSInt Result; 2974 uint64_t mask = 0; 2975 unsigned TV = 0; 2976 int PtrArgNum = -1; 2977 bool HasConstPtr = false; 2978 switch (BuiltinID) { 2979 #define GET_NEON_OVERLOAD_CHECK 2980 #include "clang/Basic/arm_neon.inc" 2981 #include "clang/Basic/arm_fp16.inc" 2982 #undef GET_NEON_OVERLOAD_CHECK 2983 } 2984 2985 // For NEON intrinsics which are overloaded on vector element type, validate 2986 // the immediate which specifies which variant to emit. 
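  // For illustration (assumed encoding): the arm_neon.h wrappers pass a
  // trailing NeonTypeFlags literal, e.g. a vld1_s8 wrapper expands to
  // something like __builtin_neon_vld1_v(ptr, <type code>), and that final
  // literal is the immediate validated here against 'mask'.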
2987 unsigned ImmArg = TheCall->getNumArgs()-1; 2988 if (mask) { 2989 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2990 return true; 2991 2992 TV = Result.getLimitedValue(64); 2993 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2994 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2995 << TheCall->getArg(ImmArg)->getSourceRange(); 2996 } 2997 2998 if (PtrArgNum >= 0) { 2999 // Check that pointer arguments have the specified type. 3000 Expr *Arg = TheCall->getArg(PtrArgNum); 3001 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 3002 Arg = ICE->getSubExpr(); 3003 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 3004 QualType RHSTy = RHS.get()->getType(); 3005 3006 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 3007 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 3008 Arch == llvm::Triple::aarch64_32 || 3009 Arch == llvm::Triple::aarch64_be; 3010 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 3011 QualType EltTy = 3012 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 3013 if (HasConstPtr) 3014 EltTy = EltTy.withConst(); 3015 QualType LHSTy = Context.getPointerType(EltTy); 3016 AssignConvertType ConvTy; 3017 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 3018 if (RHS.isInvalid()) 3019 return true; 3020 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 3021 RHS.get(), AA_Assigning)) 3022 return true; 3023 } 3024 3025 // For NEON intrinsics which take an immediate value as part of the 3026 // instruction, range check them here. 3027 unsigned i = 0, l = 0, u = 0; 3028 switch (BuiltinID) { 3029 default: 3030 return false; 3031 #define GET_NEON_IMMEDIATE_CHECK 3032 #include "clang/Basic/arm_neon.inc" 3033 #include "clang/Basic/arm_fp16.inc" 3034 #undef GET_NEON_IMMEDIATE_CHECK 3035 } 3036 3037 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 3038 } 3039 3040 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 3041 switch (BuiltinID) { 3042 default: 3043 return false; 3044 #include "clang/Basic/arm_mve_builtin_sema.inc" 3045 } 3046 } 3047 3048 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3049 CallExpr *TheCall) { 3050 bool Err = false; 3051 switch (BuiltinID) { 3052 default: 3053 return false; 3054 #include "clang/Basic/arm_cde_builtin_sema.inc" 3055 } 3056 3057 if (Err) 3058 return true; 3059 3060 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 3061 } 3062 3063 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 3064 const Expr *CoprocArg, bool WantCDE) { 3065 if (isConstantEvaluated()) 3066 return false; 3067 3068 // We can't check the value of a dependent argument. 
3069 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 3070 return false; 3071 3072 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 3073 int64_t CoprocNo = CoprocNoAP.getExtValue(); 3074 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 3075 3076 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 3077 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 3078 3079 if (IsCDECoproc != WantCDE) 3080 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 3081 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 3082 3083 return false; 3084 } 3085 3086 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 3087 unsigned MaxWidth) { 3088 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 3089 BuiltinID == ARM::BI__builtin_arm_ldaex || 3090 BuiltinID == ARM::BI__builtin_arm_strex || 3091 BuiltinID == ARM::BI__builtin_arm_stlex || 3092 BuiltinID == AArch64::BI__builtin_arm_ldrex || 3093 BuiltinID == AArch64::BI__builtin_arm_ldaex || 3094 BuiltinID == AArch64::BI__builtin_arm_strex || 3095 BuiltinID == AArch64::BI__builtin_arm_stlex) && 3096 "unexpected ARM builtin"); 3097 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 3098 BuiltinID == ARM::BI__builtin_arm_ldaex || 3099 BuiltinID == AArch64::BI__builtin_arm_ldrex || 3100 BuiltinID == AArch64::BI__builtin_arm_ldaex; 3101 3102 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 3103 3104 // Ensure that we have the proper number of arguments. 3105 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 3106 return true; 3107 3108 // Inspect the pointer argument of the atomic builtin. This should always be 3109 // a pointer type, whose element is an integral scalar or pointer type. 3110 // Because it is a pointer type, we don't have to worry about any implicit 3111 // casts here. 3112 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 3113 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 3114 if (PointerArgRes.isInvalid()) 3115 return true; 3116 PointerArg = PointerArgRes.get(); 3117 3118 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 3119 if (!pointerType) { 3120 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 3121 << PointerArg->getType() << PointerArg->getSourceRange(); 3122 return true; 3123 } 3124 3125 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 3126 // task is to insert the appropriate casts into the AST. First work out just 3127 // what the appropriate type is. 3128 QualType ValType = pointerType->getPointeeType(); 3129 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 3130 if (IsLdrex) 3131 AddrType.addConst(); 3132 3133 // Issue a warning if the cast is dodgy. 3134 CastKind CastNeeded = CK_NoOp; 3135 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 3136 CastNeeded = CK_BitCast; 3137 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 3138 << PointerArg->getType() << Context.getPointerType(AddrType) 3139 << AA_Passing << PointerArg->getSourceRange(); 3140 } 3141 3142 // Finally, do the cast and replace the argument with the corrected version. 3143 AddrType = Context.getPointerType(AddrType); 3144 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 3145 if (PointerArgRes.isInvalid()) 3146 return true; 3147 PointerArg = PointerArgRes.get(); 3148 3149 TheCall->setArg(IsLdrex ? 
0 : 1, PointerArg); 3150 3151 // In general, we allow ints, floats and pointers to be loaded and stored. 3152 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 3153 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 3154 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 3155 << PointerArg->getType() << PointerArg->getSourceRange(); 3156 return true; 3157 } 3158 3159 // But ARM doesn't have instructions to deal with 128-bit versions. 3160 if (Context.getTypeSize(ValType) > MaxWidth) { 3161 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 3162 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 3163 << PointerArg->getType() << PointerArg->getSourceRange(); 3164 return true; 3165 } 3166 3167 switch (ValType.getObjCLifetime()) { 3168 case Qualifiers::OCL_None: 3169 case Qualifiers::OCL_ExplicitNone: 3170 // okay 3171 break; 3172 3173 case Qualifiers::OCL_Weak: 3174 case Qualifiers::OCL_Strong: 3175 case Qualifiers::OCL_Autoreleasing: 3176 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 3177 << ValType << PointerArg->getSourceRange(); 3178 return true; 3179 } 3180 3181 if (IsLdrex) { 3182 TheCall->setType(ValType); 3183 return false; 3184 } 3185 3186 // Initialize the argument to be stored. 3187 ExprResult ValArg = TheCall->getArg(0); 3188 InitializedEntity Entity = InitializedEntity::InitializeParameter( 3189 Context, ValType, /*consume*/ false); 3190 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 3191 if (ValArg.isInvalid()) 3192 return true; 3193 TheCall->setArg(0, ValArg.get()); 3194 3195 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 3196 // but the custom checker bypasses all default analysis. 3197 TheCall->setType(Context.IntTy); 3198 return false; 3199 } 3200 3201 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3202 CallExpr *TheCall) { 3203 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 3204 BuiltinID == ARM::BI__builtin_arm_ldaex || 3205 BuiltinID == ARM::BI__builtin_arm_strex || 3206 BuiltinID == ARM::BI__builtin_arm_stlex) { 3207 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 3208 } 3209 3210 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 3211 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3212 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 3213 } 3214 3215 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 3216 BuiltinID == ARM::BI__builtin_arm_wsr64) 3217 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 3218 3219 if (BuiltinID == ARM::BI__builtin_arm_rsr || 3220 BuiltinID == ARM::BI__builtin_arm_rsrp || 3221 BuiltinID == ARM::BI__builtin_arm_wsr || 3222 BuiltinID == ARM::BI__builtin_arm_wsrp) 3223 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3224 3225 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3226 return true; 3227 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 3228 return true; 3229 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3230 return true; 3231 3232 // For intrinsics which take an immediate value as part of the instruction, 3233 // range check them here. 3234 // FIXME: VFP Intrinsics should error if VFP not present. 
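// Illustrative example (not from the original source): __builtin_arm_ssat
// only accepts an immediate in [1, 32], so a call such as
// __builtin_arm_ssat(x, 0) is rejected by the range check below, while
// __builtin_arm_usat(x, 0) is accepted because its range is [0, 31].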
3235 switch (BuiltinID) { 3236 default: return false; 3237 case ARM::BI__builtin_arm_ssat: 3238 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 3239 case ARM::BI__builtin_arm_usat: 3240 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 3241 case ARM::BI__builtin_arm_ssat16: 3242 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 3243 case ARM::BI__builtin_arm_usat16: 3244 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 3245 case ARM::BI__builtin_arm_vcvtr_f: 3246 case ARM::BI__builtin_arm_vcvtr_d: 3247 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3248 case ARM::BI__builtin_arm_dmb: 3249 case ARM::BI__builtin_arm_dsb: 3250 case ARM::BI__builtin_arm_isb: 3251 case ARM::BI__builtin_arm_dbg: 3252 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 3253 case ARM::BI__builtin_arm_cdp: 3254 case ARM::BI__builtin_arm_cdp2: 3255 case ARM::BI__builtin_arm_mcr: 3256 case ARM::BI__builtin_arm_mcr2: 3257 case ARM::BI__builtin_arm_mrc: 3258 case ARM::BI__builtin_arm_mrc2: 3259 case ARM::BI__builtin_arm_mcrr: 3260 case ARM::BI__builtin_arm_mcrr2: 3261 case ARM::BI__builtin_arm_mrrc: 3262 case ARM::BI__builtin_arm_mrrc2: 3263 case ARM::BI__builtin_arm_ldc: 3264 case ARM::BI__builtin_arm_ldcl: 3265 case ARM::BI__builtin_arm_ldc2: 3266 case ARM::BI__builtin_arm_ldc2l: 3267 case ARM::BI__builtin_arm_stc: 3268 case ARM::BI__builtin_arm_stcl: 3269 case ARM::BI__builtin_arm_stc2: 3270 case ARM::BI__builtin_arm_stc2l: 3271 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 3272 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 3273 /*WantCDE*/ false); 3274 } 3275 } 3276 3277 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 3278 unsigned BuiltinID, 3279 CallExpr *TheCall) { 3280 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 3281 BuiltinID == AArch64::BI__builtin_arm_ldaex || 3282 BuiltinID == AArch64::BI__builtin_arm_strex || 3283 BuiltinID == AArch64::BI__builtin_arm_stlex) { 3284 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 3285 } 3286 3287 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 3288 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3289 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3) || 3290 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 3291 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 3292 } 3293 3294 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 3295 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 3296 BuiltinID == AArch64::BI__builtin_arm_rsr128 || 3297 BuiltinID == AArch64::BI__builtin_arm_wsr128) 3298 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3299 3300 // Memory Tagging Extensions (MTE) Intrinsics 3301 if (BuiltinID == AArch64::BI__builtin_arm_irg || 3302 BuiltinID == AArch64::BI__builtin_arm_addg || 3303 BuiltinID == AArch64::BI__builtin_arm_gmi || 3304 BuiltinID == AArch64::BI__builtin_arm_ldg || 3305 BuiltinID == AArch64::BI__builtin_arm_stg || 3306 BuiltinID == AArch64::BI__builtin_arm_subp) { 3307 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 3308 } 3309 3310 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 3311 BuiltinID == AArch64::BI__builtin_arm_rsrp || 3312 BuiltinID == AArch64::BI__builtin_arm_wsr || 3313 BuiltinID == AArch64::BI__builtin_arm_wsrp) 3314 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3315 3316 // Only check the valid encoding range. Any constant in this range would be 3317 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 3318 // an exception for incorrect registers. 
This matches MSVC behavior. 3319 if (BuiltinID == AArch64::BI_ReadStatusReg || 3320 BuiltinID == AArch64::BI_WriteStatusReg) 3321 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 3322 3323 if (BuiltinID == AArch64::BI__getReg) 3324 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3325 3326 if (BuiltinID == AArch64::BI__break) 3327 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff); 3328 3329 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3330 return true; 3331 3332 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 3333 return true; 3334 3335 // For intrinsics which take an immediate value as part of the instruction, 3336 // range check them here. 3337 unsigned i = 0, l = 0, u = 0; 3338 switch (BuiltinID) { 3339 default: return false; 3340 case AArch64::BI__builtin_arm_dmb: 3341 case AArch64::BI__builtin_arm_dsb: 3342 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 3343 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 3344 } 3345 3346 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 3347 } 3348 3349 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 3350 if (Arg->getType()->getAsPlaceholderType()) 3351 return false; 3352 3353 // The first argument needs to be a record field access. 3354 // If it is an array element access, we delay decision 3355 // to BPF backend to check whether the access is a 3356 // field access or not. 3357 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 3358 isa<MemberExpr>(Arg->IgnoreParens()) || 3359 isa<ArraySubscriptExpr>(Arg->IgnoreParens())); 3360 } 3361 3362 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 3363 QualType ArgType = Arg->getType(); 3364 if (ArgType->getAsPlaceholderType()) 3365 return false; 3366 3367 // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type 3368 // format: 3369 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 3370 // 2. <type> var; 3371 // __builtin_preserve_type_info(var, flag); 3372 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) && 3373 !isa<UnaryOperator>(Arg->IgnoreParens())) 3374 return false; 3375 3376 // Typedef type. 3377 if (ArgType->getAs<TypedefType>()) 3378 return true; 3379 3380 // Record type or Enum type. 3381 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3382 if (const auto *RT = Ty->getAs<RecordType>()) { 3383 if (!RT->getDecl()->getDeclName().isEmpty()) 3384 return true; 3385 } else if (const auto *ET = Ty->getAs<EnumType>()) { 3386 if (!ET->getDecl()->getDeclName().isEmpty()) 3387 return true; 3388 } 3389 3390 return false; 3391 } 3392 3393 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 3394 QualType ArgType = Arg->getType(); 3395 if (ArgType->getAsPlaceholderType()) 3396 return false; 3397 3398 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 3399 // format: 3400 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 3401 // flag); 3402 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 3403 if (!UO) 3404 return false; 3405 3406 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 3407 if (!CE) 3408 return false; 3409 if (CE->getCastKind() != CK_IntegralToPointer && 3410 CE->getCastKind() != CK_NullToPointer) 3411 return false; 3412 3413 // The integer must be from an EnumConstantDecl. 3414 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 3415 if (!DR) 3416 return false; 3417 3418 const EnumConstantDecl *Enumerator = 3419 dyn_cast<EnumConstantDecl>(DR->getDecl()); 3420 if (!Enumerator) 3421 return false; 3422 3423 // The type must be EnumType. 
3424 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3425 const auto *ET = Ty->getAs<EnumType>(); 3426 if (!ET) 3427 return false; 3428 3429 // The enum value must be supported. 3430 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); 3431 } 3432 3433 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 3434 CallExpr *TheCall) { 3435 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 3436 BuiltinID == BPF::BI__builtin_btf_type_id || 3437 BuiltinID == BPF::BI__builtin_preserve_type_info || 3438 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 3439 "unexpected BPF builtin"); 3440 3441 if (checkArgCount(*this, TheCall, 2)) 3442 return true; 3443 3444 // The second argument needs to be a constant int 3445 Expr *Arg = TheCall->getArg(1); 3446 std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 3447 diag::kind kind; 3448 if (!Value) { 3449 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 3450 kind = diag::err_preserve_field_info_not_const; 3451 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 3452 kind = diag::err_btf_type_id_not_const; 3453 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 3454 kind = diag::err_preserve_type_info_not_const; 3455 else 3456 kind = diag::err_preserve_enum_value_not_const; 3457 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 3458 return true; 3459 } 3460 3461 // The first argument 3462 Arg = TheCall->getArg(0); 3463 bool InvalidArg = false; 3464 bool ReturnUnsignedInt = true; 3465 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 3466 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 3467 InvalidArg = true; 3468 kind = diag::err_preserve_field_info_not_field; 3469 } 3470 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 3471 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 3472 InvalidArg = true; 3473 kind = diag::err_preserve_type_info_invalid; 3474 } 3475 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 3476 if (!isValidBPFPreserveEnumValueArg(Arg)) { 3477 InvalidArg = true; 3478 kind = diag::err_preserve_enum_value_invalid; 3479 } 3480 ReturnUnsignedInt = false; 3481 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) { 3482 ReturnUnsignedInt = false; 3483 } 3484 3485 if (InvalidArg) { 3486 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3487 return true; 3488 } 3489 3490 if (ReturnUnsignedInt) 3491 TheCall->setType(Context.UnsignedIntTy); 3492 else 3493 TheCall->setType(Context.UnsignedLongTy); 3494 return false; 3495 } 3496 3497 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3498 struct ArgInfo { 3499 uint8_t OpNum; 3500 bool IsSigned; 3501 uint8_t BitWidth; 3502 uint8_t Align; 3503 }; 3504 struct BuiltinInfo { 3505 unsigned BuiltinID; 3506 ArgInfo Infos[2]; 3507 }; 3508 3509 static BuiltinInfo Infos[] = { 3510 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3511 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3512 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3513 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3514 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3515 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3516 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3517 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3518 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3519 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3520 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3521 3522 { 
Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3523 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3524 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3525 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3526 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3527 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3528 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3529 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3530 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3531 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3532 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3533 3534 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3535 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3536 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3537 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3538 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3539 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3540 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3541 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3542 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3543 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3544 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3545 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3546 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3547 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3548 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3549 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3550 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3551 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3552 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3553 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3554 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3555 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3556 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3557 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3558 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3559 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3560 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3561 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3562 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3563 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3564 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3565 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3566 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3567 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3568 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3569 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3570 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3571 { 
Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3572 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3573 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3574 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3575 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3576 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3577 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3578 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3579 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3580 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3581 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3582 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3583 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3584 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 3585 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3586 {{ 1, false, 6, 0 }} }, 3587 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3588 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3589 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3590 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3591 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3592 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3593 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3594 {{ 1, false, 5, 0 }} }, 3595 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3596 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3597 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3598 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3599 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3600 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3601 { 2, false, 5, 0 }} }, 3602 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 3603 { 2, false, 6, 0 }} }, 3604 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3605 { 3, false, 5, 0 }} }, 3606 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3607 { 3, false, 6, 0 }} }, 3608 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3609 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3610 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3611 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3612 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3613 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3614 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3615 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 3616 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3617 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3618 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3619 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3620 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3621 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3622 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 
5, 0 }} }, 3623 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3624 {{ 2, false, 4, 0 }, 3625 { 3, false, 5, 0 }} }, 3626 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3627 {{ 2, false, 4, 0 }, 3628 { 3, false, 5, 0 }} }, 3629 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3630 {{ 2, false, 4, 0 }, 3631 { 3, false, 5, 0 }} }, 3632 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3633 {{ 2, false, 4, 0 }, 3634 { 3, false, 5, 0 }} }, 3635 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3636 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3637 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3638 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3639 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3640 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3641 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3642 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3643 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3644 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3645 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3646 { 2, false, 5, 0 }} }, 3647 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3648 { 2, false, 6, 0 }} }, 3649 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 3650 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3651 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3652 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3653 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3654 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3655 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3656 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3657 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3658 {{ 1, false, 4, 0 }} }, 3659 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3660 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3661 {{ 1, false, 4, 0 }} }, 3662 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3663 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3664 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3665 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3666 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3667 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3668 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3669 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3670 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3671 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3672 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3673 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3674 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3675 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3676 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3677 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 3678 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3679 { 
Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3680 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3681 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3682 {{ 3, false, 1, 0 }} }, 3683 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3684 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3685 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3686 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3687 {{ 3, false, 1, 0 }} }, 3688 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3689 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3690 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3691 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3692 {{ 3, false, 1, 0 }} }, 3693 3694 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, {{ 2, false, 2, 0 }} }, 3695 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B, 3696 {{ 2, false, 2, 0 }} }, 3697 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx, 3698 {{ 3, false, 2, 0 }} }, 3699 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B, 3700 {{ 3, false, 2, 0 }} }, 3701 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, {{ 2, false, 2, 0 }} }, 3702 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B, 3703 {{ 2, false, 2, 0 }} }, 3704 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx, 3705 {{ 3, false, 2, 0 }} }, 3706 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B, 3707 {{ 3, false, 2, 0 }} }, 3708 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {{ 2, false, 3, 0 }} }, 3709 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {{ 2, false, 3, 0 }} }, 3710 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} }, 3711 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, 3712 {{ 3, false, 3, 0 }} }, 3713 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {{ 2, false, 3, 0 }} }, 3714 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {{ 2, false, 3, 0 }} }, 3715 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {{ 3, false, 3, 0 }} }, 3716 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, 3717 {{ 3, false, 3, 0 }} }, 3718 }; 3719 3720 // Use a dynamically initialized static to sort the table exactly once on 3721 // first run. 3722 static const bool SortOnce = 3723 (llvm::sort(Infos, 3724 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3725 return LHS.BuiltinID < RHS.BuiltinID; 3726 }), 3727 true); 3728 (void)SortOnce; 3729 3730 const BuiltinInfo *F = llvm::partition_point( 3731 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3732 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3733 return false; 3734 3735 bool Error = false; 3736 3737 for (const ArgInfo &A : F->Infos) { 3738 // Ignore empty ArgInfo elements. 3739 if (A.BitWidth == 0) 3740 continue; 3741 3742 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3743 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1; 3744 if (!A.Align) { 3745 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3746 } else { 3747 unsigned M = 1 << A.Align; 3748 Min *= M; 3749 Max *= M; 3750 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3751 Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 3752 } 3753 } 3754 return Error; 3755 } 3756 3757 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 3758 CallExpr *TheCall) { 3759 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3760 } 3761 3762 bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI, 3763 unsigned BuiltinID, 3764 CallExpr *TheCall) { 3765 switch (BuiltinID) { 3766 default: 3767 break; 3768 case LoongArch::BI__builtin_loongarch_cacop_d: 3769 if (!TI.hasFeature("64bit")) 3770 return Diag(TheCall->getBeginLoc(), 3771 diag::err_loongarch_builtin_requires_la64) 3772 << TheCall->getSourceRange(); 3773 LLVM_FALLTHROUGH; 3774 case LoongArch::BI__builtin_loongarch_cacop_w: { 3775 if (BuiltinID == LoongArch::BI__builtin_loongarch_cacop_w && 3776 !TI.hasFeature("32bit")) 3777 return Diag(TheCall->getBeginLoc(), 3778 diag::err_loongarch_builtin_requires_la32) 3779 << TheCall->getSourceRange(); 3780 SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5)); 3781 SemaBuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12), 3782 llvm::maxIntN(12)); 3783 break; 3784 } 3785 case LoongArch::BI__builtin_loongarch_crc_w_b_w: 3786 case LoongArch::BI__builtin_loongarch_crc_w_h_w: 3787 case LoongArch::BI__builtin_loongarch_crc_w_w_w: 3788 case LoongArch::BI__builtin_loongarch_crc_w_d_w: 3789 case LoongArch::BI__builtin_loongarch_crcc_w_b_w: 3790 case LoongArch::BI__builtin_loongarch_crcc_w_h_w: 3791 case LoongArch::BI__builtin_loongarch_crcc_w_w_w: 3792 case LoongArch::BI__builtin_loongarch_crcc_w_d_w: 3793 case LoongArch::BI__builtin_loongarch_iocsrrd_d: 3794 case LoongArch::BI__builtin_loongarch_iocsrwr_d: 3795 case LoongArch::BI__builtin_loongarch_asrtle_d: 3796 case LoongArch::BI__builtin_loongarch_asrtgt_d: 3797 if (!TI.hasFeature("64bit")) 3798 return Diag(TheCall->getBeginLoc(), 3799 diag::err_loongarch_builtin_requires_la64) 3800 << TheCall->getSourceRange(); 3801 break; 3802 case LoongArch::BI__builtin_loongarch_break: 3803 case LoongArch::BI__builtin_loongarch_dbar: 3804 case LoongArch::BI__builtin_loongarch_ibar: 3805 case LoongArch::BI__builtin_loongarch_syscall: 3806 // Check if immediate is in [0, 32767]. 
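// (Illustrative, not from the original source: a call such as
// __builtin_loongarch_break(32768) is rejected here, since 32768 is one
// past the accepted maximum of 32767.)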
3807 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 32767); 3808 case LoongArch::BI__builtin_loongarch_csrrd_w: 3809 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383); 3810 case LoongArch::BI__builtin_loongarch_csrwr_w: 3811 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383); 3812 case LoongArch::BI__builtin_loongarch_csrxchg_w: 3813 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383); 3814 case LoongArch::BI__builtin_loongarch_csrrd_d: 3815 if (!TI.hasFeature("64bit")) 3816 return Diag(TheCall->getBeginLoc(), 3817 diag::err_loongarch_builtin_requires_la64) 3818 << TheCall->getSourceRange(); 3819 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383); 3820 case LoongArch::BI__builtin_loongarch_csrwr_d: 3821 if (!TI.hasFeature("64bit")) 3822 return Diag(TheCall->getBeginLoc(), 3823 diag::err_loongarch_builtin_requires_la64) 3824 << TheCall->getSourceRange(); 3825 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383); 3826 case LoongArch::BI__builtin_loongarch_csrxchg_d: 3827 if (!TI.hasFeature("64bit")) 3828 return Diag(TheCall->getBeginLoc(), 3829 diag::err_loongarch_builtin_requires_la64) 3830 << TheCall->getSourceRange(); 3831 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383); 3832 case LoongArch::BI__builtin_loongarch_lddir_d: 3833 case LoongArch::BI__builtin_loongarch_ldpte_d: 3834 if (!TI.hasFeature("64bit")) 3835 return Diag(TheCall->getBeginLoc(), 3836 diag::err_loongarch_builtin_requires_la64) 3837 << TheCall->getSourceRange(); 3838 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 3839 case LoongArch::BI__builtin_loongarch_movfcsr2gr: 3840 case LoongArch::BI__builtin_loongarch_movgr2fcsr: 3841 return SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2)); 3842 } 3843 3844 return false; 3845 } 3846 3847 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 3848 unsigned BuiltinID, CallExpr *TheCall) { 3849 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 3850 CheckMipsBuiltinArgument(BuiltinID, TheCall); 3851 } 3852 3853 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 3854 CallExpr *TheCall) { 3855 3856 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 3857 BuiltinID <= Mips::BI__builtin_mips_lwx) { 3858 if (!TI.hasFeature("dsp")) 3859 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 3860 } 3861 3862 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 3863 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 3864 if (!TI.hasFeature("dspr2")) 3865 return Diag(TheCall->getBeginLoc(), 3866 diag::err_mips_builtin_requires_dspr2); 3867 } 3868 3869 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 3870 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 3871 if (!TI.hasFeature("msa")) 3872 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 3873 } 3874 3875 return false; 3876 } 3877 3878 // CheckMipsBuiltinArgument - Checks the constant value passed to the 3879 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 3880 // ordering for DSP is unspecified. MSA is ordered by the data format used 3881 // by the underlying instruction i.e., df/m, df/n and then by size. 3882 // 3883 // FIXME: The size tests here should instead be tablegen'd along with the 3884 // definitions from include/clang/Basic/BuiltinsMips.def. 3885 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 3886 // be too. 
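// Illustrative example (not part of the original comment): the MSA shifts
// such as __builtin_msa_slli_b take a 3-bit unsigned immediate, so
// __builtin_msa_slli_b(v, 8) falls outside the accepted [0, 7] range and is
// diagnosed by the checks below.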
3887 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
3888 unsigned i = 0, l = 0, u = 0, m = 0;
3889 switch (BuiltinID) {
3890 default: return false;
3891 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
3892 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
3893 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
3894 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
3895 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
3896 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
3897 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
3898 // MSA intrinsics. Instructions (which the intrinsics map to) which use the
3899 // df/m field.
3900 // These intrinsics take an unsigned 3 bit immediate.
3901 case Mips::BI__builtin_msa_bclri_b:
3902 case Mips::BI__builtin_msa_bnegi_b:
3903 case Mips::BI__builtin_msa_bseti_b:
3904 case Mips::BI__builtin_msa_sat_s_b:
3905 case Mips::BI__builtin_msa_sat_u_b:
3906 case Mips::BI__builtin_msa_slli_b:
3907 case Mips::BI__builtin_msa_srai_b:
3908 case Mips::BI__builtin_msa_srari_b:
3909 case Mips::BI__builtin_msa_srli_b:
3910 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
3911 case Mips::BI__builtin_msa_binsli_b:
3912 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
3913 // These intrinsics take an unsigned 4 bit immediate.
3914 case Mips::BI__builtin_msa_bclri_h:
3915 case Mips::BI__builtin_msa_bnegi_h:
3916 case Mips::BI__builtin_msa_bseti_h:
3917 case Mips::BI__builtin_msa_sat_s_h:
3918 case Mips::BI__builtin_msa_sat_u_h:
3919 case Mips::BI__builtin_msa_slli_h:
3920 case Mips::BI__builtin_msa_srai_h:
3921 case Mips::BI__builtin_msa_srari_h:
3922 case Mips::BI__builtin_msa_srli_h:
3923 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
3924 case Mips::BI__builtin_msa_binsli_h:
3925 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
3926 // These intrinsics take an unsigned 5 bit immediate.
3927 // The first block of intrinsics actually has an unsigned 5 bit field,
3928 // not a df/n field.
3929 case Mips::BI__builtin_msa_cfcmsa: 3930 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3931 case Mips::BI__builtin_msa_clei_u_b: 3932 case Mips::BI__builtin_msa_clei_u_h: 3933 case Mips::BI__builtin_msa_clei_u_w: 3934 case Mips::BI__builtin_msa_clei_u_d: 3935 case Mips::BI__builtin_msa_clti_u_b: 3936 case Mips::BI__builtin_msa_clti_u_h: 3937 case Mips::BI__builtin_msa_clti_u_w: 3938 case Mips::BI__builtin_msa_clti_u_d: 3939 case Mips::BI__builtin_msa_maxi_u_b: 3940 case Mips::BI__builtin_msa_maxi_u_h: 3941 case Mips::BI__builtin_msa_maxi_u_w: 3942 case Mips::BI__builtin_msa_maxi_u_d: 3943 case Mips::BI__builtin_msa_mini_u_b: 3944 case Mips::BI__builtin_msa_mini_u_h: 3945 case Mips::BI__builtin_msa_mini_u_w: 3946 case Mips::BI__builtin_msa_mini_u_d: 3947 case Mips::BI__builtin_msa_addvi_b: 3948 case Mips::BI__builtin_msa_addvi_h: 3949 case Mips::BI__builtin_msa_addvi_w: 3950 case Mips::BI__builtin_msa_addvi_d: 3951 case Mips::BI__builtin_msa_bclri_w: 3952 case Mips::BI__builtin_msa_bnegi_w: 3953 case Mips::BI__builtin_msa_bseti_w: 3954 case Mips::BI__builtin_msa_sat_s_w: 3955 case Mips::BI__builtin_msa_sat_u_w: 3956 case Mips::BI__builtin_msa_slli_w: 3957 case Mips::BI__builtin_msa_srai_w: 3958 case Mips::BI__builtin_msa_srari_w: 3959 case Mips::BI__builtin_msa_srli_w: 3960 case Mips::BI__builtin_msa_srlri_w: 3961 case Mips::BI__builtin_msa_subvi_b: 3962 case Mips::BI__builtin_msa_subvi_h: 3963 case Mips::BI__builtin_msa_subvi_w: 3964 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3965 case Mips::BI__builtin_msa_binsli_w: 3966 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3967 // These intrinsics take an unsigned 6 bit immediate. 3968 case Mips::BI__builtin_msa_bclri_d: 3969 case Mips::BI__builtin_msa_bnegi_d: 3970 case Mips::BI__builtin_msa_bseti_d: 3971 case Mips::BI__builtin_msa_sat_s_d: 3972 case Mips::BI__builtin_msa_sat_u_d: 3973 case Mips::BI__builtin_msa_slli_d: 3974 case Mips::BI__builtin_msa_srai_d: 3975 case Mips::BI__builtin_msa_srari_d: 3976 case Mips::BI__builtin_msa_srli_d: 3977 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3978 case Mips::BI__builtin_msa_binsli_d: 3979 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3980 // These intrinsics take a signed 5 bit immediate. 3981 case Mips::BI__builtin_msa_ceqi_b: 3982 case Mips::BI__builtin_msa_ceqi_h: 3983 case Mips::BI__builtin_msa_ceqi_w: 3984 case Mips::BI__builtin_msa_ceqi_d: 3985 case Mips::BI__builtin_msa_clti_s_b: 3986 case Mips::BI__builtin_msa_clti_s_h: 3987 case Mips::BI__builtin_msa_clti_s_w: 3988 case Mips::BI__builtin_msa_clti_s_d: 3989 case Mips::BI__builtin_msa_clei_s_b: 3990 case Mips::BI__builtin_msa_clei_s_h: 3991 case Mips::BI__builtin_msa_clei_s_w: 3992 case Mips::BI__builtin_msa_clei_s_d: 3993 case Mips::BI__builtin_msa_maxi_s_b: 3994 case Mips::BI__builtin_msa_maxi_s_h: 3995 case Mips::BI__builtin_msa_maxi_s_w: 3996 case Mips::BI__builtin_msa_maxi_s_d: 3997 case Mips::BI__builtin_msa_mini_s_b: 3998 case Mips::BI__builtin_msa_mini_s_h: 3999 case Mips::BI__builtin_msa_mini_s_w: 4000 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 4001 // These intrinsics take an unsigned 8 bit immediate. 
4002 case Mips::BI__builtin_msa_andi_b: 4003 case Mips::BI__builtin_msa_nori_b: 4004 case Mips::BI__builtin_msa_ori_b: 4005 case Mips::BI__builtin_msa_shf_b: 4006 case Mips::BI__builtin_msa_shf_h: 4007 case Mips::BI__builtin_msa_shf_w: 4008 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 4009 case Mips::BI__builtin_msa_bseli_b: 4010 case Mips::BI__builtin_msa_bmnzi_b: 4011 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 4012 // df/n format 4013 // These intrinsics take an unsigned 4 bit immediate. 4014 case Mips::BI__builtin_msa_copy_s_b: 4015 case Mips::BI__builtin_msa_copy_u_b: 4016 case Mips::BI__builtin_msa_insve_b: 4017 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 4018 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 4019 // These intrinsics take an unsigned 3 bit immediate. 4020 case Mips::BI__builtin_msa_copy_s_h: 4021 case Mips::BI__builtin_msa_copy_u_h: 4022 case Mips::BI__builtin_msa_insve_h: 4023 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 4024 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 4025 // These intrinsics take an unsigned 2 bit immediate. 4026 case Mips::BI__builtin_msa_copy_s_w: 4027 case Mips::BI__builtin_msa_copy_u_w: 4028 case Mips::BI__builtin_msa_insve_w: 4029 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 4030 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 4031 // These intrinsics take an unsigned 1 bit immediate. 4032 case Mips::BI__builtin_msa_copy_s_d: 4033 case Mips::BI__builtin_msa_copy_u_d: 4034 case Mips::BI__builtin_msa_insve_d: 4035 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 4036 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 4037 // Memory offsets and immediate loads. 4038 // These intrinsics take a signed 10 bit immediate. 4039 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 4040 case Mips::BI__builtin_msa_ldi_h: 4041 case Mips::BI__builtin_msa_ldi_w: 4042 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 4043 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 4044 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 4045 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 4046 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 4047 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 4048 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 4049 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 4050 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 4051 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 4052 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 4053 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 4054 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 4055 } 4056 4057 if (!m) 4058 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4059 4060 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 4061 SemaBuiltinConstantArgMultiple(TheCall, i, m); 4062 } 4063 4064 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 4065 /// advancing the pointer over the consumed characters. The decoded type is 4066 /// returned. 
If the decoded type represents a constant integer with a 4067 /// constraint on its value then Mask is set to that value. The type descriptors 4068 /// used in Str are specific to PPC MMA builtins and are documented in the file 4069 /// defining the PPC builtins. 4070 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 4071 unsigned &Mask) { 4072 bool RequireICE = false; 4073 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 4074 switch (*Str++) { 4075 case 'V': 4076 return Context.getVectorType(Context.UnsignedCharTy, 16, 4077 VectorType::VectorKind::AltiVecVector); 4078 case 'i': { 4079 char *End; 4080 unsigned size = strtoul(Str, &End, 10); 4081 assert(End != Str && "Missing constant parameter constraint"); 4082 Str = End; 4083 Mask = size; 4084 return Context.IntTy; 4085 } 4086 case 'W': { 4087 char *End; 4088 unsigned size = strtoul(Str, &End, 10); 4089 assert(End != Str && "Missing PowerPC MMA type size"); 4090 Str = End; 4091 QualType Type; 4092 switch (size) { 4093 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 4094 case size: Type = Context.Id##Ty; break; 4095 #include "clang/Basic/PPCTypes.def" 4096 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 4097 } 4098 bool CheckVectorArgs = false; 4099 while (!CheckVectorArgs) { 4100 switch (*Str++) { 4101 case '*': 4102 Type = Context.getPointerType(Type); 4103 break; 4104 case 'C': 4105 Type = Type.withConst(); 4106 break; 4107 default: 4108 CheckVectorArgs = true; 4109 --Str; 4110 break; 4111 } 4112 } 4113 return Type; 4114 } 4115 default: 4116 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 4117 } 4118 } 4119 4120 static bool isPPC_64Builtin(unsigned BuiltinID) { 4121 // These builtins only work on PPC 64bit targets. 4122 switch (BuiltinID) { 4123 case PPC::BI__builtin_divde: 4124 case PPC::BI__builtin_divdeu: 4125 case PPC::BI__builtin_bpermd: 4126 case PPC::BI__builtin_pdepd: 4127 case PPC::BI__builtin_pextd: 4128 case PPC::BI__builtin_ppc_ldarx: 4129 case PPC::BI__builtin_ppc_stdcx: 4130 case PPC::BI__builtin_ppc_tdw: 4131 case PPC::BI__builtin_ppc_trapd: 4132 case PPC::BI__builtin_ppc_cmpeqb: 4133 case PPC::BI__builtin_ppc_setb: 4134 case PPC::BI__builtin_ppc_mulhd: 4135 case PPC::BI__builtin_ppc_mulhdu: 4136 case PPC::BI__builtin_ppc_maddhd: 4137 case PPC::BI__builtin_ppc_maddhdu: 4138 case PPC::BI__builtin_ppc_maddld: 4139 case PPC::BI__builtin_ppc_load8r: 4140 case PPC::BI__builtin_ppc_store8r: 4141 case PPC::BI__builtin_ppc_insert_exp: 4142 case PPC::BI__builtin_ppc_extract_sig: 4143 case PPC::BI__builtin_ppc_addex: 4144 case PPC::BI__builtin_darn: 4145 case PPC::BI__builtin_darn_raw: 4146 case PPC::BI__builtin_ppc_compare_and_swaplp: 4147 case PPC::BI__builtin_ppc_fetch_and_addlp: 4148 case PPC::BI__builtin_ppc_fetch_and_andlp: 4149 case PPC::BI__builtin_ppc_fetch_and_orlp: 4150 case PPC::BI__builtin_ppc_fetch_and_swaplp: 4151 return true; 4152 } 4153 return false; 4154 } 4155 4156 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall, 4157 StringRef FeatureToCheck, unsigned DiagID, 4158 StringRef DiagArg = "") { 4159 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck)) 4160 return false; 4161 4162 if (DiagArg.empty()) 4163 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange(); 4164 else 4165 S.Diag(TheCall->getBeginLoc(), DiagID) 4166 << DiagArg << TheCall->getSourceRange(); 4167 4168 return true; 4169 } 4170 4171 /// Returns true if the argument consists of one contiguous run of 1s with any 4172 /// number of 0s on either side. 
The 1s are allowed to wrap from LSB to MSB, so 4173 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 4174 /// since all 1s are not contiguous. 4175 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) { 4176 llvm::APSInt Result; 4177 // We can't check the value of a dependent argument. 4178 Expr *Arg = TheCall->getArg(ArgNum); 4179 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4180 return false; 4181 4182 // Check constant-ness first. 4183 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4184 return true; 4185 4186 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s. 4187 if (Result.isShiftedMask() || (~Result).isShiftedMask()) 4188 return false; 4189 4190 return Diag(TheCall->getBeginLoc(), 4191 diag::err_argument_not_contiguous_bit_field) 4192 << ArgNum << Arg->getSourceRange(); 4193 } 4194 4195 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 4196 CallExpr *TheCall) { 4197 unsigned i = 0, l = 0, u = 0; 4198 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 4199 llvm::APSInt Result; 4200 4201 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit) 4202 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 4203 << TheCall->getSourceRange(); 4204 4205 switch (BuiltinID) { 4206 default: return false; 4207 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 4208 case PPC::BI__builtin_altivec_crypto_vshasigmad: 4209 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 4210 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4211 case PPC::BI__builtin_altivec_dss: 4212 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 4213 case PPC::BI__builtin_tbegin: 4214 case PPC::BI__builtin_tend: 4215 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) || 4216 SemaFeatureCheck(*this, TheCall, "htm", 4217 diag::err_ppc_builtin_requires_htm); 4218 case PPC::BI__builtin_tsr: 4219 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 4220 SemaFeatureCheck(*this, TheCall, "htm", 4221 diag::err_ppc_builtin_requires_htm); 4222 case PPC::BI__builtin_tabortwc: 4223 case PPC::BI__builtin_tabortdc: 4224 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 4225 SemaFeatureCheck(*this, TheCall, "htm", 4226 diag::err_ppc_builtin_requires_htm); 4227 case PPC::BI__builtin_tabortwci: 4228 case PPC::BI__builtin_tabortdci: 4229 return SemaFeatureCheck(*this, TheCall, "htm", 4230 diag::err_ppc_builtin_requires_htm) || 4231 (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 4232 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31)); 4233 case PPC::BI__builtin_tabort: 4234 case PPC::BI__builtin_tcheck: 4235 case PPC::BI__builtin_treclaim: 4236 case PPC::BI__builtin_trechkpt: 4237 case PPC::BI__builtin_tendall: 4238 case PPC::BI__builtin_tresume: 4239 case PPC::BI__builtin_tsuspend: 4240 case PPC::BI__builtin_get_texasr: 4241 case PPC::BI__builtin_get_texasru: 4242 case PPC::BI__builtin_get_tfhar: 4243 case PPC::BI__builtin_get_tfiar: 4244 case PPC::BI__builtin_set_texasr: 4245 case PPC::BI__builtin_set_texasru: 4246 case PPC::BI__builtin_set_tfhar: 4247 case PPC::BI__builtin_set_tfiar: 4248 case PPC::BI__builtin_ttest: 4249 return SemaFeatureCheck(*this, TheCall, "htm", 4250 diag::err_ppc_builtin_requires_htm); 4251 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05', 4252 // __builtin_(un)pack_longdouble are available only if long double uses IBM 4253 // extended double representation. 
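// (Illustrative note, not from the original source: when long double is not
// the IBM double-double format, the check below emits
// err_ppc_builtin_requires_abi naming the "ibmlongdouble" ABI.)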
4254 case PPC::BI__builtin_unpack_longdouble: 4255 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1)) 4256 return true; 4257 [[fallthrough]]; 4258 case PPC::BI__builtin_pack_longdouble: 4259 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble()) 4260 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi) 4261 << "ibmlongdouble"; 4262 return false; 4263 case PPC::BI__builtin_altivec_dst: 4264 case PPC::BI__builtin_altivec_dstt: 4265 case PPC::BI__builtin_altivec_dstst: 4266 case PPC::BI__builtin_altivec_dststt: 4267 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4268 case PPC::BI__builtin_vsx_xxpermdi: 4269 case PPC::BI__builtin_vsx_xxsldwi: 4270 return SemaBuiltinVSX(TheCall); 4271 case PPC::BI__builtin_divwe: 4272 case PPC::BI__builtin_divweu: 4273 case PPC::BI__builtin_divde: 4274 case PPC::BI__builtin_divdeu: 4275 return SemaFeatureCheck(*this, TheCall, "extdiv", 4276 diag::err_ppc_builtin_only_on_arch, "7"); 4277 case PPC::BI__builtin_bpermd: 4278 return SemaFeatureCheck(*this, TheCall, "bpermd", 4279 diag::err_ppc_builtin_only_on_arch, "7"); 4280 case PPC::BI__builtin_unpack_vector_int128: 4281 return SemaFeatureCheck(*this, TheCall, "vsx", 4282 diag::err_ppc_builtin_only_on_arch, "7") || 4283 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4284 case PPC::BI__builtin_pack_vector_int128: 4285 return SemaFeatureCheck(*this, TheCall, "vsx", 4286 diag::err_ppc_builtin_only_on_arch, "7"); 4287 case PPC::BI__builtin_pdepd: 4288 case PPC::BI__builtin_pextd: 4289 return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions", 4290 diag::err_ppc_builtin_only_on_arch, "10"); 4291 case PPC::BI__builtin_altivec_vgnb: 4292 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 4293 case PPC::BI__builtin_vsx_xxeval: 4294 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 4295 case PPC::BI__builtin_altivec_vsldbi: 4296 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4297 case PPC::BI__builtin_altivec_vsrdbi: 4298 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4299 case PPC::BI__builtin_vsx_xxpermx: 4300 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 4301 case PPC::BI__builtin_ppc_tw: 4302 case PPC::BI__builtin_ppc_tdw: 4303 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 4304 case PPC::BI__builtin_ppc_cmpeqb: 4305 case PPC::BI__builtin_ppc_setb: 4306 case PPC::BI__builtin_ppc_maddhd: 4307 case PPC::BI__builtin_ppc_maddhdu: 4308 case PPC::BI__builtin_ppc_maddld: 4309 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4310 diag::err_ppc_builtin_only_on_arch, "9"); 4311 case PPC::BI__builtin_ppc_cmprb: 4312 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4313 diag::err_ppc_builtin_only_on_arch, "9") || 4314 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 4315 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 4316 // be a constant that represents a contiguous bit field. 
4317 case PPC::BI__builtin_ppc_rlwnm: 4318 return SemaValueIsRunOfOnes(TheCall, 2); 4319 case PPC::BI__builtin_ppc_rlwimi: 4320 case PPC::BI__builtin_ppc_rldimi: 4321 return SemaBuiltinConstantArg(TheCall, 2, Result) || 4322 SemaValueIsRunOfOnes(TheCall, 3); 4323 case PPC::BI__builtin_ppc_extract_exp: 4324 case PPC::BI__builtin_ppc_extract_sig: 4325 case PPC::BI__builtin_ppc_insert_exp: 4326 return SemaFeatureCheck(*this, TheCall, "power9-vector", 4327 diag::err_ppc_builtin_only_on_arch, "9"); 4328 case PPC::BI__builtin_ppc_addex: { 4329 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4330 diag::err_ppc_builtin_only_on_arch, "9") || 4331 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 4332 return true; 4333 // Output warning for reserved values 1 to 3. 4334 int ArgValue = 4335 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 4336 if (ArgValue != 0) 4337 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 4338 << ArgValue; 4339 return false; 4340 } 4341 case PPC::BI__builtin_ppc_mtfsb0: 4342 case PPC::BI__builtin_ppc_mtfsb1: 4343 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 4344 case PPC::BI__builtin_ppc_mtfsf: 4345 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 4346 case PPC::BI__builtin_ppc_mtfsfi: 4347 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 4348 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 4349 case PPC::BI__builtin_ppc_alignx: 4350 return SemaBuiltinConstantArgPower2(TheCall, 0); 4351 case PPC::BI__builtin_ppc_rdlam: 4352 return SemaValueIsRunOfOnes(TheCall, 2); 4353 case PPC::BI__builtin_ppc_icbt: 4354 case PPC::BI__builtin_ppc_sthcx: 4355 case PPC::BI__builtin_ppc_stbcx: 4356 case PPC::BI__builtin_ppc_lharx: 4357 case PPC::BI__builtin_ppc_lbarx: 4358 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 4359 diag::err_ppc_builtin_only_on_arch, "8"); 4360 case PPC::BI__builtin_vsx_ldrmb: 4361 case PPC::BI__builtin_vsx_strmb: 4362 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 4363 diag::err_ppc_builtin_only_on_arch, "8") || 4364 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 4365 case PPC::BI__builtin_altivec_vcntmbb: 4366 case PPC::BI__builtin_altivec_vcntmbh: 4367 case PPC::BI__builtin_altivec_vcntmbw: 4368 case PPC::BI__builtin_altivec_vcntmbd: 4369 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4370 case PPC::BI__builtin_darn: 4371 case PPC::BI__builtin_darn_raw: 4372 case PPC::BI__builtin_darn_32: 4373 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4374 diag::err_ppc_builtin_only_on_arch, "9"); 4375 case PPC::BI__builtin_vsx_xxgenpcvbm: 4376 case PPC::BI__builtin_vsx_xxgenpcvhm: 4377 case PPC::BI__builtin_vsx_xxgenpcvwm: 4378 case PPC::BI__builtin_vsx_xxgenpcvdm: 4379 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 4380 case PPC::BI__builtin_ppc_compare_exp_uo: 4381 case PPC::BI__builtin_ppc_compare_exp_lt: 4382 case PPC::BI__builtin_ppc_compare_exp_gt: 4383 case PPC::BI__builtin_ppc_compare_exp_eq: 4384 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4385 diag::err_ppc_builtin_only_on_arch, "9") || 4386 SemaFeatureCheck(*this, TheCall, "vsx", 4387 diag::err_ppc_builtin_requires_vsx); 4388 case PPC::BI__builtin_ppc_test_data_class: { 4389 // Check if the first argument of the __builtin_ppc_test_data_class call is 4390 // valid. The argument must be 'float' or 'double' or '__float128'. 
4391 QualType ArgType = TheCall->getArg(0)->getType(); 4392 if (ArgType != QualType(Context.FloatTy) && 4393 ArgType != QualType(Context.DoubleTy) && 4394 ArgType != QualType(Context.Float128Ty)) 4395 return Diag(TheCall->getBeginLoc(), 4396 diag::err_ppc_invalid_test_data_class_type); 4397 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4398 diag::err_ppc_builtin_only_on_arch, "9") || 4399 SemaFeatureCheck(*this, TheCall, "vsx", 4400 diag::err_ppc_builtin_requires_vsx) || 4401 SemaBuiltinConstantArgRange(TheCall, 1, 0, 127); 4402 } 4403 case PPC::BI__builtin_ppc_maxfe: 4404 case PPC::BI__builtin_ppc_minfe: 4405 case PPC::BI__builtin_ppc_maxfl: 4406 case PPC::BI__builtin_ppc_minfl: 4407 case PPC::BI__builtin_ppc_maxfs: 4408 case PPC::BI__builtin_ppc_minfs: { 4409 if (Context.getTargetInfo().getTriple().isOSAIX() && 4410 (BuiltinID == PPC::BI__builtin_ppc_maxfe || 4411 BuiltinID == PPC::BI__builtin_ppc_minfe)) 4412 return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type) 4413 << "builtin" << true << 128 << QualType(Context.LongDoubleTy) 4414 << false << Context.getTargetInfo().getTriple().str(); 4415 // Argument type should be exact. 4416 QualType ArgType = QualType(Context.LongDoubleTy); 4417 if (BuiltinID == PPC::BI__builtin_ppc_maxfl || 4418 BuiltinID == PPC::BI__builtin_ppc_minfl) 4419 ArgType = QualType(Context.DoubleTy); 4420 else if (BuiltinID == PPC::BI__builtin_ppc_maxfs || 4421 BuiltinID == PPC::BI__builtin_ppc_minfs) 4422 ArgType = QualType(Context.FloatTy); 4423 for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I) 4424 if (TheCall->getArg(I)->getType() != ArgType) 4425 return Diag(TheCall->getBeginLoc(), 4426 diag::err_typecheck_convert_incompatible) 4427 << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0; 4428 return false; 4429 } 4430 case PPC::BI__builtin_ppc_load8r: 4431 case PPC::BI__builtin_ppc_store8r: 4432 return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions", 4433 diag::err_ppc_builtin_only_on_arch, "7"); 4434 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ 4435 case PPC::BI__builtin_##Name: \ 4436 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types); 4437 #include "clang/Basic/BuiltinsPPC.def" 4438 } 4439 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4440 } 4441 4442 // Check if the given type is a non-pointer PPC MMA type. This function is used 4443 // in Sema to prevent invalid uses of restricted PPC MMA types. 
4444 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) { 4445 if (Type->isPointerType() || Type->isArrayType()) 4446 return false; 4447 4448 QualType CoreType = Type.getCanonicalType().getUnqualifiedType(); 4449 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty 4450 if (false 4451 #include "clang/Basic/PPCTypes.def" 4452 ) { 4453 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type); 4454 return true; 4455 } 4456 return false; 4457 } 4458 4459 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, 4460 CallExpr *TheCall) { 4461 // position of memory order and scope arguments in the builtin 4462 unsigned OrderIndex, ScopeIndex; 4463 switch (BuiltinID) { 4464 case AMDGPU::BI__builtin_amdgcn_atomic_inc32: 4465 case AMDGPU::BI__builtin_amdgcn_atomic_inc64: 4466 case AMDGPU::BI__builtin_amdgcn_atomic_dec32: 4467 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: 4468 OrderIndex = 2; 4469 ScopeIndex = 3; 4470 break; 4471 case AMDGPU::BI__builtin_amdgcn_fence: 4472 OrderIndex = 0; 4473 ScopeIndex = 1; 4474 break; 4475 default: 4476 return false; 4477 } 4478 4479 ExprResult Arg = TheCall->getArg(OrderIndex); 4480 auto ArgExpr = Arg.get(); 4481 Expr::EvalResult ArgResult; 4482 4483 if (!ArgExpr->EvaluateAsInt(ArgResult, Context)) 4484 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int) 4485 << ArgExpr->getType(); 4486 auto Ord = ArgResult.Val.getInt().getZExtValue(); 4487 4488 // Check validity of memory ordering as per C11 / C++11's memody model. 4489 // Only fence needs check. Atomic dec/inc allow all memory orders. 4490 if (!llvm::isValidAtomicOrderingCABI(Ord)) 4491 return Diag(ArgExpr->getBeginLoc(), 4492 diag::warn_atomic_op_has_invalid_memory_order) 4493 << ArgExpr->getSourceRange(); 4494 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 4495 case llvm::AtomicOrderingCABI::relaxed: 4496 case llvm::AtomicOrderingCABI::consume: 4497 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 4498 return Diag(ArgExpr->getBeginLoc(), 4499 diag::warn_atomic_op_has_invalid_memory_order) 4500 << ArgExpr->getSourceRange(); 4501 break; 4502 case llvm::AtomicOrderingCABI::acquire: 4503 case llvm::AtomicOrderingCABI::release: 4504 case llvm::AtomicOrderingCABI::acq_rel: 4505 case llvm::AtomicOrderingCABI::seq_cst: 4506 break; 4507 } 4508 4509 Arg = TheCall->getArg(ScopeIndex); 4510 ArgExpr = Arg.get(); 4511 Expr::EvalResult ArgResult1; 4512 // Check that sync scope is a constant literal 4513 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 4514 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 4515 << ArgExpr->getType(); 4516 4517 return false; 4518 } 4519 4520 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 4521 llvm::APSInt Result; 4522 4523 // We can't check the value of a dependent argument. 4524 Expr *Arg = TheCall->getArg(ArgNum); 4525 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4526 return false; 4527 4528 // Check constant-ness first. 4529 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4530 return true; 4531 4532 int64_t Val = Result.getSExtValue(); 4533 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 4534 return false; 4535 4536 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 4537 << Arg->getSourceRange(); 4538 } 4539 4540 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 4541 unsigned BuiltinID, 4542 CallExpr *TheCall) { 4543 // CodeGenFunction can also detect this, but this gives a better error 4544 // message. 
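  // The required-features string parsed below is a comma-separated list of
  // groups, each group being a set of '|'-separated alternatives: every group
  // must be satisfied, and any one alternative within a group suffices. A
  // hypothetical example: "64bit,zbb|zbkb" would require RV64 plus either Zbb
  // or Zbkb.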
4545 bool FeatureMissing = false; 4546 SmallVector<StringRef> ReqFeatures; 4547 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 4548 Features.split(ReqFeatures, ','); 4549 4550 // Check if each required feature is included 4551 for (StringRef F : ReqFeatures) { 4552 SmallVector<StringRef> ReqOpFeatures; 4553 F.split(ReqOpFeatures, '|'); 4554 4555 if (llvm::none_of(ReqOpFeatures, 4556 [&TI](StringRef OF) { return TI.hasFeature(OF); })) { 4557 std::string FeatureStrs; 4558 bool IsExtension = true; 4559 for (StringRef OF : ReqOpFeatures) { 4560 // If the feature is 64bit, alter the string so it will print better in 4561 // the diagnostic. 4562 if (OF == "64bit") { 4563 assert(ReqOpFeatures.size() == 1 && "Expected '64bit' to be alone"); 4564 OF = "RV64"; 4565 IsExtension = false; 4566 } 4567 if (OF == "32bit") { 4568 assert(ReqOpFeatures.size() == 1 && "Expected '32bit' to be alone"); 4569 OF = "RV32"; 4570 IsExtension = false; 4571 } 4572 4573 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 4574 OF.consume_front("experimental-"); 4575 std::string FeatureStr = OF.str(); 4576 FeatureStr[0] = std::toupper(FeatureStr[0]); 4577 // Combine strings. 4578 FeatureStrs += FeatureStrs == "" ? "" : ", "; 4579 FeatureStrs += "'"; 4580 FeatureStrs += FeatureStr; 4581 FeatureStrs += "'"; 4582 } 4583 // Error message 4584 FeatureMissing = true; 4585 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 4586 << IsExtension 4587 << TheCall->getSourceRange() << StringRef(FeatureStrs); 4588 } 4589 } 4590 4591 if (FeatureMissing) 4592 return true; 4593 4594 switch (BuiltinID) { 4595 case RISCVVector::BI__builtin_rvv_vsetvli: 4596 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 4597 CheckRISCVLMUL(TheCall, 2); 4598 case RISCVVector::BI__builtin_rvv_vsetvlimax: 4599 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4600 CheckRISCVLMUL(TheCall, 1); 4601 case RISCVVector::BI__builtin_rvv_vget_v: { 4602 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4603 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4604 TheCall->getType().getCanonicalType().getTypePtr())); 4605 ASTContext::BuiltinVectorTypeInfo VecInfo = 4606 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4607 TheCall->getArg(0)->getType().getCanonicalType().getTypePtr())); 4608 unsigned MaxIndex = 4609 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) / 4610 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors); 4611 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4612 } 4613 case RISCVVector::BI__builtin_rvv_vset_v: { 4614 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4615 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4616 TheCall->getType().getCanonicalType().getTypePtr())); 4617 ASTContext::BuiltinVectorTypeInfo VecInfo = 4618 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4619 TheCall->getArg(2)->getType().getCanonicalType().getTypePtr())); 4620 unsigned MaxIndex = 4621 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) / 4622 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors); 4623 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4624 } 4625 // Check if byteselect is in [0, 3] 4626 case RISCV::BI__builtin_riscv_aes32dsi_32: 4627 case RISCV::BI__builtin_riscv_aes32dsmi_32: 4628 case RISCV::BI__builtin_riscv_aes32esi_32: 4629 case RISCV::BI__builtin_riscv_aes32esmi_32: 4630 case RISCV::BI__builtin_riscv_sm4ks: 4631 case RISCV::BI__builtin_riscv_sm4ed: 4632 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4633 // 
Check if rnum is in [0, 10] 4634 case RISCV::BI__builtin_riscv_aes64ks1i_64: 4635 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10); 4636 } 4637 4638 return false; 4639 } 4640 4641 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 4642 CallExpr *TheCall) { 4643 if (BuiltinID == SystemZ::BI__builtin_tabort) { 4644 Expr *Arg = TheCall->getArg(0); 4645 if (std::optional<llvm::APSInt> AbortCode = 4646 Arg->getIntegerConstantExpr(Context)) 4647 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 4648 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 4649 << Arg->getSourceRange(); 4650 } 4651 4652 // For intrinsics which take an immediate value as part of the instruction, 4653 // range check them here. 4654 unsigned i = 0, l = 0, u = 0; 4655 switch (BuiltinID) { 4656 default: return false; 4657 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 4658 case SystemZ::BI__builtin_s390_verimb: 4659 case SystemZ::BI__builtin_s390_verimh: 4660 case SystemZ::BI__builtin_s390_verimf: 4661 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 4662 case SystemZ::BI__builtin_s390_vfaeb: 4663 case SystemZ::BI__builtin_s390_vfaeh: 4664 case SystemZ::BI__builtin_s390_vfaef: 4665 case SystemZ::BI__builtin_s390_vfaebs: 4666 case SystemZ::BI__builtin_s390_vfaehs: 4667 case SystemZ::BI__builtin_s390_vfaefs: 4668 case SystemZ::BI__builtin_s390_vfaezb: 4669 case SystemZ::BI__builtin_s390_vfaezh: 4670 case SystemZ::BI__builtin_s390_vfaezf: 4671 case SystemZ::BI__builtin_s390_vfaezbs: 4672 case SystemZ::BI__builtin_s390_vfaezhs: 4673 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 4674 case SystemZ::BI__builtin_s390_vfisb: 4675 case SystemZ::BI__builtin_s390_vfidb: 4676 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 4677 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4678 case SystemZ::BI__builtin_s390_vftcisb: 4679 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 4680 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 4681 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 4682 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 4683 case SystemZ::BI__builtin_s390_vstrcb: 4684 case SystemZ::BI__builtin_s390_vstrch: 4685 case SystemZ::BI__builtin_s390_vstrcf: 4686 case SystemZ::BI__builtin_s390_vstrczb: 4687 case SystemZ::BI__builtin_s390_vstrczh: 4688 case SystemZ::BI__builtin_s390_vstrczf: 4689 case SystemZ::BI__builtin_s390_vstrcbs: 4690 case SystemZ::BI__builtin_s390_vstrchs: 4691 case SystemZ::BI__builtin_s390_vstrcfs: 4692 case SystemZ::BI__builtin_s390_vstrczbs: 4693 case SystemZ::BI__builtin_s390_vstrczhs: 4694 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 4695 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 4696 case SystemZ::BI__builtin_s390_vfminsb: 4697 case SystemZ::BI__builtin_s390_vfmaxsb: 4698 case SystemZ::BI__builtin_s390_vfmindb: 4699 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 4700 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 4701 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 4702 case SystemZ::BI__builtin_s390_vclfnhs: 4703 case SystemZ::BI__builtin_s390_vclfnls: 4704 case SystemZ::BI__builtin_s390_vcfn: 4705 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 4706 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 4707 } 4708 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 
4709 } 4710 4711 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 4712 /// This checks that the target supports __builtin_cpu_supports and 4713 /// that the string argument is constant and valid. 4714 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 4715 CallExpr *TheCall) { 4716 Expr *Arg = TheCall->getArg(0); 4717 4718 // Check if the argument is a string literal. 4719 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4720 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4721 << Arg->getSourceRange(); 4722 4723 // Check the contents of the string. 4724 StringRef Feature = 4725 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4726 if (!TI.validateCpuSupports(Feature)) 4727 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 4728 << Arg->getSourceRange(); 4729 return false; 4730 } 4731 4732 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 4733 /// This checks that the target supports __builtin_cpu_is and 4734 /// that the string argument is constant and valid. 4735 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 4736 Expr *Arg = TheCall->getArg(0); 4737 4738 // Check if the argument is a string literal. 4739 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4740 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4741 << Arg->getSourceRange(); 4742 4743 // Check the contents of the string. 4744 StringRef Feature = 4745 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4746 if (!TI.validateCpuIs(Feature)) 4747 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 4748 << Arg->getSourceRange(); 4749 return false; 4750 } 4751 4752 // Check if the rounding mode is legal. 4753 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 4754 // Indicates if this instruction has rounding control or just SAE. 
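  // Sketch of the immediates accepted by the check at the end of this
  // function: 4 (_MM_FROUND_CUR_DIRECTION) and 8 (_MM_FROUND_NO_EXC) are
  // always allowed; with rounding control, 8..11 encode a rounding mode in
  // bits 1:0 combined with the NO_EXC bit; without it, 12 (CUR_DIRECTION |
  // NO_EXC) is also allowed. The intrinsic macro names are given for
  // illustration only; the numeric values are what is actually checked.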
4755 bool HasRC = false; 4756 4757 unsigned ArgNum = 0; 4758 switch (BuiltinID) { 4759 default: 4760 return false; 4761 case X86::BI__builtin_ia32_vcvttsd2si32: 4762 case X86::BI__builtin_ia32_vcvttsd2si64: 4763 case X86::BI__builtin_ia32_vcvttsd2usi32: 4764 case X86::BI__builtin_ia32_vcvttsd2usi64: 4765 case X86::BI__builtin_ia32_vcvttss2si32: 4766 case X86::BI__builtin_ia32_vcvttss2si64: 4767 case X86::BI__builtin_ia32_vcvttss2usi32: 4768 case X86::BI__builtin_ia32_vcvttss2usi64: 4769 case X86::BI__builtin_ia32_vcvttsh2si32: 4770 case X86::BI__builtin_ia32_vcvttsh2si64: 4771 case X86::BI__builtin_ia32_vcvttsh2usi32: 4772 case X86::BI__builtin_ia32_vcvttsh2usi64: 4773 ArgNum = 1; 4774 break; 4775 case X86::BI__builtin_ia32_maxpd512: 4776 case X86::BI__builtin_ia32_maxps512: 4777 case X86::BI__builtin_ia32_minpd512: 4778 case X86::BI__builtin_ia32_minps512: 4779 case X86::BI__builtin_ia32_maxph512: 4780 case X86::BI__builtin_ia32_minph512: 4781 ArgNum = 2; 4782 break; 4783 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 4784 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 4785 case X86::BI__builtin_ia32_cvtps2pd512_mask: 4786 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 4787 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 4788 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 4789 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 4790 case X86::BI__builtin_ia32_cvttps2dq512_mask: 4791 case X86::BI__builtin_ia32_cvttps2qq512_mask: 4792 case X86::BI__builtin_ia32_cvttps2udq512_mask: 4793 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 4794 case X86::BI__builtin_ia32_vcvttph2w512_mask: 4795 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 4796 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 4797 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 4798 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 4799 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 4800 case X86::BI__builtin_ia32_exp2pd_mask: 4801 case X86::BI__builtin_ia32_exp2ps_mask: 4802 case X86::BI__builtin_ia32_getexppd512_mask: 4803 case X86::BI__builtin_ia32_getexpps512_mask: 4804 case X86::BI__builtin_ia32_getexpph512_mask: 4805 case X86::BI__builtin_ia32_rcp28pd_mask: 4806 case X86::BI__builtin_ia32_rcp28ps_mask: 4807 case X86::BI__builtin_ia32_rsqrt28pd_mask: 4808 case X86::BI__builtin_ia32_rsqrt28ps_mask: 4809 case X86::BI__builtin_ia32_vcomisd: 4810 case X86::BI__builtin_ia32_vcomiss: 4811 case X86::BI__builtin_ia32_vcomish: 4812 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 4813 ArgNum = 3; 4814 break; 4815 case X86::BI__builtin_ia32_cmppd512_mask: 4816 case X86::BI__builtin_ia32_cmpps512_mask: 4817 case X86::BI__builtin_ia32_cmpsd_mask: 4818 case X86::BI__builtin_ia32_cmpss_mask: 4819 case X86::BI__builtin_ia32_cmpsh_mask: 4820 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 4821 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 4822 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 4823 case X86::BI__builtin_ia32_getexpsd128_round_mask: 4824 case X86::BI__builtin_ia32_getexpss128_round_mask: 4825 case X86::BI__builtin_ia32_getexpsh128_round_mask: 4826 case X86::BI__builtin_ia32_getmantpd512_mask: 4827 case X86::BI__builtin_ia32_getmantps512_mask: 4828 case X86::BI__builtin_ia32_getmantph512_mask: 4829 case X86::BI__builtin_ia32_maxsd_round_mask: 4830 case X86::BI__builtin_ia32_maxss_round_mask: 4831 case X86::BI__builtin_ia32_maxsh_round_mask: 4832 case X86::BI__builtin_ia32_minsd_round_mask: 4833 case X86::BI__builtin_ia32_minss_round_mask: 4834 case X86::BI__builtin_ia32_minsh_round_mask: 4835 case X86::BI__builtin_ia32_rcp28sd_round_mask: 
4836 case X86::BI__builtin_ia32_rcp28ss_round_mask: 4837 case X86::BI__builtin_ia32_reducepd512_mask: 4838 case X86::BI__builtin_ia32_reduceps512_mask: 4839 case X86::BI__builtin_ia32_reduceph512_mask: 4840 case X86::BI__builtin_ia32_rndscalepd_mask: 4841 case X86::BI__builtin_ia32_rndscaleps_mask: 4842 case X86::BI__builtin_ia32_rndscaleph_mask: 4843 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 4844 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 4845 ArgNum = 4; 4846 break; 4847 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4848 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4849 case X86::BI__builtin_ia32_fixupimmps512_mask: 4850 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4851 case X86::BI__builtin_ia32_fixupimmsd_mask: 4852 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4853 case X86::BI__builtin_ia32_fixupimmss_mask: 4854 case X86::BI__builtin_ia32_fixupimmss_maskz: 4855 case X86::BI__builtin_ia32_getmantsd_round_mask: 4856 case X86::BI__builtin_ia32_getmantss_round_mask: 4857 case X86::BI__builtin_ia32_getmantsh_round_mask: 4858 case X86::BI__builtin_ia32_rangepd512_mask: 4859 case X86::BI__builtin_ia32_rangeps512_mask: 4860 case X86::BI__builtin_ia32_rangesd128_round_mask: 4861 case X86::BI__builtin_ia32_rangess128_round_mask: 4862 case X86::BI__builtin_ia32_reducesd_mask: 4863 case X86::BI__builtin_ia32_reducess_mask: 4864 case X86::BI__builtin_ia32_reducesh_mask: 4865 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4866 case X86::BI__builtin_ia32_rndscaless_round_mask: 4867 case X86::BI__builtin_ia32_rndscalesh_round_mask: 4868 ArgNum = 5; 4869 break; 4870 case X86::BI__builtin_ia32_vcvtsd2si64: 4871 case X86::BI__builtin_ia32_vcvtsd2si32: 4872 case X86::BI__builtin_ia32_vcvtsd2usi32: 4873 case X86::BI__builtin_ia32_vcvtsd2usi64: 4874 case X86::BI__builtin_ia32_vcvtss2si32: 4875 case X86::BI__builtin_ia32_vcvtss2si64: 4876 case X86::BI__builtin_ia32_vcvtss2usi32: 4877 case X86::BI__builtin_ia32_vcvtss2usi64: 4878 case X86::BI__builtin_ia32_vcvtsh2si32: 4879 case X86::BI__builtin_ia32_vcvtsh2si64: 4880 case X86::BI__builtin_ia32_vcvtsh2usi32: 4881 case X86::BI__builtin_ia32_vcvtsh2usi64: 4882 case X86::BI__builtin_ia32_sqrtpd512: 4883 case X86::BI__builtin_ia32_sqrtps512: 4884 case X86::BI__builtin_ia32_sqrtph512: 4885 ArgNum = 1; 4886 HasRC = true; 4887 break; 4888 case X86::BI__builtin_ia32_addph512: 4889 case X86::BI__builtin_ia32_divph512: 4890 case X86::BI__builtin_ia32_mulph512: 4891 case X86::BI__builtin_ia32_subph512: 4892 case X86::BI__builtin_ia32_addpd512: 4893 case X86::BI__builtin_ia32_addps512: 4894 case X86::BI__builtin_ia32_divpd512: 4895 case X86::BI__builtin_ia32_divps512: 4896 case X86::BI__builtin_ia32_mulpd512: 4897 case X86::BI__builtin_ia32_mulps512: 4898 case X86::BI__builtin_ia32_subpd512: 4899 case X86::BI__builtin_ia32_subps512: 4900 case X86::BI__builtin_ia32_cvtsi2sd64: 4901 case X86::BI__builtin_ia32_cvtsi2ss32: 4902 case X86::BI__builtin_ia32_cvtsi2ss64: 4903 case X86::BI__builtin_ia32_cvtusi2sd64: 4904 case X86::BI__builtin_ia32_cvtusi2ss32: 4905 case X86::BI__builtin_ia32_cvtusi2ss64: 4906 case X86::BI__builtin_ia32_vcvtusi2sh: 4907 case X86::BI__builtin_ia32_vcvtusi642sh: 4908 case X86::BI__builtin_ia32_vcvtsi2sh: 4909 case X86::BI__builtin_ia32_vcvtsi642sh: 4910 ArgNum = 2; 4911 HasRC = true; 4912 break; 4913 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 4914 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 4915 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 4916 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 4917 case 
X86::BI__builtin_ia32_cvtpd2ps512_mask: 4918 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 4919 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 4920 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4921 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4922 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4923 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4924 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4925 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4926 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4927 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4928 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4929 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4930 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 4931 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 4932 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 4933 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 4934 case X86::BI__builtin_ia32_vcvtph2w512_mask: 4935 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 4936 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 4937 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 4938 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 4939 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 4940 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 4941 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 4942 ArgNum = 3; 4943 HasRC = true; 4944 break; 4945 case X86::BI__builtin_ia32_addsh_round_mask: 4946 case X86::BI__builtin_ia32_addss_round_mask: 4947 case X86::BI__builtin_ia32_addsd_round_mask: 4948 case X86::BI__builtin_ia32_divsh_round_mask: 4949 case X86::BI__builtin_ia32_divss_round_mask: 4950 case X86::BI__builtin_ia32_divsd_round_mask: 4951 case X86::BI__builtin_ia32_mulsh_round_mask: 4952 case X86::BI__builtin_ia32_mulss_round_mask: 4953 case X86::BI__builtin_ia32_mulsd_round_mask: 4954 case X86::BI__builtin_ia32_subsh_round_mask: 4955 case X86::BI__builtin_ia32_subss_round_mask: 4956 case X86::BI__builtin_ia32_subsd_round_mask: 4957 case X86::BI__builtin_ia32_scalefph512_mask: 4958 case X86::BI__builtin_ia32_scalefpd512_mask: 4959 case X86::BI__builtin_ia32_scalefps512_mask: 4960 case X86::BI__builtin_ia32_scalefsd_round_mask: 4961 case X86::BI__builtin_ia32_scalefss_round_mask: 4962 case X86::BI__builtin_ia32_scalefsh_round_mask: 4963 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4964 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 4965 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 4966 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4967 case X86::BI__builtin_ia32_sqrtss_round_mask: 4968 case X86::BI__builtin_ia32_sqrtsh_round_mask: 4969 case X86::BI__builtin_ia32_vfmaddsd3_mask: 4970 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 4971 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 4972 case X86::BI__builtin_ia32_vfmaddss3_mask: 4973 case X86::BI__builtin_ia32_vfmaddss3_maskz: 4974 case X86::BI__builtin_ia32_vfmaddss3_mask3: 4975 case X86::BI__builtin_ia32_vfmaddsh3_mask: 4976 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 4977 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 4978 case X86::BI__builtin_ia32_vfmaddpd512_mask: 4979 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 4980 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 4981 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 4982 case X86::BI__builtin_ia32_vfmaddps512_mask: 4983 case X86::BI__builtin_ia32_vfmaddps512_maskz: 4984 case X86::BI__builtin_ia32_vfmaddps512_mask3: 4985 case X86::BI__builtin_ia32_vfmsubps512_mask3: 4986 case X86::BI__builtin_ia32_vfmaddph512_mask: 4987 case X86::BI__builtin_ia32_vfmaddph512_maskz: 4988 case X86::BI__builtin_ia32_vfmaddph512_mask3: 4989 case 
X86::BI__builtin_ia32_vfmsubph512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask:
  case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
  case X86::BI__builtin_ia32_vfmaddcsh_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfmaddcph512_mask:
  case X86::BI__builtin_ia32_vfmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfcmaddcsh_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask:
  case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfmulcsh_mask:
  case X86::BI__builtin_ia32_vfmulcph512_mask:
  case X86::BI__builtin_ia32_vfcmulcsh_mask:
  case X86::BI__builtin_ia32_vfcmulcph512_mask:
    ArgNum = 4;
    HasRC = true;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the
  // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0),
  // make sure it is only combined with ROUND_NO_EXC. If the intrinsic does
  // not have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION
  // together.
  if (Result == 4 /*ROUND_CUR_DIRECTION*/ || Result == 8 /*ROUND_NO_EXC*/ ||
      (!HasRC && Result == 12 /*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
5049 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 5050 CallExpr *TheCall) { 5051 unsigned ArgNum = 0; 5052 switch (BuiltinID) { 5053 default: 5054 return false; 5055 case X86::BI__builtin_ia32_gatherpfdpd: 5056 case X86::BI__builtin_ia32_gatherpfdps: 5057 case X86::BI__builtin_ia32_gatherpfqpd: 5058 case X86::BI__builtin_ia32_gatherpfqps: 5059 case X86::BI__builtin_ia32_scatterpfdpd: 5060 case X86::BI__builtin_ia32_scatterpfdps: 5061 case X86::BI__builtin_ia32_scatterpfqpd: 5062 case X86::BI__builtin_ia32_scatterpfqps: 5063 ArgNum = 3; 5064 break; 5065 case X86::BI__builtin_ia32_gatherd_pd: 5066 case X86::BI__builtin_ia32_gatherd_pd256: 5067 case X86::BI__builtin_ia32_gatherq_pd: 5068 case X86::BI__builtin_ia32_gatherq_pd256: 5069 case X86::BI__builtin_ia32_gatherd_ps: 5070 case X86::BI__builtin_ia32_gatherd_ps256: 5071 case X86::BI__builtin_ia32_gatherq_ps: 5072 case X86::BI__builtin_ia32_gatherq_ps256: 5073 case X86::BI__builtin_ia32_gatherd_q: 5074 case X86::BI__builtin_ia32_gatherd_q256: 5075 case X86::BI__builtin_ia32_gatherq_q: 5076 case X86::BI__builtin_ia32_gatherq_q256: 5077 case X86::BI__builtin_ia32_gatherd_d: 5078 case X86::BI__builtin_ia32_gatherd_d256: 5079 case X86::BI__builtin_ia32_gatherq_d: 5080 case X86::BI__builtin_ia32_gatherq_d256: 5081 case X86::BI__builtin_ia32_gather3div2df: 5082 case X86::BI__builtin_ia32_gather3div2di: 5083 case X86::BI__builtin_ia32_gather3div4df: 5084 case X86::BI__builtin_ia32_gather3div4di: 5085 case X86::BI__builtin_ia32_gather3div4sf: 5086 case X86::BI__builtin_ia32_gather3div4si: 5087 case X86::BI__builtin_ia32_gather3div8sf: 5088 case X86::BI__builtin_ia32_gather3div8si: 5089 case X86::BI__builtin_ia32_gather3siv2df: 5090 case X86::BI__builtin_ia32_gather3siv2di: 5091 case X86::BI__builtin_ia32_gather3siv4df: 5092 case X86::BI__builtin_ia32_gather3siv4di: 5093 case X86::BI__builtin_ia32_gather3siv4sf: 5094 case X86::BI__builtin_ia32_gather3siv4si: 5095 case X86::BI__builtin_ia32_gather3siv8sf: 5096 case X86::BI__builtin_ia32_gather3siv8si: 5097 case X86::BI__builtin_ia32_gathersiv8df: 5098 case X86::BI__builtin_ia32_gathersiv16sf: 5099 case X86::BI__builtin_ia32_gatherdiv8df: 5100 case X86::BI__builtin_ia32_gatherdiv16sf: 5101 case X86::BI__builtin_ia32_gathersiv8di: 5102 case X86::BI__builtin_ia32_gathersiv16si: 5103 case X86::BI__builtin_ia32_gatherdiv8di: 5104 case X86::BI__builtin_ia32_gatherdiv16si: 5105 case X86::BI__builtin_ia32_scatterdiv2df: 5106 case X86::BI__builtin_ia32_scatterdiv2di: 5107 case X86::BI__builtin_ia32_scatterdiv4df: 5108 case X86::BI__builtin_ia32_scatterdiv4di: 5109 case X86::BI__builtin_ia32_scatterdiv4sf: 5110 case X86::BI__builtin_ia32_scatterdiv4si: 5111 case X86::BI__builtin_ia32_scatterdiv8sf: 5112 case X86::BI__builtin_ia32_scatterdiv8si: 5113 case X86::BI__builtin_ia32_scattersiv2df: 5114 case X86::BI__builtin_ia32_scattersiv2di: 5115 case X86::BI__builtin_ia32_scattersiv4df: 5116 case X86::BI__builtin_ia32_scattersiv4di: 5117 case X86::BI__builtin_ia32_scattersiv4sf: 5118 case X86::BI__builtin_ia32_scattersiv4si: 5119 case X86::BI__builtin_ia32_scattersiv8sf: 5120 case X86::BI__builtin_ia32_scattersiv8si: 5121 case X86::BI__builtin_ia32_scattersiv8df: 5122 case X86::BI__builtin_ia32_scattersiv16sf: 5123 case X86::BI__builtin_ia32_scatterdiv8df: 5124 case X86::BI__builtin_ia32_scatterdiv16sf: 5125 case X86::BI__builtin_ia32_scattersiv8di: 5126 case X86::BI__builtin_ia32_scattersiv16si: 5127 case X86::BI__builtin_ia32_scatterdiv8di: 5128 case X86::BI__builtin_ia32_scatterdiv16si: 5129 
ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

enum { TileRegLow = 0, TileRegHigh = 7 };

bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // The maximum tile register number is TileRegHigh, so use one bit per
  // register in the bitset to track which registers have already been used.
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
  case X86::BI__builtin_ia32_tdpfp16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
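  // For example, __builtin_ia32_readeflags_u32 (listed in isX86_32Builtin
  // above) is diagnosed here when the target architecture is not x86-32.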
5231 const llvm::Triple &TT = TI.getTriple(); 5232 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 5233 return Diag(TheCall->getCallee()->getBeginLoc(), 5234 diag::err_32_bit_builtin_64_bit_tgt); 5235 5236 // If the intrinsic has rounding or SAE make sure its valid. 5237 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 5238 return true; 5239 5240 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 5241 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 5242 return true; 5243 5244 // If the intrinsic has a tile arguments, make sure they are valid. 5245 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall)) 5246 return true; 5247 5248 // For intrinsics which take an immediate value as part of the instruction, 5249 // range check them here. 5250 int i = 0, l = 0, u = 0; 5251 switch (BuiltinID) { 5252 default: 5253 return false; 5254 case X86::BI__builtin_ia32_vec_ext_v2si: 5255 case X86::BI__builtin_ia32_vec_ext_v2di: 5256 case X86::BI__builtin_ia32_vextractf128_pd256: 5257 case X86::BI__builtin_ia32_vextractf128_ps256: 5258 case X86::BI__builtin_ia32_vextractf128_si256: 5259 case X86::BI__builtin_ia32_extract128i256: 5260 case X86::BI__builtin_ia32_extractf64x4_mask: 5261 case X86::BI__builtin_ia32_extracti64x4_mask: 5262 case X86::BI__builtin_ia32_extractf32x8_mask: 5263 case X86::BI__builtin_ia32_extracti32x8_mask: 5264 case X86::BI__builtin_ia32_extractf64x2_256_mask: 5265 case X86::BI__builtin_ia32_extracti64x2_256_mask: 5266 case X86::BI__builtin_ia32_extractf32x4_256_mask: 5267 case X86::BI__builtin_ia32_extracti32x4_256_mask: 5268 i = 1; l = 0; u = 1; 5269 break; 5270 case X86::BI__builtin_ia32_vec_set_v2di: 5271 case X86::BI__builtin_ia32_vinsertf128_pd256: 5272 case X86::BI__builtin_ia32_vinsertf128_ps256: 5273 case X86::BI__builtin_ia32_vinsertf128_si256: 5274 case X86::BI__builtin_ia32_insert128i256: 5275 case X86::BI__builtin_ia32_insertf32x8: 5276 case X86::BI__builtin_ia32_inserti32x8: 5277 case X86::BI__builtin_ia32_insertf64x4: 5278 case X86::BI__builtin_ia32_inserti64x4: 5279 case X86::BI__builtin_ia32_insertf64x2_256: 5280 case X86::BI__builtin_ia32_inserti64x2_256: 5281 case X86::BI__builtin_ia32_insertf32x4_256: 5282 case X86::BI__builtin_ia32_inserti32x4_256: 5283 i = 2; l = 0; u = 1; 5284 break; 5285 case X86::BI__builtin_ia32_vpermilpd: 5286 case X86::BI__builtin_ia32_vec_ext_v4hi: 5287 case X86::BI__builtin_ia32_vec_ext_v4si: 5288 case X86::BI__builtin_ia32_vec_ext_v4sf: 5289 case X86::BI__builtin_ia32_vec_ext_v4di: 5290 case X86::BI__builtin_ia32_extractf32x4_mask: 5291 case X86::BI__builtin_ia32_extracti32x4_mask: 5292 case X86::BI__builtin_ia32_extractf64x2_512_mask: 5293 case X86::BI__builtin_ia32_extracti64x2_512_mask: 5294 i = 1; l = 0; u = 3; 5295 break; 5296 case X86::BI_mm_prefetch: 5297 case X86::BI__builtin_ia32_vec_ext_v8hi: 5298 case X86::BI__builtin_ia32_vec_ext_v8si: 5299 i = 1; l = 0; u = 7; 5300 break; 5301 case X86::BI__builtin_ia32_sha1rnds4: 5302 case X86::BI__builtin_ia32_blendpd: 5303 case X86::BI__builtin_ia32_shufpd: 5304 case X86::BI__builtin_ia32_vec_set_v4hi: 5305 case X86::BI__builtin_ia32_vec_set_v4si: 5306 case X86::BI__builtin_ia32_vec_set_v4di: 5307 case X86::BI__builtin_ia32_shuf_f32x4_256: 5308 case X86::BI__builtin_ia32_shuf_f64x2_256: 5309 case X86::BI__builtin_ia32_shuf_i32x4_256: 5310 case X86::BI__builtin_ia32_shuf_i64x2_256: 5311 case X86::BI__builtin_ia32_insertf64x2_512: 5312 case X86::BI__builtin_ia32_inserti64x2_512: 5313 case X86::BI__builtin_ia32_insertf32x4: 5314 
case X86::BI__builtin_ia32_inserti32x4: 5315 i = 2; l = 0; u = 3; 5316 break; 5317 case X86::BI__builtin_ia32_vpermil2pd: 5318 case X86::BI__builtin_ia32_vpermil2pd256: 5319 case X86::BI__builtin_ia32_vpermil2ps: 5320 case X86::BI__builtin_ia32_vpermil2ps256: 5321 i = 3; l = 0; u = 3; 5322 break; 5323 case X86::BI__builtin_ia32_cmpb128_mask: 5324 case X86::BI__builtin_ia32_cmpw128_mask: 5325 case X86::BI__builtin_ia32_cmpd128_mask: 5326 case X86::BI__builtin_ia32_cmpq128_mask: 5327 case X86::BI__builtin_ia32_cmpb256_mask: 5328 case X86::BI__builtin_ia32_cmpw256_mask: 5329 case X86::BI__builtin_ia32_cmpd256_mask: 5330 case X86::BI__builtin_ia32_cmpq256_mask: 5331 case X86::BI__builtin_ia32_cmpb512_mask: 5332 case X86::BI__builtin_ia32_cmpw512_mask: 5333 case X86::BI__builtin_ia32_cmpd512_mask: 5334 case X86::BI__builtin_ia32_cmpq512_mask: 5335 case X86::BI__builtin_ia32_ucmpb128_mask: 5336 case X86::BI__builtin_ia32_ucmpw128_mask: 5337 case X86::BI__builtin_ia32_ucmpd128_mask: 5338 case X86::BI__builtin_ia32_ucmpq128_mask: 5339 case X86::BI__builtin_ia32_ucmpb256_mask: 5340 case X86::BI__builtin_ia32_ucmpw256_mask: 5341 case X86::BI__builtin_ia32_ucmpd256_mask: 5342 case X86::BI__builtin_ia32_ucmpq256_mask: 5343 case X86::BI__builtin_ia32_ucmpb512_mask: 5344 case X86::BI__builtin_ia32_ucmpw512_mask: 5345 case X86::BI__builtin_ia32_ucmpd512_mask: 5346 case X86::BI__builtin_ia32_ucmpq512_mask: 5347 case X86::BI__builtin_ia32_vpcomub: 5348 case X86::BI__builtin_ia32_vpcomuw: 5349 case X86::BI__builtin_ia32_vpcomud: 5350 case X86::BI__builtin_ia32_vpcomuq: 5351 case X86::BI__builtin_ia32_vpcomb: 5352 case X86::BI__builtin_ia32_vpcomw: 5353 case X86::BI__builtin_ia32_vpcomd: 5354 case X86::BI__builtin_ia32_vpcomq: 5355 case X86::BI__builtin_ia32_vec_set_v8hi: 5356 case X86::BI__builtin_ia32_vec_set_v8si: 5357 i = 2; l = 0; u = 7; 5358 break; 5359 case X86::BI__builtin_ia32_vpermilpd256: 5360 case X86::BI__builtin_ia32_roundps: 5361 case X86::BI__builtin_ia32_roundpd: 5362 case X86::BI__builtin_ia32_roundps256: 5363 case X86::BI__builtin_ia32_roundpd256: 5364 case X86::BI__builtin_ia32_getmantpd128_mask: 5365 case X86::BI__builtin_ia32_getmantpd256_mask: 5366 case X86::BI__builtin_ia32_getmantps128_mask: 5367 case X86::BI__builtin_ia32_getmantps256_mask: 5368 case X86::BI__builtin_ia32_getmantpd512_mask: 5369 case X86::BI__builtin_ia32_getmantps512_mask: 5370 case X86::BI__builtin_ia32_getmantph128_mask: 5371 case X86::BI__builtin_ia32_getmantph256_mask: 5372 case X86::BI__builtin_ia32_getmantph512_mask: 5373 case X86::BI__builtin_ia32_vec_ext_v16qi: 5374 case X86::BI__builtin_ia32_vec_ext_v16hi: 5375 i = 1; l = 0; u = 15; 5376 break; 5377 case X86::BI__builtin_ia32_pblendd128: 5378 case X86::BI__builtin_ia32_blendps: 5379 case X86::BI__builtin_ia32_blendpd256: 5380 case X86::BI__builtin_ia32_shufpd256: 5381 case X86::BI__builtin_ia32_roundss: 5382 case X86::BI__builtin_ia32_roundsd: 5383 case X86::BI__builtin_ia32_rangepd128_mask: 5384 case X86::BI__builtin_ia32_rangepd256_mask: 5385 case X86::BI__builtin_ia32_rangepd512_mask: 5386 case X86::BI__builtin_ia32_rangeps128_mask: 5387 case X86::BI__builtin_ia32_rangeps256_mask: 5388 case X86::BI__builtin_ia32_rangeps512_mask: 5389 case X86::BI__builtin_ia32_getmantsd_round_mask: 5390 case X86::BI__builtin_ia32_getmantss_round_mask: 5391 case X86::BI__builtin_ia32_getmantsh_round_mask: 5392 case X86::BI__builtin_ia32_vec_set_v16qi: 5393 case X86::BI__builtin_ia32_vec_set_v16hi: 5394 i = 2; l = 0; u = 15; 5395 break; 5396 case 
X86::BI__builtin_ia32_vec_ext_v32qi: 5397 i = 1; l = 0; u = 31; 5398 break; 5399 case X86::BI__builtin_ia32_cmpps: 5400 case X86::BI__builtin_ia32_cmpss: 5401 case X86::BI__builtin_ia32_cmppd: 5402 case X86::BI__builtin_ia32_cmpsd: 5403 case X86::BI__builtin_ia32_cmpps256: 5404 case X86::BI__builtin_ia32_cmppd256: 5405 case X86::BI__builtin_ia32_cmpps128_mask: 5406 case X86::BI__builtin_ia32_cmppd128_mask: 5407 case X86::BI__builtin_ia32_cmpps256_mask: 5408 case X86::BI__builtin_ia32_cmppd256_mask: 5409 case X86::BI__builtin_ia32_cmpps512_mask: 5410 case X86::BI__builtin_ia32_cmppd512_mask: 5411 case X86::BI__builtin_ia32_cmpsd_mask: 5412 case X86::BI__builtin_ia32_cmpss_mask: 5413 case X86::BI__builtin_ia32_vec_set_v32qi: 5414 i = 2; l = 0; u = 31; 5415 break; 5416 case X86::BI__builtin_ia32_permdf256: 5417 case X86::BI__builtin_ia32_permdi256: 5418 case X86::BI__builtin_ia32_permdf512: 5419 case X86::BI__builtin_ia32_permdi512: 5420 case X86::BI__builtin_ia32_vpermilps: 5421 case X86::BI__builtin_ia32_vpermilps256: 5422 case X86::BI__builtin_ia32_vpermilpd512: 5423 case X86::BI__builtin_ia32_vpermilps512: 5424 case X86::BI__builtin_ia32_pshufd: 5425 case X86::BI__builtin_ia32_pshufd256: 5426 case X86::BI__builtin_ia32_pshufd512: 5427 case X86::BI__builtin_ia32_pshufhw: 5428 case X86::BI__builtin_ia32_pshufhw256: 5429 case X86::BI__builtin_ia32_pshufhw512: 5430 case X86::BI__builtin_ia32_pshuflw: 5431 case X86::BI__builtin_ia32_pshuflw256: 5432 case X86::BI__builtin_ia32_pshuflw512: 5433 case X86::BI__builtin_ia32_vcvtps2ph: 5434 case X86::BI__builtin_ia32_vcvtps2ph_mask: 5435 case X86::BI__builtin_ia32_vcvtps2ph256: 5436 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 5437 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 5438 case X86::BI__builtin_ia32_rndscaleps_128_mask: 5439 case X86::BI__builtin_ia32_rndscalepd_128_mask: 5440 case X86::BI__builtin_ia32_rndscaleps_256_mask: 5441 case X86::BI__builtin_ia32_rndscalepd_256_mask: 5442 case X86::BI__builtin_ia32_rndscaleps_mask: 5443 case X86::BI__builtin_ia32_rndscalepd_mask: 5444 case X86::BI__builtin_ia32_rndscaleph_mask: 5445 case X86::BI__builtin_ia32_reducepd128_mask: 5446 case X86::BI__builtin_ia32_reducepd256_mask: 5447 case X86::BI__builtin_ia32_reducepd512_mask: 5448 case X86::BI__builtin_ia32_reduceps128_mask: 5449 case X86::BI__builtin_ia32_reduceps256_mask: 5450 case X86::BI__builtin_ia32_reduceps512_mask: 5451 case X86::BI__builtin_ia32_reduceph128_mask: 5452 case X86::BI__builtin_ia32_reduceph256_mask: 5453 case X86::BI__builtin_ia32_reduceph512_mask: 5454 case X86::BI__builtin_ia32_prold512: 5455 case X86::BI__builtin_ia32_prolq512: 5456 case X86::BI__builtin_ia32_prold128: 5457 case X86::BI__builtin_ia32_prold256: 5458 case X86::BI__builtin_ia32_prolq128: 5459 case X86::BI__builtin_ia32_prolq256: 5460 case X86::BI__builtin_ia32_prord512: 5461 case X86::BI__builtin_ia32_prorq512: 5462 case X86::BI__builtin_ia32_prord128: 5463 case X86::BI__builtin_ia32_prord256: 5464 case X86::BI__builtin_ia32_prorq128: 5465 case X86::BI__builtin_ia32_prorq256: 5466 case X86::BI__builtin_ia32_fpclasspd128_mask: 5467 case X86::BI__builtin_ia32_fpclasspd256_mask: 5468 case X86::BI__builtin_ia32_fpclassps128_mask: 5469 case X86::BI__builtin_ia32_fpclassps256_mask: 5470 case X86::BI__builtin_ia32_fpclassps512_mask: 5471 case X86::BI__builtin_ia32_fpclasspd512_mask: 5472 case X86::BI__builtin_ia32_fpclassph128_mask: 5473 case X86::BI__builtin_ia32_fpclassph256_mask: 5474 case X86::BI__builtin_ia32_fpclassph512_mask: 5475 case 
X86::BI__builtin_ia32_fpclasssd_mask: 5476 case X86::BI__builtin_ia32_fpclassss_mask: 5477 case X86::BI__builtin_ia32_fpclasssh_mask: 5478 case X86::BI__builtin_ia32_pslldqi128_byteshift: 5479 case X86::BI__builtin_ia32_pslldqi256_byteshift: 5480 case X86::BI__builtin_ia32_pslldqi512_byteshift: 5481 case X86::BI__builtin_ia32_psrldqi128_byteshift: 5482 case X86::BI__builtin_ia32_psrldqi256_byteshift: 5483 case X86::BI__builtin_ia32_psrldqi512_byteshift: 5484 case X86::BI__builtin_ia32_kshiftliqi: 5485 case X86::BI__builtin_ia32_kshiftlihi: 5486 case X86::BI__builtin_ia32_kshiftlisi: 5487 case X86::BI__builtin_ia32_kshiftlidi: 5488 case X86::BI__builtin_ia32_kshiftriqi: 5489 case X86::BI__builtin_ia32_kshiftrihi: 5490 case X86::BI__builtin_ia32_kshiftrisi: 5491 case X86::BI__builtin_ia32_kshiftridi: 5492 i = 1; l = 0; u = 255; 5493 break; 5494 case X86::BI__builtin_ia32_vperm2f128_pd256: 5495 case X86::BI__builtin_ia32_vperm2f128_ps256: 5496 case X86::BI__builtin_ia32_vperm2f128_si256: 5497 case X86::BI__builtin_ia32_permti256: 5498 case X86::BI__builtin_ia32_pblendw128: 5499 case X86::BI__builtin_ia32_pblendw256: 5500 case X86::BI__builtin_ia32_blendps256: 5501 case X86::BI__builtin_ia32_pblendd256: 5502 case X86::BI__builtin_ia32_palignr128: 5503 case X86::BI__builtin_ia32_palignr256: 5504 case X86::BI__builtin_ia32_palignr512: 5505 case X86::BI__builtin_ia32_alignq512: 5506 case X86::BI__builtin_ia32_alignd512: 5507 case X86::BI__builtin_ia32_alignd128: 5508 case X86::BI__builtin_ia32_alignd256: 5509 case X86::BI__builtin_ia32_alignq128: 5510 case X86::BI__builtin_ia32_alignq256: 5511 case X86::BI__builtin_ia32_vcomisd: 5512 case X86::BI__builtin_ia32_vcomiss: 5513 case X86::BI__builtin_ia32_shuf_f32x4: 5514 case X86::BI__builtin_ia32_shuf_f64x2: 5515 case X86::BI__builtin_ia32_shuf_i32x4: 5516 case X86::BI__builtin_ia32_shuf_i64x2: 5517 case X86::BI__builtin_ia32_shufpd512: 5518 case X86::BI__builtin_ia32_shufps: 5519 case X86::BI__builtin_ia32_shufps256: 5520 case X86::BI__builtin_ia32_shufps512: 5521 case X86::BI__builtin_ia32_dbpsadbw128: 5522 case X86::BI__builtin_ia32_dbpsadbw256: 5523 case X86::BI__builtin_ia32_dbpsadbw512: 5524 case X86::BI__builtin_ia32_vpshldd128: 5525 case X86::BI__builtin_ia32_vpshldd256: 5526 case X86::BI__builtin_ia32_vpshldd512: 5527 case X86::BI__builtin_ia32_vpshldq128: 5528 case X86::BI__builtin_ia32_vpshldq256: 5529 case X86::BI__builtin_ia32_vpshldq512: 5530 case X86::BI__builtin_ia32_vpshldw128: 5531 case X86::BI__builtin_ia32_vpshldw256: 5532 case X86::BI__builtin_ia32_vpshldw512: 5533 case X86::BI__builtin_ia32_vpshrdd128: 5534 case X86::BI__builtin_ia32_vpshrdd256: 5535 case X86::BI__builtin_ia32_vpshrdd512: 5536 case X86::BI__builtin_ia32_vpshrdq128: 5537 case X86::BI__builtin_ia32_vpshrdq256: 5538 case X86::BI__builtin_ia32_vpshrdq512: 5539 case X86::BI__builtin_ia32_vpshrdw128: 5540 case X86::BI__builtin_ia32_vpshrdw256: 5541 case X86::BI__builtin_ia32_vpshrdw512: 5542 i = 2; l = 0; u = 255; 5543 break; 5544 case X86::BI__builtin_ia32_fixupimmpd512_mask: 5545 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 5546 case X86::BI__builtin_ia32_fixupimmps512_mask: 5547 case X86::BI__builtin_ia32_fixupimmps512_maskz: 5548 case X86::BI__builtin_ia32_fixupimmsd_mask: 5549 case X86::BI__builtin_ia32_fixupimmsd_maskz: 5550 case X86::BI__builtin_ia32_fixupimmss_mask: 5551 case X86::BI__builtin_ia32_fixupimmss_maskz: 5552 case X86::BI__builtin_ia32_fixupimmpd128_mask: 5553 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 5554 case 
X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    i = 3; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
    i = 4; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_cmpccxadd32:
  case X86::BI__builtin_ia32_cmpccxadd64:
    i = 3; l = 0; u = 15;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have
  // out-of-range values. These still need to code-generate, but they don't
  // necessarily need to make sense. We use a warning that defaults to an
  // error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               bool IsVariadic, FormatStringInfo *FSI) {
  if (Format->getFirstArg() == 0)
    FSI->ArgPassingKind = FAPK_VAList;
  else if (IsVariadic)
    FSI->ArgPassingKind = FAPK_Variadic;
  else
    FSI->ArgPassingKind = FAPK_Fixed;
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg =
      FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks whether the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
5637 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { 5638 // If the expression has non-null type, it doesn't evaluate to null. 5639 if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) { 5640 if (*nullability == NullabilityKind::NonNull) 5641 return false; 5642 } 5643 5644 // As a special case, transparent unions initialized with zero are 5645 // considered null for the purposes of the nonnull attribute. 5646 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 5647 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 5648 if (const CompoundLiteralExpr *CLE = 5649 dyn_cast<CompoundLiteralExpr>(Expr)) 5650 if (const InitListExpr *ILE = 5651 dyn_cast<InitListExpr>(CLE->getInitializer())) 5652 Expr = ILE->getInit(0); 5653 } 5654 5655 bool Result; 5656 return (!Expr->isValueDependent() && 5657 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 5658 !Result); 5659 } 5660 5661 static void CheckNonNullArgument(Sema &S, 5662 const Expr *ArgExpr, 5663 SourceLocation CallSiteLoc) { 5664 if (CheckNonNullExpr(S, ArgExpr)) 5665 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 5666 S.PDiag(diag::warn_null_arg) 5667 << ArgExpr->getSourceRange()); 5668 } 5669 5670 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 5671 FormatStringInfo FSI; 5672 if ((GetFormatStringType(Format) == FST_NSString) && 5673 getFormatStringInfo(Format, false, true, &FSI)) { 5674 Idx = FSI.FormatIdx; 5675 return true; 5676 } 5677 return false; 5678 } 5679 5680 /// Diagnose use of %s directive in an NSString which is being passed 5681 /// as formatting string to formatting method. 5682 static void 5683 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 5684 const NamedDecl *FDecl, 5685 Expr **Args, 5686 unsigned NumArgs) { 5687 unsigned Idx = 0; 5688 bool Format = false; 5689 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 5690 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 5691 Idx = 2; 5692 Format = true; 5693 } 5694 else 5695 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5696 if (S.GetFormatNSStringIdx(I, Idx)) { 5697 Format = true; 5698 break; 5699 } 5700 } 5701 if (!Format || NumArgs <= Idx) 5702 return; 5703 const Expr *FormatExpr = Args[Idx]; 5704 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 5705 FormatExpr = CSCE->getSubExpr(); 5706 const StringLiteral *FormatString; 5707 if (const ObjCStringLiteral *OSL = 5708 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 5709 FormatString = OSL->getString(); 5710 else 5711 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 5712 if (!FormatString) 5713 return; 5714 if (S.FormatStringHasSArg(FormatString)) { 5715 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 5716 << "%s" << 1 << 1; 5717 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 5718 << FDecl->getDeclName(); 5719 } 5720 } 5721 5722 /// Determine whether the given type has a non-null nullability annotation. 5723 static bool isNonNullType(QualType type) { 5724 if (auto nullability = type->getNullability()) 5725 return *nullability == NullabilityKind::NonNull; 5726 5727 return false; 5728 } 5729 5730 static void CheckNonNullArguments(Sema &S, 5731 const NamedDecl *FDecl, 5732 const FunctionProtoType *Proto, 5733 ArrayRef<const Expr *> Args, 5734 SourceLocation CallSiteLoc) { 5735 assert((FDecl || Proto) && "Need a function declaration or prototype"); 5736 5737 // Already checked by constant evaluator. 
5738 if (S.isConstantEvaluated()) 5739 return; 5740 // Check the attributes attached to the method/function itself. 5741 llvm::SmallBitVector NonNullArgs; 5742 if (FDecl) { 5743 // Handle the nonnull attribute on the function/method declaration itself. 5744 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 5745 if (!NonNull->args_size()) { 5746 // Easy case: all pointer arguments are nonnull. 5747 for (const auto *Arg : Args) 5748 if (S.isValidPointerAttrType(Arg->getType())) 5749 CheckNonNullArgument(S, Arg, CallSiteLoc); 5750 return; 5751 } 5752 5753 for (const ParamIdx &Idx : NonNull->args()) { 5754 unsigned IdxAST = Idx.getASTIndex(); 5755 if (IdxAST >= Args.size()) 5756 continue; 5757 if (NonNullArgs.empty()) 5758 NonNullArgs.resize(Args.size()); 5759 NonNullArgs.set(IdxAST); 5760 } 5761 } 5762 } 5763 5764 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 5765 // Handle the nonnull attribute on the parameters of the 5766 // function/method. 5767 ArrayRef<ParmVarDecl*> parms; 5768 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 5769 parms = FD->parameters(); 5770 else 5771 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 5772 5773 unsigned ParamIndex = 0; 5774 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 5775 I != E; ++I, ++ParamIndex) { 5776 const ParmVarDecl *PVD = *I; 5777 if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) { 5778 if (NonNullArgs.empty()) 5779 NonNullArgs.resize(Args.size()); 5780 5781 NonNullArgs.set(ParamIndex); 5782 } 5783 } 5784 } else { 5785 // If we have a non-function, non-method declaration but no 5786 // function prototype, try to dig out the function prototype. 5787 if (!Proto) { 5788 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 5789 QualType type = VD->getType().getNonReferenceType(); 5790 if (auto pointerType = type->getAs<PointerType>()) 5791 type = pointerType->getPointeeType(); 5792 else if (auto blockType = type->getAs<BlockPointerType>()) 5793 type = blockType->getPointeeType(); 5794 // FIXME: data member pointers? 5795 5796 // Dig out the function prototype, if there is one. 5797 Proto = type->getAs<FunctionProtoType>(); 5798 } 5799 } 5800 5801 // Fill in non-null argument information from the nullability 5802 // information on the parameter types (if we have them). 5803 if (Proto) { 5804 unsigned Index = 0; 5805 for (auto paramType : Proto->getParamTypes()) { 5806 if (isNonNullType(paramType)) { 5807 if (NonNullArgs.empty()) 5808 NonNullArgs.resize(Args.size()); 5809 5810 NonNullArgs.set(Index); 5811 } 5812 5813 ++Index; 5814 } 5815 } 5816 } 5817 5818 // Check for non-null arguments. 5819 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 5820 ArgIndex != ArgIndexEnd; ++ArgIndex) { 5821 if (NonNullArgs[ArgIndex]) 5822 CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc()); 5823 } 5824 } 5825 5826 // 16 byte ByVal alignment not due to a vector member is not honoured by XL 5827 // on AIX. Emit a warning here that users are generating binary incompatible 5828 // code to be safe. 5829 // Here we try to get information about the alignment of the struct member 5830 // from the struct passed to the caller function. We only warn when the struct 5831 // is passed byval, hence the series of checks and early returns if we are a not 5832 // passing a struct byval. 
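// A hypothetical example of the pattern warned about (not from this file):
//   struct S { int Data __attribute__((aligned(16))); };
//   void callee(struct S V);
//   void caller(struct S V) { callee(V); }  // S is passed byval
// The 16-byte member alignment is reported with warn_not_xl_compatible on the
// field and note_misaligned_member_used_here at the call argument.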
5833 void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) { 5834 const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens()); 5835 if (!ICE) 5836 return; 5837 5838 const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr()); 5839 if (!DR) 5840 return; 5841 5842 const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl()); 5843 if (!PD || !PD->getType()->isRecordType()) 5844 return; 5845 5846 QualType ArgType = Arg->getType(); 5847 for (const FieldDecl *FD : 5848 ArgType->castAs<RecordType>()->getDecl()->fields()) { 5849 if (const auto *AA = FD->getAttr<AlignedAttr>()) { 5850 CharUnits Alignment = 5851 Context.toCharUnitsFromBits(AA->getAlignment(Context)); 5852 if (Alignment.getQuantity() == 16) { 5853 Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD; 5854 Diag(Loc, diag::note_misaligned_member_used_here) << PD; 5855 } 5856 } 5857 } 5858 } 5859 5860 /// Warn if a pointer or reference argument passed to a function points to an 5861 /// object that is less aligned than the parameter. This can happen when 5862 /// creating a typedef with a lower alignment than the original type and then 5863 /// calling functions defined in terms of the original type. 5864 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 5865 StringRef ParamName, QualType ArgTy, 5866 QualType ParamTy) { 5867 5868 // If a function accepts a pointer or reference type 5869 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 5870 return; 5871 5872 // If the parameter is a pointer type, get the pointee type for the 5873 // argument too. If the parameter is a reference type, don't try to get 5874 // the pointee type for the argument. 5875 if (ParamTy->isPointerType()) 5876 ArgTy = ArgTy->getPointeeType(); 5877 5878 // Remove reference or pointer 5879 ParamTy = ParamTy->getPointeeType(); 5880 5881 // Find expected alignment, and the actual alignment of the passed object. 5882 // getTypeAlignInChars requires complete types 5883 if (ArgTy.isNull() || ParamTy->isDependentType() || 5884 ParamTy->isIncompleteType() || ArgTy->isIncompleteType() || 5885 ParamTy->isUndeducedType() || ArgTy->isUndeducedType()) 5886 return; 5887 5888 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 5889 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 5890 5891 // If the argument is less aligned than the parameter, there is a 5892 // potential alignment issue. 5893 if (ArgAlign < ParamAlign) 5894 Diag(Loc, diag::warn_param_mismatched_alignment) 5895 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 5896 << ParamName << (FDecl != nullptr) << FDecl; 5897 } 5898 5899 /// Handles the checks for format strings, non-POD arguments to vararg 5900 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 5901 /// attributes. 5902 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 5903 const Expr *ThisArg, ArrayRef<const Expr *> Args, 5904 bool IsMemberFunction, SourceLocation Loc, 5905 SourceRange Range, VariadicCallType CallType) { 5906 // FIXME: We should check as much as we can in the template definition. 5907 if (CurContext->isDependentContext()) 5908 return; 5909 5910 // Printf and scanf checking. 5911 llvm::SmallBitVector CheckedVarArgs; 5912 if (FDecl) { 5913 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5914 // Only create vector if there are format attributes. 
5915 CheckedVarArgs.resize(Args.size()); 5916 5917 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 5918 CheckedVarArgs); 5919 } 5920 } 5921 5922 // Refuse POD arguments that weren't caught by the format string 5923 // checks above. 5924 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 5925 if (CallType != VariadicDoesNotApply && 5926 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 5927 unsigned NumParams = Proto ? Proto->getNumParams() 5928 : FDecl && isa<FunctionDecl>(FDecl) 5929 ? cast<FunctionDecl>(FDecl)->getNumParams() 5930 : FDecl && isa<ObjCMethodDecl>(FDecl) 5931 ? cast<ObjCMethodDecl>(FDecl)->param_size() 5932 : 0; 5933 5934 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 5935 // Args[ArgIdx] can be null in malformed code. 5936 if (const Expr *Arg = Args[ArgIdx]) { 5937 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 5938 checkVariadicArgument(Arg, CallType); 5939 } 5940 } 5941 } 5942 5943 if (FDecl || Proto) { 5944 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 5945 5946 // Type safety checking. 5947 if (FDecl) { 5948 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 5949 CheckArgumentWithTypeTag(I, Args, Loc); 5950 } 5951 } 5952 5953 // Check that passed arguments match the alignment of original arguments. 5954 // Try to get the missing prototype from the declaration. 5955 if (!Proto && FDecl) { 5956 const auto *FT = FDecl->getFunctionType(); 5957 if (isa_and_nonnull<FunctionProtoType>(FT)) 5958 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 5959 } 5960 if (Proto) { 5961 // For variadic functions, we may have more args than parameters. 5962 // For some K&R functions, we may have less args than parameters. 5963 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 5964 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 5965 // Args[ArgIdx] can be null in malformed code. 5966 if (const Expr *Arg = Args[ArgIdx]) { 5967 if (Arg->containsErrors()) 5968 continue; 5969 5970 if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg && 5971 FDecl->hasLinkage() && 5972 FDecl->getFormalLinkage() != InternalLinkage && 5973 CallType == VariadicDoesNotApply) 5974 checkAIXMemberAlignment((Arg->getExprLoc()), Arg); 5975 5976 QualType ParamTy = Proto->getParamType(ArgIdx); 5977 QualType ArgTy = Arg->getType(); 5978 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 5979 ArgTy, ParamTy); 5980 } 5981 } 5982 } 5983 5984 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 5985 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 5986 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 5987 if (!Arg->isValueDependent()) { 5988 Expr::EvalResult Align; 5989 if (Arg->EvaluateAsInt(Align, Context)) { 5990 const llvm::APSInt &I = Align.Val.getInt(); 5991 if (!I.isPowerOf2()) 5992 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 5993 << Arg->getSourceRange(); 5994 5995 if (I > Sema::MaximumAlignment) 5996 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 5997 << Arg->getSourceRange() << Sema::MaximumAlignment; 5998 } 5999 } 6000 } 6001 6002 if (FD) 6003 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 6004 } 6005 6006 /// CheckConstructorCall - Check a constructor call for correctness and safety 6007 /// properties not enforced by the C type system. 
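// (Illustrative note, not from the original source: in addition to the shared
// checkCall logic, the constructor path below compares the alignment of the
// object being constructed against the alignment of the constructor's 'this'
// type, so constructing through a type with lower alignment than the class
// itself, e.g. via an under-aligned typedef, can trigger the same
// mismatched-alignment warning that CheckArgAlignment emits for ordinary
// arguments.)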
6008 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 6009 ArrayRef<const Expr *> Args, 6010 const FunctionProtoType *Proto, 6011 SourceLocation Loc) { 6012 VariadicCallType CallType = 6013 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 6014 6015 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 6016 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 6017 Context.getPointerType(Ctor->getThisObjectType())); 6018 6019 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 6020 Loc, SourceRange(), CallType); 6021 } 6022 6023 /// CheckFunctionCall - Check a direct function call for various correctness 6024 /// and safety properties not strictly enforced by the C type system. 6025 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 6026 const FunctionProtoType *Proto) { 6027 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 6028 isa<CXXMethodDecl>(FDecl); 6029 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 6030 IsMemberOperatorCall; 6031 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 6032 TheCall->getCallee()); 6033 Expr** Args = TheCall->getArgs(); 6034 unsigned NumArgs = TheCall->getNumArgs(); 6035 6036 Expr *ImplicitThis = nullptr; 6037 if (IsMemberOperatorCall && !FDecl->isStatic()) { 6038 // If this is a call to a non-static member operator, hide the first 6039 // argument from checkCall. 6040 // FIXME: Our choice of AST representation here is less than ideal. 6041 ImplicitThis = Args[0]; 6042 ++Args; 6043 --NumArgs; 6044 } else if (IsMemberFunction && !FDecl->isStatic()) 6045 ImplicitThis = 6046 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 6047 6048 if (ImplicitThis) { 6049 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 6050 // used. 6051 QualType ThisType = ImplicitThis->getType(); 6052 if (!ThisType->isPointerType()) { 6053 assert(!ThisType->isReferenceType()); 6054 ThisType = Context.getPointerType(ThisType); 6055 } 6056 6057 QualType ThisTypeFromDecl = 6058 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 6059 6060 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 6061 ThisTypeFromDecl); 6062 } 6063 6064 checkCall(FDecl, Proto, ImplicitThis, llvm::ArrayRef(Args, NumArgs), 6065 IsMemberFunction, TheCall->getRParenLoc(), 6066 TheCall->getCallee()->getSourceRange(), CallType); 6067 6068 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 6069 // None of the checks below are needed for functions that don't have 6070 // simple names (e.g., C++ conversion functions). 6071 if (!FnInfo) 6072 return false; 6073 6074 // Enforce TCB except for builtin calls, which are always allowed. 6075 if (FDecl->getBuiltinID() == 0) 6076 CheckTCBEnforcement(TheCall->getExprLoc(), FDecl); 6077 6078 CheckAbsoluteValueFunction(TheCall, FDecl); 6079 CheckMaxUnsignedZero(TheCall, FDecl); 6080 6081 if (getLangOpts().ObjC) 6082 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 6083 6084 unsigned CMId = FDecl->getMemoryFunctionKind(); 6085 6086 // Handle memory setting and copying functions. 
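// Illustrative examples of the call patterns these checks target (not from
// the original source; the exact diagnostics depend on the target and the
// enabled warnings):
//
//   strlcpy(Dst, Src, sizeof(Src));   // size based on the source buffer
//   strncat(Dst, Src, sizeof(Dst));   // bound ignores the existing contents
//   free(&Local);                     // freeing a non-heap object
//   memset(Ptr, 0, sizeof(Ptr));      // sizeof of the pointer, not the pointee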
6087 switch (CMId) { 6088 case 0: 6089 return false; 6090 case Builtin::BIstrlcpy: // fallthrough 6091 case Builtin::BIstrlcat: 6092 CheckStrlcpycatArguments(TheCall, FnInfo); 6093 break; 6094 case Builtin::BIstrncat: 6095 CheckStrncatArguments(TheCall, FnInfo); 6096 break; 6097 case Builtin::BIfree: 6098 CheckFreeArguments(TheCall); 6099 break; 6100 default: 6101 CheckMemaccessArguments(TheCall, CMId, FnInfo); 6102 } 6103 6104 return false; 6105 } 6106 6107 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 6108 ArrayRef<const Expr *> Args) { 6109 VariadicCallType CallType = 6110 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 6111 6112 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 6113 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 6114 CallType); 6115 6116 CheckTCBEnforcement(lbrac, Method); 6117 6118 return false; 6119 } 6120 6121 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 6122 const FunctionProtoType *Proto) { 6123 QualType Ty; 6124 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 6125 Ty = V->getType().getNonReferenceType(); 6126 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 6127 Ty = F->getType().getNonReferenceType(); 6128 else 6129 return false; 6130 6131 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 6132 !Ty->isFunctionProtoType()) 6133 return false; 6134 6135 VariadicCallType CallType; 6136 if (!Proto || !Proto->isVariadic()) { 6137 CallType = VariadicDoesNotApply; 6138 } else if (Ty->isBlockPointerType()) { 6139 CallType = VariadicBlock; 6140 } else { // Ty->isFunctionPointerType() 6141 CallType = VariadicFunction; 6142 } 6143 6144 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 6145 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 6146 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 6147 TheCall->getCallee()->getSourceRange(), CallType); 6148 6149 return false; 6150 } 6151 6152 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 6153 /// such as function pointers returned from functions. 
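// Illustrative sketch (not from the original source): this path covers calls
// whose callee is an arbitrary expression, e.g. the result of another call.
// The prototype alone still drives checks such as nullability annotations:
//
//   void (*get_cb(void))(int * _Nonnull);
//   ...
//   get_cb()(NULL);   // may warn about passing null to a _Nonnull parameter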
6154 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 6155 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 6156 TheCall->getCallee()); 6157 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 6158 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 6159 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 6160 TheCall->getCallee()->getSourceRange(), CallType); 6161 6162 return false; 6163 } 6164 6165 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 6166 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 6167 return false; 6168 6169 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 6170 switch (Op) { 6171 case AtomicExpr::AO__c11_atomic_init: 6172 case AtomicExpr::AO__opencl_atomic_init: 6173 llvm_unreachable("There is no ordering argument for an init"); 6174 6175 case AtomicExpr::AO__c11_atomic_load: 6176 case AtomicExpr::AO__opencl_atomic_load: 6177 case AtomicExpr::AO__hip_atomic_load: 6178 case AtomicExpr::AO__atomic_load_n: 6179 case AtomicExpr::AO__atomic_load: 6180 return OrderingCABI != llvm::AtomicOrderingCABI::release && 6181 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 6182 6183 case AtomicExpr::AO__c11_atomic_store: 6184 case AtomicExpr::AO__opencl_atomic_store: 6185 case AtomicExpr::AO__hip_atomic_store: 6186 case AtomicExpr::AO__atomic_store: 6187 case AtomicExpr::AO__atomic_store_n: 6188 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 6189 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 6190 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 6191 6192 default: 6193 return true; 6194 } 6195 } 6196 6197 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 6198 AtomicExpr::AtomicOp Op) { 6199 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 6200 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6201 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 6202 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 6203 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 6204 Op); 6205 } 6206 6207 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 6208 SourceLocation RParenLoc, MultiExprArg Args, 6209 AtomicExpr::AtomicOp Op, 6210 AtomicArgumentOrder ArgOrder) { 6211 // All the non-OpenCL operations take one of the following forms. 6212 // The OpenCL operations take the __c11 forms with one extra argument for 6213 // synchronization scope. 
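// (Illustrative, not from the original source: the C11 form
//    C __c11_atomic_load(A *, int order)
//  corresponds to the OpenCL form
//    C __opencl_atomic_load(A *, int order, int scope)
//  with the trailing synchronization-scope argument.)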
6214 enum { 6215 // C __c11_atomic_init(A *, C) 6216 Init, 6217 6218 // C __c11_atomic_load(A *, int) 6219 Load, 6220 6221 // void __atomic_load(A *, CP, int) 6222 LoadCopy, 6223 6224 // void __atomic_store(A *, CP, int) 6225 Copy, 6226 6227 // C __c11_atomic_add(A *, M, int) 6228 Arithmetic, 6229 6230 // C __atomic_exchange_n(A *, CP, int) 6231 Xchg, 6232 6233 // void __atomic_exchange(A *, C *, CP, int) 6234 GNUXchg, 6235 6236 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 6237 C11CmpXchg, 6238 6239 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 6240 GNUCmpXchg 6241 } Form = Init; 6242 6243 const unsigned NumForm = GNUCmpXchg + 1; 6244 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 6245 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 6246 // where: 6247 // C is an appropriate type, 6248 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 6249 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 6250 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 6251 // the int parameters are for orderings. 6252 6253 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 6254 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 6255 "need to update code for modified forms"); 6256 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 6257 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 6258 AtomicExpr::AO__atomic_load, 6259 "need to update code for modified C11 atomics"); 6260 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 6261 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 6262 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 6263 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 6264 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 6265 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 6266 IsOpenCL; 6267 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 6268 Op == AtomicExpr::AO__atomic_store_n || 6269 Op == AtomicExpr::AO__atomic_exchange_n || 6270 Op == AtomicExpr::AO__atomic_compare_exchange_n; 6271 bool IsAddSub = false; 6272 6273 switch (Op) { 6274 case AtomicExpr::AO__c11_atomic_init: 6275 case AtomicExpr::AO__opencl_atomic_init: 6276 Form = Init; 6277 break; 6278 6279 case AtomicExpr::AO__c11_atomic_load: 6280 case AtomicExpr::AO__opencl_atomic_load: 6281 case AtomicExpr::AO__hip_atomic_load: 6282 case AtomicExpr::AO__atomic_load_n: 6283 Form = Load; 6284 break; 6285 6286 case AtomicExpr::AO__atomic_load: 6287 Form = LoadCopy; 6288 break; 6289 6290 case AtomicExpr::AO__c11_atomic_store: 6291 case AtomicExpr::AO__opencl_atomic_store: 6292 case AtomicExpr::AO__hip_atomic_store: 6293 case AtomicExpr::AO__atomic_store: 6294 case AtomicExpr::AO__atomic_store_n: 6295 Form = Copy; 6296 break; 6297 case AtomicExpr::AO__hip_atomic_fetch_add: 6298 case AtomicExpr::AO__hip_atomic_fetch_min: 6299 case AtomicExpr::AO__hip_atomic_fetch_max: 6300 case AtomicExpr::AO__c11_atomic_fetch_add: 6301 case AtomicExpr::AO__c11_atomic_fetch_sub: 6302 case AtomicExpr::AO__opencl_atomic_fetch_add: 6303 case AtomicExpr::AO__opencl_atomic_fetch_sub: 6304 case AtomicExpr::AO__atomic_fetch_add: 6305 case AtomicExpr::AO__atomic_fetch_sub: 6306 case AtomicExpr::AO__atomic_add_fetch: 6307 case AtomicExpr::AO__atomic_sub_fetch: 6308 IsAddSub = true; 6309 Form = Arithmetic; 6310 break; 6311 case AtomicExpr::AO__c11_atomic_fetch_and: 6312 case AtomicExpr::AO__c11_atomic_fetch_or: 6313 case AtomicExpr::AO__c11_atomic_fetch_xor: 6314 case AtomicExpr::AO__hip_atomic_fetch_and: 6315 case 
AtomicExpr::AO__hip_atomic_fetch_or: 6316 case AtomicExpr::AO__hip_atomic_fetch_xor: 6317 case AtomicExpr::AO__c11_atomic_fetch_nand: 6318 case AtomicExpr::AO__opencl_atomic_fetch_and: 6319 case AtomicExpr::AO__opencl_atomic_fetch_or: 6320 case AtomicExpr::AO__opencl_atomic_fetch_xor: 6321 case AtomicExpr::AO__atomic_fetch_and: 6322 case AtomicExpr::AO__atomic_fetch_or: 6323 case AtomicExpr::AO__atomic_fetch_xor: 6324 case AtomicExpr::AO__atomic_fetch_nand: 6325 case AtomicExpr::AO__atomic_and_fetch: 6326 case AtomicExpr::AO__atomic_or_fetch: 6327 case AtomicExpr::AO__atomic_xor_fetch: 6328 case AtomicExpr::AO__atomic_nand_fetch: 6329 Form = Arithmetic; 6330 break; 6331 case AtomicExpr::AO__c11_atomic_fetch_min: 6332 case AtomicExpr::AO__c11_atomic_fetch_max: 6333 case AtomicExpr::AO__opencl_atomic_fetch_min: 6334 case AtomicExpr::AO__opencl_atomic_fetch_max: 6335 case AtomicExpr::AO__atomic_min_fetch: 6336 case AtomicExpr::AO__atomic_max_fetch: 6337 case AtomicExpr::AO__atomic_fetch_min: 6338 case AtomicExpr::AO__atomic_fetch_max: 6339 Form = Arithmetic; 6340 break; 6341 6342 case AtomicExpr::AO__c11_atomic_exchange: 6343 case AtomicExpr::AO__hip_atomic_exchange: 6344 case AtomicExpr::AO__opencl_atomic_exchange: 6345 case AtomicExpr::AO__atomic_exchange_n: 6346 Form = Xchg; 6347 break; 6348 6349 case AtomicExpr::AO__atomic_exchange: 6350 Form = GNUXchg; 6351 break; 6352 6353 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 6354 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 6355 case AtomicExpr::AO__hip_atomic_compare_exchange_strong: 6356 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 6357 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 6358 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 6359 Form = C11CmpXchg; 6360 break; 6361 6362 case AtomicExpr::AO__atomic_compare_exchange: 6363 case AtomicExpr::AO__atomic_compare_exchange_n: 6364 Form = GNUCmpXchg; 6365 break; 6366 } 6367 6368 unsigned AdjustedNumArgs = NumArgs[Form]; 6369 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init) 6370 ++AdjustedNumArgs; 6371 // Check we have the right number of arguments. 6372 if (Args.size() < AdjustedNumArgs) { 6373 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 6374 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 6375 << ExprRange; 6376 return ExprError(); 6377 } else if (Args.size() > AdjustedNumArgs) { 6378 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 6379 diag::err_typecheck_call_too_many_args) 6380 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 6381 << ExprRange; 6382 return ExprError(); 6383 } 6384 6385 // Inspect the first argument of the atomic operation. 6386 Expr *Ptr = Args[0]; 6387 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 6388 if (ConvertedPtr.isInvalid()) 6389 return ExprError(); 6390 6391 Ptr = ConvertedPtr.get(); 6392 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 6393 if (!pointerType) { 6394 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 6395 << Ptr->getType() << Ptr->getSourceRange(); 6396 return ExprError(); 6397 } 6398 6399 // For a __c11 builtin, this should be a pointer to an _Atomic type. 
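// (Illustrative: for __c11_atomic_load the first argument is something like
// '_Atomic(int) *', so AtomTy below is '_Atomic(int)' and ValType is 'int';
// for the GNU __atomic_* builtins the argument is a plain 'int *' and AtomTy
// and ValType start out identical.)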
6400 QualType AtomTy = pointerType->getPointeeType(); // 'A' 6401 QualType ValType = AtomTy; // 'C' 6402 if (IsC11) { 6403 if (!AtomTy->isAtomicType()) { 6404 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 6405 << Ptr->getType() << Ptr->getSourceRange(); 6406 return ExprError(); 6407 } 6408 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 6409 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 6410 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 6411 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 6412 << Ptr->getSourceRange(); 6413 return ExprError(); 6414 } 6415 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 6416 } else if (Form != Load && Form != LoadCopy) { 6417 if (ValType.isConstQualified()) { 6418 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 6419 << Ptr->getType() << Ptr->getSourceRange(); 6420 return ExprError(); 6421 } 6422 } 6423 6424 // For an arithmetic operation, the implied arithmetic must be well-formed. 6425 if (Form == Arithmetic) { 6426 // GCC does not enforce these rules for GNU atomics, but we do to help catch 6427 // trivial type errors. 6428 auto IsAllowedValueType = [&](QualType ValType) { 6429 if (ValType->isIntegerType()) 6430 return true; 6431 if (ValType->isPointerType()) 6432 return true; 6433 if (!ValType->isFloatingType()) 6434 return false; 6435 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 6436 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 6437 &Context.getTargetInfo().getLongDoubleFormat() == 6438 &llvm::APFloat::x87DoubleExtended()) 6439 return false; 6440 return true; 6441 }; 6442 if (IsAddSub && !IsAllowedValueType(ValType)) { 6443 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 6444 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6445 return ExprError(); 6446 } 6447 if (!IsAddSub && !ValType->isIntegerType()) { 6448 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 6449 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6450 return ExprError(); 6451 } 6452 if (IsC11 && ValType->isPointerType() && 6453 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 6454 diag::err_incomplete_type)) { 6455 return ExprError(); 6456 } 6457 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 6458 // For __atomic_*_n operations, the value type must be a scalar integral or 6459 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 6460 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 6461 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6462 return ExprError(); 6463 } 6464 6465 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 6466 !AtomTy->isScalarType()) { 6467 // For GNU atomics, require a trivially-copyable type. This is not part of 6468 // the GNU atomics specification but we enforce it for consistency with 6469 // other atomics which generally all require a trivially-copyable type. This 6470 // is because atomics just copy bits. 6471 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 6472 << Ptr->getType() << Ptr->getSourceRange(); 6473 return ExprError(); 6474 } 6475 6476 switch (ValType.getObjCLifetime()) { 6477 case Qualifiers::OCL_None: 6478 case Qualifiers::OCL_ExplicitNone: 6479 // okay 6480 break; 6481 6482 case Qualifiers::OCL_Weak: 6483 case Qualifiers::OCL_Strong: 6484 case Qualifiers::OCL_Autoreleasing: 6485 // FIXME: Can this happen? 
By this point, ValType should be known 6486 // to be trivially copyable. 6487 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 6488 << ValType << Ptr->getSourceRange(); 6489 return ExprError(); 6490 } 6491 6492 // All atomic operations have an overload which takes a pointer to a volatile 6493 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 6494 // into the result or the other operands. Similarly atomic_load takes a 6495 // pointer to a const 'A'. 6496 ValType.removeLocalVolatile(); 6497 ValType.removeLocalConst(); 6498 QualType ResultType = ValType; 6499 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 6500 Form == Init) 6501 ResultType = Context.VoidTy; 6502 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 6503 ResultType = Context.BoolTy; 6504 6505 // The type of a parameter passed 'by value'. In the GNU atomics, such 6506 // arguments are actually passed as pointers. 6507 QualType ByValType = ValType; // 'CP' 6508 bool IsPassedByAddress = false; 6509 if (!IsC11 && !IsHIP && !IsN) { 6510 ByValType = Ptr->getType(); 6511 IsPassedByAddress = true; 6512 } 6513 6514 SmallVector<Expr *, 5> APIOrderedArgs; 6515 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 6516 APIOrderedArgs.push_back(Args[0]); 6517 switch (Form) { 6518 case Init: 6519 case Load: 6520 APIOrderedArgs.push_back(Args[1]); // Val1/Order 6521 break; 6522 case LoadCopy: 6523 case Copy: 6524 case Arithmetic: 6525 case Xchg: 6526 APIOrderedArgs.push_back(Args[2]); // Val1 6527 APIOrderedArgs.push_back(Args[1]); // Order 6528 break; 6529 case GNUXchg: 6530 APIOrderedArgs.push_back(Args[2]); // Val1 6531 APIOrderedArgs.push_back(Args[3]); // Val2 6532 APIOrderedArgs.push_back(Args[1]); // Order 6533 break; 6534 case C11CmpXchg: 6535 APIOrderedArgs.push_back(Args[2]); // Val1 6536 APIOrderedArgs.push_back(Args[4]); // Val2 6537 APIOrderedArgs.push_back(Args[1]); // Order 6538 APIOrderedArgs.push_back(Args[3]); // OrderFail 6539 break; 6540 case GNUCmpXchg: 6541 APIOrderedArgs.push_back(Args[2]); // Val1 6542 APIOrderedArgs.push_back(Args[4]); // Val2 6543 APIOrderedArgs.push_back(Args[5]); // Weak 6544 APIOrderedArgs.push_back(Args[1]); // Order 6545 APIOrderedArgs.push_back(Args[3]); // OrderFail 6546 break; 6547 } 6548 } else 6549 APIOrderedArgs.append(Args.begin(), Args.end()); 6550 6551 // The first argument's non-CV pointer type is used to deduce the type of 6552 // subsequent arguments, except for: 6553 // - weak flag (always converted to bool) 6554 // - memory order (always converted to int) 6555 // - scope (always converted to int) 6556 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 6557 QualType Ty; 6558 if (i < NumVals[Form] + 1) { 6559 switch (i) { 6560 case 0: 6561 // The first argument is always a pointer. It has a fixed type. 6562 // It is always dereferenced, a nullptr is undefined. 6563 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 6564 // Nothing else to do: we already know all we want about this pointer. 6565 continue; 6566 case 1: 6567 // The second argument is the non-atomic operand. For arithmetic, this 6568 // is always passed by value, and for a compare_exchange it is always 6569 // passed by address. For the rest, GNU uses by-address and C11 uses 6570 // by-value. 
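// (Illustrative examples of the cases described above, not from the original
// source:
//    __c11_atomic_fetch_add(P, 1, Order);           // operand passed by value
//    __atomic_exchange(P, &Val, &Ret, Order);       // operand passed by address
//    __c11_atomic_compare_exchange_strong(P, &Expected, Desired, Ok, Fail);
//                                                   // 'expected' by address
//  The GNU non-_n store/exchange forms likewise take a 'C *' here.)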
6571 assert(Form != Load); 6572 if (Form == Arithmetic && ValType->isPointerType()) 6573 Ty = Context.getPointerDiffType(); 6574 else if (Form == Init || Form == Arithmetic) 6575 Ty = ValType; 6576 else if (Form == Copy || Form == Xchg) { 6577 if (IsPassedByAddress) { 6578 // The value pointer is always dereferenced, a nullptr is undefined. 6579 CheckNonNullArgument(*this, APIOrderedArgs[i], 6580 ExprRange.getBegin()); 6581 } 6582 Ty = ByValType; 6583 } else { 6584 Expr *ValArg = APIOrderedArgs[i]; 6585 // The value pointer is always dereferenced, a nullptr is undefined. 6586 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 6587 LangAS AS = LangAS::Default; 6588 // Keep address space of non-atomic pointer type. 6589 if (const PointerType *PtrTy = 6590 ValArg->getType()->getAs<PointerType>()) { 6591 AS = PtrTy->getPointeeType().getAddressSpace(); 6592 } 6593 Ty = Context.getPointerType( 6594 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 6595 } 6596 break; 6597 case 2: 6598 // The third argument to compare_exchange / GNU exchange is the desired 6599 // value, either by-value (for the C11 and *_n variant) or as a pointer. 6600 if (IsPassedByAddress) 6601 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 6602 Ty = ByValType; 6603 break; 6604 case 3: 6605 // The fourth argument to GNU compare_exchange is a 'weak' flag. 6606 Ty = Context.BoolTy; 6607 break; 6608 } 6609 } else { 6610 // The order(s) and scope are always converted to int. 6611 Ty = Context.IntTy; 6612 } 6613 6614 InitializedEntity Entity = 6615 InitializedEntity::InitializeParameter(Context, Ty, false); 6616 ExprResult Arg = APIOrderedArgs[i]; 6617 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6618 if (Arg.isInvalid()) 6619 return true; 6620 APIOrderedArgs[i] = Arg.get(); 6621 } 6622 6623 // Permute the arguments into a 'consistent' order. 6624 SmallVector<Expr*, 5> SubExprs; 6625 SubExprs.push_back(Ptr); 6626 switch (Form) { 6627 case Init: 6628 // Note, AtomicExpr::getVal1() has a special case for this atomic. 6629 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6630 break; 6631 case Load: 6632 SubExprs.push_back(APIOrderedArgs[1]); // Order 6633 break; 6634 case LoadCopy: 6635 case Copy: 6636 case Arithmetic: 6637 case Xchg: 6638 SubExprs.push_back(APIOrderedArgs[2]); // Order 6639 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6640 break; 6641 case GNUXchg: 6642 // Note, AtomicExpr::getVal2() has a special case for this atomic. 
6643 SubExprs.push_back(APIOrderedArgs[3]); // Order 6644 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6645 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6646 break; 6647 case C11CmpXchg: 6648 SubExprs.push_back(APIOrderedArgs[3]); // Order 6649 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6650 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 6651 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6652 break; 6653 case GNUCmpXchg: 6654 SubExprs.push_back(APIOrderedArgs[4]); // Order 6655 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6656 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 6657 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6658 SubExprs.push_back(APIOrderedArgs[3]); // Weak 6659 break; 6660 } 6661 6662 if (SubExprs.size() >= 2 && Form != Init) { 6663 if (std::optional<llvm::APSInt> Result = 6664 SubExprs[1]->getIntegerConstantExpr(Context)) 6665 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 6666 Diag(SubExprs[1]->getBeginLoc(), 6667 diag::warn_atomic_op_has_invalid_memory_order) 6668 << SubExprs[1]->getSourceRange(); 6669 } 6670 6671 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 6672 auto *Scope = Args[Args.size() - 1]; 6673 if (std::optional<llvm::APSInt> Result = 6674 Scope->getIntegerConstantExpr(Context)) { 6675 if (!ScopeModel->isValid(Result->getZExtValue())) 6676 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 6677 << Scope->getSourceRange(); 6678 } 6679 SubExprs.push_back(Scope); 6680 } 6681 6682 AtomicExpr *AE = new (Context) 6683 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 6684 6685 if ((Op == AtomicExpr::AO__c11_atomic_load || 6686 Op == AtomicExpr::AO__c11_atomic_store || 6687 Op == AtomicExpr::AO__opencl_atomic_load || 6688 Op == AtomicExpr::AO__hip_atomic_load || 6689 Op == AtomicExpr::AO__opencl_atomic_store || 6690 Op == AtomicExpr::AO__hip_atomic_store) && 6691 Context.AtomicUsesUnsupportedLibcall(AE)) 6692 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 6693 << ((Op == AtomicExpr::AO__c11_atomic_load || 6694 Op == AtomicExpr::AO__opencl_atomic_load || 6695 Op == AtomicExpr::AO__hip_atomic_load) 6696 ? 0 6697 : 1); 6698 6699 if (ValType->isBitIntType()) { 6700 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit); 6701 return ExprError(); 6702 } 6703 6704 return AE; 6705 } 6706 6707 /// checkBuiltinArgument - Given a call to a builtin function, perform 6708 /// normal type-checking on the given argument, updating the call in 6709 /// place. This is useful when a builtin function requires custom 6710 /// type-checking for some of its arguments but not necessarily all of 6711 /// them. 6712 /// 6713 /// Returns true on error. 6714 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 6715 FunctionDecl *Fn = E->getDirectCallee(); 6716 assert(Fn && "builtin call without direct callee!"); 6717 6718 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 6719 InitializedEntity Entity = 6720 InitializedEntity::InitializeParameter(S.Context, Param); 6721 6722 ExprResult Arg = E->getArg(ArgIndex); 6723 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 6724 if (Arg.isInvalid()) 6725 return true; 6726 6727 E->setArg(ArgIndex, Arg.get()); 6728 return false; 6729 } 6730 6731 /// We have a call to a function like __sync_fetch_and_add, which is an 6732 /// overloaded function based on the pointer type of its first argument. 
6733 /// The main BuildCallExpr routines have already promoted the types of 6734 /// arguments because all of these calls are prototyped as void(...). 6735 /// 6736 /// This function goes through and does final semantic checking for these 6737 /// builtins, as well as generating any warnings. 6738 ExprResult 6739 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 6740 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 6741 Expr *Callee = TheCall->getCallee(); 6742 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 6743 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6744 6745 // Ensure that we have at least one argument to do type inference from. 6746 if (TheCall->getNumArgs() < 1) { 6747 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6748 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 6749 return ExprError(); 6750 } 6751 6752 // Inspect the first argument of the atomic builtin. This should always be 6753 // a pointer type, whose element is an integral scalar or pointer type. 6754 // Because it is a pointer type, we don't have to worry about any implicit 6755 // casts here. 6756 // FIXME: We don't allow floating point scalars as input. 6757 Expr *FirstArg = TheCall->getArg(0); 6758 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 6759 if (FirstArgResult.isInvalid()) 6760 return ExprError(); 6761 FirstArg = FirstArgResult.get(); 6762 TheCall->setArg(0, FirstArg); 6763 6764 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 6765 if (!pointerType) { 6766 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 6767 << FirstArg->getType() << FirstArg->getSourceRange(); 6768 return ExprError(); 6769 } 6770 6771 QualType ValType = pointerType->getPointeeType(); 6772 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6773 !ValType->isBlockPointerType()) { 6774 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 6775 << FirstArg->getType() << FirstArg->getSourceRange(); 6776 return ExprError(); 6777 } 6778 6779 if (ValType.isConstQualified()) { 6780 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 6781 << FirstArg->getType() << FirstArg->getSourceRange(); 6782 return ExprError(); 6783 } 6784 6785 switch (ValType.getObjCLifetime()) { 6786 case Qualifiers::OCL_None: 6787 case Qualifiers::OCL_ExplicitNone: 6788 // okay 6789 break; 6790 6791 case Qualifiers::OCL_Weak: 6792 case Qualifiers::OCL_Strong: 6793 case Qualifiers::OCL_Autoreleasing: 6794 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 6795 << ValType << FirstArg->getSourceRange(); 6796 return ExprError(); 6797 } 6798 6799 // Strip any qualifiers off ValType. 6800 ValType = ValType.getUnqualifiedType(); 6801 6802 // The majority of builtins return a value, but a few have special return 6803 // types, so allow them to override appropriately below. 6804 QualType ResultType = ValType; 6805 6806 // We need to figure out which concrete builtin this maps onto. For example, 6807 // __sync_fetch_and_add with a 2 byte object turns into 6808 // __sync_fetch_and_add_2. 
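// (Illustrative user-level example, not from the original source:
//    short S = 0;
//    __sync_fetch_and_add(&S, 1);   // resolved to __sync_fetch_and_add_2
//  The table below maps each generic name to its _1/_2/_4/_8/_16 variants.)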
6809 #define BUILTIN_ROW(x) \ 6810 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 6811 Builtin::BI##x##_8, Builtin::BI##x##_16 } 6812 6813 static const unsigned BuiltinIndices[][5] = { 6814 BUILTIN_ROW(__sync_fetch_and_add), 6815 BUILTIN_ROW(__sync_fetch_and_sub), 6816 BUILTIN_ROW(__sync_fetch_and_or), 6817 BUILTIN_ROW(__sync_fetch_and_and), 6818 BUILTIN_ROW(__sync_fetch_and_xor), 6819 BUILTIN_ROW(__sync_fetch_and_nand), 6820 6821 BUILTIN_ROW(__sync_add_and_fetch), 6822 BUILTIN_ROW(__sync_sub_and_fetch), 6823 BUILTIN_ROW(__sync_and_and_fetch), 6824 BUILTIN_ROW(__sync_or_and_fetch), 6825 BUILTIN_ROW(__sync_xor_and_fetch), 6826 BUILTIN_ROW(__sync_nand_and_fetch), 6827 6828 BUILTIN_ROW(__sync_val_compare_and_swap), 6829 BUILTIN_ROW(__sync_bool_compare_and_swap), 6830 BUILTIN_ROW(__sync_lock_test_and_set), 6831 BUILTIN_ROW(__sync_lock_release), 6832 BUILTIN_ROW(__sync_swap) 6833 }; 6834 #undef BUILTIN_ROW 6835 6836 // Determine the index of the size. 6837 unsigned SizeIndex; 6838 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 6839 case 1: SizeIndex = 0; break; 6840 case 2: SizeIndex = 1; break; 6841 case 4: SizeIndex = 2; break; 6842 case 8: SizeIndex = 3; break; 6843 case 16: SizeIndex = 4; break; 6844 default: 6845 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 6846 << FirstArg->getType() << FirstArg->getSourceRange(); 6847 return ExprError(); 6848 } 6849 6850 // Each of these builtins has one pointer argument, followed by some number of 6851 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 6852 // that we ignore. Find out which row of BuiltinIndices to read from as well 6853 // as the number of fixed args. 6854 unsigned BuiltinID = FDecl->getBuiltinID(); 6855 unsigned BuiltinIndex, NumFixed = 1; 6856 bool WarnAboutSemanticsChange = false; 6857 switch (BuiltinID) { 6858 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 6859 case Builtin::BI__sync_fetch_and_add: 6860 case Builtin::BI__sync_fetch_and_add_1: 6861 case Builtin::BI__sync_fetch_and_add_2: 6862 case Builtin::BI__sync_fetch_and_add_4: 6863 case Builtin::BI__sync_fetch_and_add_8: 6864 case Builtin::BI__sync_fetch_and_add_16: 6865 BuiltinIndex = 0; 6866 break; 6867 6868 case Builtin::BI__sync_fetch_and_sub: 6869 case Builtin::BI__sync_fetch_and_sub_1: 6870 case Builtin::BI__sync_fetch_and_sub_2: 6871 case Builtin::BI__sync_fetch_and_sub_4: 6872 case Builtin::BI__sync_fetch_and_sub_8: 6873 case Builtin::BI__sync_fetch_and_sub_16: 6874 BuiltinIndex = 1; 6875 break; 6876 6877 case Builtin::BI__sync_fetch_and_or: 6878 case Builtin::BI__sync_fetch_and_or_1: 6879 case Builtin::BI__sync_fetch_and_or_2: 6880 case Builtin::BI__sync_fetch_and_or_4: 6881 case Builtin::BI__sync_fetch_and_or_8: 6882 case Builtin::BI__sync_fetch_and_or_16: 6883 BuiltinIndex = 2; 6884 break; 6885 6886 case Builtin::BI__sync_fetch_and_and: 6887 case Builtin::BI__sync_fetch_and_and_1: 6888 case Builtin::BI__sync_fetch_and_and_2: 6889 case Builtin::BI__sync_fetch_and_and_4: 6890 case Builtin::BI__sync_fetch_and_and_8: 6891 case Builtin::BI__sync_fetch_and_and_16: 6892 BuiltinIndex = 3; 6893 break; 6894 6895 case Builtin::BI__sync_fetch_and_xor: 6896 case Builtin::BI__sync_fetch_and_xor_1: 6897 case Builtin::BI__sync_fetch_and_xor_2: 6898 case Builtin::BI__sync_fetch_and_xor_4: 6899 case Builtin::BI__sync_fetch_and_xor_8: 6900 case Builtin::BI__sync_fetch_and_xor_16: 6901 BuiltinIndex = 4; 6902 break; 6903 6904 case Builtin::BI__sync_fetch_and_nand: 6905 case 
Builtin::BI__sync_fetch_and_nand_1: 6906 case Builtin::BI__sync_fetch_and_nand_2: 6907 case Builtin::BI__sync_fetch_and_nand_4: 6908 case Builtin::BI__sync_fetch_and_nand_8: 6909 case Builtin::BI__sync_fetch_and_nand_16: 6910 BuiltinIndex = 5; 6911 WarnAboutSemanticsChange = true; 6912 break; 6913 6914 case Builtin::BI__sync_add_and_fetch: 6915 case Builtin::BI__sync_add_and_fetch_1: 6916 case Builtin::BI__sync_add_and_fetch_2: 6917 case Builtin::BI__sync_add_and_fetch_4: 6918 case Builtin::BI__sync_add_and_fetch_8: 6919 case Builtin::BI__sync_add_and_fetch_16: 6920 BuiltinIndex = 6; 6921 break; 6922 6923 case Builtin::BI__sync_sub_and_fetch: 6924 case Builtin::BI__sync_sub_and_fetch_1: 6925 case Builtin::BI__sync_sub_and_fetch_2: 6926 case Builtin::BI__sync_sub_and_fetch_4: 6927 case Builtin::BI__sync_sub_and_fetch_8: 6928 case Builtin::BI__sync_sub_and_fetch_16: 6929 BuiltinIndex = 7; 6930 break; 6931 6932 case Builtin::BI__sync_and_and_fetch: 6933 case Builtin::BI__sync_and_and_fetch_1: 6934 case Builtin::BI__sync_and_and_fetch_2: 6935 case Builtin::BI__sync_and_and_fetch_4: 6936 case Builtin::BI__sync_and_and_fetch_8: 6937 case Builtin::BI__sync_and_and_fetch_16: 6938 BuiltinIndex = 8; 6939 break; 6940 6941 case Builtin::BI__sync_or_and_fetch: 6942 case Builtin::BI__sync_or_and_fetch_1: 6943 case Builtin::BI__sync_or_and_fetch_2: 6944 case Builtin::BI__sync_or_and_fetch_4: 6945 case Builtin::BI__sync_or_and_fetch_8: 6946 case Builtin::BI__sync_or_and_fetch_16: 6947 BuiltinIndex = 9; 6948 break; 6949 6950 case Builtin::BI__sync_xor_and_fetch: 6951 case Builtin::BI__sync_xor_and_fetch_1: 6952 case Builtin::BI__sync_xor_and_fetch_2: 6953 case Builtin::BI__sync_xor_and_fetch_4: 6954 case Builtin::BI__sync_xor_and_fetch_8: 6955 case Builtin::BI__sync_xor_and_fetch_16: 6956 BuiltinIndex = 10; 6957 break; 6958 6959 case Builtin::BI__sync_nand_and_fetch: 6960 case Builtin::BI__sync_nand_and_fetch_1: 6961 case Builtin::BI__sync_nand_and_fetch_2: 6962 case Builtin::BI__sync_nand_and_fetch_4: 6963 case Builtin::BI__sync_nand_and_fetch_8: 6964 case Builtin::BI__sync_nand_and_fetch_16: 6965 BuiltinIndex = 11; 6966 WarnAboutSemanticsChange = true; 6967 break; 6968 6969 case Builtin::BI__sync_val_compare_and_swap: 6970 case Builtin::BI__sync_val_compare_and_swap_1: 6971 case Builtin::BI__sync_val_compare_and_swap_2: 6972 case Builtin::BI__sync_val_compare_and_swap_4: 6973 case Builtin::BI__sync_val_compare_and_swap_8: 6974 case Builtin::BI__sync_val_compare_and_swap_16: 6975 BuiltinIndex = 12; 6976 NumFixed = 2; 6977 break; 6978 6979 case Builtin::BI__sync_bool_compare_and_swap: 6980 case Builtin::BI__sync_bool_compare_and_swap_1: 6981 case Builtin::BI__sync_bool_compare_and_swap_2: 6982 case Builtin::BI__sync_bool_compare_and_swap_4: 6983 case Builtin::BI__sync_bool_compare_and_swap_8: 6984 case Builtin::BI__sync_bool_compare_and_swap_16: 6985 BuiltinIndex = 13; 6986 NumFixed = 2; 6987 ResultType = Context.BoolTy; 6988 break; 6989 6990 case Builtin::BI__sync_lock_test_and_set: 6991 case Builtin::BI__sync_lock_test_and_set_1: 6992 case Builtin::BI__sync_lock_test_and_set_2: 6993 case Builtin::BI__sync_lock_test_and_set_4: 6994 case Builtin::BI__sync_lock_test_and_set_8: 6995 case Builtin::BI__sync_lock_test_and_set_16: 6996 BuiltinIndex = 14; 6997 break; 6998 6999 case Builtin::BI__sync_lock_release: 7000 case Builtin::BI__sync_lock_release_1: 7001 case Builtin::BI__sync_lock_release_2: 7002 case Builtin::BI__sync_lock_release_4: 7003 case Builtin::BI__sync_lock_release_8: 7004 case 
Builtin::BI__sync_lock_release_16: 7005 BuiltinIndex = 15; 7006 NumFixed = 0; 7007 ResultType = Context.VoidTy; 7008 break; 7009 7010 case Builtin::BI__sync_swap: 7011 case Builtin::BI__sync_swap_1: 7012 case Builtin::BI__sync_swap_2: 7013 case Builtin::BI__sync_swap_4: 7014 case Builtin::BI__sync_swap_8: 7015 case Builtin::BI__sync_swap_16: 7016 BuiltinIndex = 16; 7017 break; 7018 } 7019 7020 // Now that we know how many fixed arguments we expect, first check that we 7021 // have at least that many. 7022 if (TheCall->getNumArgs() < 1+NumFixed) { 7023 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 7024 << 0 << 1 + NumFixed << TheCall->getNumArgs() 7025 << Callee->getSourceRange(); 7026 return ExprError(); 7027 } 7028 7029 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 7030 << Callee->getSourceRange(); 7031 7032 if (WarnAboutSemanticsChange) { 7033 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 7034 << Callee->getSourceRange(); 7035 } 7036 7037 // Get the decl for the concrete builtin from this, we can tell what the 7038 // concrete integer type we should convert to is. 7039 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 7040 StringRef NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 7041 FunctionDecl *NewBuiltinDecl; 7042 if (NewBuiltinID == BuiltinID) 7043 NewBuiltinDecl = FDecl; 7044 else { 7045 // Perform builtin lookup to avoid redeclaring it. 7046 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 7047 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 7048 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 7049 assert(Res.getFoundDecl()); 7050 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 7051 if (!NewBuiltinDecl) 7052 return ExprError(); 7053 } 7054 7055 // The first argument --- the pointer --- has a fixed type; we 7056 // deduce the types of the rest of the arguments accordingly. Walk 7057 // the remaining arguments, converting them to the deduced value type. 7058 for (unsigned i = 0; i != NumFixed; ++i) { 7059 ExprResult Arg = TheCall->getArg(i+1); 7060 7061 // GCC does an implicit conversion to the pointer or integer ValType. This 7062 // can fail in some cases (1i -> int**), check for this error case now. 7063 // Initialize the argument. 7064 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 7065 ValType, /*consume*/ false); 7066 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7067 if (Arg.isInvalid()) 7068 return ExprError(); 7069 7070 // Okay, we have something that *can* be converted to the right type. Check 7071 // to see if there is a potentially weird extension going on here. This can 7072 // happen when you do an atomic operation on something like an char* and 7073 // pass in 42. The 42 gets converted to char. This is even more strange 7074 // for things like 45.123 -> char, etc. 7075 // FIXME: Do this check. 7076 TheCall->setArg(i+1, Arg.get()); 7077 } 7078 7079 // Create a new DeclRefExpr to refer to the new decl. 7080 DeclRefExpr *NewDRE = DeclRefExpr::Create( 7081 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 7082 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 7083 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 7084 7085 // Set the callee in the CallExpr. 7086 // FIXME: This loses syntactic information. 
7087   QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
7088   ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
7089                                               CK_BuiltinFnToFnPtr);
7090   TheCall->setCallee(PromotedCall.get());
7091
7092   // Change the result type of the call to match the original value type. This
7093   // is arbitrary, but the codegen for these builtins is designed to handle it
7094   // gracefully.
7095   TheCall->setType(ResultType);
7096
7097   // Prohibit problematic uses of bit-precise integer types with atomic
7098   // builtins. The arguments would have already been converted to the first
7099   // argument's type, so we only need to check the first argument.
7100   const auto *BitIntValType = ValType->getAs<BitIntType>();
7101   if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
7102     Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
7103     return ExprError();
7104   }
7105
7106   return TheCallResult;
7107 }
7108
7109 /// SemaBuiltinNontemporalOverloaded - We have a call to
7110 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
7111 /// overloaded function based on the pointer type of its last argument.
7112 ///
7113 /// This function goes through and does final semantic checking for these
7114 /// builtins.
7115 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
7116   CallExpr *TheCall = (CallExpr *)TheCallResult.get();
7117   DeclRefExpr *DRE =
7118       cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
7119   FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
7120   unsigned BuiltinID = FDecl->getBuiltinID();
7121   assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
7122           BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
7123          "Unexpected nontemporal load/store builtin!");
7124   bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
7125   unsigned numArgs = isStore ? 2 : 1;
7126
7127   // Ensure that we have the proper number of arguments.
7128   if (checkArgCount(*this, TheCall, numArgs))
7129     return ExprError();
7130
7131   // Inspect the last argument of the nontemporal builtin. This should always
7132   // be a pointer type, from which we infer the type of the memory access.
7133   // Because it is a pointer type, we don't have to worry about any implicit
7134   // casts here.
7135   Expr *PointerArg = TheCall->getArg(numArgs - 1);
7136   ExprResult PointerArgResult =
7137       DefaultFunctionArrayLvalueConversion(PointerArg);
7138
7139   if (PointerArgResult.isInvalid())
7140     return ExprError();
7141   PointerArg = PointerArgResult.get();
7142   TheCall->setArg(numArgs - 1, PointerArg);
7143
7144   const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
7145   if (!pointerType) {
7146     Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
7147         << PointerArg->getType() << PointerArg->getSourceRange();
7148     return ExprError();
7149   }
7150
7151   QualType ValType = pointerType->getPointeeType();
7152
7153   // Strip any qualifiers off ValType.
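// (Illustrative: a 'volatile float *' last argument is accepted here; after
// the qualifiers are stripped the deduced access type is plain 'float'.)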
7154   ValType = ValType.getUnqualifiedType();
7155   if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
7156       !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
7157       !ValType->isVectorType()) {
7158     Diag(DRE->getBeginLoc(),
7159          diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
7160         << PointerArg->getType() << PointerArg->getSourceRange();
7161     return ExprError();
7162   }
7163
7164   if (!isStore) {
7165     TheCall->setType(ValType);
7166     return TheCallResult;
7167   }
7168
7169   ExprResult ValArg = TheCall->getArg(0);
7170   InitializedEntity Entity = InitializedEntity::InitializeParameter(
7171       Context, ValType, /*consume*/ false);
7172   ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
7173   if (ValArg.isInvalid())
7174     return ExprError();
7175
7176   TheCall->setArg(0, ValArg.get());
7177   TheCall->setType(Context.VoidTy);
7178   return TheCallResult;
7179 }
7180
7181 /// CheckObjCString - Checks that the argument to the builtin
7182 /// CFString constructor is correct.
7183 /// Note: It might also make sense to do the UTF-16 conversion here (would
7184 /// simplify the backend).
7185 bool Sema::CheckObjCString(Expr *Arg) {
7186   Arg = Arg->IgnoreParenCasts();
7187   StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
7188
7189   if (!Literal || !Literal->isOrdinary()) {
7190     Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
7191         << Arg->getSourceRange();
7192     return true;
7193   }
7194
7195   if (Literal->containsNonAsciiOrNull()) {
7196     StringRef String = Literal->getString();
7197     unsigned NumBytes = String.size();
7198     SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
7199     const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
7200     llvm::UTF16 *ToPtr = &ToBuf[0];
7201
7202     llvm::ConversionResult Result =
7203         llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
7204                                  ToPtr + NumBytes, llvm::strictConversion);
7205     // Check for conversion failure.
7206     if (Result != llvm::conversionOK)
7207       Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
7208           << Arg->getSourceRange();
7209   }
7210   return false;
7211 }
7212
7213 /// CheckOSLogFormatStringArg - Checks that the format string argument to the
7214 /// os_log() and os_trace() functions is correct, and converts it to const char *.
7215 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
7216   Arg = Arg->IgnoreParenCasts();
7217   auto *Literal = dyn_cast<StringLiteral>(Arg);
7218   if (!Literal) {
7219     if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
7220       Literal = ObjcLiteral->getString();
7221     }
7222   }
7223
7224   if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
7225     return ExprError(
7226         Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
7227         << Arg->getSourceRange());
7228   }
7229
7230   ExprResult Result(Literal);
7231   QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
7232   InitializedEntity Entity =
7233       InitializedEntity::InitializeParameter(Context, ResultTy, false);
7234   Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
7235   return Result;
7236 }
7237
7238 /// Check that the user is calling the appropriate va_start builtin for the
7239 /// target and calling convention.
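// Illustrative sketch (not from the original source), assuming an x86-64
// Linux target compiled with the default (System V) calling convention:
//
//   void f(int X, ...) {
//     __builtin_ms_va_list Ap;
//     __builtin_ms_va_start(Ap, X);   // rejected: not a Win64-ABI function
//   }
//
// Marking f with __attribute__((ms_abi)) is the usual way to satisfy this
// check.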
7240 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 7241 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 7242 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 7243 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 7244 TT.getArch() == llvm::Triple::aarch64_32); 7245 bool IsWindows = TT.isOSWindows(); 7246 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 7247 if (IsX64 || IsAArch64) { 7248 CallingConv CC = CC_C; 7249 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 7250 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 7251 if (IsMSVAStart) { 7252 // Don't allow this in System V ABI functions. 7253 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 7254 return S.Diag(Fn->getBeginLoc(), 7255 diag::err_ms_va_start_used_in_sysv_function); 7256 } else { 7257 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 7258 // On x64 Windows, don't allow this in System V ABI functions. 7259 // (Yes, that means there's no corresponding way to support variadic 7260 // System V ABI functions on Windows.) 7261 if ((IsWindows && CC == CC_X86_64SysV) || 7262 (!IsWindows && CC == CC_Win64)) 7263 return S.Diag(Fn->getBeginLoc(), 7264 diag::err_va_start_used_in_wrong_abi_function) 7265 << !IsWindows; 7266 } 7267 return false; 7268 } 7269 7270 if (IsMSVAStart) 7271 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 7272 return false; 7273 } 7274 7275 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 7276 ParmVarDecl **LastParam = nullptr) { 7277 // Determine whether the current function, block, or obj-c method is variadic 7278 // and get its parameter list. 7279 bool IsVariadic = false; 7280 ArrayRef<ParmVarDecl *> Params; 7281 DeclContext *Caller = S.CurContext; 7282 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 7283 IsVariadic = Block->isVariadic(); 7284 Params = Block->parameters(); 7285 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 7286 IsVariadic = FD->isVariadic(); 7287 Params = FD->parameters(); 7288 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 7289 IsVariadic = MD->isVariadic(); 7290 // FIXME: This isn't correct for methods (results in bogus warning). 7291 Params = MD->parameters(); 7292 } else if (isa<CapturedDecl>(Caller)) { 7293 // We don't support va_start in a CapturedDecl. 7294 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 7295 return true; 7296 } else { 7297 // This must be some other declcontext that parses exprs. 7298 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 7299 return true; 7300 } 7301 7302 if (!IsVariadic) { 7303 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 7304 return true; 7305 } 7306 7307 if (LastParam) 7308 *LastParam = Params.empty() ? nullptr : Params.back(); 7309 7310 return false; 7311 } 7312 7313 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 7314 /// for validity. Emit an error and return true on failure; return false 7315 /// on success. 7316 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 7317 Expr *Fn = TheCall->getCallee(); 7318 7319 if (checkVAStartABI(*this, BuiltinID, Fn)) 7320 return true; 7321 7322 // In C2x mode, va_start only needs one argument. However, the builtin still 7323 // requires two arguments (which matches the behavior of the GCC builtin), 7324 // <stdarg.h> passes `0` as the second argument in C2x mode. 
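// (Illustrative: in C2x, 'va_start(ap)' is expected to expand so that this
// builtin still receives two arguments with a literal 0 in the second
// position; that 0 is recognised further below and exempted from the
// last-named-parameter check.)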
7325 if (checkArgCount(*this, TheCall, 2)) 7326 return true; 7327 7328 // Type-check the first argument normally. 7329 if (checkBuiltinArgument(*this, TheCall, 0)) 7330 return true; 7331 7332 // Check that the current function is variadic, and get its last parameter. 7333 ParmVarDecl *LastParam; 7334 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 7335 return true; 7336 7337 // Verify that the second argument to the builtin is the last argument of the 7338 // current function or method. In C2x mode, if the second argument is an 7339 // integer constant expression with value 0, then we don't bother with this 7340 // check. 7341 bool SecondArgIsLastNamedArgument = false; 7342 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 7343 if (std::optional<llvm::APSInt> Val = 7344 TheCall->getArg(1)->getIntegerConstantExpr(Context); 7345 Val && LangOpts.C2x && *Val == 0) 7346 return false; 7347 7348 // These are valid if SecondArgIsLastNamedArgument is false after the next 7349 // block. 7350 QualType Type; 7351 SourceLocation ParamLoc; 7352 bool IsCRegister = false; 7353 7354 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 7355 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 7356 SecondArgIsLastNamedArgument = PV == LastParam; 7357 7358 Type = PV->getType(); 7359 ParamLoc = PV->getLocation(); 7360 IsCRegister = 7361 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 7362 } 7363 } 7364 7365 if (!SecondArgIsLastNamedArgument) 7366 Diag(TheCall->getArg(1)->getBeginLoc(), 7367 diag::warn_second_arg_of_va_start_not_last_named_param); 7368 else if (IsCRegister || Type->isReferenceType() || 7369 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 7370 // Promotable integers are UB, but enumerations need a bit of 7371 // extra checking to see what their promotable type actually is. 7372 if (!Context.isPromotableIntegerType(Type)) 7373 return false; 7374 if (!Type->isEnumeralType()) 7375 return true; 7376 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 7377 return !(ED && 7378 Context.typesAreCompatible(ED->getPromotionType(), Type)); 7379 }()) { 7380 unsigned Reason = 0; 7381 if (Type->isReferenceType()) Reason = 1; 7382 else if (IsCRegister) Reason = 2; 7383 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 7384 Diag(ParamLoc, diag::note_parameter_type) << Type; 7385 } 7386 7387 return false; 7388 } 7389 7390 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 7391 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool { 7392 const LangOptions &LO = getLangOpts(); 7393 7394 if (LO.CPlusPlus) 7395 return Arg->getType() 7396 .getCanonicalType() 7397 .getTypePtr() 7398 ->getPointeeType() 7399 .withoutLocalFastQualifiers() == Context.CharTy; 7400 7401 // In C, allow aliasing through `char *`, this is required for AArch64 at 7402 // least. 7403 return true; 7404 }; 7405 7406 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 7407 // const char *named_addr); 7408 7409 Expr *Func = Call->getCallee(); 7410 7411 if (Call->getNumArgs() < 3) 7412 return Diag(Call->getEndLoc(), 7413 diag::err_typecheck_call_too_few_args_at_least) 7414 << 0 /*function call*/ << 3 << Call->getNumArgs(); 7415 7416 // Type-check the first argument normally. 7417 if (checkBuiltinArgument(*this, Call, 0)) 7418 return true; 7419 7420 // Check that the current function is variadic. 
7421 if (checkVAStartIsInVariadicFunction(*this, Func))
7422 return true;
7423
7424 // __va_start on Windows does not validate the parameter qualifiers.
7425
7426 const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
7427 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();
7428
7429 const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
7430 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();
7431
7432 const QualType &ConstCharPtrTy =
7433 Context.getPointerType(Context.CharTy.withConst());
7434 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
7435 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
7436 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
7437 << 0 /* qualifier difference */
7438 << 3 /* parameter mismatch */
7439 << 2 << Arg1->getType() << ConstCharPtrTy;
7440
7441 const QualType SizeTy = Context.getSizeType();
7442 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
7443 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
7444 << Arg2->getType() << SizeTy << 1 /* different class */
7445 << 0 /* qualifier difference */
7446 << 3 /* parameter mismatch */
7447 << 3 << Arg2->getType() << SizeTy;
7448
7449 return false;
7450 }
7451
7452 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
7453 /// friends. This is declared to take (...), so we have to check everything.
7454 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
7455 if (checkArgCount(*this, TheCall, 2))
7456 return true;
7457
7458 ExprResult OrigArg0 = TheCall->getArg(0);
7459 ExprResult OrigArg1 = TheCall->getArg(1);
7460
7461 // Do standard promotions between the two arguments, returning their common
7462 // type.
7463 QualType Res = UsualArithmeticConversions(
7464 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
7465 if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
7466 return true;
7467
7468 // Make sure any conversions are pushed back into the call; this is
7469 // type safe since unordered compare builtins are declared as "_Bool
7470 // foo(...)".
7471 TheCall->setArg(0, OrigArg0.get());
7472 TheCall->setArg(1, OrigArg1.get());
7473
7474 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
7475 return false;
7476
7477 // If the common type isn't a real floating type, then the arguments were
7478 // invalid for this operation.
7479 if (Res.isNull() || !Res->isRealFloatingType())
7480 return Diag(OrigArg0.get()->getBeginLoc(),
7481 diag::err_typecheck_call_invalid_ordered_compare)
7482 << OrigArg0.get()->getType() << OrigArg1.get()->getType()
7483 << SourceRange(OrigArg0.get()->getBeginLoc(),
7484 OrigArg1.get()->getEndLoc());
7485
7486 return false;
7487 }
7488
7489 /// SemaBuiltinFPClassification - Handle functions like
7490 /// __builtin_isnan and friends. This is declared to take (...), so we have
7491 /// to check everything. We expect the last argument to be a floating point
7492 /// value.
7493 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
7494 if (checkArgCount(*this, TheCall, NumArgs))
7495 return true;
7496
7497 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
7498 // on all preceding parameters just being int. Try all of those.
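// (Illustrative: in __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
// FP_SUBNORMAL, FP_ZERO, x) the five classification macros are the int
// arguments handled in this loop; x is checked afterwards.)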
7499 for (unsigned i = 0; i < NumArgs - 1; ++i) {
7500 Expr *Arg = TheCall->getArg(i);
7501
7502 if (Arg->isTypeDependent())
7503 return false;
7504
7505 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);
7506
7507 if (Res.isInvalid())
7508 return true;
7509 TheCall->setArg(i, Res.get());
7510 }
7511
7512 Expr *OrigArg = TheCall->getArg(NumArgs-1);
7513
7514 if (OrigArg->isTypeDependent())
7515 return false;
7516
7517 // Usual Unary Conversions will convert half to float, which we want for
7518 // machines that use fp16 conversion intrinsics. Otherwise, we want to leave
7519 // the type as it is, but do the normal L->Rvalue conversions.
7520 if (Context.getTargetInfo().useFP16ConversionIntrinsics())
7521 OrigArg = UsualUnaryConversions(OrigArg).get();
7522 else
7523 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
7524 TheCall->setArg(NumArgs - 1, OrigArg);
7525
7526 // This operation requires a non-_Complex floating-point number.
7527 if (!OrigArg->getType()->isRealFloatingType())
7528 return Diag(OrigArg->getBeginLoc(),
7529 diag::err_typecheck_call_invalid_unary_fp)
7530 << OrigArg->getType() << OrigArg->getSourceRange();
7531
7532 return false;
7533 }
7534
7535 /// Perform semantic analysis for a call to __builtin_complex.
7536 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
7537 if (checkArgCount(*this, TheCall, 2))
7538 return true;
7539
7540 bool Dependent = false;
7541 for (unsigned I = 0; I != 2; ++I) {
7542 Expr *Arg = TheCall->getArg(I);
7543 QualType T = Arg->getType();
7544 if (T->isDependentType()) {
7545 Dependent = true;
7546 continue;
7547 }
7548
7549 // Despite supporting _Complex int, GCC requires a real floating point type
7550 // for the operands of __builtin_complex.
7551 if (!T->isRealFloatingType()) {
7552 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
7553 << Arg->getType() << Arg->getSourceRange();
7554 }
7555
7556 ExprResult Converted = DefaultLvalueConversion(Arg);
7557 if (Converted.isInvalid())
7558 return true;
7559 TheCall->setArg(I, Converted.get());
7560 }
7561
7562 if (Dependent) {
7563 TheCall->setType(Context.DependentTy);
7564 return false;
7565 }
7566
7567 Expr *Real = TheCall->getArg(0);
7568 Expr *Imag = TheCall->getArg(1);
7569 if (!Context.hasSameType(Real->getType(), Imag->getType())) {
7570 return Diag(Real->getBeginLoc(),
7571 diag::err_typecheck_call_different_arg_types)
7572 << Real->getType() << Imag->getType()
7573 << Real->getSourceRange() << Imag->getSourceRange();
7574 }
7575
7576 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
7577 // don't allow this builtin to form those types either.
7578 // FIXME: Should we allow these types?
7579 if (Real->getType()->isFloat16Type())
7580 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
7581 << "_Float16";
7582 if (Real->getType()->isHalfType())
7583 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
7584 << "half";
7585
7586 TheCall->setType(Context.getComplexType(Real->getType()));
7587 return false;
7588 }
7589
7590 // Customized Sema Checking for VSX builtins that have the following signature:
7591 // vector [...] builtinName(vector [...], vector [...], const int);
7592 // These take the same type of vectors (any legal vector type) for the first
7593 // two arguments and a compile-time constant for the third argument.
7594 // Example builtins are : 7595 // vector double vec_xxpermdi(vector double, vector double, int); 7596 // vector short vec_xxsldwi(vector short, vector short, int); 7597 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 7598 unsigned ExpectedNumArgs = 3; 7599 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 7600 return true; 7601 7602 // Check the third argument is a compile time constant 7603 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 7604 return Diag(TheCall->getBeginLoc(), 7605 diag::err_vsx_builtin_nonconstant_argument) 7606 << 3 /* argument index */ << TheCall->getDirectCallee() 7607 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 7608 TheCall->getArg(2)->getEndLoc()); 7609 7610 QualType Arg1Ty = TheCall->getArg(0)->getType(); 7611 QualType Arg2Ty = TheCall->getArg(1)->getType(); 7612 7613 // Check the type of argument 1 and argument 2 are vectors. 7614 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 7615 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 7616 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 7617 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 7618 << TheCall->getDirectCallee() 7619 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7620 TheCall->getArg(1)->getEndLoc()); 7621 } 7622 7623 // Check the first two arguments are the same type. 7624 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 7625 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 7626 << TheCall->getDirectCallee() 7627 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7628 TheCall->getArg(1)->getEndLoc()); 7629 } 7630 7631 // When default clang type checking is turned off and the customized type 7632 // checking is used, the returning type of the function must be explicitly 7633 // set. Otherwise it is _Bool by default. 7634 TheCall->setType(Arg1Ty); 7635 7636 return false; 7637 } 7638 7639 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 7640 // This is declared to take (...), so we have to check everything. 7641 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 7642 if (TheCall->getNumArgs() < 2) 7643 return ExprError(Diag(TheCall->getEndLoc(), 7644 diag::err_typecheck_call_too_few_args_at_least) 7645 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 7646 << TheCall->getSourceRange()); 7647 7648 // Determine which of the following types of shufflevector we're checking: 7649 // 1) unary, vector mask: (lhs, mask) 7650 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 7651 QualType resType = TheCall->getArg(0)->getType(); 7652 unsigned numElements = 0; 7653 7654 if (!TheCall->getArg(0)->isTypeDependent() && 7655 !TheCall->getArg(1)->isTypeDependent()) { 7656 QualType LHSType = TheCall->getArg(0)->getType(); 7657 QualType RHSType = TheCall->getArg(1)->getType(); 7658 7659 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 7660 return ExprError( 7661 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 7662 << TheCall->getDirectCallee() 7663 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7664 TheCall->getArg(1)->getEndLoc())); 7665 7666 numElements = LHSType->castAs<VectorType>()->getNumElements(); 7667 unsigned numResElements = TheCall->getNumArgs() - 2; 7668 7669 // Check to see if we have a call with 2 vector arguments, the unary shuffle 7670 // with mask. If so, verify that RHS is an integer vector type with the 7671 // same number of elts as lhs. 
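// (Illustrative: __builtin_shufflevector(a, mask) is the vector-mask form;
// __builtin_shufflevector(a, b, 0, 4, 1, 5) is the scalar-mask form.)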
7672 if (TheCall->getNumArgs() == 2) { 7673 if (!RHSType->hasIntegerRepresentation() || 7674 RHSType->castAs<VectorType>()->getNumElements() != numElements) 7675 return ExprError(Diag(TheCall->getBeginLoc(), 7676 diag::err_vec_builtin_incompatible_vector) 7677 << TheCall->getDirectCallee() 7678 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 7679 TheCall->getArg(1)->getEndLoc())); 7680 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 7681 return ExprError(Diag(TheCall->getBeginLoc(), 7682 diag::err_vec_builtin_incompatible_vector) 7683 << TheCall->getDirectCallee() 7684 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7685 TheCall->getArg(1)->getEndLoc())); 7686 } else if (numElements != numResElements) { 7687 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 7688 resType = Context.getVectorType(eltType, numResElements, 7689 VectorType::GenericVector); 7690 } 7691 } 7692 7693 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 7694 if (TheCall->getArg(i)->isTypeDependent() || 7695 TheCall->getArg(i)->isValueDependent()) 7696 continue; 7697 7698 std::optional<llvm::APSInt> Result; 7699 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 7700 return ExprError(Diag(TheCall->getBeginLoc(), 7701 diag::err_shufflevector_nonconstant_argument) 7702 << TheCall->getArg(i)->getSourceRange()); 7703 7704 // Allow -1 which will be translated to undef in the IR. 7705 if (Result->isSigned() && Result->isAllOnes()) 7706 continue; 7707 7708 if (Result->getActiveBits() > 64 || 7709 Result->getZExtValue() >= numElements * 2) 7710 return ExprError(Diag(TheCall->getBeginLoc(), 7711 diag::err_shufflevector_argument_too_large) 7712 << TheCall->getArg(i)->getSourceRange()); 7713 } 7714 7715 SmallVector<Expr*, 32> exprs; 7716 7717 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 7718 exprs.push_back(TheCall->getArg(i)); 7719 TheCall->setArg(i, nullptr); 7720 } 7721 7722 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 7723 TheCall->getCallee()->getBeginLoc(), 7724 TheCall->getRParenLoc()); 7725 } 7726 7727 /// SemaConvertVectorExpr - Handle __builtin_convertvector 7728 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 7729 SourceLocation BuiltinLoc, 7730 SourceLocation RParenLoc) { 7731 ExprValueKind VK = VK_PRValue; 7732 ExprObjectKind OK = OK_Ordinary; 7733 QualType DstTy = TInfo->getType(); 7734 QualType SrcTy = E->getType(); 7735 7736 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 7737 return ExprError(Diag(BuiltinLoc, 7738 diag::err_convertvector_non_vector) 7739 << E->getSourceRange()); 7740 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 7741 return ExprError(Diag(BuiltinLoc, 7742 diag::err_convertvector_non_vector_type)); 7743 7744 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 7745 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 7746 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 7747 if (SrcElts != DstElts) 7748 return ExprError(Diag(BuiltinLoc, 7749 diag::err_convertvector_incompatible_vector) 7750 << E->getSourceRange()); 7751 } 7752 7753 return new (Context) 7754 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 7755 } 7756 7757 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 7758 // This is declared to take (const void*, ...) and can take two 7759 // optional constant int args. 
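// A representative call (illustrative): __builtin_prefetch(p, 1, 3);
// the second argument must be 0 or 1 and the third must be in [0, 3],
// as enforced by the range checks below.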
7760 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 7761 unsigned NumArgs = TheCall->getNumArgs(); 7762 7763 if (NumArgs > 3) 7764 return Diag(TheCall->getEndLoc(), 7765 diag::err_typecheck_call_too_many_args_at_most) 7766 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7767 7768 // Argument 0 is checked for us and the remaining arguments must be 7769 // constant integers. 7770 for (unsigned i = 1; i != NumArgs; ++i) 7771 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 7772 return true; 7773 7774 return false; 7775 } 7776 7777 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 7778 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 7779 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 7780 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 7781 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7782 if (checkArgCount(*this, TheCall, 1)) 7783 return true; 7784 Expr *Arg = TheCall->getArg(0); 7785 if (Arg->isInstantiationDependent()) 7786 return false; 7787 7788 QualType ArgTy = Arg->getType(); 7789 if (!ArgTy->hasFloatingRepresentation()) 7790 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 7791 << ArgTy; 7792 if (Arg->isLValue()) { 7793 ExprResult FirstArg = DefaultLvalueConversion(Arg); 7794 TheCall->setArg(0, FirstArg.get()); 7795 } 7796 TheCall->setType(TheCall->getArg(0)->getType()); 7797 return false; 7798 } 7799 7800 /// SemaBuiltinAssume - Handle __assume (MS Extension). 7801 // __assume does not evaluate its arguments, and should warn if its argument 7802 // has side effects. 7803 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 7804 Expr *Arg = TheCall->getArg(0); 7805 if (Arg->isInstantiationDependent()) return false; 7806 7807 if (Arg->HasSideEffects(Context)) 7808 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 7809 << Arg->getSourceRange() 7810 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 7811 7812 return false; 7813 } 7814 7815 /// Handle __builtin_alloca_with_align. This is declared 7816 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 7817 /// than 8. 7818 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 7819 // The alignment must be a constant integer. 7820 Expr *Arg = TheCall->getArg(1); 7821 7822 // We can't check the value of a dependent argument. 7823 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7824 if (const auto *UE = 7825 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 7826 if (UE->getKind() == UETT_AlignOf || 7827 UE->getKind() == UETT_PreferredAlignOf) 7828 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 7829 << Arg->getSourceRange(); 7830 7831 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 7832 7833 if (!Result.isPowerOf2()) 7834 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7835 << Arg->getSourceRange(); 7836 7837 if (Result < Context.getCharWidth()) 7838 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 7839 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 7840 7841 if (Result > std::numeric_limits<int32_t>::max()) 7842 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 7843 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 7844 } 7845 7846 return false; 7847 } 7848 7849 /// Handle __builtin_assume_aligned. This is declared 7850 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
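/// Illustrative uses: p = __builtin_assume_aligned(p, 64);
///                    p = __builtin_assume_aligned(p, 64, offset);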
7851 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 7852 if (checkArgCountRange(*this, TheCall, 2, 3)) 7853 return true; 7854 7855 unsigned NumArgs = TheCall->getNumArgs(); 7856 Expr *FirstArg = TheCall->getArg(0); 7857 7858 { 7859 ExprResult FirstArgResult = 7860 DefaultFunctionArrayLvalueConversion(FirstArg); 7861 if (FirstArgResult.isInvalid()) 7862 return true; 7863 TheCall->setArg(0, FirstArgResult.get()); 7864 } 7865 7866 // The alignment must be a constant integer. 7867 Expr *SecondArg = TheCall->getArg(1); 7868 7869 // We can't check the value of a dependent argument. 7870 if (!SecondArg->isValueDependent()) { 7871 llvm::APSInt Result; 7872 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7873 return true; 7874 7875 if (!Result.isPowerOf2()) 7876 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7877 << SecondArg->getSourceRange(); 7878 7879 if (Result > Sema::MaximumAlignment) 7880 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 7881 << SecondArg->getSourceRange() << Sema::MaximumAlignment; 7882 } 7883 7884 if (NumArgs > 2) { 7885 Expr *ThirdArg = TheCall->getArg(2); 7886 if (convertArgumentToType(*this, ThirdArg, Context.getSizeType())) 7887 return true; 7888 TheCall->setArg(2, ThirdArg); 7889 } 7890 7891 return false; 7892 } 7893 7894 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 7895 unsigned BuiltinID = 7896 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 7897 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 7898 7899 unsigned NumArgs = TheCall->getNumArgs(); 7900 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 7901 if (NumArgs < NumRequiredArgs) { 7902 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 7903 << 0 /* function call */ << NumRequiredArgs << NumArgs 7904 << TheCall->getSourceRange(); 7905 } 7906 if (NumArgs >= NumRequiredArgs + 0x100) { 7907 return Diag(TheCall->getEndLoc(), 7908 diag::err_typecheck_call_too_many_args_at_most) 7909 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 7910 << TheCall->getSourceRange(); 7911 } 7912 unsigned i = 0; 7913 7914 // For formatting call, check buffer arg. 7915 if (!IsSizeCall) { 7916 ExprResult Arg(TheCall->getArg(i)); 7917 InitializedEntity Entity = InitializedEntity::InitializeParameter( 7918 Context, Context.VoidPtrTy, false); 7919 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7920 if (Arg.isInvalid()) 7921 return true; 7922 TheCall->setArg(i, Arg.get()); 7923 i++; 7924 } 7925 7926 // Check string literal arg. 7927 unsigned FormatIdx = i; 7928 { 7929 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 7930 if (Arg.isInvalid()) 7931 return true; 7932 TheCall->setArg(i, Arg.get()); 7933 i++; 7934 } 7935 7936 // Make sure variadic args are scalar. 7937 unsigned FirstDataArg = i; 7938 while (i < NumArgs) { 7939 ExprResult Arg = DefaultVariadicArgumentPromotion( 7940 TheCall->getArg(i), VariadicFunction, nullptr); 7941 if (Arg.isInvalid()) 7942 return true; 7943 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 7944 if (ArgSize.getQuantity() >= 0x100) { 7945 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 7946 << i << (int)ArgSize.getQuantity() << 0xff 7947 << TheCall->getSourceRange(); 7948 } 7949 TheCall->setArg(i, Arg.get()); 7950 i++; 7951 } 7952 7953 // Check formatting specifiers. NOTE: We're only doing this for the non-size 7954 // call to avoid duplicate diagnostics. 
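// (For a formatting call such as __builtin_os_log_format(buf, "%d", i),
// FormatIdx is 1 and FirstDataArg is 2 at this point -- illustrative only.)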
7955 if (!IsSizeCall) {
7956 llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
7957 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
7958 bool Success = CheckFormatArguments(
7959 Args, FAPK_Variadic, FormatIdx, FirstDataArg, FST_OSLog,
7960 VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
7961 CheckedVarArgs);
7962 if (!Success)
7963 return true;
7964 }
7965
7966 if (IsSizeCall) {
7967 TheCall->setType(Context.getSizeType());
7968 } else {
7969 TheCall->setType(Context.VoidPtrTy);
7970 }
7971 return false;
7972 }
7973
7974 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
7975 /// TheCall is a constant expression.
7976 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
7977 llvm::APSInt &Result) {
7978 Expr *Arg = TheCall->getArg(ArgNum);
7979 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
7980 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
7981
7982 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
7983
7984 std::optional<llvm::APSInt> R;
7985 if (!(R = Arg->getIntegerConstantExpr(Context)))
7986 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
7987 << FDecl->getDeclName() << Arg->getSourceRange();
7988 Result = *R;
7989 return false;
7990 }
7991
7992 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
7993 /// TheCall is a constant expression in the range [Low, High].
7994 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
7995 int Low, int High, bool RangeIsError) {
7996 if (isConstantEvaluated())
7997 return false;
7998 llvm::APSInt Result;
7999
8000 // We can't check the value of a dependent argument.
8001 Expr *Arg = TheCall->getArg(ArgNum);
8002 if (Arg->isTypeDependent() || Arg->isValueDependent())
8003 return false;
8004
8005 // Check constant-ness first.
8006 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
8007 return true;
8008
8009 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
8010 if (RangeIsError)
8011 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
8012 << toString(Result, 10) << Low << High << Arg->getSourceRange();
8013 else
8014 // Defer the warning until we know if the code will be emitted so that
8015 // dead code can ignore this.
8016 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
8017 PDiag(diag::warn_argument_invalid_range)
8018 << toString(Result, 10) << Low << High
8019 << Arg->getSourceRange());
8020 }
8021
8022 return false;
8023 }
8024
8025 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
8026 /// TheCall is a constant expression that is a multiple of Num.
8027 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
8028 unsigned Num) {
8029 llvm::APSInt Result;
8030
8031 // We can't check the value of a dependent argument.
8032 Expr *Arg = TheCall->getArg(ArgNum);
8033 if (Arg->isTypeDependent() || Arg->isValueDependent())
8034 return false;
8035
8036 // Check constant-ness first.
8037 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
8038 return true;
8039
8040 if (Result.getSExtValue() % Num != 0)
8041 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
8042 << Num << Arg->getSourceRange();
8043
8044 return false;
8045 }
8046
8047 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
8048 /// constant expression representing a power of 2.
8049 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 8050 llvm::APSInt Result; 8051 8052 // We can't check the value of a dependent argument. 8053 Expr *Arg = TheCall->getArg(ArgNum); 8054 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8055 return false; 8056 8057 // Check constant-ness first. 8058 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 8059 return true; 8060 8061 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 8062 // and only if x is a power of 2. 8063 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 8064 return false; 8065 8066 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 8067 << Arg->getSourceRange(); 8068 } 8069 8070 static bool IsShiftedByte(llvm::APSInt Value) { 8071 if (Value.isNegative()) 8072 return false; 8073 8074 // Check if it's a shifted byte, by shifting it down 8075 while (true) { 8076 // If the value fits in the bottom byte, the check passes. 8077 if (Value < 0x100) 8078 return true; 8079 8080 // Otherwise, if the value has _any_ bits in the bottom byte, the check 8081 // fails. 8082 if ((Value & 0xFF) != 0) 8083 return false; 8084 8085 // If the bottom 8 bits are all 0, but something above that is nonzero, 8086 // then shifting the value right by 8 bits won't affect whether it's a 8087 // shifted byte or not. So do that, and go round again. 8088 Value >>= 8; 8089 } 8090 } 8091 8092 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 8093 /// a constant expression representing an arbitrary byte value shifted left by 8094 /// a multiple of 8 bits. 8095 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 8096 unsigned ArgBits) { 8097 llvm::APSInt Result; 8098 8099 // We can't check the value of a dependent argument. 8100 Expr *Arg = TheCall->getArg(ArgNum); 8101 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8102 return false; 8103 8104 // Check constant-ness first. 8105 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 8106 return true; 8107 8108 // Truncate to the given size. 8109 Result = Result.getLoBits(ArgBits); 8110 Result.setIsUnsigned(true); 8111 8112 if (IsShiftedByte(Result)) 8113 return false; 8114 8115 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 8116 << Arg->getSourceRange(); 8117 } 8118 8119 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 8120 /// TheCall is a constant expression representing either a shifted byte value, 8121 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 8122 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 8123 /// Arm MVE intrinsics. 8124 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 8125 int ArgNum, 8126 unsigned ArgBits) { 8127 llvm::APSInt Result; 8128 8129 // We can't check the value of a dependent argument. 8130 Expr *Arg = TheCall->getArg(ArgNum); 8131 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8132 return false; 8133 8134 // Check constant-ness first. 8135 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 8136 return true; 8137 8138 // Truncate to the given size. 8139 Result = Result.getLoBits(ArgBits); 8140 Result.setIsUnsigned(true); 8141 8142 // Check to see if it's in either of the required forms. 
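// e.g. 0xAB00 and 0x12FF are accepted, while 0x1234 is diagnosed
// (illustrative values).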
8143 if (IsShiftedByte(Result) || 8144 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 8145 return false; 8146 8147 return Diag(TheCall->getBeginLoc(), 8148 diag::err_argument_not_shifted_byte_or_xxff) 8149 << Arg->getSourceRange(); 8150 } 8151 8152 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 8153 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 8154 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 8155 if (checkArgCount(*this, TheCall, 2)) 8156 return true; 8157 Expr *Arg0 = TheCall->getArg(0); 8158 Expr *Arg1 = TheCall->getArg(1); 8159 8160 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 8161 if (FirstArg.isInvalid()) 8162 return true; 8163 QualType FirstArgType = FirstArg.get()->getType(); 8164 if (!FirstArgType->isAnyPointerType()) 8165 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 8166 << "first" << FirstArgType << Arg0->getSourceRange(); 8167 TheCall->setArg(0, FirstArg.get()); 8168 8169 ExprResult SecArg = DefaultLvalueConversion(Arg1); 8170 if (SecArg.isInvalid()) 8171 return true; 8172 QualType SecArgType = SecArg.get()->getType(); 8173 if (!SecArgType->isIntegerType()) 8174 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 8175 << "second" << SecArgType << Arg1->getSourceRange(); 8176 8177 // Derive the return type from the pointer argument. 8178 TheCall->setType(FirstArgType); 8179 return false; 8180 } 8181 8182 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 8183 if (checkArgCount(*this, TheCall, 2)) 8184 return true; 8185 8186 Expr *Arg0 = TheCall->getArg(0); 8187 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 8188 if (FirstArg.isInvalid()) 8189 return true; 8190 QualType FirstArgType = FirstArg.get()->getType(); 8191 if (!FirstArgType->isAnyPointerType()) 8192 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 8193 << "first" << FirstArgType << Arg0->getSourceRange(); 8194 TheCall->setArg(0, FirstArg.get()); 8195 8196 // Derive the return type from the pointer argument. 
8197 TheCall->setType(FirstArgType); 8198 8199 // Second arg must be an constant in range [0,15] 8200 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 8201 } 8202 8203 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 8204 if (checkArgCount(*this, TheCall, 2)) 8205 return true; 8206 Expr *Arg0 = TheCall->getArg(0); 8207 Expr *Arg1 = TheCall->getArg(1); 8208 8209 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 8210 if (FirstArg.isInvalid()) 8211 return true; 8212 QualType FirstArgType = FirstArg.get()->getType(); 8213 if (!FirstArgType->isAnyPointerType()) 8214 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 8215 << "first" << FirstArgType << Arg0->getSourceRange(); 8216 8217 QualType SecArgType = Arg1->getType(); 8218 if (!SecArgType->isIntegerType()) 8219 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 8220 << "second" << SecArgType << Arg1->getSourceRange(); 8221 TheCall->setType(Context.IntTy); 8222 return false; 8223 } 8224 8225 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 8226 BuiltinID == AArch64::BI__builtin_arm_stg) { 8227 if (checkArgCount(*this, TheCall, 1)) 8228 return true; 8229 Expr *Arg0 = TheCall->getArg(0); 8230 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 8231 if (FirstArg.isInvalid()) 8232 return true; 8233 8234 QualType FirstArgType = FirstArg.get()->getType(); 8235 if (!FirstArgType->isAnyPointerType()) 8236 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 8237 << "first" << FirstArgType << Arg0->getSourceRange(); 8238 TheCall->setArg(0, FirstArg.get()); 8239 8240 // Derive the return type from the pointer argument. 8241 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 8242 TheCall->setType(FirstArgType); 8243 return false; 8244 } 8245 8246 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 8247 Expr *ArgA = TheCall->getArg(0); 8248 Expr *ArgB = TheCall->getArg(1); 8249 8250 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 8251 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 8252 8253 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 8254 return true; 8255 8256 QualType ArgTypeA = ArgExprA.get()->getType(); 8257 QualType ArgTypeB = ArgExprB.get()->getType(); 8258 8259 auto isNull = [&] (Expr *E) -> bool { 8260 return E->isNullPointerConstant( 8261 Context, Expr::NPC_ValueDependentIsNotNull); }; 8262 8263 // argument should be either a pointer or null 8264 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 8265 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 8266 << "first" << ArgTypeA << ArgA->getSourceRange(); 8267 8268 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 8269 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 8270 << "second" << ArgTypeB << ArgB->getSourceRange(); 8271 8272 // Ensure Pointee types are compatible 8273 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 8274 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 8275 QualType pointeeA = ArgTypeA->getPointeeType(); 8276 QualType pointeeB = ArgTypeB->getPointeeType(); 8277 if (!Context.typesAreCompatible( 8278 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 8279 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 8280 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 8281 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 8282 << ArgB->getSourceRange(); 8283 } 8284 } 8285 8286 // at least one argument should be pointer type 8287 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 8288 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 8289 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 8290 8291 if (isNull(ArgA)) // adopt type of the other pointer 8292 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 8293 8294 if (isNull(ArgB)) 8295 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 8296 8297 TheCall->setArg(0, ArgExprA.get()); 8298 TheCall->setArg(1, ArgExprB.get()); 8299 TheCall->setType(Context.LongLongTy); 8300 return false; 8301 } 8302 assert(false && "Unhandled ARM MTE intrinsic"); 8303 return true; 8304 } 8305 8306 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 8307 /// TheCall is an ARM/AArch64 special register string literal. 8308 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 8309 int ArgNum, unsigned ExpectedFieldNum, 8310 bool AllowName) { 8311 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 8312 BuiltinID == ARM::BI__builtin_arm_wsr64 || 8313 BuiltinID == ARM::BI__builtin_arm_rsr || 8314 BuiltinID == ARM::BI__builtin_arm_rsrp || 8315 BuiltinID == ARM::BI__builtin_arm_wsr || 8316 BuiltinID == ARM::BI__builtin_arm_wsrp; 8317 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 8318 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 8319 BuiltinID == AArch64::BI__builtin_arm_rsr128 || 8320 BuiltinID == AArch64::BI__builtin_arm_wsr128 || 8321 BuiltinID == AArch64::BI__builtin_arm_rsr || 8322 BuiltinID == AArch64::BI__builtin_arm_rsrp || 8323 BuiltinID == AArch64::BI__builtin_arm_wsr || 8324 BuiltinID == AArch64::BI__builtin_arm_wsrp; 8325 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 8326 8327 // We can't check the value of a dependent argument. 8328 Expr *Arg = TheCall->getArg(ArgNum); 8329 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8330 return false; 8331 8332 // Check if the argument is a string literal. 8333 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 8334 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 8335 << Arg->getSourceRange(); 8336 8337 // Check the type of special register given. 8338 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 8339 SmallVector<StringRef, 6> Fields; 8340 Reg.split(Fields, ":"); 8341 8342 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 8343 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 8344 << Arg->getSourceRange(); 8345 8346 // If the string is the name of a register then we cannot check that it is 8347 // valid here but if the string is of one the forms described in ACLE then we 8348 // can check that the supplied fields are integers and within the valid 8349 // ranges. 8350 if (Fields.size() > 1) { 8351 bool FiveFields = Fields.size() == 5; 8352 8353 bool ValidString = true; 8354 if (IsARMBuiltin) { 8355 ValidString &= Fields[0].startswith_insensitive("cp") || 8356 Fields[0].startswith_insensitive("p"); 8357 if (ValidString) 8358 Fields[0] = Fields[0].drop_front( 8359 Fields[0].startswith_insensitive("cp") ? 
2 : 1); 8360 8361 ValidString &= Fields[2].startswith_insensitive("c"); 8362 if (ValidString) 8363 Fields[2] = Fields[2].drop_front(1); 8364 8365 if (FiveFields) { 8366 ValidString &= Fields[3].startswith_insensitive("c"); 8367 if (ValidString) 8368 Fields[3] = Fields[3].drop_front(1); 8369 } 8370 } 8371 8372 SmallVector<int, 5> Ranges; 8373 if (FiveFields) 8374 Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7}); 8375 else 8376 Ranges.append({15, 7, 15}); 8377 8378 for (unsigned i=0; i<Fields.size(); ++i) { 8379 int IntField; 8380 ValidString &= !Fields[i].getAsInteger(10, IntField); 8381 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 8382 } 8383 8384 if (!ValidString) 8385 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 8386 << Arg->getSourceRange(); 8387 } else if (IsAArch64Builtin && Fields.size() == 1) { 8388 // This code validates writes to PSTATE registers. 8389 8390 // Not a write. 8391 if (TheCall->getNumArgs() != 2) 8392 return false; 8393 8394 // The 128-bit system register accesses do not touch PSTATE. 8395 if (BuiltinID == AArch64::BI__builtin_arm_rsr128 || 8396 BuiltinID == AArch64::BI__builtin_arm_wsr128) 8397 return false; 8398 8399 // These are the named PSTATE accesses using "MSR (immediate)" instructions, 8400 // along with the upper limit on the immediates allowed. 8401 auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg) 8402 .CaseLower("spsel", 15) 8403 .CaseLower("daifclr", 15) 8404 .CaseLower("daifset", 15) 8405 .CaseLower("pan", 15) 8406 .CaseLower("uao", 15) 8407 .CaseLower("dit", 15) 8408 .CaseLower("ssbs", 15) 8409 .CaseLower("tco", 15) 8410 .CaseLower("allint", 1) 8411 .CaseLower("pm", 1) 8412 .Default(std::nullopt); 8413 8414 // If this is not a named PSTATE, just continue without validating, as this 8415 // will be lowered to an "MSR (register)" instruction directly 8416 if (!MaxLimit) 8417 return false; 8418 8419 // Here we only allow constants in the range for that pstate, as required by 8420 // the ACLE. 8421 // 8422 // While clang also accepts the names of system registers in its ACLE 8423 // intrinsics, we prevent this with the PSTATE names used in MSR (immediate) 8424 // as the value written via a register is different to the value used as an 8425 // immediate to have the same effect. e.g., for the instruction `msr tco, 8426 // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but 8427 // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO. 8428 // 8429 // If a programmer wants to codegen the MSR (register) form of `msr tco, 8430 // xN`, they can still do so by specifying the register using five 8431 // colon-separated numbers in a string. 8432 return SemaBuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit); 8433 } 8434 8435 return false; 8436 } 8437 8438 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 8439 /// Emit an error and return true on failure; return false on success. 8440 /// TypeStr is a string containing the type descriptor of the value returned by 8441 /// the builtin and the descriptors of the expected type of the arguments. 
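/// Each descriptor is consumed by DecodePPCMMATypeFromStr, which may also
/// produce a mask used below to range-check constant integer arguments.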
8442 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, 8443 const char *TypeStr) { 8444 8445 assert((TypeStr[0] != '\0') && 8446 "Invalid types in PPC MMA builtin declaration"); 8447 8448 switch (BuiltinID) { 8449 default: 8450 // This function is called in CheckPPCBuiltinFunctionCall where the 8451 // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here 8452 // we are isolating the pair vector memop builtins that can be used with mma 8453 // off so the default case is every builtin that requires mma and paired 8454 // vector memops. 8455 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 8456 diag::err_ppc_builtin_only_on_arch, "10") || 8457 SemaFeatureCheck(*this, TheCall, "mma", 8458 diag::err_ppc_builtin_only_on_arch, "10")) 8459 return true; 8460 break; 8461 case PPC::BI__builtin_vsx_lxvp: 8462 case PPC::BI__builtin_vsx_stxvp: 8463 case PPC::BI__builtin_vsx_assemble_pair: 8464 case PPC::BI__builtin_vsx_disassemble_pair: 8465 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 8466 diag::err_ppc_builtin_only_on_arch, "10")) 8467 return true; 8468 break; 8469 } 8470 8471 unsigned Mask = 0; 8472 unsigned ArgNum = 0; 8473 8474 // The first type in TypeStr is the type of the value returned by the 8475 // builtin. So we first read that type and change the type of TheCall. 8476 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8477 TheCall->setType(type); 8478 8479 while (*TypeStr != '\0') { 8480 Mask = 0; 8481 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8482 if (ArgNum >= TheCall->getNumArgs()) { 8483 ArgNum++; 8484 break; 8485 } 8486 8487 Expr *Arg = TheCall->getArg(ArgNum); 8488 QualType PassedType = Arg->getType(); 8489 QualType StrippedRVType = PassedType.getCanonicalType(); 8490 8491 // Strip Restrict/Volatile qualifiers. 8492 if (StrippedRVType.isRestrictQualified() || 8493 StrippedRVType.isVolatileQualified()) 8494 StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType(); 8495 8496 // The only case where the argument type and expected type are allowed to 8497 // mismatch is if the argument type is a non-void pointer (or array) and 8498 // expected type is a void pointer. 8499 if (StrippedRVType != ExpectedType) 8500 if (!(ExpectedType->isVoidPointerType() && 8501 (StrippedRVType->isPointerType() || StrippedRVType->isArrayType()))) 8502 return Diag(Arg->getBeginLoc(), 8503 diag::err_typecheck_convert_incompatible) 8504 << PassedType << ExpectedType << 1 << 0 << 0; 8505 8506 // If the value of the Mask is not 0, we have a constraint in the size of 8507 // the integer argument so here we ensure the argument is a constant that 8508 // is in the valid range. 8509 if (Mask != 0 && 8510 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 8511 return true; 8512 8513 ArgNum++; 8514 } 8515 8516 // In case we exited early from the previous loop, there are other types to 8517 // read from TypeStr. So we need to read them all to ensure we have the right 8518 // number of arguments in TheCall and if it is not the case, to display a 8519 // better error message. 8520 while (*TypeStr != '\0') { 8521 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8522 ArgNum++; 8523 } 8524 if (checkArgCount(*this, TheCall, ArgNum)) 8525 return true; 8526 8527 return false; 8528 } 8529 8530 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 8531 /// This checks that the target supports __builtin_longjmp and 8532 /// that val is a constant 1. 
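/// e.g. __builtin_longjmp(env, 1); any other constant for val is rejected.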
8533 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 8534 if (!Context.getTargetInfo().hasSjLjLowering()) 8535 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 8536 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8537 8538 Expr *Arg = TheCall->getArg(1); 8539 llvm::APSInt Result; 8540 8541 // TODO: This is less than ideal. Overload this to take a value. 8542 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 8543 return true; 8544 8545 if (Result != 1) 8546 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 8547 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 8548 8549 return false; 8550 } 8551 8552 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 8553 /// This checks that the target supports __builtin_setjmp. 8554 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 8555 if (!Context.getTargetInfo().hasSjLjLowering()) 8556 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 8557 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8558 return false; 8559 } 8560 8561 namespace { 8562 8563 class UncoveredArgHandler { 8564 enum { Unknown = -1, AllCovered = -2 }; 8565 8566 signed FirstUncoveredArg = Unknown; 8567 SmallVector<const Expr *, 4> DiagnosticExprs; 8568 8569 public: 8570 UncoveredArgHandler() = default; 8571 8572 bool hasUncoveredArg() const { 8573 return (FirstUncoveredArg >= 0); 8574 } 8575 8576 unsigned getUncoveredArg() const { 8577 assert(hasUncoveredArg() && "no uncovered argument"); 8578 return FirstUncoveredArg; 8579 } 8580 8581 void setAllCovered() { 8582 // A string has been found with all arguments covered, so clear out 8583 // the diagnostics. 8584 DiagnosticExprs.clear(); 8585 FirstUncoveredArg = AllCovered; 8586 } 8587 8588 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 8589 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 8590 8591 // Don't update if a previous string covers all arguments. 8592 if (FirstUncoveredArg == AllCovered) 8593 return; 8594 8595 // UncoveredArgHandler tracks the highest uncovered argument index 8596 // and with it all the strings that match this index. 8597 if (NewFirstUncoveredArg == FirstUncoveredArg) 8598 DiagnosticExprs.push_back(StrExpr); 8599 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 8600 DiagnosticExprs.clear(); 8601 DiagnosticExprs.push_back(StrExpr); 8602 FirstUncoveredArg = NewFirstUncoveredArg; 8603 } 8604 } 8605 8606 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 8607 }; 8608 8609 enum StringLiteralCheckType { 8610 SLCT_NotALiteral, 8611 SLCT_UncheckedLiteral, 8612 SLCT_CheckedLiteral 8613 }; 8614 8615 } // namespace 8616 8617 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 8618 BinaryOperatorKind BinOpKind, 8619 bool AddendIsRight) { 8620 unsigned BitWidth = Offset.getBitWidth(); 8621 unsigned AddendBitWidth = Addend.getBitWidth(); 8622 // There might be negative interim results. 8623 if (Addend.isUnsigned()) { 8624 Addend = Addend.zext(++AddendBitWidth); 8625 Addend.setIsSigned(true); 8626 } 8627 // Adjust the bit width of the APSInts. 
8628 if (AddendBitWidth > BitWidth) { 8629 Offset = Offset.sext(AddendBitWidth); 8630 BitWidth = AddendBitWidth; 8631 } else if (BitWidth > AddendBitWidth) { 8632 Addend = Addend.sext(BitWidth); 8633 } 8634 8635 bool Ov = false; 8636 llvm::APSInt ResOffset = Offset; 8637 if (BinOpKind == BO_Add) 8638 ResOffset = Offset.sadd_ov(Addend, Ov); 8639 else { 8640 assert(AddendIsRight && BinOpKind == BO_Sub && 8641 "operator must be add or sub with addend on the right"); 8642 ResOffset = Offset.ssub_ov(Addend, Ov); 8643 } 8644 8645 // We add an offset to a pointer here so we should support an offset as big as 8646 // possible. 8647 if (Ov) { 8648 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 8649 "index (intermediate) result too big"); 8650 Offset = Offset.sext(2 * BitWidth); 8651 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 8652 return; 8653 } 8654 8655 Offset = ResOffset; 8656 } 8657 8658 namespace { 8659 8660 // This is a wrapper class around StringLiteral to support offsetted string 8661 // literals as format strings. It takes the offset into account when returning 8662 // the string and its length or the source locations to display notes correctly. 8663 class FormatStringLiteral { 8664 const StringLiteral *FExpr; 8665 int64_t Offset; 8666 8667 public: 8668 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 8669 : FExpr(fexpr), Offset(Offset) {} 8670 8671 StringRef getString() const { 8672 return FExpr->getString().drop_front(Offset); 8673 } 8674 8675 unsigned getByteLength() const { 8676 return FExpr->getByteLength() - getCharByteWidth() * Offset; 8677 } 8678 8679 unsigned getLength() const { return FExpr->getLength() - Offset; } 8680 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 8681 8682 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 8683 8684 QualType getType() const { return FExpr->getType(); } 8685 8686 bool isAscii() const { return FExpr->isOrdinary(); } 8687 bool isWide() const { return FExpr->isWide(); } 8688 bool isUTF8() const { return FExpr->isUTF8(); } 8689 bool isUTF16() const { return FExpr->isUTF16(); } 8690 bool isUTF32() const { return FExpr->isUTF32(); } 8691 bool isPascal() const { return FExpr->isPascal(); } 8692 8693 SourceLocation getLocationOfByte( 8694 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 8695 const TargetInfo &Target, unsigned *StartToken = nullptr, 8696 unsigned *StartTokenByteOffset = nullptr) const { 8697 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 8698 StartToken, StartTokenByteOffset); 8699 } 8700 8701 SourceLocation getBeginLoc() const LLVM_READONLY { 8702 return FExpr->getBeginLoc().getLocWithOffset(Offset); 8703 } 8704 8705 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 8706 }; 8707 8708 } // namespace 8709 8710 static void CheckFormatString( 8711 Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr, 8712 ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK, 8713 unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type, 8714 bool inFunctionCall, Sema::VariadicCallType CallType, 8715 llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg, 8716 bool IgnoreStringsWithoutSpecifiers); 8717 8718 static const Expr *maybeConstEvalStringLiteral(ASTContext &Context, 8719 const Expr *E); 8720 8721 // Determine if an expression is a string literal or constant string. 
8722 // If this function returns false on the arguments to a function expecting a 8723 // format string, we will usually need to emit a warning. 8724 // True string literals are then checked by CheckFormatString. 8725 static StringLiteralCheckType 8726 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 8727 Sema::FormatArgumentPassingKind APK, unsigned format_idx, 8728 unsigned firstDataArg, Sema::FormatStringType Type, 8729 Sema::VariadicCallType CallType, bool InFunctionCall, 8730 llvm::SmallBitVector &CheckedVarArgs, 8731 UncoveredArgHandler &UncoveredArg, llvm::APSInt Offset, 8732 bool IgnoreStringsWithoutSpecifiers = false) { 8733 if (S.isConstantEvaluated()) 8734 return SLCT_NotALiteral; 8735 tryAgain: 8736 assert(Offset.isSigned() && "invalid offset"); 8737 8738 if (E->isTypeDependent() || E->isValueDependent()) 8739 return SLCT_NotALiteral; 8740 8741 E = E->IgnoreParenCasts(); 8742 8743 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 8744 // Technically -Wformat-nonliteral does not warn about this case. 8745 // The behavior of printf and friends in this case is implementation 8746 // dependent. Ideally if the format string cannot be null then 8747 // it should have a 'nonnull' attribute in the function prototype. 8748 return SLCT_UncheckedLiteral; 8749 8750 switch (E->getStmtClass()) { 8751 case Stmt::InitListExprClass: 8752 // Handle expressions like {"foobar"}. 8753 if (const clang::Expr *SLE = maybeConstEvalStringLiteral(S.Context, E)) { 8754 return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg, 8755 Type, CallType, /*InFunctionCall*/ false, 8756 CheckedVarArgs, UncoveredArg, Offset, 8757 IgnoreStringsWithoutSpecifiers); 8758 } 8759 return SLCT_NotALiteral; 8760 case Stmt::BinaryConditionalOperatorClass: 8761 case Stmt::ConditionalOperatorClass: { 8762 // The expression is a literal if both sub-expressions were, and it was 8763 // completely checked only if both sub-expressions were checked. 8764 const AbstractConditionalOperator *C = 8765 cast<AbstractConditionalOperator>(E); 8766 8767 // Determine whether it is necessary to check both sub-expressions, for 8768 // example, because the condition expression is a constant that can be 8769 // evaluated at compile time. 8770 bool CheckLeft = true, CheckRight = true; 8771 8772 bool Cond; 8773 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 8774 S.isConstantEvaluated())) { 8775 if (Cond) 8776 CheckRight = false; 8777 else 8778 CheckLeft = false; 8779 } 8780 8781 // We need to maintain the offsets for the right and the left hand side 8782 // separately to check if every possible indexed expression is a valid 8783 // string literal. They might have different offsets for different string 8784 // literals in the end. 8785 StringLiteralCheckType Left; 8786 if (!CheckLeft) 8787 Left = SLCT_UncheckedLiteral; 8788 else { 8789 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, APK, format_idx, 8790 firstDataArg, Type, CallType, InFunctionCall, 8791 CheckedVarArgs, UncoveredArg, Offset, 8792 IgnoreStringsWithoutSpecifiers); 8793 if (Left == SLCT_NotALiteral || !CheckRight) { 8794 return Left; 8795 } 8796 } 8797 8798 StringLiteralCheckType Right = checkFormatStringExpr( 8799 S, C->getFalseExpr(), Args, APK, format_idx, firstDataArg, Type, 8800 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8801 IgnoreStringsWithoutSpecifiers); 8802 8803 return (CheckLeft && Left < Right) ? 
Left : Right; 8804 } 8805 8806 case Stmt::ImplicitCastExprClass: 8807 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 8808 goto tryAgain; 8809 8810 case Stmt::OpaqueValueExprClass: 8811 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 8812 E = src; 8813 goto tryAgain; 8814 } 8815 return SLCT_NotALiteral; 8816 8817 case Stmt::PredefinedExprClass: 8818 // While __func__, etc., are technically not string literals, they 8819 // cannot contain format specifiers and thus are not a security 8820 // liability. 8821 return SLCT_UncheckedLiteral; 8822 8823 case Stmt::DeclRefExprClass: { 8824 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 8825 8826 // As an exception, do not flag errors for variables binding to 8827 // const string literals. 8828 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 8829 bool isConstant = false; 8830 QualType T = DR->getType(); 8831 8832 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 8833 isConstant = AT->getElementType().isConstant(S.Context); 8834 } else if (const PointerType *PT = T->getAs<PointerType>()) { 8835 isConstant = T.isConstant(S.Context) && 8836 PT->getPointeeType().isConstant(S.Context); 8837 } else if (T->isObjCObjectPointerType()) { 8838 // In ObjC, there is usually no "const ObjectPointer" type, 8839 // so don't check if the pointee type is constant. 8840 isConstant = T.isConstant(S.Context); 8841 } 8842 8843 if (isConstant) { 8844 if (const Expr *Init = VD->getAnyInitializer()) { 8845 // Look through initializers like const char c[] = { "foo" } 8846 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 8847 if (InitList->isStringLiteralInit()) 8848 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 8849 } 8850 return checkFormatStringExpr( 8851 S, Init, Args, APK, format_idx, firstDataArg, Type, CallType, 8852 /*InFunctionCall*/ false, CheckedVarArgs, UncoveredArg, Offset); 8853 } 8854 } 8855 8856 // When the format argument is an argument of this function, and this 8857 // function also has the format attribute, there are several interactions 8858 // for which there shouldn't be a warning. For instance, when calling 8859 // v*printf from a function that has the printf format attribute, we 8860 // should not emit a warning about using `fmt`, even though it's not 8861 // constant, because the arguments have already been checked for the 8862 // caller of `logmessage`: 8863 // 8864 // __attribute__((format(printf, 1, 2))) 8865 // void logmessage(char const *fmt, ...) { 8866 // va_list ap; 8867 // va_start(ap, fmt); 8868 // vprintf(fmt, ap); /* do not emit a warning about "fmt" */ 8869 // ... 8870 // } 8871 // 8872 // Another interaction that we need to support is calling a variadic 8873 // format function from a format function that has fixed arguments. For 8874 // instance: 8875 // 8876 // __attribute__((format(printf, 1, 2))) 8877 // void logstring(char const *fmt, char const *str) { 8878 // printf(fmt, str); /* do not emit a warning about "fmt" */ 8879 // } 8880 // 8881 // Same (and perhaps more relatably) for the variadic template case: 8882 // 8883 // template<typename... Args> 8884 // __attribute__((format(printf, 1, 2))) 8885 // void log(const char *fmt, Args&&... args) { 8886 // printf(fmt, forward<Args>(args)...); 8887 // /* do not emit a warning about "fmt" */ 8888 // } 8889 // 8890 // Due to implementation difficulty, we only check the format, not the 8891 // format arguments, in all cases. 
8892 // 8893 if (const auto *PV = dyn_cast<ParmVarDecl>(VD)) { 8894 if (const auto *D = dyn_cast<Decl>(PV->getDeclContext())) { 8895 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) { 8896 bool IsCXXMember = false; 8897 if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) 8898 IsCXXMember = MD->isInstance(); 8899 8900 bool IsVariadic = false; 8901 if (const FunctionType *FnTy = D->getFunctionType()) 8902 IsVariadic = cast<FunctionProtoType>(FnTy)->isVariadic(); 8903 else if (const auto *BD = dyn_cast<BlockDecl>(D)) 8904 IsVariadic = BD->isVariadic(); 8905 else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D)) 8906 IsVariadic = OMD->isVariadic(); 8907 8908 Sema::FormatStringInfo CallerFSI; 8909 if (Sema::getFormatStringInfo(PVFormat, IsCXXMember, IsVariadic, 8910 &CallerFSI)) { 8911 // We also check if the formats are compatible. 8912 // We can't pass a 'scanf' string to a 'printf' function. 8913 if (PV->getFunctionScopeIndex() == CallerFSI.FormatIdx && 8914 Type == S.GetFormatStringType(PVFormat)) { 8915 // Lastly, check that argument passing kinds transition in a 8916 // way that makes sense: 8917 // from a caller with FAPK_VAList, allow FAPK_VAList 8918 // from a caller with FAPK_Fixed, allow FAPK_Fixed 8919 // from a caller with FAPK_Fixed, allow FAPK_Variadic 8920 // from a caller with FAPK_Variadic, allow FAPK_VAList 8921 switch (combineFAPK(CallerFSI.ArgPassingKind, APK)) { 8922 case combineFAPK(Sema::FAPK_VAList, Sema::FAPK_VAList): 8923 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Fixed): 8924 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Variadic): 8925 case combineFAPK(Sema::FAPK_Variadic, Sema::FAPK_VAList): 8926 return SLCT_UncheckedLiteral; 8927 } 8928 } 8929 } 8930 } 8931 } 8932 } 8933 } 8934 8935 return SLCT_NotALiteral; 8936 } 8937 8938 case Stmt::CallExprClass: 8939 case Stmt::CXXMemberCallExprClass: { 8940 const CallExpr *CE = cast<CallExpr>(E); 8941 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 8942 bool IsFirst = true; 8943 StringLiteralCheckType CommonResult; 8944 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 8945 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 8946 StringLiteralCheckType Result = checkFormatStringExpr( 8947 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType, 8948 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8949 IgnoreStringsWithoutSpecifiers); 8950 if (IsFirst) { 8951 CommonResult = Result; 8952 IsFirst = false; 8953 } 8954 } 8955 if (!IsFirst) 8956 return CommonResult; 8957 8958 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 8959 unsigned BuiltinID = FD->getBuiltinID(); 8960 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 8961 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 8962 const Expr *Arg = CE->getArg(0); 8963 return checkFormatStringExpr( 8964 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType, 8965 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8966 IgnoreStringsWithoutSpecifiers); 8967 } 8968 } 8969 } 8970 if (const Expr *SLE = maybeConstEvalStringLiteral(S.Context, E)) 8971 return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg, 8972 Type, CallType, /*InFunctionCall*/ false, 8973 CheckedVarArgs, UncoveredArg, Offset, 8974 IgnoreStringsWithoutSpecifiers); 8975 return SLCT_NotALiteral; 8976 } 8977 case Stmt::ObjCMessageExprClass: { 8978 const auto *ME = cast<ObjCMessageExpr>(E); 8979 if (const auto *MD = ME->getMethodDecl()) { 8980 if (const auto *FA = 
                       MD->getAttr<FormatArgAttr>()) {
        // As a special case heuristic, if we're using the method -[NSBundle
        // localizedStringForKey:value:table:], ignore any key strings that lack
        // format specifiers. The idea is that if the key doesn't have any
        // format specifiers then it's probably just a key to map to the
        // localized strings. If it does have format specifiers though, then it's
        // likely that the text of the key is the format string in the
        // programmer's language, and should be checked.
        const ObjCInterfaceDecl *IFace;
        if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
            IFace->getIdentifier()->isStr("NSBundle") &&
            MD->getSelector().isKeywordSelector(
                {"localizedStringForKey", "value", "table"})) {
          IgnoreStringsWithoutSpecifiers = true;
        }

        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
            InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        // bounds literals.
        return SLCT_NotALiteral;
      }
      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, APK, format_idx, firstDataArg, Type,
                        InFunctionCall, CallType, CheckedVarArgs, UncoveredArg,
                        IgnoreStringsWithoutSpecifiers);
      return SLCT_CheckedLiteral;
    }

    return SLCT_NotALiteral;
  }
  case Stmt::BinaryOperatorClass: {
    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
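    // For example, a call such as
    //
    //   printf("1234 %s" + 5, str);
    //
    // is checked as if the format string were just "%s".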
9034 if (BinOp->isAdditiveOp()) { 9035 Expr::EvalResult LResult, RResult; 9036 9037 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 9038 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 9039 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 9040 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 9041 9042 if (LIsInt != RIsInt) { 9043 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 9044 9045 if (LIsInt) { 9046 if (BinOpKind == BO_Add) { 9047 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 9048 E = BinOp->getRHS(); 9049 goto tryAgain; 9050 } 9051 } else { 9052 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 9053 E = BinOp->getLHS(); 9054 goto tryAgain; 9055 } 9056 } 9057 } 9058 9059 return SLCT_NotALiteral; 9060 } 9061 case Stmt::UnaryOperatorClass: { 9062 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 9063 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 9064 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 9065 Expr::EvalResult IndexResult; 9066 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 9067 Expr::SE_NoSideEffects, 9068 S.isConstantEvaluated())) { 9069 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 9070 /*RHS is int*/ true); 9071 E = ASE->getBase(); 9072 goto tryAgain; 9073 } 9074 } 9075 9076 return SLCT_NotALiteral; 9077 } 9078 9079 default: 9080 return SLCT_NotALiteral; 9081 } 9082 } 9083 9084 // If this expression can be evaluated at compile-time, 9085 // check if the result is a StringLiteral and return it 9086 // otherwise return nullptr 9087 static const Expr *maybeConstEvalStringLiteral(ASTContext &Context, 9088 const Expr *E) { 9089 Expr::EvalResult Result; 9090 if (E->EvaluateAsRValue(Result, Context) && Result.Val.isLValue()) { 9091 const auto *LVE = Result.Val.getLValueBase().dyn_cast<const Expr *>(); 9092 if (isa_and_nonnull<StringLiteral>(LVE)) 9093 return LVE; 9094 } 9095 return nullptr; 9096 } 9097 9098 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 9099 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 9100 .Case("scanf", FST_Scanf) 9101 .Cases("printf", "printf0", FST_Printf) 9102 .Cases("NSString", "CFString", FST_NSString) 9103 .Case("strftime", FST_Strftime) 9104 .Case("strfmon", FST_Strfmon) 9105 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 9106 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 9107 .Case("os_trace", FST_OSLog) 9108 .Case("os_log", FST_OSLog) 9109 .Default(FST_Unknown); 9110 } 9111 9112 /// CheckFormatArguments - Check calls to printf and scanf (and similar 9113 /// functions) for correct use of format strings. 9114 /// Returns true if a format string has been fully checked. 
9115 bool Sema::CheckFormatArguments(const FormatAttr *Format, 9116 ArrayRef<const Expr *> Args, bool IsCXXMember, 9117 VariadicCallType CallType, SourceLocation Loc, 9118 SourceRange Range, 9119 llvm::SmallBitVector &CheckedVarArgs) { 9120 FormatStringInfo FSI; 9121 if (getFormatStringInfo(Format, IsCXXMember, CallType != VariadicDoesNotApply, 9122 &FSI)) 9123 return CheckFormatArguments(Args, FSI.ArgPassingKind, FSI.FormatIdx, 9124 FSI.FirstDataArg, GetFormatStringType(Format), 9125 CallType, Loc, Range, CheckedVarArgs); 9126 return false; 9127 } 9128 9129 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 9130 Sema::FormatArgumentPassingKind APK, 9131 unsigned format_idx, unsigned firstDataArg, 9132 FormatStringType Type, 9133 VariadicCallType CallType, SourceLocation Loc, 9134 SourceRange Range, 9135 llvm::SmallBitVector &CheckedVarArgs) { 9136 // CHECK: printf/scanf-like function is called with no format string. 9137 if (format_idx >= Args.size()) { 9138 Diag(Loc, diag::warn_missing_format_string) << Range; 9139 return false; 9140 } 9141 9142 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 9143 9144 // CHECK: format string is not a string literal. 9145 // 9146 // Dynamically generated format strings are difficult to 9147 // automatically vet at compile time. Requiring that format strings 9148 // are string literals: (1) permits the checking of format strings by 9149 // the compiler and thereby (2) can practically remove the source of 9150 // many format string exploits. 9151 9152 // Format string can be either ObjC string (e.g. @"%d") or 9153 // C string (e.g. "%d") 9154 // ObjC string uses the same format specifiers as C string, so we can use 9155 // the same format string checking logic for both ObjC and C strings. 9156 UncoveredArgHandler UncoveredArg; 9157 StringLiteralCheckType CT = checkFormatStringExpr( 9158 *this, OrigFormatExpr, Args, APK, format_idx, firstDataArg, Type, 9159 CallType, 9160 /*IsFunctionCall*/ true, CheckedVarArgs, UncoveredArg, 9161 /*no string offset*/ llvm::APSInt(64, false) = 0); 9162 9163 // Generate a diagnostic where an uncovered argument is detected. 9164 if (UncoveredArg.hasUncoveredArg()) { 9165 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 9166 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 9167 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 9168 } 9169 9170 if (CT != SLCT_NotALiteral) 9171 // Literal format string found, check done! 9172 return CT == SLCT_CheckedLiteral; 9173 9174 // Strftime is particular as it always uses a single 'time' argument, 9175 // so it is safe to pass a non-literal string. 9176 if (Type == FST_Strftime) 9177 return false; 9178 9179 // Do not emit diag when the string param is a macro expansion and the 9180 // format is either NSString or CFString. This is a hack to prevent 9181 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 9182 // which are usually used in place of NS and CF string literals. 9183 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 9184 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 9185 return false; 9186 9187 // If there are no arguments specified, warn with -Wformat-security, otherwise 9188 // warn only with -Wformat-nonliteral. 
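  // For example, with a non-literal 'fmt' and no data arguments,
  //
  //   printf(fmt);
  //
  // is diagnosed under -Wformat-security, and (for printf-family formats) the
  // note below offers a fix-it rewriting the call as printf("%s", fmt);.
  // With data arguments present, only -Wformat-nonliteral applies.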
9189 if (Args.size() == firstDataArg) { 9190 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 9191 << OrigFormatExpr->getSourceRange(); 9192 switch (Type) { 9193 default: 9194 break; 9195 case FST_Kprintf: 9196 case FST_FreeBSDKPrintf: 9197 case FST_Printf: 9198 Diag(FormatLoc, diag::note_format_security_fixit) 9199 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 9200 break; 9201 case FST_NSString: 9202 Diag(FormatLoc, diag::note_format_security_fixit) 9203 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 9204 break; 9205 } 9206 } else { 9207 Diag(FormatLoc, diag::warn_format_nonliteral) 9208 << OrigFormatExpr->getSourceRange(); 9209 } 9210 return false; 9211 } 9212 9213 namespace { 9214 9215 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 9216 protected: 9217 Sema &S; 9218 const FormatStringLiteral *FExpr; 9219 const Expr *OrigFormatExpr; 9220 const Sema::FormatStringType FSType; 9221 const unsigned FirstDataArg; 9222 const unsigned NumDataArgs; 9223 const char *Beg; // Start of format string. 9224 const Sema::FormatArgumentPassingKind ArgPassingKind; 9225 ArrayRef<const Expr *> Args; 9226 unsigned FormatIdx; 9227 llvm::SmallBitVector CoveredArgs; 9228 bool usesPositionalArgs = false; 9229 bool atFirstArg = true; 9230 bool inFunctionCall; 9231 Sema::VariadicCallType CallType; 9232 llvm::SmallBitVector &CheckedVarArgs; 9233 UncoveredArgHandler &UncoveredArg; 9234 9235 public: 9236 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 9237 const Expr *origFormatExpr, 9238 const Sema::FormatStringType type, unsigned firstDataArg, 9239 unsigned numDataArgs, const char *beg, 9240 Sema::FormatArgumentPassingKind APK, 9241 ArrayRef<const Expr *> Args, unsigned formatIdx, 9242 bool inFunctionCall, Sema::VariadicCallType callType, 9243 llvm::SmallBitVector &CheckedVarArgs, 9244 UncoveredArgHandler &UncoveredArg) 9245 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 9246 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 9247 ArgPassingKind(APK), Args(Args), FormatIdx(formatIdx), 9248 inFunctionCall(inFunctionCall), CallType(callType), 9249 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 9250 CoveredArgs.resize(numDataArgs); 9251 CoveredArgs.reset(); 9252 } 9253 9254 void DoneProcessing(); 9255 9256 void HandleIncompleteSpecifier(const char *startSpecifier, 9257 unsigned specifierLen) override; 9258 9259 void HandleInvalidLengthModifier( 9260 const analyze_format_string::FormatSpecifier &FS, 9261 const analyze_format_string::ConversionSpecifier &CS, 9262 const char *startSpecifier, unsigned specifierLen, 9263 unsigned DiagID); 9264 9265 void HandleNonStandardLengthModifier( 9266 const analyze_format_string::FormatSpecifier &FS, 9267 const char *startSpecifier, unsigned specifierLen); 9268 9269 void HandleNonStandardConversionSpecifier( 9270 const analyze_format_string::ConversionSpecifier &CS, 9271 const char *startSpecifier, unsigned specifierLen); 9272 9273 void HandlePosition(const char *startPos, unsigned posLen) override; 9274 9275 void HandleInvalidPosition(const char *startSpecifier, 9276 unsigned specifierLen, 9277 analyze_format_string::PositionContext p) override; 9278 9279 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 9280 9281 void HandleNullChar(const char *nullCharacter) override; 9282 9283 template <typename Range> 9284 static void 9285 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 9286 const PartialDiagnostic &PDiag, 
SourceLocation StringLoc, 9287 bool IsStringLocation, Range StringRange, 9288 ArrayRef<FixItHint> Fixit = std::nullopt); 9289 9290 protected: 9291 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 9292 const char *startSpec, 9293 unsigned specifierLen, 9294 const char *csStart, unsigned csLen); 9295 9296 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 9297 const char *startSpec, 9298 unsigned specifierLen); 9299 9300 SourceRange getFormatStringRange(); 9301 CharSourceRange getSpecifierRange(const char *startSpecifier, 9302 unsigned specifierLen); 9303 SourceLocation getLocationOfByte(const char *x); 9304 9305 const Expr *getDataArg(unsigned i) const; 9306 9307 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 9308 const analyze_format_string::ConversionSpecifier &CS, 9309 const char *startSpecifier, unsigned specifierLen, 9310 unsigned argIndex); 9311 9312 template <typename Range> 9313 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 9314 bool IsStringLocation, Range StringRange, 9315 ArrayRef<FixItHint> Fixit = std::nullopt); 9316 }; 9317 9318 } // namespace 9319 9320 SourceRange CheckFormatHandler::getFormatStringRange() { 9321 return OrigFormatExpr->getSourceRange(); 9322 } 9323 9324 CharSourceRange CheckFormatHandler:: 9325 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 9326 SourceLocation Start = getLocationOfByte(startSpecifier); 9327 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 9328 9329 // Advance the end SourceLocation by one due to half-open ranges. 9330 End = End.getLocWithOffset(1); 9331 9332 return CharSourceRange::getCharRange(Start, End); 9333 } 9334 9335 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 9336 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 9337 S.getLangOpts(), S.Context.getTargetInfo()); 9338 } 9339 9340 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 9341 unsigned specifierLen){ 9342 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 9343 getLocationOfByte(startSpecifier), 9344 /*IsStringLocation*/true, 9345 getSpecifierRange(startSpecifier, specifierLen)); 9346 } 9347 9348 void CheckFormatHandler::HandleInvalidLengthModifier( 9349 const analyze_format_string::FormatSpecifier &FS, 9350 const analyze_format_string::ConversionSpecifier &CS, 9351 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 9352 using namespace analyze_format_string; 9353 9354 const LengthModifier &LM = FS.getLengthModifier(); 9355 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 9356 9357 // See if we know how to fix this length modifier. 
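  // For instance, for an integer conversion written as "%Ld" a corrected
  // modifier ("ll") is typically known and suggested via a replacement
  // fix-it; for a nonsensical pairing such as "%hf" no correction exists,
  // so only removal of the modifier is offered below.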
9358 std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9359 if (FixedLM) { 9360 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9361 getLocationOfByte(LM.getStart()), 9362 /*IsStringLocation*/true, 9363 getSpecifierRange(startSpecifier, specifierLen)); 9364 9365 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9366 << FixedLM->toString() 9367 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9368 9369 } else { 9370 FixItHint Hint; 9371 if (DiagID == diag::warn_format_nonsensical_length) 9372 Hint = FixItHint::CreateRemoval(LMRange); 9373 9374 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9375 getLocationOfByte(LM.getStart()), 9376 /*IsStringLocation*/true, 9377 getSpecifierRange(startSpecifier, specifierLen), 9378 Hint); 9379 } 9380 } 9381 9382 void CheckFormatHandler::HandleNonStandardLengthModifier( 9383 const analyze_format_string::FormatSpecifier &FS, 9384 const char *startSpecifier, unsigned specifierLen) { 9385 using namespace analyze_format_string; 9386 9387 const LengthModifier &LM = FS.getLengthModifier(); 9388 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 9389 9390 // See if we know how to fix this length modifier. 9391 std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9392 if (FixedLM) { 9393 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9394 << LM.toString() << 0, 9395 getLocationOfByte(LM.getStart()), 9396 /*IsStringLocation*/true, 9397 getSpecifierRange(startSpecifier, specifierLen)); 9398 9399 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9400 << FixedLM->toString() 9401 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9402 9403 } else { 9404 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9405 << LM.toString() << 0, 9406 getLocationOfByte(LM.getStart()), 9407 /*IsStringLocation*/true, 9408 getSpecifierRange(startSpecifier, specifierLen)); 9409 } 9410 } 9411 9412 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 9413 const analyze_format_string::ConversionSpecifier &CS, 9414 const char *startSpecifier, unsigned specifierLen) { 9415 using namespace analyze_format_string; 9416 9417 // See if we know how to fix this conversion specifier. 
9418 std::optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 9419 if (FixedCS) { 9420 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9421 << CS.toString() << /*conversion specifier*/1, 9422 getLocationOfByte(CS.getStart()), 9423 /*IsStringLocation*/true, 9424 getSpecifierRange(startSpecifier, specifierLen)); 9425 9426 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 9427 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 9428 << FixedCS->toString() 9429 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 9430 } else { 9431 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9432 << CS.toString() << /*conversion specifier*/1, 9433 getLocationOfByte(CS.getStart()), 9434 /*IsStringLocation*/true, 9435 getSpecifierRange(startSpecifier, specifierLen)); 9436 } 9437 } 9438 9439 void CheckFormatHandler::HandlePosition(const char *startPos, 9440 unsigned posLen) { 9441 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 9442 getLocationOfByte(startPos), 9443 /*IsStringLocation*/true, 9444 getSpecifierRange(startPos, posLen)); 9445 } 9446 9447 void 9448 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 9449 analyze_format_string::PositionContext p) { 9450 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 9451 << (unsigned) p, 9452 getLocationOfByte(startPos), /*IsStringLocation*/true, 9453 getSpecifierRange(startPos, posLen)); 9454 } 9455 9456 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 9457 unsigned posLen) { 9458 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 9459 getLocationOfByte(startPos), 9460 /*IsStringLocation*/true, 9461 getSpecifierRange(startPos, posLen)); 9462 } 9463 9464 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 9465 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 9466 // The presence of a null character is likely an error. 9467 EmitFormatDiagnostic( 9468 S.PDiag(diag::warn_printf_format_string_contains_null_char), 9469 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 9470 getFormatStringRange()); 9471 } 9472 } 9473 9474 // Note that this may return NULL if there was an error parsing or building 9475 // one of the argument expressions. 9476 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 9477 return Args[FirstDataArg + i]; 9478 } 9479 9480 void CheckFormatHandler::DoneProcessing() { 9481 // Does the number of data arguments exceed the number of 9482 // format conversions in the format string? 9483 if (ArgPassingKind != Sema::FAPK_VAList) { 9484 // Find any arguments that weren't covered. 
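    // For example, in
    //
    //   printf("%d", x, y);
    //
    // the second data argument 'y' is never consumed by the format string, so
    // it is recorded here and later diagnosed as a data argument that is not
    // used by the format string.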
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else {
      UncoveredArg.setAllCovered();
    }
  }
}

void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
         "Invalid state");

  if (!ArgExpr)
    return;

  SourceLocation Loc = ArgExpr->getBeginLoc();

  if (S.getSourceManager().isInSystemMacro(Loc))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
      S, IsFunctionCall, DiagnosticExprs[0],
      PDiag, Loc, /*IsStringLocation*/false,
      DiagnosticExprs[0]->getSourceRange());
}

bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  }
  else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue processing
    // the format string after this point, however, as we will likely just get
    // gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
9546 std::string CodePointStr; 9547 if (!llvm::sys::locale::isPrint(*csStart)) { 9548 llvm::UTF32 CodePoint; 9549 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 9550 const llvm::UTF8 *E = 9551 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 9552 llvm::ConversionResult Result = 9553 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 9554 9555 if (Result != llvm::conversionOK) { 9556 unsigned char FirstChar = *csStart; 9557 CodePoint = (llvm::UTF32)FirstChar; 9558 } 9559 9560 llvm::raw_string_ostream OS(CodePointStr); 9561 if (CodePoint < 256) 9562 OS << "\\x" << llvm::format("%02x", CodePoint); 9563 else if (CodePoint <= 0xFFFF) 9564 OS << "\\u" << llvm::format("%04x", CodePoint); 9565 else 9566 OS << "\\U" << llvm::format("%08x", CodePoint); 9567 OS.flush(); 9568 Specifier = CodePointStr; 9569 } 9570 9571 EmitFormatDiagnostic( 9572 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 9573 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 9574 9575 return keepGoing; 9576 } 9577 9578 void 9579 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 9580 const char *startSpec, 9581 unsigned specifierLen) { 9582 EmitFormatDiagnostic( 9583 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 9584 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 9585 } 9586 9587 bool 9588 CheckFormatHandler::CheckNumArgs( 9589 const analyze_format_string::FormatSpecifier &FS, 9590 const analyze_format_string::ConversionSpecifier &CS, 9591 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 9592 9593 if (argIndex >= NumDataArgs) { 9594 PartialDiagnostic PDiag = FS.usesPositionalArg() 9595 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 9596 << (argIndex+1) << NumDataArgs) 9597 : S.PDiag(diag::warn_printf_insufficient_data_args); 9598 EmitFormatDiagnostic( 9599 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 9600 getSpecifierRange(startSpecifier, specifierLen)); 9601 9602 // Since more arguments than conversion tokens are given, by extension 9603 // all arguments are covered, so mark this as so. 9604 UncoveredArg.setAllCovered(); 9605 return false; 9606 } 9607 return true; 9608 } 9609 9610 template<typename Range> 9611 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 9612 SourceLocation Loc, 9613 bool IsStringLocation, 9614 Range StringRange, 9615 ArrayRef<FixItHint> FixIt) { 9616 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 9617 Loc, IsStringLocation, StringRange, FixIt); 9618 } 9619 9620 /// If the format string is not within the function call, emit a note 9621 /// so that the function call and string are in diagnostic messages. 9622 /// 9623 /// \param InFunctionCall if true, the format string is within the function 9624 /// call and only one diagnostic message will be produced. Otherwise, an 9625 /// extra note will be emitted pointing to location of the format string. 9626 /// 9627 /// \param ArgumentExpr the expression that is passed as the format string 9628 /// argument in the function call. Used for getting locations when two 9629 /// diagnostics are emitted. 9630 /// 9631 /// \param PDiag the callee should already have provided any strings for the 9632 /// diagnostic message. This function only adds locations and fixits 9633 /// to diagnostics. 9634 /// 9635 /// \param Loc primary location for diagnostic. 
/// If two diagnostics are required, one will be at Loc and a new
/// SourceLocation will be created for the other one.
///
/// \param IsStringLocation if true, Loc points into the format string and
/// should be used for the note. Otherwise, Loc points to the argument list
/// and will be used with PDiag.
///
/// \param StringRange some or all of the string to highlight. This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
/// \param FixIt optional fix it hint for the format string.
template <typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(
    Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
    const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
    Range StringRange, ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    D << StringRange;
    D << FixIt;
  } else {
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
        << ArgumentExpr->getSourceRange();

    const Sema::SemaDiagnosticBuilder &Note =
        S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
               diag::note_format_string_defined);

    Note << StringRange;
    Note << FixIt;
  }
}

//===--- CHECK: Printf format string checking ------------------------------===//

namespace {

class CheckPrintfHandler : public CheckFormatHandler {
public:
  CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, bool isObjC, const char *beg,
                     Sema::FormatArgumentPassingKind APK,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType CallType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, APK, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool isObjCContext() const { return FSType == Sema::FST_NSString; }

  /// Returns true if '%@' specifiers are allowed in the format string.
9692 bool allowsObjCArg() const { 9693 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 9694 FSType == Sema::FST_OSTrace; 9695 } 9696 9697 bool HandleInvalidPrintfConversionSpecifier( 9698 const analyze_printf::PrintfSpecifier &FS, 9699 const char *startSpecifier, 9700 unsigned specifierLen) override; 9701 9702 void handleInvalidMaskType(StringRef MaskType) override; 9703 9704 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 9705 const char *startSpecifier, unsigned specifierLen, 9706 const TargetInfo &Target) override; 9707 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9708 const char *StartSpecifier, 9709 unsigned SpecifierLen, 9710 const Expr *E); 9711 9712 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 9713 const char *startSpecifier, unsigned specifierLen); 9714 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 9715 const analyze_printf::OptionalAmount &Amt, 9716 unsigned type, 9717 const char *startSpecifier, unsigned specifierLen); 9718 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9719 const analyze_printf::OptionalFlag &flag, 9720 const char *startSpecifier, unsigned specifierLen); 9721 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 9722 const analyze_printf::OptionalFlag &ignoredFlag, 9723 const analyze_printf::OptionalFlag &flag, 9724 const char *startSpecifier, unsigned specifierLen); 9725 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 9726 const Expr *E); 9727 9728 void HandleEmptyObjCModifierFlag(const char *startFlag, 9729 unsigned flagLen) override; 9730 9731 void HandleInvalidObjCModifierFlag(const char *startFlag, 9732 unsigned flagLen) override; 9733 9734 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 9735 const char *flagsEnd, 9736 const char *conversionPosition) 9737 override; 9738 }; 9739 9740 } // namespace 9741 9742 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 9743 const analyze_printf::PrintfSpecifier &FS, 9744 const char *startSpecifier, 9745 unsigned specifierLen) { 9746 const analyze_printf::PrintfConversionSpecifier &CS = 9747 FS.getConversionSpecifier(); 9748 9749 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 9750 getLocationOfByte(CS.getStart()), 9751 startSpecifier, specifierLen, 9752 CS.getStart(), CS.getLength()); 9753 } 9754 9755 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 9756 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 9757 } 9758 9759 bool CheckPrintfHandler::HandleAmount( 9760 const analyze_format_string::OptionalAmount &Amt, unsigned k, 9761 const char *startSpecifier, unsigned specifierLen) { 9762 if (Amt.hasDataArgument()) { 9763 if (ArgPassingKind != Sema::FAPK_VAList) { 9764 unsigned argIndex = Amt.getArgIndex(); 9765 if (argIndex >= NumDataArgs) { 9766 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 9767 << k, 9768 getLocationOfByte(Amt.getStart()), 9769 /*IsStringLocation*/ true, 9770 getSpecifierRange(startSpecifier, specifierLen)); 9771 // Don't do any more checking. We will just emit 9772 // spurious errors. 9773 return false; 9774 } 9775 9776 // Type check the data argument. It should be an 'int'. 9777 // Although not in conformance with C99, we also allow the argument to be 9778 // an 'unsigned int' as that is a reasonably safe case. GCC also 9779 // doesn't emit a warning for that case. 
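      // For example, the '*' field width below consumes a data argument that
      // must be an 'int' (or 'unsigned int'):
      //
      //   printf("%*d", width, value);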
9780 CoveredArgs.set(argIndex); 9781 const Expr *Arg = getDataArg(argIndex); 9782 if (!Arg) 9783 return false; 9784 9785 QualType T = Arg->getType(); 9786 9787 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 9788 assert(AT.isValid()); 9789 9790 if (!AT.matchesType(S.Context, T)) { 9791 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 9792 << k << AT.getRepresentativeTypeName(S.Context) 9793 << T << Arg->getSourceRange(), 9794 getLocationOfByte(Amt.getStart()), 9795 /*IsStringLocation*/true, 9796 getSpecifierRange(startSpecifier, specifierLen)); 9797 // Don't do any more checking. We will just emit 9798 // spurious errors. 9799 return false; 9800 } 9801 } 9802 } 9803 return true; 9804 } 9805 9806 void CheckPrintfHandler::HandleInvalidAmount( 9807 const analyze_printf::PrintfSpecifier &FS, 9808 const analyze_printf::OptionalAmount &Amt, 9809 unsigned type, 9810 const char *startSpecifier, 9811 unsigned specifierLen) { 9812 const analyze_printf::PrintfConversionSpecifier &CS = 9813 FS.getConversionSpecifier(); 9814 9815 FixItHint fixit = 9816 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 9817 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 9818 Amt.getConstantLength())) 9819 : FixItHint(); 9820 9821 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 9822 << type << CS.toString(), 9823 getLocationOfByte(Amt.getStart()), 9824 /*IsStringLocation*/true, 9825 getSpecifierRange(startSpecifier, specifierLen), 9826 fixit); 9827 } 9828 9829 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9830 const analyze_printf::OptionalFlag &flag, 9831 const char *startSpecifier, 9832 unsigned specifierLen) { 9833 // Warn about pointless flag with a fixit removal. 9834 const analyze_printf::PrintfConversionSpecifier &CS = 9835 FS.getConversionSpecifier(); 9836 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 9837 << flag.toString() << CS.toString(), 9838 getLocationOfByte(flag.getPosition()), 9839 /*IsStringLocation*/true, 9840 getSpecifierRange(startSpecifier, specifierLen), 9841 FixItHint::CreateRemoval( 9842 getSpecifierRange(flag.getPosition(), 1))); 9843 } 9844 9845 void CheckPrintfHandler::HandleIgnoredFlag( 9846 const analyze_printf::PrintfSpecifier &FS, 9847 const analyze_printf::OptionalFlag &ignoredFlag, 9848 const analyze_printf::OptionalFlag &flag, 9849 const char *startSpecifier, 9850 unsigned specifierLen) { 9851 // Warn about ignored flag with a fixit removal. 9852 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 9853 << ignoredFlag.toString() << flag.toString(), 9854 getLocationOfByte(ignoredFlag.getPosition()), 9855 /*IsStringLocation*/true, 9856 getSpecifierRange(startSpecifier, specifierLen), 9857 FixItHint::CreateRemoval( 9858 getSpecifierRange(ignoredFlag.getPosition(), 1))); 9859 } 9860 9861 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 9862 unsigned flagLen) { 9863 // Warn about an empty flag. 9864 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 9865 getLocationOfByte(startFlag), 9866 /*IsStringLocation*/true, 9867 getSpecifierRange(startFlag, flagLen)); 9868 } 9869 9870 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 9871 unsigned flagLen) { 9872 // Warn about an invalid flag. 
9873 auto Range = getSpecifierRange(startFlag, flagLen); 9874 StringRef flag(startFlag, flagLen); 9875 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 9876 getLocationOfByte(startFlag), 9877 /*IsStringLocation*/true, 9878 Range, FixItHint::CreateRemoval(Range)); 9879 } 9880 9881 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 9882 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 9883 // Warn about using '[...]' without a '@' conversion. 9884 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 9885 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 9886 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 9887 getLocationOfByte(conversionPosition), 9888 /*IsStringLocation*/true, 9889 Range, FixItHint::CreateRemoval(Range)); 9890 } 9891 9892 // Determines if the specified is a C++ class or struct containing 9893 // a member with the specified name and kind (e.g. a CXXMethodDecl named 9894 // "c_str()"). 9895 template<typename MemberKind> 9896 static llvm::SmallPtrSet<MemberKind*, 1> 9897 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 9898 const RecordType *RT = Ty->getAs<RecordType>(); 9899 llvm::SmallPtrSet<MemberKind*, 1> Results; 9900 9901 if (!RT) 9902 return Results; 9903 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 9904 if (!RD || !RD->getDefinition()) 9905 return Results; 9906 9907 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 9908 Sema::LookupMemberName); 9909 R.suppressDiagnostics(); 9910 9911 // We just need to include all members of the right kind turned up by the 9912 // filter, at this point. 9913 if (S.LookupQualifiedName(R, RT->getDecl())) 9914 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 9915 NamedDecl *decl = (*I)->getUnderlyingDecl(); 9916 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 9917 Results.insert(FK); 9918 } 9919 return Results; 9920 } 9921 9922 /// Check if we could call '.c_str()' on an object. 9923 /// 9924 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 9925 /// allow the call, or if it would be ambiguous). 9926 bool Sema::hasCStrMethod(const Expr *E) { 9927 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 9928 9929 MethodSet Results = 9930 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 9931 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 9932 MI != ME; ++MI) 9933 if ((*MI)->getMinRequiredArguments() == 0) 9934 return true; 9935 return false; 9936 } 9937 9938 // Check if a (w)string was passed when a (w)char* was needed, and offer a 9939 // better diagnostic if so. AT is assumed to be valid. 9940 // Returns true when a c_str() conversion method is found. 9941 bool CheckPrintfHandler::checkForCStrMembers( 9942 const analyze_printf::ArgType &AT, const Expr *E) { 9943 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 9944 9945 MethodSet Results = 9946 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 9947 9948 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 9949 MI != ME; ++MI) { 9950 const CXXMethodDecl *Method = *MI; 9951 if (Method->getMinRequiredArguments() == 0 && 9952 AT.matchesType(S.Context, Method->getReturnType())) { 9953 // FIXME: Suggest parens if the expression needs them. 
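      // For example, passing a std::string to "%s" reaches this point, and
      // the note suggests appending ".c_str()" to the argument:
      //
      //   std::string Name = ...;
      //   printf("%s", Name);   // note: did you mean to call c_str()?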
9954 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 9955 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 9956 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 9957 return true; 9958 } 9959 } 9960 9961 return false; 9962 } 9963 9964 bool CheckPrintfHandler::HandlePrintfSpecifier( 9965 const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, 9966 unsigned specifierLen, const TargetInfo &Target) { 9967 using namespace analyze_format_string; 9968 using namespace analyze_printf; 9969 9970 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 9971 9972 if (FS.consumesDataArgument()) { 9973 if (atFirstArg) { 9974 atFirstArg = false; 9975 usesPositionalArgs = FS.usesPositionalArg(); 9976 } 9977 else if (usesPositionalArgs != FS.usesPositionalArg()) { 9978 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 9979 startSpecifier, specifierLen); 9980 return false; 9981 } 9982 } 9983 9984 // First check if the field width, precision, and conversion specifier 9985 // have matching data arguments. 9986 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 9987 startSpecifier, specifierLen)) { 9988 return false; 9989 } 9990 9991 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 9992 startSpecifier, specifierLen)) { 9993 return false; 9994 } 9995 9996 if (!CS.consumesDataArgument()) { 9997 // FIXME: Technically specifying a precision or field width here 9998 // makes no sense. Worth issuing a warning at some point. 9999 return true; 10000 } 10001 10002 // Consume the argument. 10003 unsigned argIndex = FS.getArgIndex(); 10004 if (argIndex < NumDataArgs) { 10005 // The check to see if the argIndex is valid will come later. 10006 // We set the bit here because we may exit early from this 10007 // function if we encounter some other error. 10008 CoveredArgs.set(argIndex); 10009 } 10010 10011 // FreeBSD kernel extensions. 10012 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 10013 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 10014 // We need at least two arguments. 10015 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 10016 return false; 10017 10018 // Claim the second argument. 10019 CoveredArgs.set(argIndex + 1); 10020 10021 // Type check the first argument (int for %b, pointer for %D) 10022 const Expr *Ex = getDataArg(argIndex); 10023 const analyze_printf::ArgType &AT = 10024 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 
10025 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 10026 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 10027 EmitFormatDiagnostic( 10028 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10029 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 10030 << false << Ex->getSourceRange(), 10031 Ex->getBeginLoc(), /*IsStringLocation*/ false, 10032 getSpecifierRange(startSpecifier, specifierLen)); 10033 10034 // Type check the second argument (char * for both %b and %D) 10035 Ex = getDataArg(argIndex + 1); 10036 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 10037 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 10038 EmitFormatDiagnostic( 10039 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10040 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 10041 << false << Ex->getSourceRange(), 10042 Ex->getBeginLoc(), /*IsStringLocation*/ false, 10043 getSpecifierRange(startSpecifier, specifierLen)); 10044 10045 return true; 10046 } 10047 10048 // Check for using an Objective-C specific conversion specifier 10049 // in a non-ObjC literal. 10050 if (!allowsObjCArg() && CS.isObjCArg()) { 10051 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 10052 specifierLen); 10053 } 10054 10055 // %P can only be used with os_log. 10056 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 10057 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 10058 specifierLen); 10059 } 10060 10061 // %n is not allowed with os_log. 10062 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 10063 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 10064 getLocationOfByte(CS.getStart()), 10065 /*IsStringLocation*/ false, 10066 getSpecifierRange(startSpecifier, specifierLen)); 10067 10068 return true; 10069 } 10070 10071 // Only scalars are allowed for os_trace. 10072 if (FSType == Sema::FST_OSTrace && 10073 (CS.getKind() == ConversionSpecifier::PArg || 10074 CS.getKind() == ConversionSpecifier::sArg || 10075 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 10076 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 10077 specifierLen); 10078 } 10079 10080 // Check for use of public/private annotation outside of os_log(). 
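  // For example, an annotated specifier such as "%{public}s" used with plain
  // printf (rather than os_log) is diagnosed here.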
10081 if (FSType != Sema::FST_OSLog) { 10082 if (FS.isPublic().isSet()) { 10083 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 10084 << "public", 10085 getLocationOfByte(FS.isPublic().getPosition()), 10086 /*IsStringLocation*/ false, 10087 getSpecifierRange(startSpecifier, specifierLen)); 10088 } 10089 if (FS.isPrivate().isSet()) { 10090 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 10091 << "private", 10092 getLocationOfByte(FS.isPrivate().getPosition()), 10093 /*IsStringLocation*/ false, 10094 getSpecifierRange(startSpecifier, specifierLen)); 10095 } 10096 } 10097 10098 const llvm::Triple &Triple = Target.getTriple(); 10099 if (CS.getKind() == ConversionSpecifier::nArg && 10100 (Triple.isAndroid() || Triple.isOSFuchsia())) { 10101 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported), 10102 getLocationOfByte(CS.getStart()), 10103 /*IsStringLocation*/ false, 10104 getSpecifierRange(startSpecifier, specifierLen)); 10105 } 10106 10107 // Check for invalid use of field width 10108 if (!FS.hasValidFieldWidth()) { 10109 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 10110 startSpecifier, specifierLen); 10111 } 10112 10113 // Check for invalid use of precision 10114 if (!FS.hasValidPrecision()) { 10115 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 10116 startSpecifier, specifierLen); 10117 } 10118 10119 // Precision is mandatory for %P specifier. 10120 if (CS.getKind() == ConversionSpecifier::PArg && 10121 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 10122 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 10123 getLocationOfByte(startSpecifier), 10124 /*IsStringLocation*/ false, 10125 getSpecifierRange(startSpecifier, specifierLen)); 10126 } 10127 10128 // Check each flag does not conflict with any other component. 10129 if (!FS.hasValidThousandsGroupingPrefix()) 10130 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 10131 if (!FS.hasValidLeadingZeros()) 10132 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 10133 if (!FS.hasValidPlusPrefix()) 10134 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 10135 if (!FS.hasValidSpacePrefix()) 10136 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 10137 if (!FS.hasValidAlternativeForm()) 10138 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 10139 if (!FS.hasValidLeftJustified()) 10140 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 10141 10142 // Check that flags are not ignored by another flag 10143 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 10144 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 10145 startSpecifier, specifierLen); 10146 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 10147 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 10148 startSpecifier, specifierLen); 10149 10150 // Check the length modifier is valid with the given conversion specifier. 
10151 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 10152 S.getLangOpts())) 10153 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10154 diag::warn_format_nonsensical_length); 10155 else if (!FS.hasStandardLengthModifier()) 10156 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 10157 else if (!FS.hasStandardLengthConversionCombination()) 10158 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10159 diag::warn_format_non_standard_conversion_spec); 10160 10161 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 10162 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 10163 10164 // The remaining checks depend on the data arguments. 10165 if (ArgPassingKind == Sema::FAPK_VAList) 10166 return true; 10167 10168 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 10169 return false; 10170 10171 const Expr *Arg = getDataArg(argIndex); 10172 if (!Arg) 10173 return true; 10174 10175 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 10176 } 10177 10178 static bool requiresParensToAddCast(const Expr *E) { 10179 // FIXME: We should have a general way to reason about operator 10180 // precedence and whether parens are actually needed here. 10181 // Take care of a few common cases where they aren't. 10182 const Expr *Inside = E->IgnoreImpCasts(); 10183 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 10184 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 10185 10186 switch (Inside->getStmtClass()) { 10187 case Stmt::ArraySubscriptExprClass: 10188 case Stmt::CallExprClass: 10189 case Stmt::CharacterLiteralClass: 10190 case Stmt::CXXBoolLiteralExprClass: 10191 case Stmt::DeclRefExprClass: 10192 case Stmt::FloatingLiteralClass: 10193 case Stmt::IntegerLiteralClass: 10194 case Stmt::MemberExprClass: 10195 case Stmt::ObjCArrayLiteralClass: 10196 case Stmt::ObjCBoolLiteralExprClass: 10197 case Stmt::ObjCBoxedExprClass: 10198 case Stmt::ObjCDictionaryLiteralClass: 10199 case Stmt::ObjCEncodeExprClass: 10200 case Stmt::ObjCIvarRefExprClass: 10201 case Stmt::ObjCMessageExprClass: 10202 case Stmt::ObjCPropertyRefExprClass: 10203 case Stmt::ObjCStringLiteralClass: 10204 case Stmt::ObjCSubscriptRefExprClass: 10205 case Stmt::ParenExprClass: 10206 case Stmt::StringLiteralClass: 10207 case Stmt::UnaryOperatorClass: 10208 return false; 10209 default: 10210 return true; 10211 } 10212 } 10213 10214 static std::pair<QualType, StringRef> 10215 shouldNotPrintDirectly(const ASTContext &Context, 10216 QualType IntendedTy, 10217 const Expr *E) { 10218 // Use a 'while' to peel off layers of typedefs. 10219 QualType TyTy = IntendedTy; 10220 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 10221 StringRef Name = UserTy->getDecl()->getName(); 10222 QualType CastTy = llvm::StringSwitch<QualType>(Name) 10223 .Case("CFIndex", Context.getNSIntegerType()) 10224 .Case("NSInteger", Context.getNSIntegerType()) 10225 .Case("NSUInteger", Context.getNSUIntegerType()) 10226 .Case("SInt32", Context.IntTy) 10227 .Case("UInt32", Context.UnsignedIntTy) 10228 .Default(QualType()); 10229 10230 if (!CastTy.isNull()) 10231 return std::make_pair(CastTy, Name); 10232 10233 TyTy = UserTy->desugar(); 10234 } 10235 10236 // Strip parens if necessary. 
10237 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 10238 return shouldNotPrintDirectly(Context, 10239 PE->getSubExpr()->getType(), 10240 PE->getSubExpr()); 10241 10242 // If this is a conditional expression, then its result type is constructed 10243 // via usual arithmetic conversions and thus there might be no necessary 10244 // typedef sugar there. Recurse to operands to check for NSInteger & 10245 // Co. usage condition. 10246 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 10247 QualType TrueTy, FalseTy; 10248 StringRef TrueName, FalseName; 10249 10250 std::tie(TrueTy, TrueName) = 10251 shouldNotPrintDirectly(Context, 10252 CO->getTrueExpr()->getType(), 10253 CO->getTrueExpr()); 10254 std::tie(FalseTy, FalseName) = 10255 shouldNotPrintDirectly(Context, 10256 CO->getFalseExpr()->getType(), 10257 CO->getFalseExpr()); 10258 10259 if (TrueTy == FalseTy) 10260 return std::make_pair(TrueTy, TrueName); 10261 else if (TrueTy.isNull()) 10262 return std::make_pair(FalseTy, FalseName); 10263 else if (FalseTy.isNull()) 10264 return std::make_pair(TrueTy, TrueName); 10265 } 10266 10267 return std::make_pair(QualType(), StringRef()); 10268 } 10269 10270 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 10271 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 10272 /// type do not count. 10273 static bool 10274 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 10275 QualType From = ICE->getSubExpr()->getType(); 10276 QualType To = ICE->getType(); 10277 // It's an integer promotion if the destination type is the promoted 10278 // source type. 10279 if (ICE->getCastKind() == CK_IntegralCast && 10280 S.Context.isPromotableIntegerType(From) && 10281 S.Context.getPromotedIntegerType(From) == To) 10282 return true; 10283 // Look through vector types, since we do default argument promotion for 10284 // those in OpenCL. 10285 if (const auto *VecTy = From->getAs<ExtVectorType>()) 10286 From = VecTy->getElementType(); 10287 if (const auto *VecTy = To->getAs<ExtVectorType>()) 10288 To = VecTy->getElementType(); 10289 // It's a floating promotion if the source type is a lower rank. 10290 return ICE->getCastKind() == CK_FloatingCast && 10291 S.Context.getFloatingTypeOrder(From, To) < 0; 10292 } 10293 10294 bool 10295 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 10296 const char *StartSpecifier, 10297 unsigned SpecifierLen, 10298 const Expr *E) { 10299 using namespace analyze_format_string; 10300 using namespace analyze_printf; 10301 10302 // Now type check the data expression that matches the 10303 // format specifier. 10304 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 10305 if (!AT.isValid()) 10306 return true; 10307 10308 QualType ExprTy = E->getType(); 10309 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 10310 ExprTy = TET->getUnderlyingExpr()->getType(); 10311 } 10312 10313 // When using the format attribute in C++, you can receive a function or an 10314 // array that will necessarily decay to a pointer when passed to the final 10315 // format consumer. Apply decay before type comparison. 10316 if (ExprTy->canDecayToPointerType()) 10317 ExprTy = S.Context.getDecayedType(ExprTy); 10318 10319 // Diagnose attempts to print a boolean value as a character. Unlike other 10320 // -Wformat diagnostics, this is fine from a type perspective, but it still 10321 // doesn't make sense. 
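  // For example:
  //
  //   printf("%c", x == y);   // '%c' used with a boolean value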
10322 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 10323 E->isKnownToHaveBooleanValue()) { 10324 const CharSourceRange &CSR = 10325 getSpecifierRange(StartSpecifier, SpecifierLen); 10326 SmallString<4> FSString; 10327 llvm::raw_svector_ostream os(FSString); 10328 FS.toString(os); 10329 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 10330 << FSString, 10331 E->getExprLoc(), false, CSR); 10332 return true; 10333 } 10334 10335 ArgType::MatchKind ImplicitMatch = ArgType::NoMatch; 10336 ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 10337 if (Match == ArgType::Match) 10338 return true; 10339 10340 // NoMatchPromotionTypeConfusion should be only returned in ImplictCastExpr 10341 assert(Match != ArgType::NoMatchPromotionTypeConfusion); 10342 10343 // Look through argument promotions for our error message's reported type. 10344 // This includes the integral and floating promotions, but excludes array 10345 // and function pointer decay (seeing that an argument intended to be a 10346 // string has type 'char [6]' is probably more confusing than 'char *') and 10347 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 10348 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 10349 if (isArithmeticArgumentPromotion(S, ICE)) { 10350 E = ICE->getSubExpr(); 10351 ExprTy = E->getType(); 10352 10353 // Check if we didn't match because of an implicit cast from a 'char' 10354 // or 'short' to an 'int'. This is done because printf is a varargs 10355 // function. 10356 if (ICE->getType() == S.Context.IntTy || 10357 ICE->getType() == S.Context.UnsignedIntTy) { 10358 // All further checking is done on the subexpression 10359 ImplicitMatch = AT.matchesType(S.Context, ExprTy); 10360 if (ImplicitMatch == ArgType::Match) 10361 return true; 10362 } 10363 } 10364 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 10365 // Special case for 'a', which has type 'int' in C. 10366 // Note, however, that we do /not/ want to treat multibyte constants like 10367 // 'MooV' as characters! This form is deprecated but still exists. In 10368 // addition, don't treat expressions as of type 'char' if one byte length 10369 // modifier is provided. 10370 if (ExprTy == S.Context.IntTy && 10371 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 10372 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) { 10373 ExprTy = S.Context.CharTy; 10374 // To improve check results, we consider a character literal in C 10375 // to be a 'char' rather than an 'int'. 'printf("%hd", 'a');' is 10376 // more likely a type confusion situation, so we will suggest to 10377 // use '%hhd' instead by discarding the MatchPromotion. 10378 if (Match == ArgType::MatchPromotion) 10379 Match = ArgType::NoMatch; 10380 } 10381 } 10382 if (Match == ArgType::MatchPromotion) { 10383 // WG14 N2562 only clarified promotions in *printf 10384 // For NSLog in ObjC, just preserve -Wformat behavior 10385 if (!S.getLangOpts().ObjC && 10386 ImplicitMatch != ArgType::NoMatchPromotionTypeConfusion && 10387 ImplicitMatch != ArgType::NoMatchTypeConfusion) 10388 return true; 10389 Match = ArgType::NoMatch; 10390 } 10391 if (ImplicitMatch == ArgType::NoMatchPedantic || 10392 ImplicitMatch == ArgType::NoMatchTypeConfusion) 10393 Match = ImplicitMatch; 10394 assert(Match != ArgType::MatchPromotion); 10395 // Look through enums to their underlying type. 
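  // For example, an argument of enumeration type whose underlying type is
  // 'unsigned int' is checked below as 'unsigned int'; IsEnum merely adjusts
  // the wording of any resulting diagnostic.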
10396 bool IsEnum = false; 10397 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 10398 ExprTy = EnumTy->getDecl()->getIntegerType(); 10399 IsEnum = true; 10400 } 10401 10402 // %C in an Objective-C context prints a unichar, not a wchar_t. 10403 // If the argument is an integer of some kind, believe the %C and suggest 10404 // a cast instead of changing the conversion specifier. 10405 QualType IntendedTy = ExprTy; 10406 if (isObjCContext() && 10407 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 10408 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 10409 !ExprTy->isCharType()) { 10410 // 'unichar' is defined as a typedef of unsigned short, but we should 10411 // prefer using the typedef if it is visible. 10412 IntendedTy = S.Context.UnsignedShortTy; 10413 10414 // While we are here, check if the value is an IntegerLiteral that happens 10415 // to be within the valid range. 10416 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 10417 const llvm::APInt &V = IL->getValue(); 10418 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 10419 return true; 10420 } 10421 10422 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 10423 Sema::LookupOrdinaryName); 10424 if (S.LookupName(Result, S.getCurScope())) { 10425 NamedDecl *ND = Result.getFoundDecl(); 10426 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 10427 if (TD->getUnderlyingType() == IntendedTy) 10428 IntendedTy = S.Context.getTypedefType(TD); 10429 } 10430 } 10431 } 10432 10433 // Special-case some of Darwin's platform-independence types by suggesting 10434 // casts to primitive types that are known to be large enough. 10435 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 10436 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 10437 QualType CastTy; 10438 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 10439 if (!CastTy.isNull()) { 10440 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 10441 // (long in ASTContext). Only complain to pedants. 10442 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 10443 (AT.isSizeT() || AT.isPtrdiffT()) && 10444 AT.matchesType(S.Context, CastTy)) 10445 Match = ArgType::NoMatchPedantic; 10446 IntendedTy = CastTy; 10447 ShouldNotPrintDirectly = true; 10448 } 10449 } 10450 10451 // We may be able to offer a FixItHint if it is a supported type. 
10452 PrintfSpecifier fixedFS = FS; 10453 bool Success = 10454 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 10455 10456 if (Success) { 10457 // Get the fix string from the fixed format specifier 10458 SmallString<16> buf; 10459 llvm::raw_svector_ostream os(buf); 10460 fixedFS.toString(os); 10461 10462 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 10463 10464 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 10465 unsigned Diag; 10466 switch (Match) { 10467 case ArgType::Match: 10468 case ArgType::MatchPromotion: 10469 case ArgType::NoMatchPromotionTypeConfusion: 10470 llvm_unreachable("expected non-matching"); 10471 case ArgType::NoMatchPedantic: 10472 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10473 break; 10474 case ArgType::NoMatchTypeConfusion: 10475 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10476 break; 10477 case ArgType::NoMatch: 10478 Diag = diag::warn_format_conversion_argument_type_mismatch; 10479 break; 10480 } 10481 10482 // In this case, the specifier is wrong and should be changed to match 10483 // the argument. 10484 EmitFormatDiagnostic(S.PDiag(Diag) 10485 << AT.getRepresentativeTypeName(S.Context) 10486 << IntendedTy << IsEnum << E->getSourceRange(), 10487 E->getBeginLoc(), 10488 /*IsStringLocation*/ false, SpecRange, 10489 FixItHint::CreateReplacement(SpecRange, os.str())); 10490 } else { 10491 // The canonical type for formatting this value is different from the 10492 // actual type of the expression. (This occurs, for example, with Darwin's 10493 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 10494 // should be printed as 'long' for 64-bit compatibility.) 10495 // Rather than emitting a normal format/argument mismatch, we want to 10496 // add a cast to the recommended type (and correct the format string 10497 // if necessary). 10498 SmallString<16> CastBuf; 10499 llvm::raw_svector_ostream CastFix(CastBuf); 10500 CastFix << "("; 10501 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 10502 CastFix << ")"; 10503 10504 SmallVector<FixItHint,4> Hints; 10505 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 10506 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 10507 10508 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 10509 // If there's already a cast present, just replace it. 10510 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 10511 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 10512 10513 } else if (!requiresParensToAddCast(E)) { 10514 // If the expression has high enough precedence, 10515 // just write the C-style cast. 10516 Hints.push_back( 10517 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10518 } else { 10519 // Otherwise, add parens around the expression as well as the cast. 10520 CastFix << "("; 10521 Hints.push_back( 10522 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10523 10524 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 10525 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 10526 } 10527 10528 if (ShouldNotPrintDirectly) { 10529 // The expression has a type that should not be printed directly. 10530 // We extract the name from the typedef because we don't want to show 10531 // the underlying type in the diagnostic. 
10532 StringRef Name; 10533 if (const auto *TypedefTy = ExprTy->getAs<TypedefType>()) 10534 Name = TypedefTy->getDecl()->getName(); 10535 else 10536 Name = CastTyName; 10537 unsigned Diag = Match == ArgType::NoMatchPedantic 10538 ? diag::warn_format_argument_needs_cast_pedantic 10539 : diag::warn_format_argument_needs_cast; 10540 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 10541 << E->getSourceRange(), 10542 E->getBeginLoc(), /*IsStringLocation=*/false, 10543 SpecRange, Hints); 10544 } else { 10545 // In this case, the expression could be printed using a different 10546 // specifier, but we've decided that the specifier is probably correct 10547 // and we should cast instead. Just use the normal warning message. 10548 EmitFormatDiagnostic( 10549 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10550 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 10551 << E->getSourceRange(), 10552 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 10553 } 10554 } 10555 } else { 10556 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 10557 SpecifierLen); 10558 // Since the warning for passing non-POD types to variadic functions 10559 // was deferred until now, we emit a warning for non-POD 10560 // arguments here. 10561 bool EmitTypeMismatch = false; 10562 switch (S.isValidVarArgType(ExprTy)) { 10563 case Sema::VAK_Valid: 10564 case Sema::VAK_ValidInCXX11: { 10565 unsigned Diag; 10566 switch (Match) { 10567 case ArgType::Match: 10568 case ArgType::MatchPromotion: 10569 case ArgType::NoMatchPromotionTypeConfusion: 10570 llvm_unreachable("expected non-matching"); 10571 case ArgType::NoMatchPedantic: 10572 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10573 break; 10574 case ArgType::NoMatchTypeConfusion: 10575 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10576 break; 10577 case ArgType::NoMatch: 10578 Diag = diag::warn_format_conversion_argument_type_mismatch; 10579 break; 10580 } 10581 10582 EmitFormatDiagnostic( 10583 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 10584 << IsEnum << CSR << E->getSourceRange(), 10585 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10586 break; 10587 } 10588 case Sema::VAK_Undefined: 10589 case Sema::VAK_MSVCUndefined: 10590 if (CallType == Sema::VariadicDoesNotApply) { 10591 EmitTypeMismatch = true; 10592 } else { 10593 EmitFormatDiagnostic( 10594 S.PDiag(diag::warn_non_pod_vararg_with_format_string) 10595 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 10596 << AT.getRepresentativeTypeName(S.Context) << CSR 10597 << E->getSourceRange(), 10598 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10599 checkForCStrMembers(AT, E); 10600 } 10601 break; 10602 10603 case Sema::VAK_Invalid: 10604 if (CallType == Sema::VariadicDoesNotApply) 10605 EmitTypeMismatch = true; 10606 else if (ExprTy->isObjCObjectType()) 10607 EmitFormatDiagnostic( 10608 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 10609 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 10610 << AT.getRepresentativeTypeName(S.Context) << CSR 10611 << E->getSourceRange(), 10612 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10613 else 10614 // FIXME: If this is an initializer list, suggest removing the braces 10615 // or inserting a cast to the target type. 
10616 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 10617 << isa<InitListExpr>(E) << ExprTy << CallType 10618 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 10619 break; 10620 } 10621 10622 if (EmitTypeMismatch) { 10623 // The function is not variadic, so we do not generate warnings about 10624 // being allowed to pass that object as a variadic argument. Instead, 10625 // since there are inherently no printf specifiers for types which cannot 10626 // be passed as variadic arguments, emit a plain old specifier mismatch 10627 // argument. 10628 EmitFormatDiagnostic( 10629 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10630 << AT.getRepresentativeTypeName(S.Context) << ExprTy << false 10631 << E->getSourceRange(), 10632 E->getBeginLoc(), false, CSR); 10633 } 10634 10635 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 10636 "format string specifier index out of range"); 10637 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 10638 } 10639 10640 return true; 10641 } 10642 10643 //===--- CHECK: Scanf format string checking ------------------------------===// 10644 10645 namespace { 10646 10647 class CheckScanfHandler : public CheckFormatHandler { 10648 public: 10649 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 10650 const Expr *origFormatExpr, Sema::FormatStringType type, 10651 unsigned firstDataArg, unsigned numDataArgs, 10652 const char *beg, Sema::FormatArgumentPassingKind APK, 10653 ArrayRef<const Expr *> Args, unsigned formatIdx, 10654 bool inFunctionCall, Sema::VariadicCallType CallType, 10655 llvm::SmallBitVector &CheckedVarArgs, 10656 UncoveredArgHandler &UncoveredArg) 10657 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 10658 numDataArgs, beg, APK, Args, formatIdx, 10659 inFunctionCall, CallType, CheckedVarArgs, 10660 UncoveredArg) {} 10661 10662 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 10663 const char *startSpecifier, 10664 unsigned specifierLen) override; 10665 10666 bool HandleInvalidScanfConversionSpecifier( 10667 const analyze_scanf::ScanfSpecifier &FS, 10668 const char *startSpecifier, 10669 unsigned specifierLen) override; 10670 10671 void HandleIncompleteScanList(const char *start, const char *end) override; 10672 }; 10673 10674 } // namespace 10675 10676 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 10677 const char *end) { 10678 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 10679 getLocationOfByte(end), /*IsStringLocation*/true, 10680 getSpecifierRange(start, end - start)); 10681 } 10682 10683 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 10684 const analyze_scanf::ScanfSpecifier &FS, 10685 const char *startSpecifier, 10686 unsigned specifierLen) { 10687 const analyze_scanf::ScanfConversionSpecifier &CS = 10688 FS.getConversionSpecifier(); 10689 10690 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 10691 getLocationOfByte(CS.getStart()), 10692 startSpecifier, specifierLen, 10693 CS.getStart(), CS.getLength()); 10694 } 10695 10696 bool CheckScanfHandler::HandleScanfSpecifier( 10697 const analyze_scanf::ScanfSpecifier &FS, 10698 const char *startSpecifier, 10699 unsigned specifierLen) { 10700 using namespace analyze_scanf; 10701 using namespace analyze_format_string; 10702 10703 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 10704 10705 // Handle case where '%' and '*' don't consume an argument. 
These shouldn't
10706 // be used to decide if we are using positional arguments consistently.
10707 if (FS.consumesDataArgument()) {
10708 if (atFirstArg) {
10709 atFirstArg = false;
10710 usesPositionalArgs = FS.usesPositionalArg();
10711 }
10712 else if (usesPositionalArgs != FS.usesPositionalArg()) {
10713 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
10714 startSpecifier, specifierLen);
10715 return false;
10716 }
10717 }
10718
10719 // Check if the field width is non-zero.
10720 const OptionalAmount &Amt = FS.getFieldWidth();
10721 if (Amt.getHowSpecified() == OptionalAmount::Constant) {
10722 if (Amt.getConstantAmount() == 0) {
10723 const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
10724 Amt.getConstantLength());
10725 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
10726 getLocationOfByte(Amt.getStart()),
10727 /*IsStringLocation*/true, R,
10728 FixItHint::CreateRemoval(R));
10729 }
10730 }
10731
10732 if (!FS.consumesDataArgument()) {
10733 // FIXME: Technically specifying a precision or field width here
10734 // makes no sense. Worth issuing a warning at some point.
10735 return true;
10736 }
10737
10738 // Consume the argument.
10739 unsigned argIndex = FS.getArgIndex();
10740 if (argIndex < NumDataArgs) {
10741 // The check to see if the argIndex is valid will come later.
10742 // We set the bit here because we may exit early from this
10743 // function if we encounter some other error.
10744 CoveredArgs.set(argIndex);
10745 }
10746
10747 // Check that the length modifier is valid with the given conversion specifier.
10748 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
10749 S.getLangOpts()))
10750 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
10751 diag::warn_format_nonsensical_length);
10752 else if (!FS.hasStandardLengthModifier())
10753 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
10754 else if (!FS.hasStandardLengthConversionCombination())
10755 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
10756 diag::warn_format_non_standard_conversion_spec);
10757
10758 if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
10759 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
10760
10761 // The remaining checks depend on the data arguments.
10762 if (ArgPassingKind == Sema::FAPK_VAList)
10763 return true;
10764
10765 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
10766 return false;
10767
10768 // Check that the argument type matches the format specifier.
10769 const Expr *Ex = getDataArg(argIndex);
10770 if (!Ex)
10771 return true;
10772
10773 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);
10774
10775 if (!AT.isValid()) {
10776 return true;
10777 }
10778
10779 analyze_format_string::ArgType::MatchKind Match =
10780 AT.matchesType(S.Context, Ex->getType());
10781 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
10782 if (Match == analyze_format_string::ArgType::Match)
10783 return true;
10784
10785 ScanfSpecifier fixedFS = FS;
10786 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
10787 S.getLangOpts(), S.Context);
10788
10789 unsigned Diag =
10790 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
10791 : diag::warn_format_conversion_argument_type_mismatch;
10792
10793 if (Success) {
10794 // Get the fix string from the fixed format specifier.
10795 SmallString<128> buf; 10796 llvm::raw_svector_ostream os(buf); 10797 fixedFS.toString(os); 10798 10799 EmitFormatDiagnostic( 10800 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 10801 << Ex->getType() << false << Ex->getSourceRange(), 10802 Ex->getBeginLoc(), 10803 /*IsStringLocation*/ false, 10804 getSpecifierRange(startSpecifier, specifierLen), 10805 FixItHint::CreateReplacement( 10806 getSpecifierRange(startSpecifier, specifierLen), os.str())); 10807 } else { 10808 EmitFormatDiagnostic(S.PDiag(Diag) 10809 << AT.getRepresentativeTypeName(S.Context) 10810 << Ex->getType() << false << Ex->getSourceRange(), 10811 Ex->getBeginLoc(), 10812 /*IsStringLocation*/ false, 10813 getSpecifierRange(startSpecifier, specifierLen)); 10814 } 10815 10816 return true; 10817 } 10818 10819 static void CheckFormatString( 10820 Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr, 10821 ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK, 10822 unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type, 10823 bool inFunctionCall, Sema::VariadicCallType CallType, 10824 llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg, 10825 bool IgnoreStringsWithoutSpecifiers) { 10826 // CHECK: is the format string a wide literal? 10827 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 10828 CheckFormatHandler::EmitFormatDiagnostic( 10829 S, inFunctionCall, Args[format_idx], 10830 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 10831 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 10832 return; 10833 } 10834 10835 // Str - The format string. NOTE: this is NOT null-terminated! 10836 StringRef StrRef = FExpr->getString(); 10837 const char *Str = StrRef.data(); 10838 // Account for cases where the string literal is truncated in a declaration. 10839 const ConstantArrayType *T = 10840 S.Context.getAsConstantArrayType(FExpr->getType()); 10841 assert(T && "String literal not of constant array type!"); 10842 size_t TypeSize = T->getSize().getZExtValue(); 10843 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 10844 const unsigned numDataArgs = Args.size() - firstDataArg; 10845 10846 if (IgnoreStringsWithoutSpecifiers && 10847 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 10848 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 10849 return; 10850 10851 // Emit a warning if the string literal is truncated and does not contain an 10852 // embedded null character. 10853 if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) { 10854 CheckFormatHandler::EmitFormatDiagnostic( 10855 S, inFunctionCall, Args[format_idx], 10856 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 10857 FExpr->getBeginLoc(), 10858 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 10859 return; 10860 } 10861 10862 // CHECK: empty format string? 
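// Illustrative example (not from the original source): a call such as
//   printf("", value);
// takes the branch below, since an empty format string can never consume its
// data arguments.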
10863 if (StrLen == 0 && numDataArgs > 0) {
10864 CheckFormatHandler::EmitFormatDiagnostic(
10865 S, inFunctionCall, Args[format_idx],
10866 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
10867 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
10868 return;
10869 }
10870
10871 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
10872 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
10873 Type == Sema::FST_OSTrace) {
10874 CheckPrintfHandler H(
10875 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
10876 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, APK,
10877 Args, format_idx, inFunctionCall, CallType, CheckedVarArgs,
10878 UncoveredArg);
10879
10880 if (!analyze_format_string::ParsePrintfString(
10881 H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo(),
10882 Type == Sema::FST_FreeBSDKPrintf))
10883 H.DoneProcessing();
10884 } else if (Type == Sema::FST_Scanf) {
10885 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
10886 numDataArgs, Str, APK, Args, format_idx, inFunctionCall,
10887 CallType, CheckedVarArgs, UncoveredArg);
10888
10889 if (!analyze_format_string::ParseScanfString(
10890 H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
10891 H.DoneProcessing();
10892 } // TODO: handle other formats
10893 }
10894
10895 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
10896 // Str - The format string. NOTE: this is NOT null-terminated!
10897 StringRef StrRef = FExpr->getString();
10898 const char *Str = StrRef.data();
10899 // Account for cases where the string literal is truncated in a declaration.
10900 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
10901 assert(T && "String literal not of constant array type!");
10902 size_t TypeSize = T->getSize().getZExtValue();
10903 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
10904 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
10905 getLangOpts(),
10906 Context.getTargetInfo());
10907 }
10908
10909 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
10910
10911 // Returns the related absolute value function that is larger, or 0 if one
10912 // does not exist.
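// Illustrative mapping (not from the original source), mirroring the switch
// below: abs -> labs -> llabs, fabsf -> fabs -> fabsl, cabsf -> cabs -> cabsl,
// and likewise for the corresponding __builtin_* forms.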
10913 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 10914 switch (AbsFunction) { 10915 default: 10916 return 0; 10917 10918 case Builtin::BI__builtin_abs: 10919 return Builtin::BI__builtin_labs; 10920 case Builtin::BI__builtin_labs: 10921 return Builtin::BI__builtin_llabs; 10922 case Builtin::BI__builtin_llabs: 10923 return 0; 10924 10925 case Builtin::BI__builtin_fabsf: 10926 return Builtin::BI__builtin_fabs; 10927 case Builtin::BI__builtin_fabs: 10928 return Builtin::BI__builtin_fabsl; 10929 case Builtin::BI__builtin_fabsl: 10930 return 0; 10931 10932 case Builtin::BI__builtin_cabsf: 10933 return Builtin::BI__builtin_cabs; 10934 case Builtin::BI__builtin_cabs: 10935 return Builtin::BI__builtin_cabsl; 10936 case Builtin::BI__builtin_cabsl: 10937 return 0; 10938 10939 case Builtin::BIabs: 10940 return Builtin::BIlabs; 10941 case Builtin::BIlabs: 10942 return Builtin::BIllabs; 10943 case Builtin::BIllabs: 10944 return 0; 10945 10946 case Builtin::BIfabsf: 10947 return Builtin::BIfabs; 10948 case Builtin::BIfabs: 10949 return Builtin::BIfabsl; 10950 case Builtin::BIfabsl: 10951 return 0; 10952 10953 case Builtin::BIcabsf: 10954 return Builtin::BIcabs; 10955 case Builtin::BIcabs: 10956 return Builtin::BIcabsl; 10957 case Builtin::BIcabsl: 10958 return 0; 10959 } 10960 } 10961 10962 // Returns the argument type of the absolute value function. 10963 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 10964 unsigned AbsType) { 10965 if (AbsType == 0) 10966 return QualType(); 10967 10968 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 10969 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 10970 if (Error != ASTContext::GE_None) 10971 return QualType(); 10972 10973 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 10974 if (!FT) 10975 return QualType(); 10976 10977 if (FT->getNumParams() != 1) 10978 return QualType(); 10979 10980 return FT->getParamType(0); 10981 } 10982 10983 // Returns the best absolute value function, or zero, based on type and 10984 // current absolute value function. 10985 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 10986 unsigned AbsFunctionKind) { 10987 unsigned BestKind = 0; 10988 uint64_t ArgSize = Context.getTypeSize(ArgType); 10989 for (unsigned Kind = AbsFunctionKind; Kind != 0; 10990 Kind = getLargerAbsoluteValueFunction(Kind)) { 10991 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 10992 if (Context.getTypeSize(ParamType) >= ArgSize) { 10993 if (BestKind == 0) 10994 BestKind = Kind; 10995 else if (Context.hasSameType(ParamType, ArgType)) { 10996 BestKind = Kind; 10997 break; 10998 } 10999 } 11000 } 11001 return BestKind; 11002 } 11003 11004 enum AbsoluteValueKind { 11005 AVK_Integer, 11006 AVK_Floating, 11007 AVK_Complex 11008 }; 11009 11010 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 11011 if (T->isIntegralOrEnumerationType()) 11012 return AVK_Integer; 11013 if (T->isRealFloatingType()) 11014 return AVK_Floating; 11015 if (T->isAnyComplexType()) 11016 return AVK_Complex; 11017 11018 llvm_unreachable("Type not integer, floating, or complex"); 11019 } 11020 11021 // Changes the absolute value function to a different type. Preserves whether 11022 // the function is a builtin. 
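// Illustrative example (not from the original source): for an integer
// argument, fabsf/fabs/fabsl and cabsf/cabs/cabsl all map to abs (or
// __builtin_abs for the builtin variants); getBestAbsFunction then widens
// that choice to fit the argument's size.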
11023 static unsigned changeAbsFunction(unsigned AbsKind, 11024 AbsoluteValueKind ValueKind) { 11025 switch (ValueKind) { 11026 case AVK_Integer: 11027 switch (AbsKind) { 11028 default: 11029 return 0; 11030 case Builtin::BI__builtin_fabsf: 11031 case Builtin::BI__builtin_fabs: 11032 case Builtin::BI__builtin_fabsl: 11033 case Builtin::BI__builtin_cabsf: 11034 case Builtin::BI__builtin_cabs: 11035 case Builtin::BI__builtin_cabsl: 11036 return Builtin::BI__builtin_abs; 11037 case Builtin::BIfabsf: 11038 case Builtin::BIfabs: 11039 case Builtin::BIfabsl: 11040 case Builtin::BIcabsf: 11041 case Builtin::BIcabs: 11042 case Builtin::BIcabsl: 11043 return Builtin::BIabs; 11044 } 11045 case AVK_Floating: 11046 switch (AbsKind) { 11047 default: 11048 return 0; 11049 case Builtin::BI__builtin_abs: 11050 case Builtin::BI__builtin_labs: 11051 case Builtin::BI__builtin_llabs: 11052 case Builtin::BI__builtin_cabsf: 11053 case Builtin::BI__builtin_cabs: 11054 case Builtin::BI__builtin_cabsl: 11055 return Builtin::BI__builtin_fabsf; 11056 case Builtin::BIabs: 11057 case Builtin::BIlabs: 11058 case Builtin::BIllabs: 11059 case Builtin::BIcabsf: 11060 case Builtin::BIcabs: 11061 case Builtin::BIcabsl: 11062 return Builtin::BIfabsf; 11063 } 11064 case AVK_Complex: 11065 switch (AbsKind) { 11066 default: 11067 return 0; 11068 case Builtin::BI__builtin_abs: 11069 case Builtin::BI__builtin_labs: 11070 case Builtin::BI__builtin_llabs: 11071 case Builtin::BI__builtin_fabsf: 11072 case Builtin::BI__builtin_fabs: 11073 case Builtin::BI__builtin_fabsl: 11074 return Builtin::BI__builtin_cabsf; 11075 case Builtin::BIabs: 11076 case Builtin::BIlabs: 11077 case Builtin::BIllabs: 11078 case Builtin::BIfabsf: 11079 case Builtin::BIfabs: 11080 case Builtin::BIfabsl: 11081 return Builtin::BIcabsf; 11082 } 11083 } 11084 llvm_unreachable("Unable to convert function"); 11085 } 11086 11087 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 11088 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 11089 if (!FnInfo) 11090 return 0; 11091 11092 switch (FDecl->getBuiltinID()) { 11093 default: 11094 return 0; 11095 case Builtin::BI__builtin_abs: 11096 case Builtin::BI__builtin_fabs: 11097 case Builtin::BI__builtin_fabsf: 11098 case Builtin::BI__builtin_fabsl: 11099 case Builtin::BI__builtin_labs: 11100 case Builtin::BI__builtin_llabs: 11101 case Builtin::BI__builtin_cabs: 11102 case Builtin::BI__builtin_cabsf: 11103 case Builtin::BI__builtin_cabsl: 11104 case Builtin::BIabs: 11105 case Builtin::BIlabs: 11106 case Builtin::BIllabs: 11107 case Builtin::BIfabs: 11108 case Builtin::BIfabsf: 11109 case Builtin::BIfabsl: 11110 case Builtin::BIcabs: 11111 case Builtin::BIcabsf: 11112 case Builtin::BIcabsl: 11113 return FDecl->getBuiltinID(); 11114 } 11115 llvm_unreachable("Unknown Builtin type"); 11116 } 11117 11118 // If the replacement is valid, emit a note with replacement function. 11119 // Additionally, suggest including the proper header if not already included. 
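// Illustrative behavior (not from the original source): in C++ the note
// suggests 'std::abs' and, if no suitable overload is visible, also hints at
// including <cstdlib> (integral arguments) or <cmath> (floating arguments).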
11120 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 11121 unsigned AbsKind, QualType ArgType) { 11122 bool EmitHeaderHint = true; 11123 const char *HeaderName = nullptr; 11124 StringRef FunctionName; 11125 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 11126 FunctionName = "std::abs"; 11127 if (ArgType->isIntegralOrEnumerationType()) { 11128 HeaderName = "cstdlib"; 11129 } else if (ArgType->isRealFloatingType()) { 11130 HeaderName = "cmath"; 11131 } else { 11132 llvm_unreachable("Invalid Type"); 11133 } 11134 11135 // Lookup all std::abs 11136 if (NamespaceDecl *Std = S.getStdNamespace()) { 11137 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 11138 R.suppressDiagnostics(); 11139 S.LookupQualifiedName(R, Std); 11140 11141 for (const auto *I : R) { 11142 const FunctionDecl *FDecl = nullptr; 11143 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 11144 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 11145 } else { 11146 FDecl = dyn_cast<FunctionDecl>(I); 11147 } 11148 if (!FDecl) 11149 continue; 11150 11151 // Found std::abs(), check that they are the right ones. 11152 if (FDecl->getNumParams() != 1) 11153 continue; 11154 11155 // Check that the parameter type can handle the argument. 11156 QualType ParamType = FDecl->getParamDecl(0)->getType(); 11157 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 11158 S.Context.getTypeSize(ArgType) <= 11159 S.Context.getTypeSize(ParamType)) { 11160 // Found a function, don't need the header hint. 11161 EmitHeaderHint = false; 11162 break; 11163 } 11164 } 11165 } 11166 } else { 11167 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 11168 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 11169 11170 if (HeaderName) { 11171 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 11172 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 11173 R.suppressDiagnostics(); 11174 S.LookupName(R, S.getCurScope()); 11175 11176 if (R.isSingleResult()) { 11177 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 11178 if (FD && FD->getBuiltinID() == AbsKind) { 11179 EmitHeaderHint = false; 11180 } else { 11181 return; 11182 } 11183 } else if (!R.empty()) { 11184 return; 11185 } 11186 } 11187 } 11188 11189 S.Diag(Loc, diag::note_replace_abs_function) 11190 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 11191 11192 if (!HeaderName) 11193 return; 11194 11195 if (!EmitHeaderHint) 11196 return; 11197 11198 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 11199 << FunctionName; 11200 } 11201 11202 template <std::size_t StrLen> 11203 static bool IsStdFunction(const FunctionDecl *FDecl, 11204 const char (&Str)[StrLen]) { 11205 if (!FDecl) 11206 return false; 11207 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 11208 return false; 11209 if (!FDecl->isInStdNamespace()) 11210 return false; 11211 11212 return true; 11213 } 11214 11215 // Warn when using the wrong abs() function. 11216 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 11217 const FunctionDecl *FDecl) { 11218 if (Call->getNumArgs() != 1) 11219 return; 11220 11221 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 11222 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 11223 if (AbsKind == 0 && !IsStdAbs) 11224 return; 11225 11226 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 11227 QualType ParamType = Call->getArg(0)->getType(); 11228 11229 // Unsigned types cannot be negative. 
Suggest removing the absolute value
11230 // function call.
11231 if (ArgType->isUnsignedIntegerType()) {
11232 StringRef FunctionName =
11233 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
11234 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
11235 Diag(Call->getExprLoc(), diag::note_remove_abs)
11236 << FunctionName
11237 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
11238 return;
11239 }
11240
11241 // Taking the absolute value of a pointer is very suspicious; the caller
11242 // probably wanted to index into an array, dereference a pointer, call a
11243 // function, etc.
11243 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
11244 unsigned DiagType = 0;
11245 if (ArgType->isFunctionType())
11246 DiagType = 1;
11247 else if (ArgType->isArrayType())
11248 DiagType = 2;
11249
11250 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
11251 return;
11252 }
11253
11254 // std::abs has overloads which prevent most of the absolute value problems
11255 // from occurring.
11256 if (IsStdAbs)
11257 return;
11258
11259 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
11260 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
11261
11262 // The argument and parameter are the same kind. Check if they are the right
11263 // size.
11264 if (ArgValueKind == ParamValueKind) {
11265 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
11266 return;
11267
11268 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
11269 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
11270 << FDecl << ArgType << ParamType;
11271
11272 if (NewAbsKind == 0)
11273 return;
11274
11275 emitReplacement(*this, Call->getExprLoc(),
11276 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
11277 return;
11278 }
11279
11280 // ArgValueKind != ParamValueKind
11281 // The wrong type of absolute value function was used. Attempt to find the
11282 // proper one.
11283 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
11284 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
11285 if (NewAbsKind == 0)
11286 return;
11287
11288 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
11289 << FDecl << ParamValueKind << ArgValueKind;
11290
11291 emitReplacement(*this, Call->getExprLoc(),
11292 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
11293 }
11294
11295 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
11296 void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
11297 const FunctionDecl *FDecl) {
11298 if (!Call || !FDecl) return;
11299
11300 // Ignore template specializations and macros.
11301 if (inTemplateInstantiation()) return;
11302 if (Call->getExprLoc().isMacroID()) return;
11303
11304 // Only care about the one-template-argument, two-parameter form of std::max.
11305 if (Call->getNumArgs() != 2) return;
11306 if (!IsStdFunction(FDecl, "max")) return;
11307 const auto *ArgList = FDecl->getTemplateSpecializationArgs();
11308 if (!ArgList) return;
11309 if (ArgList->size() != 1) return;
11310
11311 // Check that the template type argument is an unsigned integer.
11312 const auto &TA = ArgList->get(0);
11313 if (TA.getKind() != TemplateArgument::Type) return;
11314 QualType ArgType = TA.getAsType();
11315 if (!ArgType->isUnsignedIntegerType()) return;
11316
11317 // See if either argument is a literal zero.
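// Illustrative example (not from the original source): a call such as
//   std::max(0u, count);
// always yields 'count', so the diagnostic below suggests removing the
// std::max call entirely.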
11318 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 11319 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 11320 if (!MTE) return false; 11321 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 11322 if (!Num) return false; 11323 if (Num->getValue() != 0) return false; 11324 return true; 11325 }; 11326 11327 const Expr *FirstArg = Call->getArg(0); 11328 const Expr *SecondArg = Call->getArg(1); 11329 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 11330 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 11331 11332 // Only warn when exactly one argument is zero. 11333 if (IsFirstArgZero == IsSecondArgZero) return; 11334 11335 SourceRange FirstRange = FirstArg->getSourceRange(); 11336 SourceRange SecondRange = SecondArg->getSourceRange(); 11337 11338 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 11339 11340 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 11341 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 11342 11343 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 11344 SourceRange RemovalRange; 11345 if (IsFirstArgZero) { 11346 RemovalRange = SourceRange(FirstRange.getBegin(), 11347 SecondRange.getBegin().getLocWithOffset(-1)); 11348 } else { 11349 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 11350 SecondRange.getEnd()); 11351 } 11352 11353 Diag(Call->getExprLoc(), diag::note_remove_max_call) 11354 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 11355 << FixItHint::CreateRemoval(RemovalRange); 11356 } 11357 11358 //===--- CHECK: Standard memory functions ---------------------------------===// 11359 11360 /// Takes the expression passed to the size_t parameter of functions 11361 /// such as memcmp, strncat, etc and warns if it's a comparison. 11362 /// 11363 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 11364 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 11365 IdentifierInfo *FnName, 11366 SourceLocation FnLoc, 11367 SourceLocation RParenLoc) { 11368 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 11369 if (!Size) 11370 return false; 11371 11372 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 11373 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 11374 return false; 11375 11376 SourceRange SizeRange = Size->getSourceRange(); 11377 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 11378 << SizeRange << FnName; 11379 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 11380 << FnName 11381 << FixItHint::CreateInsertion( 11382 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 11383 << FixItHint::CreateRemoval(RParenLoc); 11384 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 11385 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 11386 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 11387 ")"); 11388 11389 return true; 11390 } 11391 11392 /// Determine whether the given type is or contains a dynamic class type 11393 /// (e.g., whether it has a vtable). 11394 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 11395 bool &IsContained) { 11396 // Look through array types while ignoring qualifiers. 11397 const Type *Ty = T->getBaseElementTypeUnsafe(); 11398 IsContained = false; 11399 11400 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 11401 RD = RD ? 
RD->getDefinition() : nullptr; 11402 if (!RD || RD->isInvalidDecl()) 11403 return nullptr; 11404 11405 if (RD->isDynamicClass()) 11406 return RD; 11407 11408 // Check all the fields. If any bases were dynamic, the class is dynamic. 11409 // It's impossible for a class to transitively contain itself by value, so 11410 // infinite recursion is impossible. 11411 for (auto *FD : RD->fields()) { 11412 bool SubContained; 11413 if (const CXXRecordDecl *ContainedRD = 11414 getContainedDynamicClass(FD->getType(), SubContained)) { 11415 IsContained = true; 11416 return ContainedRD; 11417 } 11418 } 11419 11420 return nullptr; 11421 } 11422 11423 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 11424 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 11425 if (Unary->getKind() == UETT_SizeOf) 11426 return Unary; 11427 return nullptr; 11428 } 11429 11430 /// If E is a sizeof expression, returns its argument expression, 11431 /// otherwise returns NULL. 11432 static const Expr *getSizeOfExprArg(const Expr *E) { 11433 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 11434 if (!SizeOf->isArgumentType()) 11435 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 11436 return nullptr; 11437 } 11438 11439 /// If E is a sizeof expression, returns its argument type. 11440 static QualType getSizeOfArgType(const Expr *E) { 11441 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 11442 return SizeOf->getTypeOfArgument(); 11443 return QualType(); 11444 } 11445 11446 namespace { 11447 11448 struct SearchNonTrivialToInitializeField 11449 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 11450 using Super = 11451 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 11452 11453 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 11454 11455 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 11456 SourceLocation SL) { 11457 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 11458 asDerived().visitArray(PDIK, AT, SL); 11459 return; 11460 } 11461 11462 Super::visitWithKind(PDIK, FT, SL); 11463 } 11464 11465 void visitARCStrong(QualType FT, SourceLocation SL) { 11466 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 11467 } 11468 void visitARCWeak(QualType FT, SourceLocation SL) { 11469 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 11470 } 11471 void visitStruct(QualType FT, SourceLocation SL) { 11472 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 11473 visit(FD->getType(), FD->getLocation()); 11474 } 11475 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 11476 const ArrayType *AT, SourceLocation SL) { 11477 visit(getContext().getBaseElementType(AT), SL); 11478 } 11479 void visitTrivial(QualType FT, SourceLocation SL) {} 11480 11481 static void diag(QualType RT, const Expr *E, Sema &S) { 11482 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 11483 } 11484 11485 ASTContext &getContext() { return S.getASTContext(); } 11486 11487 const Expr *E; 11488 Sema &S; 11489 }; 11490 11491 struct SearchNonTrivialToCopyField 11492 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 11493 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 11494 11495 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 11496 11497 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 11498 SourceLocation SL) { 11499 if 
(const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 11500 asDerived().visitArray(PCK, AT, SL); 11501 return; 11502 } 11503 11504 Super::visitWithKind(PCK, FT, SL); 11505 } 11506 11507 void visitARCStrong(QualType FT, SourceLocation SL) { 11508 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 11509 } 11510 void visitARCWeak(QualType FT, SourceLocation SL) { 11511 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 11512 } 11513 void visitStruct(QualType FT, SourceLocation SL) { 11514 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 11515 visit(FD->getType(), FD->getLocation()); 11516 } 11517 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 11518 SourceLocation SL) { 11519 visit(getContext().getBaseElementType(AT), SL); 11520 } 11521 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 11522 SourceLocation SL) {} 11523 void visitTrivial(QualType FT, SourceLocation SL) {} 11524 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 11525 11526 static void diag(QualType RT, const Expr *E, Sema &S) { 11527 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 11528 } 11529 11530 ASTContext &getContext() { return S.getASTContext(); } 11531 11532 const Expr *E; 11533 Sema &S; 11534 }; 11535 11536 } 11537 11538 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 11539 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 11540 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 11541 11542 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 11543 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 11544 return false; 11545 11546 return doesExprLikelyComputeSize(BO->getLHS()) || 11547 doesExprLikelyComputeSize(BO->getRHS()); 11548 } 11549 11550 return getAsSizeOfExpr(SizeofExpr) != nullptr; 11551 } 11552 11553 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 11554 /// 11555 /// \code 11556 /// #define MACRO 0 11557 /// foo(MACRO); 11558 /// foo(0); 11559 /// \endcode 11560 /// 11561 /// This should return true for the first call to foo, but not for the second 11562 /// (regardless of whether foo is a macro or function). 11563 static bool isArgumentExpandedFromMacro(SourceManager &SM, 11564 SourceLocation CallLoc, 11565 SourceLocation ArgLoc) { 11566 if (!CallLoc.isMacroID()) 11567 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 11568 11569 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 11570 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 11571 } 11572 11573 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 11574 /// last two arguments transposed. 11575 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 11576 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 11577 return; 11578 11579 const Expr *SizeArg = 11580 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 11581 11582 auto isLiteralZero = [](const Expr *E) { 11583 return (isa<IntegerLiteral>(E) && 11584 cast<IntegerLiteral>(E)->getValue() == 0) || 11585 (isa<CharacterLiteral>(E) && 11586 cast<CharacterLiteral>(E)->getValue() == 0); 11587 }; 11588 11589 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 
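// Illustrative example (not from the original source): calls such as
//   memset(buf, 0xff, 0);  // or bzero(ptr, 0);
// are flagged below, since a literal zero length usually means the size and
// value arguments were transposed.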
11590 SourceLocation CallLoc = Call->getRParenLoc(); 11591 SourceManager &SM = S.getSourceManager(); 11592 if (isLiteralZero(SizeArg) && 11593 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 11594 11595 SourceLocation DiagLoc = SizeArg->getExprLoc(); 11596 11597 // Some platforms #define bzero to __builtin_memset. See if this is the 11598 // case, and if so, emit a better diagnostic. 11599 if (BId == Builtin::BIbzero || 11600 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 11601 CallLoc, SM, S.getLangOpts()) == "bzero")) { 11602 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 11603 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 11604 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 11605 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 11606 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 11607 } 11608 return; 11609 } 11610 11611 // If the second argument to a memset is a sizeof expression and the third 11612 // isn't, this is also likely an error. This should catch 11613 // 'memset(buf, sizeof(buf), 0xff)'. 11614 if (BId == Builtin::BImemset && 11615 doesExprLikelyComputeSize(Call->getArg(1)) && 11616 !doesExprLikelyComputeSize(Call->getArg(2))) { 11617 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 11618 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 11619 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 11620 return; 11621 } 11622 } 11623 11624 /// Check for dangerous or invalid arguments to memset(). 11625 /// 11626 /// This issues warnings on known problematic, dangerous or unspecified 11627 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 11628 /// function calls. 11629 /// 11630 /// \param Call The call expression to diagnose. 11631 void Sema::CheckMemaccessArguments(const CallExpr *Call, 11632 unsigned BId, 11633 IdentifierInfo *FnName) { 11634 assert(BId != 0); 11635 11636 // It is possible to have a non-standard definition of memset. Validate 11637 // we have enough arguments, and if not, abort further checking. 11638 unsigned ExpectedNumArgs = 11639 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 11640 if (Call->getNumArgs() < ExpectedNumArgs) 11641 return; 11642 11643 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 11644 BId == Builtin::BIstrndup ? 1 : 2); 11645 unsigned LenArg = 11646 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 11647 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 11648 11649 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 11650 Call->getBeginLoc(), Call->getRParenLoc())) 11651 return; 11652 11653 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 11654 CheckMemaccessSize(*this, BId, Call); 11655 11656 // We have special checking when the length is a sizeof expression. 11657 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 11658 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 11659 llvm::FoldingSetNodeID SizeOfArgID; 11660 11661 // Although widely used, 'bzero' is not a standard function. Be more strict 11662 // with the argument types before allowing diagnostics and only allow the 11663 // form bzero(ptr, sizeof(...)). 
11664 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 11665 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 11666 return; 11667 11668 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 11669 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 11670 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 11671 11672 QualType DestTy = Dest->getType(); 11673 QualType PointeeTy; 11674 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 11675 PointeeTy = DestPtrTy->getPointeeType(); 11676 11677 // Never warn about void type pointers. This can be used to suppress 11678 // false positives. 11679 if (PointeeTy->isVoidType()) 11680 continue; 11681 11682 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 11683 // actually comparing the expressions for equality. Because computing the 11684 // expression IDs can be expensive, we only do this if the diagnostic is 11685 // enabled. 11686 if (SizeOfArg && 11687 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 11688 SizeOfArg->getExprLoc())) { 11689 // We only compute IDs for expressions if the warning is enabled, and 11690 // cache the sizeof arg's ID. 11691 if (SizeOfArgID == llvm::FoldingSetNodeID()) 11692 SizeOfArg->Profile(SizeOfArgID, Context, true); 11693 llvm::FoldingSetNodeID DestID; 11694 Dest->Profile(DestID, Context, true); 11695 if (DestID == SizeOfArgID) { 11696 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 11697 // over sizeof(src) as well. 11698 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 11699 StringRef ReadableName = FnName->getName(); 11700 11701 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 11702 if (UnaryOp->getOpcode() == UO_AddrOf) 11703 ActionIdx = 1; // If its an address-of operator, just remove it. 11704 if (!PointeeTy->isIncompleteType() && 11705 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 11706 ActionIdx = 2; // If the pointee's size is sizeof(char), 11707 // suggest an explicit length. 11708 11709 // If the function is defined as a builtin macro, do not show macro 11710 // expansion. 11711 SourceLocation SL = SizeOfArg->getExprLoc(); 11712 SourceRange DSR = Dest->getSourceRange(); 11713 SourceRange SSR = SizeOfArg->getSourceRange(); 11714 SourceManager &SM = getSourceManager(); 11715 11716 if (SM.isMacroArgExpansion(SL)) { 11717 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 11718 SL = SM.getSpellingLoc(SL); 11719 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 11720 SM.getSpellingLoc(DSR.getEnd())); 11721 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 11722 SM.getSpellingLoc(SSR.getEnd())); 11723 } 11724 11725 DiagRuntimeBehavior(SL, SizeOfArg, 11726 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 11727 << ReadableName 11728 << PointeeTy 11729 << DestTy 11730 << DSR 11731 << SSR); 11732 DiagRuntimeBehavior(SL, SizeOfArg, 11733 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 11734 << ActionIdx 11735 << SSR); 11736 11737 break; 11738 } 11739 } 11740 11741 // Also check for cases where the sizeof argument is the exact same 11742 // type as the memory argument, and where it points to a user-defined 11743 // record type. 
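// Illustrative example (not from the original source): given 'S *ps', the
// call
//   memset(ps, 0, sizeof(S *));
// passes the size of the pointer type rather than of the struct, which the
// branch below reports for record pointee types.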
11744 if (SizeOfArgTy != QualType()) { 11745 if (PointeeTy->isRecordType() && 11746 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 11747 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 11748 PDiag(diag::warn_sizeof_pointer_type_memaccess) 11749 << FnName << SizeOfArgTy << ArgIdx 11750 << PointeeTy << Dest->getSourceRange() 11751 << LenExpr->getSourceRange()); 11752 break; 11753 } 11754 } 11755 } else if (DestTy->isArrayType()) { 11756 PointeeTy = DestTy; 11757 } 11758 11759 if (PointeeTy == QualType()) 11760 continue; 11761 11762 // Always complain about dynamic classes. 11763 bool IsContained; 11764 if (const CXXRecordDecl *ContainedRD = 11765 getContainedDynamicClass(PointeeTy, IsContained)) { 11766 11767 unsigned OperationType = 0; 11768 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 11769 // "overwritten" if we're warning about the destination for any call 11770 // but memcmp; otherwise a verb appropriate to the call. 11771 if (ArgIdx != 0 || IsCmp) { 11772 if (BId == Builtin::BImemcpy) 11773 OperationType = 1; 11774 else if(BId == Builtin::BImemmove) 11775 OperationType = 2; 11776 else if (IsCmp) 11777 OperationType = 3; 11778 } 11779 11780 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11781 PDiag(diag::warn_dyn_class_memaccess) 11782 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 11783 << IsContained << ContainedRD << OperationType 11784 << Call->getCallee()->getSourceRange()); 11785 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 11786 BId != Builtin::BImemset) 11787 DiagRuntimeBehavior( 11788 Dest->getExprLoc(), Dest, 11789 PDiag(diag::warn_arc_object_memaccess) 11790 << ArgIdx << FnName << PointeeTy 11791 << Call->getCallee()->getSourceRange()); 11792 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 11793 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 11794 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 11795 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11796 PDiag(diag::warn_cstruct_memaccess) 11797 << ArgIdx << FnName << PointeeTy << 0); 11798 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 11799 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 11800 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 11801 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11802 PDiag(diag::warn_cstruct_memaccess) 11803 << ArgIdx << FnName << PointeeTy << 1); 11804 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 11805 } else { 11806 continue; 11807 } 11808 } else 11809 continue; 11810 11811 DiagRuntimeBehavior( 11812 Dest->getExprLoc(), Dest, 11813 PDiag(diag::note_bad_memaccess_silence) 11814 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 11815 break; 11816 } 11817 } 11818 11819 // A little helper routine: ignore addition and subtraction of integer literals. 11820 // This intentionally does not ignore all integer constant expressions because 11821 // we don't want to remove sizeof(). 
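// Illustrative example (not from the original source): 'sizeof(dst) - 1' is
// reduced to 'sizeof(dst)' by the helper below, while expressions such as
// 'n * sizeof(dst)' are left untouched.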
11822 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 11823 Ex = Ex->IgnoreParenCasts(); 11824 11825 while (true) { 11826 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 11827 if (!BO || !BO->isAdditiveOp()) 11828 break; 11829 11830 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 11831 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 11832 11833 if (isa<IntegerLiteral>(RHS)) 11834 Ex = LHS; 11835 else if (isa<IntegerLiteral>(LHS)) 11836 Ex = RHS; 11837 else 11838 break; 11839 } 11840 11841 return Ex; 11842 } 11843 11844 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 11845 ASTContext &Context) { 11846 // Only handle constant-sized or VLAs, but not flexible members. 11847 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 11848 // Only issue the FIXIT for arrays of size > 1. 11849 if (CAT->getSize().getSExtValue() <= 1) 11850 return false; 11851 } else if (!Ty->isVariableArrayType()) { 11852 return false; 11853 } 11854 return true; 11855 } 11856 11857 // Warn if the user has made the 'size' argument to strlcpy or strlcat 11858 // be the size of the source, instead of the destination. 11859 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 11860 IdentifierInfo *FnName) { 11861 11862 // Don't crash if the user has the wrong number of arguments 11863 unsigned NumArgs = Call->getNumArgs(); 11864 if ((NumArgs != 3) && (NumArgs != 4)) 11865 return; 11866 11867 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 11868 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 11869 const Expr *CompareWithSrc = nullptr; 11870 11871 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 11872 Call->getBeginLoc(), Call->getRParenLoc())) 11873 return; 11874 11875 // Look for 'strlcpy(dst, x, sizeof(x))' 11876 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 11877 CompareWithSrc = Ex; 11878 else { 11879 // Look for 'strlcpy(dst, x, strlen(x))' 11880 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 11881 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 11882 SizeCall->getNumArgs() == 1) 11883 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 11884 } 11885 } 11886 11887 if (!CompareWithSrc) 11888 return; 11889 11890 // Determine if the argument to sizeof/strlen is equal to the source 11891 // argument. In principle there's all kinds of things you could do 11892 // here, for instance creating an == expression and evaluating it with 11893 // EvaluateAsBooleanCondition, but this uses a more direct technique: 11894 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 11895 if (!SrcArgDRE) 11896 return; 11897 11898 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 11899 if (!CompareWithSrcDRE || 11900 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 11901 return; 11902 11903 const Expr *OriginalSizeArg = Call->getArg(2); 11904 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 11905 << OriginalSizeArg->getSourceRange() << FnName; 11906 11907 // Output a FIXIT hint if the destination is an array (rather than a 11908 // pointer to an array). This could be enhanced to handle some 11909 // pointers if we know the actual size, like if DstArg is 'array+2' 11910 // we could say 'sizeof(array)-2'. 
11911   const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
11912   if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
11913     return;
11914 
11915   SmallString<128> sizeString;
11916   llvm::raw_svector_ostream OS(sizeString);
11917   OS << "sizeof(";
11918   DstArg->printPretty(OS, nullptr, getPrintingPolicy());
11919   OS << ")";
11920 
11921   Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
11922       << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
11923                                       OS.str());
11924 }
11925 
11926 /// Check if two expressions refer to the same declaration.
11927 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
11928   if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
11929     if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
11930       return D1->getDecl() == D2->getDecl();
11931   return false;
11932 }
11933 
11934 static const Expr *getStrlenExprArg(const Expr *E) {
11935   if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
11936     const FunctionDecl *FD = CE->getDirectCallee();
11937     if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
11938       return nullptr;
11939     return CE->getArg(0)->IgnoreParenCasts();
11940   }
11941   return nullptr;
11942 }
11943 
11944 // Warn on anti-patterns as the 'size' argument to strncat.
11945 // The correct size argument should look like the following:
11946 //   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
11947 void Sema::CheckStrncatArguments(const CallExpr *CE,
11948                                  IdentifierInfo *FnName) {
11949   // Don't crash if the user has the wrong number of arguments.
11950   if (CE->getNumArgs() < 3)
11951     return;
11952   const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
11953   const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
11954   const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
11955 
11956   if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
11957                                      CE->getRParenLoc()))
11958     return;
11959 
11960   // Identify common expressions, which are wrongly used as the size argument
11961   // to strncat and may lead to buffer overflows.
11962   unsigned PatternType = 0;
11963   if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
11964     // - sizeof(dst)
11965     if (referToTheSameDecl(SizeOfArg, DstArg))
11966       PatternType = 1;
11967     // - sizeof(src)
11968     else if (referToTheSameDecl(SizeOfArg, SrcArg))
11969       PatternType = 2;
11970   } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
11971     if (BE->getOpcode() == BO_Sub) {
11972       const Expr *L = BE->getLHS()->IgnoreParenCasts();
11973       const Expr *R = BE->getRHS()->IgnoreParenCasts();
11974       // - sizeof(dst) - strlen(dst)
11975       if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
11976           referToTheSameDecl(DstArg, getStrlenExprArg(R)))
11977         PatternType = 1;
11978       // - sizeof(src) - (anything)
11979       else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
11980         PatternType = 2;
11981     }
11982   }
11983 
11984   if (PatternType == 0)
11985     return;
11986 
11987   // Generate the diagnostic.
11988   SourceLocation SL = LenArg->getBeginLoc();
11989   SourceRange SR = LenArg->getSourceRange();
11990   SourceManager &SM = getSourceManager();
11991 
11992   // If the function is defined as a builtin macro, do not show macro expansion.
11993   if (SM.isMacroArgExpansion(SL)) {
11994     SL = SM.getSpellingLoc(SL);
11995     SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
11996                      SM.getSpellingLoc(SR.getEnd()));
11997   }
11998 
11999   // Check if the destination is an array (rather than a pointer to an array).
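  // Only a known-size array destination gets the 'sizeof(dst) - strlen(dst) - 1'
  // fix-it below; otherwise we emit just the warning without a suggestion.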
12000 QualType DstTy = DstArg->getType(); 12001 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 12002 Context); 12003 if (!isKnownSizeArray) { 12004 if (PatternType == 1) 12005 Diag(SL, diag::warn_strncat_wrong_size) << SR; 12006 else 12007 Diag(SL, diag::warn_strncat_src_size) << SR; 12008 return; 12009 } 12010 12011 if (PatternType == 1) 12012 Diag(SL, diag::warn_strncat_large_size) << SR; 12013 else 12014 Diag(SL, diag::warn_strncat_src_size) << SR; 12015 12016 SmallString<128> sizeString; 12017 llvm::raw_svector_ostream OS(sizeString); 12018 OS << "sizeof("; 12019 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 12020 OS << ") - "; 12021 OS << "strlen("; 12022 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 12023 OS << ") - 1"; 12024 12025 Diag(SL, diag::note_strncat_wrong_size) 12026 << FixItHint::CreateReplacement(SR, OS.str()); 12027 } 12028 12029 namespace { 12030 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 12031 const UnaryOperator *UnaryExpr, const Decl *D) { 12032 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 12033 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 12034 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 12035 return; 12036 } 12037 } 12038 12039 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 12040 const UnaryOperator *UnaryExpr) { 12041 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 12042 const Decl *D = Lvalue->getDecl(); 12043 if (isa<DeclaratorDecl>(D)) 12044 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 12045 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 12046 } 12047 12048 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 12049 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 12050 Lvalue->getMemberDecl()); 12051 } 12052 12053 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 12054 const UnaryOperator *UnaryExpr) { 12055 const auto *Lambda = dyn_cast<LambdaExpr>( 12056 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 12057 if (!Lambda) 12058 return; 12059 12060 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 12061 << CalleeName << 2 /*object: lambda expression*/; 12062 } 12063 12064 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 12065 const DeclRefExpr *Lvalue) { 12066 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 12067 if (Var == nullptr) 12068 return; 12069 12070 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 12071 << CalleeName << 0 /*object: */ << Var; 12072 } 12073 12074 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 12075 const CastExpr *Cast) { 12076 SmallString<128> SizeString; 12077 llvm::raw_svector_ostream OS(SizeString); 12078 12079 clang::CastKind Kind = Cast->getCastKind(); 12080 if (Kind == clang::CK_BitCast && 12081 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 12082 return; 12083 if (Kind == clang::CK_IntegralToPointer && 12084 !isa<IntegerLiteral>( 12085 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 12086 return; 12087 12088 switch (Cast->getCastKind()) { 12089 case clang::CK_BitCast: 12090 case clang::CK_IntegralToPointer: 12091 case clang::CK_FunctionToPointerDecay: 12092 OS << '\''; 12093 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 12094 OS << '\''; 12095 break; 12096 default: 12097 return; 12098 } 12099 12100 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 
12101       << CalleeName << 0 /*object: */ << OS.str();
12102 }
12103 } // namespace
12104 
12105 /// Alerts the user that they are attempting to free a non-malloc'd object.
12106 void Sema::CheckFreeArguments(const CallExpr *E) {
12107   const std::string CalleeName =
12108       cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
12109 
12110   { // Prefer something that doesn't involve a cast to make things simpler.
12111     const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
12112     if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
12113       switch (UnaryExpr->getOpcode()) {
12114       case UnaryOperator::Opcode::UO_AddrOf:
12115         return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
12116       case UnaryOperator::Opcode::UO_Plus:
12117         return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr);
12118       default:
12119         break;
12120       }
12121 
12122     if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
12123       if (Lvalue->getType()->isArrayType())
12124         return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);
12125 
12126     if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) {
12127       Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
12128           << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
12129       return;
12130     }
12131 
12132     if (isa<BlockExpr>(Arg)) {
12133       Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
12134           << CalleeName << 1 /*object: block*/;
12135       return;
12136     }
12137   }
12138   // Maybe the cast was important, check after the other cases.
12139   if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0)))
12140     return CheckFreeArgumentsCast(*this, CalleeName, Cast);
12141 }
12142 
12143 void
12144 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
12145                          SourceLocation ReturnLoc,
12146                          bool isObjCMethod,
12147                          const AttrVec *Attrs,
12148                          const FunctionDecl *FD) {
12149   // Check if the return value is null but should not be.
12150   if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
12151        (!isObjCMethod && isNonNullType(lhsType))) &&
12152       CheckNonNullExpr(*this, RetValExp))
12153     Diag(ReturnLoc, diag::warn_null_ret)
12154         << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
12155 
12156   // C++11 [basic.stc.dynamic.allocation]p4:
12157   //   If an allocation function declared with a non-throwing
12158   //   exception-specification fails to allocate storage, it shall return
12159   //   a null pointer. Any other allocation function that fails to allocate
12160   //   storage shall indicate failure only by throwing an exception [...]
12161   if (FD) {
12162     OverloadedOperatorKind Op = FD->getOverloadedOperator();
12163     if (Op == OO_New || Op == OO_Array_New) {
12164       const FunctionProtoType *Proto
12165         = FD->getType()->castAs<FunctionProtoType>();
12166       if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
12167           CheckNonNullExpr(*this, RetValExp))
12168         Diag(ReturnLoc, diag::warn_operator_new_returns_null)
12169           << FD << getLangOpts().CPlusPlus11;
12170     }
12171   }
12172 
12173   // PPC MMA non-pointer types are not allowed as the return type. Checking the
12174   // type here prevents the user from using a PPC MMA type as a trailing return type.
12175   if (Context.getTargetInfo().getTriple().isPPC64())
12176     CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
12177 }
12178 
12179 /// Check for comparisons of floating-point values using == and !=. Issue a
12180 /// warning if the comparison is not likely to do what the programmer intended.
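/// For example, '(float)x == 0.1' can never be true because 0.1 has no exact
/// 'float' representation, so the comparison is diagnosed as always false
/// (always true for '!=').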
12181 void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS, 12182 BinaryOperatorKind Opcode) { 12183 if (!BinaryOperator::isEqualityOp(Opcode)) 12184 return; 12185 12186 // Match and capture subexpressions such as "(float) X == 0.1". 12187 FloatingLiteral *FPLiteral; 12188 CastExpr *FPCast; 12189 auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) { 12190 FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens()); 12191 FPCast = dyn_cast<CastExpr>(R->IgnoreParens()); 12192 return FPLiteral && FPCast; 12193 }; 12194 12195 if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) { 12196 auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>(); 12197 auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>(); 12198 if (SourceTy && TargetTy && SourceTy->isFloatingPoint() && 12199 TargetTy->isFloatingPoint()) { 12200 bool Lossy; 12201 llvm::APFloat TargetC = FPLiteral->getValue(); 12202 TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)), 12203 llvm::APFloat::rmNearestTiesToEven, &Lossy); 12204 if (Lossy) { 12205 // If the literal cannot be represented in the source type, then a 12206 // check for == is always false and check for != is always true. 12207 Diag(Loc, diag::warn_float_compare_literal) 12208 << (Opcode == BO_EQ) << QualType(SourceTy, 0) 12209 << LHS->getSourceRange() << RHS->getSourceRange(); 12210 return; 12211 } 12212 } 12213 } 12214 12215 // Match a more general floating-point equality comparison (-Wfloat-equal). 12216 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 12217 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 12218 12219 // Special case: check for x == x (which is OK). 12220 // Do not emit warnings for such cases. 12221 if (auto *DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 12222 if (auto *DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 12223 if (DRL->getDecl() == DRR->getDecl()) 12224 return; 12225 12226 // Special case: check for comparisons against literals that can be exactly 12227 // represented by APFloat. In such cases, do not emit a warning. This 12228 // is a heuristic: often comparison against such literals are used to 12229 // detect if a value in a variable has not changed. This clearly can 12230 // lead to false negatives. 12231 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 12232 if (FLL->isExact()) 12233 return; 12234 } else 12235 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 12236 if (FLR->isExact()) 12237 return; 12238 12239 // Check for comparisons with builtin types. 12240 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 12241 if (CL->getBuiltinCallee()) 12242 return; 12243 12244 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 12245 if (CR->getBuiltinCallee()) 12246 return; 12247 12248 // Emit the diagnostic. 12249 Diag(Loc, diag::warn_floatingpoint_eq) 12250 << LHS->getSourceRange() << RHS->getSourceRange(); 12251 } 12252 12253 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 12254 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 12255 12256 namespace { 12257 12258 /// Structure recording the 'active' range of an integer-valued 12259 /// expression. 12260 struct IntRange { 12261 /// The number of bits active in the int. Note that this includes exactly one 12262 /// sign bit if !NonNegative. 12263 unsigned Width; 12264 12265 /// True if the int is known not to have negative values. 
If so, all leading 12266 /// bits before Width are known zero, otherwise they are known to be the 12267 /// same as the MSB within Width. 12268 bool NonNegative; 12269 12270 IntRange(unsigned Width, bool NonNegative) 12271 : Width(Width), NonNegative(NonNegative) {} 12272 12273 /// Number of bits excluding the sign bit. 12274 unsigned valueBits() const { 12275 return NonNegative ? Width : Width - 1; 12276 } 12277 12278 /// Returns the range of the bool type. 12279 static IntRange forBoolType() { 12280 return IntRange(1, true); 12281 } 12282 12283 /// Returns the range of an opaque value of the given integral type. 12284 static IntRange forValueOfType(ASTContext &C, QualType T) { 12285 return forValueOfCanonicalType(C, 12286 T->getCanonicalTypeInternal().getTypePtr()); 12287 } 12288 12289 /// Returns the range of an opaque value of a canonical integral type. 12290 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 12291 assert(T->isCanonicalUnqualified()); 12292 12293 if (const VectorType *VT = dyn_cast<VectorType>(T)) 12294 T = VT->getElementType().getTypePtr(); 12295 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 12296 T = CT->getElementType().getTypePtr(); 12297 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 12298 T = AT->getValueType().getTypePtr(); 12299 12300 if (!C.getLangOpts().CPlusPlus) { 12301 // For enum types in C code, use the underlying datatype. 12302 if (const EnumType *ET = dyn_cast<EnumType>(T)) 12303 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 12304 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 12305 // For enum types in C++, use the known bit width of the enumerators. 12306 EnumDecl *Enum = ET->getDecl(); 12307 // In C++11, enums can have a fixed underlying type. Use this type to 12308 // compute the range. 12309 if (Enum->isFixed()) { 12310 return IntRange(C.getIntWidth(QualType(T, 0)), 12311 !ET->isSignedIntegerOrEnumerationType()); 12312 } 12313 12314 unsigned NumPositive = Enum->getNumPositiveBits(); 12315 unsigned NumNegative = Enum->getNumNegativeBits(); 12316 12317 if (NumNegative == 0) 12318 return IntRange(NumPositive, true/*NonNegative*/); 12319 else 12320 return IntRange(std::max(NumPositive + 1, NumNegative), 12321 false/*NonNegative*/); 12322 } 12323 12324 if (const auto *EIT = dyn_cast<BitIntType>(T)) 12325 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 12326 12327 const BuiltinType *BT = cast<BuiltinType>(T); 12328 assert(BT->isInteger()); 12329 12330 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 12331 } 12332 12333 /// Returns the "target" range of a canonical integral type, i.e. 12334 /// the range of values expressible in the type. 12335 /// 12336 /// This matches forValueOfCanonicalType except that enums have the 12337 /// full range of their type, not the range of their enumerators. 
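  /// For example, a target of type 'enum E { A, B }' is given the full range
  /// of its underlying integer type rather than the 1-bit range of its
  /// enumerators.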
12338 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 12339 assert(T->isCanonicalUnqualified()); 12340 12341 if (const VectorType *VT = dyn_cast<VectorType>(T)) 12342 T = VT->getElementType().getTypePtr(); 12343 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 12344 T = CT->getElementType().getTypePtr(); 12345 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 12346 T = AT->getValueType().getTypePtr(); 12347 if (const EnumType *ET = dyn_cast<EnumType>(T)) 12348 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 12349 12350 if (const auto *EIT = dyn_cast<BitIntType>(T)) 12351 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 12352 12353 const BuiltinType *BT = cast<BuiltinType>(T); 12354 assert(BT->isInteger()); 12355 12356 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 12357 } 12358 12359 /// Returns the supremum of two ranges: i.e. their conservative merge. 12360 static IntRange join(IntRange L, IntRange R) { 12361 bool Unsigned = L.NonNegative && R.NonNegative; 12362 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 12363 L.NonNegative && R.NonNegative); 12364 } 12365 12366 /// Return the range of a bitwise-AND of the two ranges. 12367 static IntRange bit_and(IntRange L, IntRange R) { 12368 unsigned Bits = std::max(L.Width, R.Width); 12369 bool NonNegative = false; 12370 if (L.NonNegative) { 12371 Bits = std::min(Bits, L.Width); 12372 NonNegative = true; 12373 } 12374 if (R.NonNegative) { 12375 Bits = std::min(Bits, R.Width); 12376 NonNegative = true; 12377 } 12378 return IntRange(Bits, NonNegative); 12379 } 12380 12381 /// Return the range of a sum of the two ranges. 12382 static IntRange sum(IntRange L, IntRange R) { 12383 bool Unsigned = L.NonNegative && R.NonNegative; 12384 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 12385 Unsigned); 12386 } 12387 12388 /// Return the range of a difference of the two ranges. 12389 static IntRange difference(IntRange L, IntRange R) { 12390 // We need a 1-bit-wider range if: 12391 // 1) LHS can be negative: least value can be reduced. 12392 // 2) RHS can be negative: greatest value can be increased. 12393 bool CanWiden = !L.NonNegative || !R.NonNegative; 12394 bool Unsigned = L.NonNegative && R.Width == 0; 12395 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 12396 !Unsigned, 12397 Unsigned); 12398 } 12399 12400 /// Return the range of a product of the two ranges. 12401 static IntRange product(IntRange L, IntRange R) { 12402 // If both LHS and RHS can be negative, we can form 12403 // -2^L * -2^R = 2^(L + R) 12404 // which requires L + R + 1 value bits to represent. 12405 bool CanWiden = !L.NonNegative && !R.NonNegative; 12406 bool Unsigned = L.NonNegative && R.NonNegative; 12407 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 12408 Unsigned); 12409 } 12410 12411 /// Return the range of a remainder operation between the two ranges. 12412 static IntRange rem(IntRange L, IntRange R) { 12413 // The result of a remainder can't be larger than the result of 12414 // either side. The sign of the result is the sign of the LHS. 
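    // (Since C99/C++11, '%' truncates toward zero, e.g. -7 % 3 == -1, so a
    // negative LHS yields a result that is zero or negative.)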
12415 bool Unsigned = L.NonNegative; 12416 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 12417 Unsigned); 12418 } 12419 }; 12420 12421 } // namespace 12422 12423 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 12424 unsigned MaxWidth) { 12425 if (value.isSigned() && value.isNegative()) 12426 return IntRange(value.getMinSignedBits(), false); 12427 12428 if (value.getBitWidth() > MaxWidth) 12429 value = value.trunc(MaxWidth); 12430 12431 // isNonNegative() just checks the sign bit without considering 12432 // signedness. 12433 return IntRange(value.getActiveBits(), true); 12434 } 12435 12436 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 12437 unsigned MaxWidth) { 12438 if (result.isInt()) 12439 return GetValueRange(C, result.getInt(), MaxWidth); 12440 12441 if (result.isVector()) { 12442 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 12443 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 12444 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 12445 R = IntRange::join(R, El); 12446 } 12447 return R; 12448 } 12449 12450 if (result.isComplexInt()) { 12451 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 12452 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 12453 return IntRange::join(R, I); 12454 } 12455 12456 // This can happen with lossless casts to intptr_t of "based" lvalues. 12457 // Assume it might use arbitrary bits. 12458 // FIXME: The only reason we need to pass the type in here is to get 12459 // the sign right on this one case. It would be nice if APValue 12460 // preserved this. 12461 assert(result.isLValue() || result.isAddrLabelDiff()); 12462 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 12463 } 12464 12465 static QualType GetExprType(const Expr *E) { 12466 QualType Ty = E->getType(); 12467 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 12468 Ty = AtomicRHS->getValueType(); 12469 return Ty; 12470 } 12471 12472 /// Pseudo-evaluate the given integer expression, estimating the 12473 /// range of values it might take. 12474 /// 12475 /// \param MaxWidth The width to which the value will be truncated. 12476 /// \param Approximate If \c true, return a likely range for the result: in 12477 /// particular, assume that arithmetic on narrower types doesn't leave 12478 /// those types. If \c false, return a range including all possible 12479 /// result values. 12480 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 12481 bool InConstantContext, bool Approximate) { 12482 E = E->IgnoreParens(); 12483 12484 // Try a full evaluation first. 12485 Expr::EvalResult result; 12486 if (E->EvaluateAsRValue(result, C, InConstantContext)) 12487 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 12488 12489 // I think we only want to look through implicit casts here; if the 12490 // user has an explicit widening cast, we should treat the value as 12491 // being of the new, wider type. 
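  // For example, given 'char c', the implicit '(int)c' still has an 8-bit
  // range here, but an explicit '(long)c' written by the user is treated as
  // having the full range of 'long'.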
12492   if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
12493     if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
12494       return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
12495                           Approximate);
12496 
12497     IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));
12498 
12499     bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
12500                          CE->getCastKind() == CK_BooleanToSignedIntegral;
12501 
12502     // Assume that non-integer casts can span the full range of the type.
12503     if (!isIntegerCast)
12504       return OutputTypeRange;
12505 
12506     IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
12507                                      std::min(MaxWidth, OutputTypeRange.Width),
12508                                      InConstantContext, Approximate);
12509 
12510     // Bail out if the subexpr's range is as wide as the cast type.
12511     if (SubRange.Width >= OutputTypeRange.Width)
12512       return OutputTypeRange;
12513 
12514     // Otherwise, we take the smaller width, and we're non-negative if
12515     // either the output type or the subexpr is.
12516     return IntRange(SubRange.Width,
12517                     SubRange.NonNegative || OutputTypeRange.NonNegative);
12518   }
12519 
12520   if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
12521     // If we can fold the condition, just take that operand.
12522     bool CondResult;
12523     if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
12524       return GetExprRange(C,
12525                           CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
12526                           MaxWidth, InConstantContext, Approximate);
12527 
12528     // Otherwise, conservatively merge.
12529     // GetExprRange requires an integer expression, but a throw expression
12530     // results in a void type.
12531     Expr *E = CO->getTrueExpr();
12532     IntRange L = E->getType()->isVoidType()
12533                      ? IntRange{0, true}
12534                      : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
12535     E = CO->getFalseExpr();
12536     IntRange R = E->getType()->isVoidType()
12537                      ? IntRange{0, true}
12538                      : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
12539     return IntRange::join(L, R);
12540   }
12541 
12542   if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
12543     IntRange (*Combine)(IntRange, IntRange) = IntRange::join;
12544 
12545     switch (BO->getOpcode()) {
12546     case BO_Cmp:
12547       llvm_unreachable("builtin <=> should have class type");
12548 
12549     // Boolean-valued operations are single-bit and positive.
12550     case BO_LAnd:
12551     case BO_LOr:
12552     case BO_LT:
12553     case BO_GT:
12554     case BO_LE:
12555     case BO_GE:
12556     case BO_EQ:
12557     case BO_NE:
12558       return IntRange::forBoolType();
12559 
12560     // The type of the assignments is the type of the LHS, so the RHS
12561     // is not necessarily the same type.
12562     case BO_MulAssign:
12563     case BO_DivAssign:
12564     case BO_RemAssign:
12565     case BO_AddAssign:
12566     case BO_SubAssign:
12567     case BO_XorAssign:
12568     case BO_OrAssign:
12569       // TODO: bitfields?
12570       return IntRange::forValueOfType(C, GetExprType(E));
12571 
12572     // Simple assignments just pass through the RHS, which will have
12573     // been coerced to the LHS type.
12574     case BO_Assign:
12575       // TODO: bitfields?
12576       return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
12577                           Approximate);
12578 
12579     // Operations with opaque sources are black-listed.
12580     case BO_PtrMemD:
12581     case BO_PtrMemI:
12582       return IntRange::forValueOfType(C, GetExprType(E));
12583 
12584     // Bitwise-and uses the *infimum* of the two source ranges.
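    // For example, 'x & 0xff' needs at most 8 non-negative bits no matter how
    // wide 'x' is.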
12585 case BO_And: 12586 case BO_AndAssign: 12587 Combine = IntRange::bit_and; 12588 break; 12589 12590 // Left shift gets black-listed based on a judgement call. 12591 case BO_Shl: 12592 // ...except that we want to treat '1 << (blah)' as logically 12593 // positive. It's an important idiom. 12594 if (IntegerLiteral *I 12595 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 12596 if (I->getValue() == 1) { 12597 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 12598 return IntRange(R.Width, /*NonNegative*/ true); 12599 } 12600 } 12601 [[fallthrough]]; 12602 12603 case BO_ShlAssign: 12604 return IntRange::forValueOfType(C, GetExprType(E)); 12605 12606 // Right shift by a constant can narrow its left argument. 12607 case BO_Shr: 12608 case BO_ShrAssign: { 12609 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 12610 Approximate); 12611 12612 // If the shift amount is a positive constant, drop the width by 12613 // that much. 12614 if (std::optional<llvm::APSInt> shift = 12615 BO->getRHS()->getIntegerConstantExpr(C)) { 12616 if (shift->isNonNegative()) { 12617 unsigned zext = shift->getZExtValue(); 12618 if (zext >= L.Width) 12619 L.Width = (L.NonNegative ? 0 : 1); 12620 else 12621 L.Width -= zext; 12622 } 12623 } 12624 12625 return L; 12626 } 12627 12628 // Comma acts as its right operand. 12629 case BO_Comma: 12630 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 12631 Approximate); 12632 12633 case BO_Add: 12634 if (!Approximate) 12635 Combine = IntRange::sum; 12636 break; 12637 12638 case BO_Sub: 12639 if (BO->getLHS()->getType()->isPointerType()) 12640 return IntRange::forValueOfType(C, GetExprType(E)); 12641 if (!Approximate) 12642 Combine = IntRange::difference; 12643 break; 12644 12645 case BO_Mul: 12646 if (!Approximate) 12647 Combine = IntRange::product; 12648 break; 12649 12650 // The width of a division result is mostly determined by the size 12651 // of the LHS. 12652 case BO_Div: { 12653 // Don't 'pre-truncate' the operands. 12654 unsigned opWidth = C.getIntWidth(GetExprType(E)); 12655 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 12656 Approximate); 12657 12658 // If the divisor is constant, use that. 12659 if (std::optional<llvm::APSInt> divisor = 12660 BO->getRHS()->getIntegerConstantExpr(C)) { 12661 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 12662 if (log2 >= L.Width) 12663 L.Width = (L.NonNegative ? 0 : 1); 12664 else 12665 L.Width = std::min(L.Width - log2, MaxWidth); 12666 return L; 12667 } 12668 12669 // Otherwise, just use the LHS's width. 12670 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 12671 // could be -1. 12672 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 12673 Approximate); 12674 return IntRange(L.Width, L.NonNegative && R.NonNegative); 12675 } 12676 12677 case BO_Rem: 12678 Combine = IntRange::rem; 12679 break; 12680 12681 // The default behavior is okay for these. 12682 case BO_Xor: 12683 case BO_Or: 12684 break; 12685 } 12686 12687 // Combine the two ranges, but limit the result to the type in which we 12688 // performed the computation. 
12689 QualType T = GetExprType(E); 12690 unsigned opWidth = C.getIntWidth(T); 12691 IntRange L = 12692 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 12693 IntRange R = 12694 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 12695 IntRange C = Combine(L, R); 12696 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 12697 C.Width = std::min(C.Width, MaxWidth); 12698 return C; 12699 } 12700 12701 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 12702 switch (UO->getOpcode()) { 12703 // Boolean-valued operations are white-listed. 12704 case UO_LNot: 12705 return IntRange::forBoolType(); 12706 12707 // Operations with opaque sources are black-listed. 12708 case UO_Deref: 12709 case UO_AddrOf: // should be impossible 12710 return IntRange::forValueOfType(C, GetExprType(E)); 12711 12712 default: 12713 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 12714 Approximate); 12715 } 12716 } 12717 12718 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 12719 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 12720 Approximate); 12721 12722 if (const auto *BitField = E->getSourceBitField()) 12723 return IntRange(BitField->getBitWidthValue(C), 12724 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 12725 12726 return IntRange::forValueOfType(C, GetExprType(E)); 12727 } 12728 12729 static IntRange GetExprRange(ASTContext &C, const Expr *E, 12730 bool InConstantContext, bool Approximate) { 12731 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 12732 Approximate); 12733 } 12734 12735 /// Checks whether the given value, which currently has the given 12736 /// source semantics, has the same value when coerced through the 12737 /// target semantics. 12738 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 12739 const llvm::fltSemantics &Src, 12740 const llvm::fltSemantics &Tgt) { 12741 llvm::APFloat truncated = value; 12742 12743 bool ignored; 12744 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 12745 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 12746 12747 return truncated.bitwiseIsEqual(value); 12748 } 12749 12750 /// Checks whether the given value, which currently has the given 12751 /// source semantics, has the same value when coerced through the 12752 /// target semantics. 12753 /// 12754 /// The value might be a vector of floats (or a complex number). 12755 static bool IsSameFloatAfterCast(const APValue &value, 12756 const llvm::fltSemantics &Src, 12757 const llvm::fltSemantics &Tgt) { 12758 if (value.isFloat()) 12759 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 12760 12761 if (value.isVector()) { 12762 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 12763 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 12764 return false; 12765 return true; 12766 } 12767 12768 assert(value.isComplexFloat()); 12769 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 12770 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 12771 } 12772 12773 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 12774 bool IsListInit = false); 12775 12776 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 12777 // Suppress cases where we are comparing against an enum constant. 
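  // e.g. 'x == SOME_ENUM_CONSTANT' is usually a deliberate in-range check and
  // should not be reported as tautological.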
12778 if (const DeclRefExpr *DR = 12779 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 12780 if (isa<EnumConstantDecl>(DR->getDecl())) 12781 return true; 12782 12783 // Suppress cases where the value is expanded from a macro, unless that macro 12784 // is how a language represents a boolean literal. This is the case in both C 12785 // and Objective-C. 12786 SourceLocation BeginLoc = E->getBeginLoc(); 12787 if (BeginLoc.isMacroID()) { 12788 StringRef MacroName = Lexer::getImmediateMacroName( 12789 BeginLoc, S.getSourceManager(), S.getLangOpts()); 12790 return MacroName != "YES" && MacroName != "NO" && 12791 MacroName != "true" && MacroName != "false"; 12792 } 12793 12794 return false; 12795 } 12796 12797 static bool isKnownToHaveUnsignedValue(Expr *E) { 12798 return E->getType()->isIntegerType() && 12799 (!E->getType()->isSignedIntegerType() || 12800 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 12801 } 12802 12803 namespace { 12804 /// The promoted range of values of a type. In general this has the 12805 /// following structure: 12806 /// 12807 /// |-----------| . . . |-----------| 12808 /// ^ ^ ^ ^ 12809 /// Min HoleMin HoleMax Max 12810 /// 12811 /// ... where there is only a hole if a signed type is promoted to unsigned 12812 /// (in which case Min and Max are the smallest and largest representable 12813 /// values). 12814 struct PromotedRange { 12815 // Min, or HoleMax if there is a hole. 12816 llvm::APSInt PromotedMin; 12817 // Max, or HoleMin if there is a hole. 12818 llvm::APSInt PromotedMax; 12819 12820 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 12821 if (R.Width == 0) 12822 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 12823 else if (R.Width >= BitWidth && !Unsigned) { 12824 // Promotion made the type *narrower*. This happens when promoting 12825 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 12826 // Treat all values of 'signed int' as being in range for now. 12827 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 12828 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 12829 } else { 12830 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 12831 .extOrTrunc(BitWidth); 12832 PromotedMin.setIsUnsigned(Unsigned); 12833 12834 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 12835 .extOrTrunc(BitWidth); 12836 PromotedMax.setIsUnsigned(Unsigned); 12837 } 12838 } 12839 12840 // Determine whether this range is contiguous (has no hole). 12841 bool isContiguous() const { return PromotedMin <= PromotedMax; } 12842 12843 // Where a constant value is within the range. 
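  // The result is a bit mask of the primitive relations (LT, LE, GT, GE, EQ,
  // NE) known to hold against every value in the range, plus InRangeFlag when
  // the constant lies inside the range; constantValue() decodes it.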
12844 enum ComparisonResult { 12845 LT = 0x1, 12846 LE = 0x2, 12847 GT = 0x4, 12848 GE = 0x8, 12849 EQ = 0x10, 12850 NE = 0x20, 12851 InRangeFlag = 0x40, 12852 12853 Less = LE | LT | NE, 12854 Min = LE | InRangeFlag, 12855 InRange = InRangeFlag, 12856 Max = GE | InRangeFlag, 12857 Greater = GE | GT | NE, 12858 12859 OnlyValue = LE | GE | EQ | InRangeFlag, 12860 InHole = NE 12861 }; 12862 12863 ComparisonResult compare(const llvm::APSInt &Value) const { 12864 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 12865 Value.isUnsigned() == PromotedMin.isUnsigned()); 12866 if (!isContiguous()) { 12867 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 12868 if (Value.isMinValue()) return Min; 12869 if (Value.isMaxValue()) return Max; 12870 if (Value >= PromotedMin) return InRange; 12871 if (Value <= PromotedMax) return InRange; 12872 return InHole; 12873 } 12874 12875 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 12876 case -1: return Less; 12877 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 12878 case 1: 12879 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 12880 case -1: return InRange; 12881 case 0: return Max; 12882 case 1: return Greater; 12883 } 12884 } 12885 12886 llvm_unreachable("impossible compare result"); 12887 } 12888 12889 static std::optional<StringRef> 12890 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 12891 if (Op == BO_Cmp) { 12892 ComparisonResult LTFlag = LT, GTFlag = GT; 12893 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 12894 12895 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 12896 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 12897 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 12898 return std::nullopt; 12899 } 12900 12901 ComparisonResult TrueFlag, FalseFlag; 12902 if (Op == BO_EQ) { 12903 TrueFlag = EQ; 12904 FalseFlag = NE; 12905 } else if (Op == BO_NE) { 12906 TrueFlag = NE; 12907 FalseFlag = EQ; 12908 } else { 12909 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 12910 TrueFlag = LT; 12911 FalseFlag = GE; 12912 } else { 12913 TrueFlag = GT; 12914 FalseFlag = LE; 12915 } 12916 if (Op == BO_GE || Op == BO_LE) 12917 std::swap(TrueFlag, FalseFlag); 12918 } 12919 if (R & TrueFlag) 12920 return StringRef("true"); 12921 if (R & FalseFlag) 12922 return StringRef("false"); 12923 return std::nullopt; 12924 } 12925 }; 12926 } 12927 12928 static bool HasEnumType(Expr *E) { 12929 // Strip off implicit integral promotions. 12930 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 12931 if (ICE->getCastKind() != CK_IntegralCast && 12932 ICE->getCastKind() != CK_NoOp) 12933 break; 12934 E = ICE->getSubExpr(); 12935 } 12936 12937 return E->getType()->isEnumeralType(); 12938 } 12939 12940 static int classifyConstantValue(Expr *Constant) { 12941 // The values of this enumeration are used in the diagnostics 12942 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 12943 enum ConstantValueKind { 12944 Miscellaneous = 0, 12945 LiteralTrue, 12946 LiteralFalse 12947 }; 12948 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 12949 return BL->getValue() ? 
ConstantValueKind::LiteralTrue
12950                          : ConstantValueKind::LiteralFalse;
12951   return ConstantValueKind::Miscellaneous;
12952 }
12953 
12954 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
12955                                         Expr *Constant, Expr *Other,
12956                                         const llvm::APSInt &Value,
12957                                         bool RhsConstant) {
12958   if (S.inTemplateInstantiation())
12959     return false;
12960 
12961   Expr *OriginalOther = Other;
12962 
12963   Constant = Constant->IgnoreParenImpCasts();
12964   Other = Other->IgnoreParenImpCasts();
12965 
12966   // Suppress warnings on tautological comparisons between values of the same
12967   // enumeration type. There are only two ways we could warn on this:
12968   //  - If the constant is outside the range of representable values of
12969   //    the enumeration. In such a case, we should warn about the cast
12970   //    to enumeration type, not about the comparison.
12971   //  - If the constant is the maximum / minimum in-range value. For an
12972   //    enumeration type, such comparisons can be meaningful and useful.
12973   if (Constant->getType()->isEnumeralType() &&
12974       S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
12975     return false;
12976 
12977   IntRange OtherValueRange = GetExprRange(
12978       S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);
12979 
12980   QualType OtherT = Other->getType();
12981   if (const auto *AT = OtherT->getAs<AtomicType>())
12982     OtherT = AT->getValueType();
12983   IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);
12984 
12985   // Special case for ObjC BOOL on targets where it's a typedef for a signed char
12986   // (namely, macOS). FIXME: IntRange::forValueOfType should do this.
12987   bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
12988                               S.NSAPIObj->isObjCBOOLType(OtherT) &&
12989                               OtherT->isSpecificBuiltinType(BuiltinType::SChar);
12990 
12991   // Whether we're treating Other as being a bool because of the form of
12992   // expression despite it having another type (typically 'int' in C).
12993   bool OtherIsBooleanDespiteType =
12994       !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
12995   if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
12996     OtherTypeRange = OtherValueRange = IntRange::forBoolType();
12997 
12998   // Check if all values in the range of possible values of this expression
12999   // lead to the same comparison outcome.
13000   PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
13001                                         Value.isUnsigned());
13002   auto Cmp = OtherPromotedValueRange.compare(Value);
13003   auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
13004   if (!Result)
13005     return false;
13006 
13007   // Also consider the range determined by the type alone. This allows us to
13008   // classify the warning under the proper diagnostic group.
13009   bool TautologicalTypeCompare = false;
13010   {
13011     PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
13012                                          Value.isUnsigned());
13013     auto TypeCmp = OtherPromotedTypeRange.compare(Value);
13014     if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
13015                                                        RhsConstant)) {
13016       TautologicalTypeCompare = true;
13017       Cmp = TypeCmp;
13018       Result = TypeResult;
13019     }
13020   }
13021 
13022   // Don't warn if the non-constant operand actually always evaluates to the
13023   // same value.
13024   if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
13025     return false;
13026 
13027   // Suppress the diagnostic for an in-range comparison if the constant comes
13028   // from a macro or enumerator.
We don't want to diagnose 13029 // 13030 // some_long_value <= INT_MAX 13031 // 13032 // when sizeof(int) == sizeof(long). 13033 bool InRange = Cmp & PromotedRange::InRangeFlag; 13034 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 13035 return false; 13036 13037 // A comparison of an unsigned bit-field against 0 is really a type problem, 13038 // even though at the type level the bit-field might promote to 'signed int'. 13039 if (Other->refersToBitField() && InRange && Value == 0 && 13040 Other->getType()->isUnsignedIntegerOrEnumerationType()) 13041 TautologicalTypeCompare = true; 13042 13043 // If this is a comparison to an enum constant, include that 13044 // constant in the diagnostic. 13045 const EnumConstantDecl *ED = nullptr; 13046 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 13047 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 13048 13049 // Should be enough for uint128 (39 decimal digits) 13050 SmallString<64> PrettySourceValue; 13051 llvm::raw_svector_ostream OS(PrettySourceValue); 13052 if (ED) { 13053 OS << '\'' << *ED << "' (" << Value << ")"; 13054 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 13055 Constant->IgnoreParenImpCasts())) { 13056 OS << (BL->getValue() ? "YES" : "NO"); 13057 } else { 13058 OS << Value; 13059 } 13060 13061 if (!TautologicalTypeCompare) { 13062 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 13063 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 13064 << E->getOpcodeStr() << OS.str() << *Result 13065 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 13066 return true; 13067 } 13068 13069 if (IsObjCSignedCharBool) { 13070 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 13071 S.PDiag(diag::warn_tautological_compare_objc_bool) 13072 << OS.str() << *Result); 13073 return true; 13074 } 13075 13076 // FIXME: We use a somewhat different formatting for the in-range cases and 13077 // cases involving boolean values for historical reasons. We should pick a 13078 // consistent way of presenting these diagnostics. 13079 if (!InRange || Other->isKnownToHaveBooleanValue()) { 13080 13081 S.DiagRuntimeBehavior( 13082 E->getOperatorLoc(), E, 13083 S.PDiag(!InRange ? diag::warn_out_of_range_compare 13084 : diag::warn_tautological_bool_compare) 13085 << OS.str() << classifyConstantValue(Constant) << OtherT 13086 << OtherIsBooleanDespiteType << *Result 13087 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 13088 } else { 13089 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 13090 unsigned Diag = 13091 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 13092 ? (HasEnumType(OriginalOther) 13093 ? diag::warn_unsigned_enum_always_true_comparison 13094 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 13095 : diag::warn_unsigned_always_true_comparison) 13096 : diag::warn_tautological_constant_compare; 13097 13098 S.Diag(E->getOperatorLoc(), Diag) 13099 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 13100 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 13101 } 13102 13103 return true; 13104 } 13105 13106 /// Analyze the operands of the given comparison. Implements the 13107 /// fallback case from AnalyzeComparison. 13108 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 13109 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 13110 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 13111 } 13112 13113 /// Implements -Wsign-compare. 
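/// For example, 'int i; unsigned u; ... if (i < u)' is diagnosed because 'i'
/// is converted to 'unsigned', so a negative 'i' would compare as a very
/// large value.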
13114 /// 13115 /// \param E the binary operator to check for warnings 13116 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 13117 // The type the comparison is being performed in. 13118 QualType T = E->getLHS()->getType(); 13119 13120 // Only analyze comparison operators where both sides have been converted to 13121 // the same type. 13122 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 13123 return AnalyzeImpConvsInComparison(S, E); 13124 13125 // Don't analyze value-dependent comparisons directly. 13126 if (E->isValueDependent()) 13127 return AnalyzeImpConvsInComparison(S, E); 13128 13129 Expr *LHS = E->getLHS(); 13130 Expr *RHS = E->getRHS(); 13131 13132 if (T->isIntegralType(S.Context)) { 13133 std::optional<llvm::APSInt> RHSValue = 13134 RHS->getIntegerConstantExpr(S.Context); 13135 std::optional<llvm::APSInt> LHSValue = 13136 LHS->getIntegerConstantExpr(S.Context); 13137 13138 // We don't care about expressions whose result is a constant. 13139 if (RHSValue && LHSValue) 13140 return AnalyzeImpConvsInComparison(S, E); 13141 13142 // We only care about expressions where just one side is literal 13143 if ((bool)RHSValue ^ (bool)LHSValue) { 13144 // Is the constant on the RHS or LHS? 13145 const bool RhsConstant = (bool)RHSValue; 13146 Expr *Const = RhsConstant ? RHS : LHS; 13147 Expr *Other = RhsConstant ? LHS : RHS; 13148 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 13149 13150 // Check whether an integer constant comparison results in a value 13151 // of 'true' or 'false'. 13152 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 13153 return AnalyzeImpConvsInComparison(S, E); 13154 } 13155 } 13156 13157 if (!T->hasUnsignedIntegerRepresentation()) { 13158 // We don't do anything special if this isn't an unsigned integral 13159 // comparison: we're only interested in integral comparisons, and 13160 // signed comparisons only happen in cases we don't care to warn about. 13161 return AnalyzeImpConvsInComparison(S, E); 13162 } 13163 13164 LHS = LHS->IgnoreParenImpCasts(); 13165 RHS = RHS->IgnoreParenImpCasts(); 13166 13167 if (!S.getLangOpts().CPlusPlus) { 13168 // Avoid warning about comparison of integers with different signs when 13169 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 13170 // the type of `E`. 13171 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 13172 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 13173 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 13174 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 13175 } 13176 13177 // Check to see if one of the (unmodified) operands is of different 13178 // signedness. 13179 Expr *signedOperand, *unsignedOperand; 13180 if (LHS->getType()->hasSignedIntegerRepresentation()) { 13181 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 13182 "unsigned comparison between two signed integer expressions?"); 13183 signedOperand = LHS; 13184 unsignedOperand = RHS; 13185 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 13186 signedOperand = RHS; 13187 unsignedOperand = LHS; 13188 } else { 13189 return AnalyzeImpConvsInComparison(S, E); 13190 } 13191 13192 // Otherwise, calculate the effective range of the signed operand. 13193 IntRange signedRange = GetExprRange( 13194 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 13195 13196 // Go ahead and analyze implicit conversions in the operands. 
Note
13197   // that we skip the implicit conversions on both sides.
13198   AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
13199   AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
13200 
13201   // If the signed range is non-negative, -Wsign-compare won't fire.
13202   if (signedRange.NonNegative)
13203     return;
13204 
13205   // For (in)equality comparisons, if the unsigned operand is a
13206   // constant which cannot collide with an overflowed signed operand,
13207   // then reinterpreting the signed operand as unsigned will not
13208   // change the result of the comparison.
13209   if (E->isEqualityOp()) {
13210     unsigned comparisonWidth = S.Context.getIntWidth(T);
13211     IntRange unsignedRange =
13212         GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
13213                      /*Approximate*/ true);
13214 
13215     // We should never be unable to prove that the unsigned operand is
13216     // non-negative.
13217     assert(unsignedRange.NonNegative && "unsigned range includes negative?");
13218 
13219     if (unsignedRange.Width < comparisonWidth)
13220       return;
13221   }
13222 
13223   S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
13224                         S.PDiag(diag::warn_mixed_sign_comparison)
13225                             << LHS->getType() << RHS->getType()
13226                             << LHS->getSourceRange() << RHS->getSourceRange());
13227 }
13228 
13229 /// Analyzes an attempt to assign the given value to a bitfield.
13230 ///
13231 /// Returns true if there was something fishy about the attempt.
13232 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
13233                                       SourceLocation InitLoc) {
13234   assert(Bitfield->isBitField());
13235   if (Bitfield->isInvalidDecl())
13236     return false;
13237 
13238   // White-list bool bitfields.
13239   QualType BitfieldType = Bitfield->getType();
13240   if (BitfieldType->isBooleanType())
13241     return false;
13242 
13243   if (BitfieldType->isEnumeralType()) {
13244     EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
13245     // If the underlying enum type was not explicitly specified as an unsigned
13246     // type and the enum contains only positive values, MSVC++ will cause an
13247     // inconsistency by storing this as a signed type.
13248     if (S.getLangOpts().CPlusPlus11 &&
13249         !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
13250         BitfieldEnumDecl->getNumPositiveBits() > 0 &&
13251         BitfieldEnumDecl->getNumNegativeBits() == 0) {
13252       S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
13253           << BitfieldEnumDecl;
13254     }
13255   }
13256 
13257   // Ignore value- or type-dependent expressions.
13258   if (Bitfield->getBitWidth()->isValueDependent() ||
13259       Bitfield->getBitWidth()->isTypeDependent() ||
13260       Init->isValueDependent() ||
13261       Init->isTypeDependent())
13262     return false;
13263 
13264   Expr *OriginalInit = Init->IgnoreParenImpCasts();
13265   unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
13266 
13267   Expr::EvalResult Result;
13268   if (!OriginalInit->EvaluateAsInt(Result, S.Context,
13269                                    Expr::SE_AllowSideEffects)) {
13270     // The RHS is not constant. If the RHS has an enum type, make sure the
13271     // bitfield is wide enough to hold all the values of the enum without
13272     // truncation.
13273     if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
13274       EnumDecl *ED = EnumTy->getDecl();
13275       bool SignedBitfield = BitfieldType->isSignedIntegerType();
13276 
13277       // Enum types are implicitly signed on Windows, so check if there are any
13278       // negative enumerators to see if the enum was intended to be signed or
13279       // not.
13280 bool SignedEnum = ED->getNumNegativeBits() > 0; 13281 13282 // Check for surprising sign changes when assigning enum values to a 13283 // bitfield of different signedness. If the bitfield is signed and we 13284 // have exactly the right number of bits to store this unsigned enum, 13285 // suggest changing the enum to an unsigned type. This typically happens 13286 // on Windows where unfixed enums always use an underlying type of 'int'. 13287 unsigned DiagID = 0; 13288 if (SignedEnum && !SignedBitfield) { 13289 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 13290 } else if (SignedBitfield && !SignedEnum && 13291 ED->getNumPositiveBits() == FieldWidth) { 13292 DiagID = diag::warn_signed_bitfield_enum_conversion; 13293 } 13294 13295 if (DiagID) { 13296 S.Diag(InitLoc, DiagID) << Bitfield << ED; 13297 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 13298 SourceRange TypeRange = 13299 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 13300 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 13301 << SignedEnum << TypeRange; 13302 } 13303 13304 // Compute the required bitwidth. If the enum has negative values, we need 13305 // one more bit than the normal number of positive bits to represent the 13306 // sign bit. 13307 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 13308 ED->getNumNegativeBits()) 13309 : ED->getNumPositiveBits(); 13310 13311 // Check the bitwidth. 13312 if (BitsNeeded > FieldWidth) { 13313 Expr *WidthExpr = Bitfield->getBitWidth(); 13314 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 13315 << Bitfield << ED; 13316 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 13317 << BitsNeeded << ED << WidthExpr->getSourceRange(); 13318 } 13319 } 13320 13321 return false; 13322 } 13323 13324 llvm::APSInt Value = Result.Val.getInt(); 13325 13326 unsigned OriginalWidth = Value.getBitWidth(); 13327 13328 // In C, the macro 'true' from stdbool.h will evaluate to '1'; To reduce 13329 // false positives where the user is demonstrating they intend to use the 13330 // bit-field as a Boolean, check to see if the value is 1 and we're assigning 13331 // to a one-bit bit-field to see if the value came from a macro named 'true'. 13332 bool OneAssignedToOneBitBitfield = FieldWidth == 1 && Value == 1; 13333 if (OneAssignedToOneBitBitfield && !S.LangOpts.CPlusPlus) { 13334 SourceLocation MaybeMacroLoc = OriginalInit->getBeginLoc(); 13335 if (S.SourceMgr.isInSystemMacro(MaybeMacroLoc) && 13336 S.findMacroSpelling(MaybeMacroLoc, "true")) 13337 return false; 13338 } 13339 13340 if (!Value.isSigned() || Value.isNegative()) 13341 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 13342 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 13343 OriginalWidth = Value.getMinSignedBits(); 13344 13345 if (OriginalWidth <= FieldWidth) 13346 return false; 13347 13348 // Compute the value which the bitfield will contain. 13349 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 13350 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 13351 13352 // Check whether the stored value is equal to the original value. 13353 TruncatedValue = TruncatedValue.extend(OriginalWidth); 13354 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 13355 return false; 13356 13357 std::string PrettyValue = toString(Value, 10); 13358 std::string PrettyTrunc = toString(TruncatedValue, 10); 13359 13360 S.Diag(InitLoc, OneAssignedToOneBitBitfield 13361 ? 
diag::warn_impcast_single_bit_bitield_precision_constant 13362 : diag::warn_impcast_bitfield_precision_constant) 13363 << PrettyValue << PrettyTrunc << OriginalInit->getType() 13364 << Init->getSourceRange(); 13365 13366 return true; 13367 } 13368 13369 /// Analyze the given simple or compound assignment for warning-worthy 13370 /// operations. 13371 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 13372 // Just recurse on the LHS. 13373 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 13374 13375 // We want to recurse on the RHS as normal unless we're assigning to 13376 // a bitfield. 13377 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 13378 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 13379 E->getOperatorLoc())) { 13380 // Recurse, ignoring any implicit conversions on the RHS. 13381 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 13382 E->getOperatorLoc()); 13383 } 13384 } 13385 13386 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 13387 13388 // Diagnose implicitly sequentially-consistent atomic assignment. 13389 if (E->getLHS()->getType()->isAtomicType()) 13390 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 13391 } 13392 13393 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 13394 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 13395 SourceLocation CContext, unsigned diag, 13396 bool pruneControlFlow = false) { 13397 if (pruneControlFlow) { 13398 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13399 S.PDiag(diag) 13400 << SourceType << T << E->getSourceRange() 13401 << SourceRange(CContext)); 13402 return; 13403 } 13404 S.Diag(E->getExprLoc(), diag) 13405 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 13406 } 13407 13408 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 13409 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 13410 SourceLocation CContext, 13411 unsigned diag, bool pruneControlFlow = false) { 13412 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 13413 } 13414 13415 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 13416 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 13417 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 13418 } 13419 13420 static void adornObjCBoolConversionDiagWithTernaryFixit( 13421 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 13422 Expr *Ignored = SourceExpr->IgnoreImplicit(); 13423 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 13424 Ignored = OVE->getSourceExpr(); 13425 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 13426 isa<BinaryOperator>(Ignored) || 13427 isa<CXXOperatorCallExpr>(Ignored); 13428 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 13429 if (NeedsParens) 13430 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 13431 << FixItHint::CreateInsertion(EndLoc, ")"); 13432 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 13433 } 13434 13435 /// Diagnose an implicit cast from a floating point value to an integer value. 
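/// For example, 'int i = 0.9;' is diagnosed because the value truncates to 0,
/// while a literal that converts exactly, such as 'int i = 2.0;', is not
/// flagged.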
13436 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 13437 SourceLocation CContext) { 13438 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 13439 const bool PruneWarnings = S.inTemplateInstantiation(); 13440 13441 Expr *InnerE = E->IgnoreParenImpCasts(); 13442 // We also want to warn on, e.g., "int i = -1.234" 13443 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 13444 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 13445 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 13446 13447 const bool IsLiteral = 13448 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 13449 13450 llvm::APFloat Value(0.0); 13451 bool IsConstant = 13452 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 13453 if (!IsConstant) { 13454 if (isObjCSignedCharBool(S, T)) { 13455 return adornObjCBoolConversionDiagWithTernaryFixit( 13456 S, E, 13457 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 13458 << E->getType()); 13459 } 13460 13461 return DiagnoseImpCast(S, E, T, CContext, 13462 diag::warn_impcast_float_integer, PruneWarnings); 13463 } 13464 13465 bool isExact = false; 13466 13467 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 13468 T->hasUnsignedIntegerRepresentation()); 13469 llvm::APFloat::opStatus Result = Value.convertToInteger( 13470 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 13471 13472 // FIXME: Force the precision of the source value down so we don't print 13473 // digits which are usually useless (we don't really care here if we 13474 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 13475 // would automatically print the shortest representation, but it's a bit 13476 // tricky to implement. 13477 SmallString<16> PrettySourceValue; 13478 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 13479 precision = (precision * 59 + 195) / 196; 13480 Value.toString(PrettySourceValue, precision); 13481 13482 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 13483 return adornObjCBoolConversionDiagWithTernaryFixit( 13484 S, E, 13485 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 13486 << PrettySourceValue); 13487 } 13488 13489 if (Result == llvm::APFloat::opOK && isExact) { 13490 if (IsLiteral) return; 13491 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 13492 PruneWarnings); 13493 } 13494 13495 // Conversion of a floating-point value to a non-bool integer where the 13496 // integral part cannot be represented by the integer type is undefined. 13497 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 13498 return DiagnoseImpCast( 13499 S, E, T, CContext, 13500 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 13501 : diag::warn_impcast_float_to_integer_out_of_range, 13502 PruneWarnings); 13503 13504 unsigned DiagID = 0; 13505 if (IsLiteral) { 13506 // Warn on floating point literal to integer. 13507 DiagID = diag::warn_impcast_literal_float_to_integer; 13508 } else if (IntegerValue == 0) { 13509 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 13510 return DiagnoseImpCast(S, E, T, CContext, 13511 diag::warn_impcast_float_integer, PruneWarnings); 13512 } 13513 // Warn on non-zero to zero conversion. 
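// e.g. (illustrative) 'constexpr double D = 0.5; int I = D;' is diagnosed:
// the non-zero source value 0.5 becomes 0.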
13514 DiagID = diag::warn_impcast_float_to_integer_zero; 13515 } else { 13516 if (IntegerValue.isUnsigned()) { 13517 if (!IntegerValue.isMaxValue()) { 13518 return DiagnoseImpCast(S, E, T, CContext, 13519 diag::warn_impcast_float_integer, PruneWarnings); 13520 } 13521 } else { // IntegerValue.isSigned() 13522 if (!IntegerValue.isMaxSignedValue() && 13523 !IntegerValue.isMinSignedValue()) { 13524 return DiagnoseImpCast(S, E, T, CContext, 13525 diag::warn_impcast_float_integer, PruneWarnings); 13526 } 13527 } 13528 // Warn on evaluatable floating point expression to integer conversion. 13529 DiagID = diag::warn_impcast_float_to_integer; 13530 } 13531 13532 SmallString<16> PrettyTargetValue; 13533 if (IsBool) 13534 PrettyTargetValue = Value.isZero() ? "false" : "true"; 13535 else 13536 IntegerValue.toString(PrettyTargetValue); 13537 13538 if (PruneWarnings) { 13539 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13540 S.PDiag(DiagID) 13541 << E->getType() << T.getUnqualifiedType() 13542 << PrettySourceValue << PrettyTargetValue 13543 << E->getSourceRange() << SourceRange(CContext)); 13544 } else { 13545 S.Diag(E->getExprLoc(), DiagID) 13546 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 13547 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 13548 } 13549 } 13550 13551 /// Analyze the given compound assignment for the possible losing of 13552 /// floating-point precision. 13553 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 13554 assert(isa<CompoundAssignOperator>(E) && 13555 "Must be compound assignment operation"); 13556 // Recurse on the LHS and RHS in here 13557 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 13558 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 13559 13560 if (E->getLHS()->getType()->isAtomicType()) 13561 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 13562 13563 // Now check the outermost expression 13564 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 13565 const auto *RBT = cast<CompoundAssignOperator>(E) 13566 ->getComputationResultType() 13567 ->getAs<BuiltinType>(); 13568 13569 // The below checks assume source is floating point. 13570 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 13571 13572 // If source is floating point but target is an integer. 13573 if (ResultBT->isInteger()) 13574 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 13575 E->getExprLoc(), diag::warn_impcast_float_integer); 13576 13577 if (!ResultBT->isFloatingPoint()) 13578 return; 13579 13580 // If both source and target are floating points, warn about losing precision. 13581 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13582 QualType(ResultBT, 0), QualType(RBT, 0)); 13583 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 13584 // warn about dropping FP rank. 
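// e.g. (illustrative) 'float F = 0; F += 1.0;' computes in 'double' and then
// truncates the result back to 'float'.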
13585 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 13586 diag::warn_impcast_float_result_precision); 13587 } 13588 13589 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 13590 IntRange Range) { 13591 if (!Range.Width) return "0"; 13592 13593 llvm::APSInt ValueInRange = Value; 13594 ValueInRange.setIsSigned(!Range.NonNegative); 13595 ValueInRange = ValueInRange.trunc(Range.Width); 13596 return toString(ValueInRange, 10); 13597 } 13598 13599 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 13600 if (!isa<ImplicitCastExpr>(Ex)) 13601 return false; 13602 13603 Expr *InnerE = Ex->IgnoreParenImpCasts(); 13604 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 13605 const Type *Source = 13606 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 13607 if (Target->isDependentType()) 13608 return false; 13609 13610 const BuiltinType *FloatCandidateBT = 13611 dyn_cast<BuiltinType>(ToBool ? Source : Target); 13612 const Type *BoolCandidateType = ToBool ? Target : Source; 13613 13614 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 13615 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 13616 } 13617 13618 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 13619 SourceLocation CC) { 13620 unsigned NumArgs = TheCall->getNumArgs(); 13621 for (unsigned i = 0; i < NumArgs; ++i) { 13622 Expr *CurrA = TheCall->getArg(i); 13623 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 13624 continue; 13625 13626 bool IsSwapped = ((i > 0) && 13627 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 13628 IsSwapped |= ((i < (NumArgs - 1)) && 13629 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 13630 if (IsSwapped) { 13631 // Warn on this floating-point to bool conversion. 13632 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 13633 CurrA->getType(), CC, 13634 diag::warn_impcast_floating_point_to_bool); 13635 } 13636 } 13637 } 13638 13639 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 13640 SourceLocation CC) { 13641 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 13642 E->getExprLoc())) 13643 return; 13644 13645 // Don't warn on functions which have return type nullptr_t. 13646 if (isa<CallExpr>(E)) 13647 return; 13648 13649 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 13650 const Expr *NewE = E->IgnoreParenImpCasts(); 13651 bool IsGNUNullExpr = isa<GNUNullExpr>(NewE); 13652 bool HasNullPtrType = NewE->getType()->isNullPtrType(); 13653 if (!IsGNUNullExpr && !HasNullPtrType) 13654 return; 13655 13656 // Return if target type is a safe conversion. 13657 if (T->isAnyPointerType() || T->isBlockPointerType() || 13658 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 13659 return; 13660 13661 SourceLocation Loc = E->getSourceRange().getBegin(); 13662 13663 // Venture through the macro stacks to get to the source of macro arguments. 13664 // The new location is a better location than the complete location that was 13665 // passed in. 13666 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 13667 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 13668 13669 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
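// (In C++ mode, GNU-compatible headers typically define NULL as __null, so the
// spelling the user wrote is usually the NULL macro itself.)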
13670 if (IsGNUNullExpr && Loc.isMacroID()) { 13671 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 13672 Loc, S.SourceMgr, S.getLangOpts()); 13673 if (MacroName == "NULL") 13674 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 13675 } 13676 13677 // Only warn if the null and context location are in the same macro expansion. 13678 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 13679 return; 13680 13681 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 13682 << HasNullPtrType << T << SourceRange(CC) 13683 << FixItHint::CreateReplacement(Loc, 13684 S.getFixItZeroLiteralForType(T, Loc)); 13685 } 13686 13687 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13688 ObjCArrayLiteral *ArrayLiteral); 13689 13690 static void 13691 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 13692 ObjCDictionaryLiteral *DictionaryLiteral); 13693 13694 /// Check a single element within a collection literal against the 13695 /// target element type. 13696 static void checkObjCCollectionLiteralElement(Sema &S, 13697 QualType TargetElementType, 13698 Expr *Element, 13699 unsigned ElementKind) { 13700 // Skip a bitcast to 'id' or qualified 'id'. 13701 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 13702 if (ICE->getCastKind() == CK_BitCast && 13703 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 13704 Element = ICE->getSubExpr(); 13705 } 13706 13707 QualType ElementType = Element->getType(); 13708 ExprResult ElementResult(Element); 13709 if (ElementType->getAs<ObjCObjectPointerType>() && 13710 S.CheckSingleAssignmentConstraints(TargetElementType, 13711 ElementResult, 13712 false, false) 13713 != Sema::Compatible) { 13714 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 13715 << ElementType << ElementKind << TargetElementType 13716 << Element->getSourceRange(); 13717 } 13718 13719 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 13720 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 13721 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 13722 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 13723 } 13724 13725 /// Check an Objective-C array literal being converted to the given 13726 /// target type. 13727 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13728 ObjCArrayLiteral *ArrayLiteral) { 13729 if (!S.NSArrayDecl) 13730 return; 13731 13732 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13733 if (!TargetObjCPtr) 13734 return; 13735 13736 if (TargetObjCPtr->isUnspecialized() || 13737 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13738 != S.NSArrayDecl->getCanonicalDecl()) 13739 return; 13740 13741 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13742 if (TypeArgs.size() != 1) 13743 return; 13744 13745 QualType TargetElementType = TypeArgs[0]; 13746 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 13747 checkObjCCollectionLiteralElement(S, TargetElementType, 13748 ArrayLiteral->getElement(I), 13749 0); 13750 } 13751 } 13752 13753 /// Check an Objective-C dictionary literal being converted to the given 13754 /// target type. 
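/// For example (illustrative), initializing an
/// 'NSDictionary<NSString *, NSString *> *' from '@{@"key": @1}' diagnoses the
/// value element, since an NSNumber literal is not an NSString.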
13755 static void
13756 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
13757 ObjCDictionaryLiteral *DictionaryLiteral) {
13758 if (!S.NSDictionaryDecl)
13759 return;
13760
13761 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
13762 if (!TargetObjCPtr)
13763 return;
13764
13765 if (TargetObjCPtr->isUnspecialized() ||
13766 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
13767 != S.NSDictionaryDecl->getCanonicalDecl())
13768 return;
13769
13770 auto TypeArgs = TargetObjCPtr->getTypeArgs();
13771 if (TypeArgs.size() != 2)
13772 return;
13773
13774 QualType TargetKeyType = TypeArgs[0];
13775 QualType TargetObjectType = TypeArgs[1];
13776 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
13777 auto Element = DictionaryLiteral->getKeyValueElement(I);
13778 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
13779 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
13780 }
13781 }
13782
13783 // Helper function to filter out cases for the same-width constant conversion warning.
13784 // Don't warn on char array initialization or for non-decimal values.
13785 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
13786 SourceLocation CC) {
13787 // If initializing from a constant, and the constant starts with '0',
13788 // then it is a binary, octal, or hexadecimal constant. Allow these constants
13789 // to fill all the bits, even if there is a sign change.
13790 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
13791 const char FirstLiteralCharacter =
13792 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
13793 if (FirstLiteralCharacter == '0')
13794 return false;
13795 }
13796
13797 // If the CC location points to a '{', and the type is char, then assume
13798 // it is an array initialization.
13799 if (CC.isValid() && T->isCharType()) {
13800 const char FirstContextCharacter =
13801 S.getSourceManager().getCharacterData(CC)[0];
13802 if (FirstContextCharacter == '{')
13803 return false;
13804 }
13805
13806 return true;
13807 }
13808
13809 static const IntegerLiteral *getIntegerLiteral(Expr *E) {
13810 const auto *IL = dyn_cast<IntegerLiteral>(E);
13811 if (!IL) {
13812 if (auto *UO = dyn_cast<UnaryOperator>(E)) {
13813 if (UO->getOpcode() == UO_Minus)
13814 return dyn_cast<IntegerLiteral>(UO->getSubExpr());
13815 }
13816 }
13817
13818 return IL;
13819 }
13820
13821 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
13822 E = E->IgnoreParenImpCasts();
13823 SourceLocation ExprLoc = E->getExprLoc();
13824
13825 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
13826 BinaryOperator::Opcode Opc = BO->getOpcode();
13827 Expr::EvalResult Result;
13828 // Do not diagnose unsigned shifts.
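// e.g. (illustrative) 'if (X << 4)' on a signed X warns that the shift result
// is being used as a boolean (-Wint-in-bool-context).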
13829 if (Opc == BO_Shl) {
13830 const auto *LHS = getIntegerLiteral(BO->getLHS());
13831 const auto *RHS = getIntegerLiteral(BO->getRHS());
13832 if (LHS && LHS->getValue() == 0)
13833 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
13834 else if (!E->isValueDependent() && LHS && RHS &&
13835 RHS->getValue().isNonNegative() &&
13836 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
13837 S.Diag(ExprLoc, diag::warn_left_shift_always)
13838 << (Result.Val.getInt() != 0);
13839 else if (E->getType()->isSignedIntegerType())
13840 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
13841 }
13842 }
13843
13844 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
13845 const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
13846 const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
13847 if (!LHS || !RHS)
13848 return;
13849 if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
13850 (RHS->getValue() == 0 || RHS->getValue() == 1))
13851 // Do not diagnose common idioms.
13852 return;
13853 if (LHS->getValue() != 0 && RHS->getValue() != 0)
13854 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
13855 }
13856 }
13857
13858 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
13859 SourceLocation CC,
13860 bool *ICContext = nullptr,
13861 bool IsListInit = false) {
13862 if (E->isTypeDependent() || E->isValueDependent()) return;
13863
13864 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
13865 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
13866 if (Source == Target) return;
13867 if (Target->isDependentType()) return;
13868
13869 // If the conversion context location is invalid, don't complain. We also
13870 // don't want to emit a warning if the issue occurs from the expansion of
13871 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
13872 // delay this check as long as possible. Once we detect we are in that
13873 // scenario, we just return.
13874 if (CC.isInvalid())
13875 return;
13876
13877 if (Source->isAtomicType())
13878 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
13879
13880 // Diagnose implicit casts to bool.
13881 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
13882 if (isa<StringLiteral>(E))
13883 // Warn on string literal to bool. Checks for string literals in logical-AND
13884 // expressions, for instance, assert(0 && "error here"), are
13885 // prevented by a check in AnalyzeImplicitConversions().
13886 return DiagnoseImpCast(S, E, T, CC,
13887 diag::warn_impcast_string_literal_to_bool);
13888 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
13889 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
13890 // This covers the literal expressions that evaluate to Objective-C
13891 // objects.
13892 return DiagnoseImpCast(S, E, T, CC,
13893 diag::warn_impcast_objective_c_literal_to_bool);
13894 }
13895 if (Source->isPointerType() || Source->canDecayToPointerType()) {
13896 // Warn on pointer to bool conversion that is always true.
13897 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
13898 SourceRange(CC));
13899 }
13900 }
13901
13902 // If we're converting a constant to an ObjC BOOL on a platform where BOOL
13903 // is a typedef for signed char (macOS), then that constant value has to be 1
13904 // or 0.
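// e.g. (illustrative) 'BOOL B = 2;' is diagnosed, with a fix-it suggesting
// '2 ? YES : NO'.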
13905 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 13906 Expr::EvalResult Result; 13907 if (E->EvaluateAsInt(Result, S.getASTContext(), 13908 Expr::SE_AllowSideEffects)) { 13909 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 13910 adornObjCBoolConversionDiagWithTernaryFixit( 13911 S, E, 13912 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 13913 << toString(Result.Val.getInt(), 10)); 13914 } 13915 return; 13916 } 13917 } 13918 13919 // Check implicit casts from Objective-C collection literals to specialized 13920 // collection types, e.g., NSArray<NSString *> *. 13921 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 13922 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 13923 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 13924 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 13925 13926 // Strip vector types. 13927 if (isa<VectorType>(Source)) { 13928 if (Target->isVLSTBuiltinType() && 13929 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 13930 QualType(Source, 0)) || 13931 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 13932 QualType(Source, 0)))) 13933 return; 13934 13935 if (!isa<VectorType>(Target)) { 13936 if (S.SourceMgr.isInSystemMacro(CC)) 13937 return; 13938 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 13939 } 13940 13941 // If the vector cast is cast between two vectors of the same size, it is 13942 // a bitcast, not a conversion. 13943 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13944 return; 13945 13946 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 13947 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 13948 } 13949 if (auto VecTy = dyn_cast<VectorType>(Target)) 13950 Target = VecTy->getElementType().getTypePtr(); 13951 13952 // Strip complex types. 13953 if (isa<ComplexType>(Source)) { 13954 if (!isa<ComplexType>(Target)) { 13955 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 13956 return; 13957 13958 return DiagnoseImpCast(S, E, T, CC, 13959 S.getLangOpts().CPlusPlus 13960 ? diag::err_impcast_complex_scalar 13961 : diag::warn_impcast_complex_scalar); 13962 } 13963 13964 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 13965 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 13966 } 13967 13968 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 13969 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 13970 13971 // Strip SVE vector types 13972 if (SourceBT && SourceBT->isVLSTBuiltinType()) { 13973 // Need the original target type for vector type checks 13974 const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr(); 13975 // Handle conversion from scalable to fixed when msve-vector-bits is 13976 // specified 13977 if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0), 13978 QualType(Source, 0)) || 13979 S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0), 13980 QualType(Source, 0))) 13981 return; 13982 13983 // If the vector cast is cast between two vectors of the same size, it is 13984 // a bitcast, not a conversion. 13985 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13986 return; 13987 13988 Source = SourceBT->getSveEltType(S.Context).getTypePtr(); 13989 } 13990 13991 if (TargetBT && TargetBT->isVLSTBuiltinType()) 13992 Target = TargetBT->getSveEltType(S.Context).getTypePtr(); 13993 13994 // If the source is floating point... 
13995 if (SourceBT && SourceBT->isFloatingPoint()) {
13996 // ...and the target is floating point...
13997 if (TargetBT && TargetBT->isFloatingPoint()) {
13998 // ...then warn if we're dropping FP rank.
13999
14000 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
14001 QualType(SourceBT, 0), QualType(TargetBT, 0));
14002 if (Order > 0) {
14003 // Don't warn about float constants that are precisely
14004 // representable in the target type.
14005 Expr::EvalResult result;
14006 if (E->EvaluateAsRValue(result, S.Context)) {
14007 // Value might be a float, a float vector, or a float complex.
14008 if (IsSameFloatAfterCast(result.Val,
14009 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
14010 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
14011 return;
14012 }
14013
14014 if (S.SourceMgr.isInSystemMacro(CC))
14015 return;
14016
14017 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
14018 }
14019 // ... or possibly if we're increasing rank, too
14020 else if (Order < 0) {
14021 if (S.SourceMgr.isInSystemMacro(CC))
14022 return;
14023
14024 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
14025 }
14026 return;
14027 }
14028
14029 // If the target is integral, always warn.
14030 if (TargetBT && TargetBT->isInteger()) {
14031 if (S.SourceMgr.isInSystemMacro(CC))
14032 return;
14033
14034 DiagnoseFloatingImpCast(S, E, T, CC);
14035 }
14036
14037 // Detect the case where a call result is converted from floating-point to
14038 // bool, and the final argument to the call is converted from bool, to
14039 // discover this typo:
14040 //
14041 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
14042 //
14043 // FIXME: This is an incredibly special case; is there some more general
14044 // way to detect this class of misplaced-parentheses bug?
14045 if (Target->isBooleanType() && isa<CallExpr>(E)) {
14046 // Check last argument of function call to see if it is an
14047 // implicit cast from a type matching the type the result
14048 // is being cast to.
14049 CallExpr *CEx = cast<CallExpr>(E);
14050 if (unsigned NumArgs = CEx->getNumArgs()) {
14051 Expr *LastA = CEx->getArg(NumArgs - 1);
14052 Expr *InnerE = LastA->IgnoreParenImpCasts();
14053 if (isa<ImplicitCastExpr>(LastA) &&
14054 InnerE->getType()->isBooleanType()) {
14055 // Warn on this floating-point to bool conversion.
14056 DiagnoseImpCast(S, E, T, CC,
14057 diag::warn_impcast_floating_point_to_bool);
14058 }
14059 }
14060 }
14061 return;
14062 }
14063
14064 // Valid casts involving fixed point types should be accounted for here.
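// (Fixed point types are the Embedded-C '_Accum' / '_Fract' family; the checks
// below diagnose constants that fall outside the destination's range.)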
14065 if (Source->isFixedPointType()) { 14066 if (Target->isUnsaturatedFixedPointType()) { 14067 Expr::EvalResult Result; 14068 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 14069 S.isConstantEvaluated())) { 14070 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 14071 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 14072 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 14073 if (Value > MaxVal || Value < MinVal) { 14074 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14075 S.PDiag(diag::warn_impcast_fixed_point_range) 14076 << Value.toString() << T 14077 << E->getSourceRange() 14078 << clang::SourceRange(CC)); 14079 return; 14080 } 14081 } 14082 } else if (Target->isIntegerType()) { 14083 Expr::EvalResult Result; 14084 if (!S.isConstantEvaluated() && 14085 E->EvaluateAsFixedPoint(Result, S.Context, 14086 Expr::SE_AllowSideEffects)) { 14087 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 14088 14089 bool Overflowed; 14090 llvm::APSInt IntResult = FXResult.convertToInt( 14091 S.Context.getIntWidth(T), 14092 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 14093 14094 if (Overflowed) { 14095 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14096 S.PDiag(diag::warn_impcast_fixed_point_range) 14097 << FXResult.toString() << T 14098 << E->getSourceRange() 14099 << clang::SourceRange(CC)); 14100 return; 14101 } 14102 } 14103 } 14104 } else if (Target->isUnsaturatedFixedPointType()) { 14105 if (Source->isIntegerType()) { 14106 Expr::EvalResult Result; 14107 if (!S.isConstantEvaluated() && 14108 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 14109 llvm::APSInt Value = Result.Val.getInt(); 14110 14111 bool Overflowed; 14112 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 14113 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 14114 14115 if (Overflowed) { 14116 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14117 S.PDiag(diag::warn_impcast_fixed_point_range) 14118 << toString(Value, /*Radix=*/10) << T 14119 << E->getSourceRange() 14120 << clang::SourceRange(CC)); 14121 return; 14122 } 14123 } 14124 } 14125 } 14126 14127 // If we are casting an integer type to a floating point type without 14128 // initialization-list syntax, we might lose accuracy if the floating 14129 // point type has a narrower significand than the integer type. 14130 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 14131 TargetBT->isFloatingType() && !IsListInit) { 14132 // Determine the number of precision bits in the source integer type. 14133 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 14134 /*Approximate*/ true); 14135 unsigned int SourcePrecision = SourceRange.Width; 14136 14137 // Determine the number of precision bits in the 14138 // target floating point type. 14139 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 14140 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 14141 14142 if (SourcePrecision > 0 && TargetPrecision > 0 && 14143 SourcePrecision > TargetPrecision) { 14144 14145 if (std::optional<llvm::APSInt> SourceInt = 14146 E->getIntegerConstantExpr(S.Context)) { 14147 // If the source integer is a constant, convert it to the target 14148 // floating point type. Issue a warning if the value changes 14149 // during the whole conversion. 
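// e.g. (illustrative) 'float F = 16777217;' warns, because 16777217 (2^24 + 1)
// is not exactly representable in 'float' and becomes 16777216.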
14150 llvm::APFloat TargetFloatValue( 14151 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 14152 llvm::APFloat::opStatus ConversionStatus = 14153 TargetFloatValue.convertFromAPInt( 14154 *SourceInt, SourceBT->isSignedInteger(), 14155 llvm::APFloat::rmNearestTiesToEven); 14156 14157 if (ConversionStatus != llvm::APFloat::opOK) { 14158 SmallString<32> PrettySourceValue; 14159 SourceInt->toString(PrettySourceValue, 10); 14160 SmallString<32> PrettyTargetValue; 14161 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 14162 14163 S.DiagRuntimeBehavior( 14164 E->getExprLoc(), E, 14165 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 14166 << PrettySourceValue << PrettyTargetValue << E->getType() << T 14167 << E->getSourceRange() << clang::SourceRange(CC)); 14168 } 14169 } else { 14170 // Otherwise, the implicit conversion may lose precision. 14171 DiagnoseImpCast(S, E, T, CC, 14172 diag::warn_impcast_integer_float_precision); 14173 } 14174 } 14175 } 14176 14177 DiagnoseNullConversion(S, E, T, CC); 14178 14179 S.DiscardMisalignedMemberAddress(Target, E); 14180 14181 if (Target->isBooleanType()) 14182 DiagnoseIntInBoolContext(S, E); 14183 14184 if (!Source->isIntegerType() || !Target->isIntegerType()) 14185 return; 14186 14187 // TODO: remove this early return once the false positives for constant->bool 14188 // in templates, macros, etc, are reduced or removed. 14189 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 14190 return; 14191 14192 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 14193 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 14194 return adornObjCBoolConversionDiagWithTernaryFixit( 14195 S, E, 14196 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 14197 << E->getType()); 14198 } 14199 14200 IntRange SourceTypeRange = 14201 IntRange::forTargetOfCanonicalType(S.Context, Source); 14202 IntRange LikelySourceRange = 14203 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 14204 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 14205 14206 if (LikelySourceRange.Width > TargetRange.Width) { 14207 // If the source is a constant, use a default-on diagnostic. 14208 // TODO: this should happen for bitfield stores, too. 14209 Expr::EvalResult Result; 14210 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 14211 S.isConstantEvaluated())) { 14212 llvm::APSInt Value(32); 14213 Value = Result.Val.getInt(); 14214 14215 if (S.SourceMgr.isInSystemMacro(CC)) 14216 return; 14217 14218 std::string PrettySourceValue = toString(Value, 10); 14219 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 14220 14221 S.DiagRuntimeBehavior( 14222 E->getExprLoc(), E, 14223 S.PDiag(diag::warn_impcast_integer_precision_constant) 14224 << PrettySourceValue << PrettyTargetValue << E->getType() << T 14225 << E->getSourceRange() << SourceRange(CC)); 14226 return; 14227 } 14228 14229 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
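// (That is, a 64-bit to 32-bit truncation such as assigning a 'long long' to
// an 'int' gets its own diagnostic group, -Wshorten-64-to-32, below.)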
14230 if (S.SourceMgr.isInSystemMacro(CC))
14231 return;
14232
14233 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
14234 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
14235 /* pruneControlFlow */ true);
14236 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
14237 }
14238
14239 if (TargetRange.Width > SourceTypeRange.Width) {
14240 if (auto *UO = dyn_cast<UnaryOperator>(E))
14241 if (UO->getOpcode() == UO_Minus)
14242 if (Source->isUnsignedIntegerType()) {
14243 if (Target->isUnsignedIntegerType())
14244 return DiagnoseImpCast(S, E, T, CC,
14245 diag::warn_impcast_high_order_zero_bits);
14246 if (Target->isSignedIntegerType())
14247 return DiagnoseImpCast(S, E, T, CC,
14248 diag::warn_impcast_nonnegative_result);
14249 }
14250 }
14251
14252 if (TargetRange.Width == LikelySourceRange.Width &&
14253 !TargetRange.NonNegative && LikelySourceRange.NonNegative &&
14254 Source->isSignedIntegerType()) {
14255 // Warn on a signed-to-signed conversion where the positive source value
14256 // requires exactly the width of the target type; storing it will set the
14257 // sign bit and produce a negative value.
14258
14259 Expr::EvalResult Result;
14260 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
14261 !S.SourceMgr.isInSystemMacro(CC)) {
14262 llvm::APSInt Value = Result.Val.getInt();
14263 if (isSameWidthConstantConversion(S, E, T, CC)) {
14264 std::string PrettySourceValue = toString(Value, 10);
14265 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
14266
14267 S.DiagRuntimeBehavior(
14268 E->getExprLoc(), E,
14269 S.PDiag(diag::warn_impcast_integer_precision_constant)
14270 << PrettySourceValue << PrettyTargetValue << E->getType() << T
14271 << E->getSourceRange() << SourceRange(CC));
14272 return;
14273 }
14274 }
14275
14276 // Fall through for non-constants to give a sign conversion warning.
14277 }
14278
14279 if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) &&
14280 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
14281 (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
14282 LikelySourceRange.Width == TargetRange.Width))) {
14283 if (S.SourceMgr.isInSystemMacro(CC))
14284 return;
14285
14286 unsigned DiagID = diag::warn_impcast_integer_sign;
14287
14288 // Traditionally, gcc has warned about this under -Wsign-compare.
14289 // We also want to warn about it in -Wconversion.
14290 // So if -Wconversion is off, use a completely identical diagnostic
14291 // in the sign-compare group.
14292 // The conditional-checking code will use ICContext to detect this case.
14293 if (ICContext) {
14294 DiagID = diag::warn_impcast_integer_sign_conditional;
14295 *ICContext = true;
14296 }
14297
14298 return DiagnoseImpCast(S, E, T, CC, DiagID);
14299 }
14300
14301 // Diagnose conversions between different enumeration types.
14302 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
14303 // type, to give us better diagnostics.
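// e.g. (illustrative, C) 'enum Fruit { Apple }; enum Color { Red };
// enum Color C = Apple;' is diagnosed under -Wenum-conversion.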
14304 QualType SourceType = E->getType(); 14305 if (!S.getLangOpts().CPlusPlus) { 14306 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 14307 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 14308 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 14309 SourceType = S.Context.getTypeDeclType(Enum); 14310 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 14311 } 14312 } 14313 14314 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 14315 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 14316 if (SourceEnum->getDecl()->hasNameForLinkage() && 14317 TargetEnum->getDecl()->hasNameForLinkage() && 14318 SourceEnum != TargetEnum) { 14319 if (S.SourceMgr.isInSystemMacro(CC)) 14320 return; 14321 14322 return DiagnoseImpCast(S, E, SourceType, T, CC, 14323 diag::warn_impcast_different_enum_types); 14324 } 14325 } 14326 14327 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 14328 SourceLocation CC, QualType T); 14329 14330 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 14331 SourceLocation CC, bool &ICContext) { 14332 E = E->IgnoreParenImpCasts(); 14333 14334 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 14335 return CheckConditionalOperator(S, CO, CC, T); 14336 14337 AnalyzeImplicitConversions(S, E, CC); 14338 if (E->getType() != T) 14339 return CheckImplicitConversion(S, E, T, CC, &ICContext); 14340 } 14341 14342 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 14343 SourceLocation CC, QualType T) { 14344 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 14345 14346 Expr *TrueExpr = E->getTrueExpr(); 14347 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 14348 TrueExpr = BCO->getCommon(); 14349 14350 bool Suspicious = false; 14351 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 14352 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 14353 14354 if (T->isBooleanType()) 14355 DiagnoseIntInBoolContext(S, E); 14356 14357 // If -Wconversion would have warned about either of the candidates 14358 // for a signedness conversion to the context type... 14359 if (!Suspicious) return; 14360 14361 // ...but it's currently ignored... 14362 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 14363 return; 14364 14365 // ...then check whether it would have warned about either of the 14366 // candidates for a signedness conversion to the condition type. 14367 if (E->getType() == T) return; 14368 14369 Suspicious = false; 14370 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 14371 E->getType(), CC, &Suspicious); 14372 if (!Suspicious) 14373 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 14374 E->getType(), CC, &Suspicious); 14375 } 14376 14377 /// Check conversion of given expression to boolean. 14378 /// Input argument E is a logical expression. 14379 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 14380 if (S.getLangOpts().Bool) 14381 return; 14382 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 14383 return; 14384 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 14385 } 14386 14387 namespace { 14388 struct AnalyzeImplicitConversionsWorkItem { 14389 Expr *E; 14390 SourceLocation CC; 14391 bool IsListInit; 14392 }; 14393 } 14394 14395 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 14396 /// that should be visited are added to WorkList. 
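/// (The wrapper overload further below owns the work list and drains it in a
/// loop.)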
14397 static void AnalyzeImplicitConversions(
14398 Sema &S, AnalyzeImplicitConversionsWorkItem Item,
14399 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
14400 Expr *OrigE = Item.E;
14401 SourceLocation CC = Item.CC;
14402
14403 QualType T = OrigE->getType();
14404 Expr *E = OrigE->IgnoreParenImpCasts();
14405
14406 // Propagate whether we are in a C++ list initialization expression.
14407 // If so, we do not issue warnings for implicit int-float conversion
14408 // precision loss, because C++11 narrowing already handles it.
14409 bool IsListInit = Item.IsListInit ||
14410 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);
14411
14412 if (E->isTypeDependent() || E->isValueDependent())
14413 return;
14414
14415 Expr *SourceExpr = E;
14416 // Examine, but don't traverse into, the source expression of an
14417 // OpaqueValueExpr, since it may have multiple parents and we don't want to
14418 // emit duplicate diagnostics. It's fine to examine its form or attempt to
14419 // evaluate it in the context of checking the specific conversion to T, though.
14420 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
14421 if (auto *Src = OVE->getSourceExpr())
14422 SourceExpr = Src;
14423
14424 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
14425 if (UO->getOpcode() == UO_Not &&
14426 UO->getSubExpr()->isKnownToHaveBooleanValue())
14427 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
14428 << OrigE->getSourceRange() << T->isBooleanType()
14429 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");
14430
14431 if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr))
14432 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
14433 BO->getLHS()->isKnownToHaveBooleanValue() &&
14434 BO->getRHS()->isKnownToHaveBooleanValue() &&
14435 BO->getLHS()->HasSideEffects(S.Context) &&
14436 BO->getRHS()->HasSideEffects(S.Context)) {
14437 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
14438 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
14439 << FixItHint::CreateReplacement(
14440 BO->getOperatorLoc(),
14441 (BO->getOpcode() == BO_And ? "&&" : "||"));
14442 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
14443 }
14444
14445 // For conditional operators, we analyze the arguments as if they
14446 // were being fed directly into the output.
14447 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
14448 CheckConditionalOperator(S, CO, CC, T);
14449 return;
14450 }
14451
14452 // Check implicit argument conversions for function calls.
14453 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
14454 CheckImplicitArgumentConversions(S, Call, CC);
14455
14456 // Go ahead and check any implicit conversions we might have skipped.
14457 // The non-canonical typecheck is just an optimization;
14458 // CheckImplicitConversion will filter out dead implicit conversions.
14459 if (SourceExpr->getType() != T)
14460 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);
14461
14462 // Now continue drilling into this expression.
14463
14464 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
14465 // The bound subexpressions in a PseudoObjectExpr are not reachable
14466 // as transitive children.
14467 // FIXME: Use a more uniform representation for this.
14468 for (auto *SE : POE->semantics())
14469 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
14470 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
14471 }
14472
14473 // Skip past explicit casts.
14474 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 14475 E = CE->getSubExpr()->IgnoreParenImpCasts(); 14476 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 14477 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 14478 WorkList.push_back({E, CC, IsListInit}); 14479 return; 14480 } 14481 14482 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 14483 // Do a somewhat different check with comparison operators. 14484 if (BO->isComparisonOp()) 14485 return AnalyzeComparison(S, BO); 14486 14487 // And with simple assignments. 14488 if (BO->getOpcode() == BO_Assign) 14489 return AnalyzeAssignment(S, BO); 14490 // And with compound assignments. 14491 if (BO->isAssignmentOp()) 14492 return AnalyzeCompoundAssignment(S, BO); 14493 } 14494 14495 // These break the otherwise-useful invariant below. Fortunately, 14496 // we don't really need to recurse into them, because any internal 14497 // expressions should have been analyzed already when they were 14498 // built into statements. 14499 if (isa<StmtExpr>(E)) return; 14500 14501 // Don't descend into unevaluated contexts. 14502 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 14503 14504 // Now just recurse over the expression's children. 14505 CC = E->getExprLoc(); 14506 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 14507 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 14508 for (Stmt *SubStmt : E->children()) { 14509 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 14510 if (!ChildExpr) 14511 continue; 14512 14513 if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E)) 14514 if (ChildExpr == CSE->getOperand()) 14515 // Do not recurse over a CoroutineSuspendExpr's operand. 14516 // The operand is also a subexpression of getCommonExpr(), and 14517 // recursing into it directly would produce duplicate diagnostics. 14518 continue; 14519 14520 if (IsLogicalAndOperator && 14521 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 14522 // Ignore checking string literals that are in logical and operators. 14523 // This is a common pattern for asserts. 14524 continue; 14525 WorkList.push_back({ChildExpr, CC, IsListInit}); 14526 } 14527 14528 if (BO && BO->isLogicalOp()) { 14529 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 14530 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 14531 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 14532 14533 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 14534 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 14535 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 14536 } 14537 14538 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 14539 if (U->getOpcode() == UO_LNot) { 14540 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 14541 } else if (U->getOpcode() != UO_AddrOf) { 14542 if (U->getSubExpr()->getType()->isAtomicType()) 14543 S.Diag(U->getSubExpr()->getBeginLoc(), 14544 diag::warn_atomic_implicit_seq_cst); 14545 } 14546 } 14547 } 14548 14549 /// AnalyzeImplicitConversions - Find and report any interesting 14550 /// implicit conversions in the given expression. There are a couple 14551 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 
14552 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
14553 bool IsListInit/*= false*/) {
14554 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
14555 WorkList.push_back({OrigE, CC, IsListInit});
14556 while (!WorkList.empty())
14557 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
14558 }
14559
14560 /// Diagnose integer type and any valid implicit conversion to it.
14561 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
14562 // Taking into account implicit conversions,
14563 // allow any integer.
14564 if (!E->getType()->isIntegerType()) {
14565 S.Diag(E->getBeginLoc(),
14566 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
14567 return true;
14568 }
14569 // Potentially emit standard warnings for implicit conversions if enabled
14570 // using -Wconversion.
14571 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
14572 return false;
14573 }
14574
14575 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
14576 // Returns true when emitting a warning about taking the address of a reference.
14577 static bool CheckForReference(Sema &SemaRef, const Expr *E,
14578 const PartialDiagnostic &PD) {
14579 E = E->IgnoreParenImpCasts();
14580
14581 const FunctionDecl *FD = nullptr;
14582
14583 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
14584 if (!DRE->getDecl()->getType()->isReferenceType())
14585 return false;
14586 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
14587 if (!M->getMemberDecl()->getType()->isReferenceType())
14588 return false;
14589 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
14590 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
14591 return false;
14592 FD = Call->getDirectCallee();
14593 } else {
14594 return false;
14595 }
14596
14597 SemaRef.Diag(E->getExprLoc(), PD);
14598
14599 // If possible, point to location of function.
14600 if (FD) {
14601 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
14602 }
14603
14604 return true;
14605 }
14606
14607 // Returns true if the SourceLocation is expanded from any macro body.
14608 // Returns false if the SourceLocation is invalid, is not in a macro
14609 // expansion, or is expanded from a top-level macro argument.
14610 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
14611 if (Loc.isInvalid())
14612 return false;
14613
14614 while (Loc.isMacroID()) {
14615 if (SM.isMacroBodyExpansion(Loc))
14616 return true;
14617 Loc = SM.getImmediateMacroCallerLoc(Loc);
14618 }
14619
14620 return false;
14621 }
14622
14623 /// Diagnose pointers that are always non-null.
14624 /// \param E the expression containing the pointer
14625 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
14626 /// compared to a null pointer
14627 /// \param IsEqual True when the comparison is equal to a null pointer
14628 /// \param Range Extra SourceRange to highlight in the diagnostic
14629 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
14630 Expr::NullPointerConstantKind NullKind,
14631 bool IsEqual, SourceRange Range) {
14632 if (!E)
14633 return;
14634
14635 // Don't warn inside macros.
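// (Code expanded from a macro body is deliberately left alone; an expression
// that is only a top-level macro argument is still diagnosed.)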
14636 if (E->getExprLoc().isMacroID()) { 14637 const SourceManager &SM = getSourceManager(); 14638 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 14639 IsInAnyMacroBody(SM, Range.getBegin())) 14640 return; 14641 } 14642 E = E->IgnoreImpCasts(); 14643 14644 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 14645 14646 if (isa<CXXThisExpr>(E)) { 14647 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 14648 : diag::warn_this_bool_conversion; 14649 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 14650 return; 14651 } 14652 14653 bool IsAddressOf = false; 14654 14655 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 14656 if (UO->getOpcode() != UO_AddrOf) 14657 return; 14658 IsAddressOf = true; 14659 E = UO->getSubExpr(); 14660 } 14661 14662 if (IsAddressOf) { 14663 unsigned DiagID = IsCompare 14664 ? diag::warn_address_of_reference_null_compare 14665 : diag::warn_address_of_reference_bool_conversion; 14666 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 14667 << IsEqual; 14668 if (CheckForReference(*this, E, PD)) { 14669 return; 14670 } 14671 } 14672 14673 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 14674 bool IsParam = isa<NonNullAttr>(NonnullAttr); 14675 std::string Str; 14676 llvm::raw_string_ostream S(Str); 14677 E->printPretty(S, nullptr, getPrintingPolicy()); 14678 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 14679 : diag::warn_cast_nonnull_to_bool; 14680 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 14681 << E->getSourceRange() << Range << IsEqual; 14682 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 14683 }; 14684 14685 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 14686 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 14687 if (auto *Callee = Call->getDirectCallee()) { 14688 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 14689 ComplainAboutNonnullParamOrCall(A); 14690 return; 14691 } 14692 } 14693 } 14694 14695 // Expect to find a single Decl. Skip anything more complicated. 14696 ValueDecl *D = nullptr; 14697 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 14698 D = R->getDecl(); 14699 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 14700 D = M->getMemberDecl(); 14701 } 14702 14703 // Weak Decls can be null. 14704 if (!D || D->isWeak()) 14705 return; 14706 14707 // Check for parameter decl with nonnull attribute 14708 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 14709 if (getCurFunction() && 14710 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 14711 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 14712 ComplainAboutNonnullParamOrCall(A); 14713 return; 14714 } 14715 14716 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 14717 // Skip function template not specialized yet. 
14718 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
14719 return;
14720 auto ParamIter = llvm::find(FD->parameters(), PV);
14721 assert(ParamIter != FD->param_end());
14722 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
14723
14724 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
14725 if (!NonNull->args_size()) {
14726 ComplainAboutNonnullParamOrCall(NonNull);
14727 return;
14728 }
14729
14730 for (const ParamIdx &ArgNo : NonNull->args()) {
14731 if (ArgNo.getASTIndex() == ParamNo) {
14732 ComplainAboutNonnullParamOrCall(NonNull);
14733 return;
14734 }
14735 }
14736 }
14737 }
14738 }
14739 }
14740
14741 QualType T = D->getType();
14742 const bool IsArray = T->isArrayType();
14743 const bool IsFunction = T->isFunctionType();
14744
14745 // Address of function is used to silence the function warning.
14746 if (IsAddressOf && IsFunction) {
14747 return;
14748 }
14749
14750 // Found nothing.
14751 if (!IsAddressOf && !IsFunction && !IsArray)
14752 return;
14753
14754 // Pretty print the expression for the diagnostic.
14755 std::string Str;
14756 llvm::raw_string_ostream S(Str);
14757 E->printPretty(S, nullptr, getPrintingPolicy());
14758
14759 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
14760 : diag::warn_impcast_pointer_to_bool;
14761 enum {
14762 AddressOf,
14763 FunctionPointer,
14764 ArrayPointer
14765 } DiagType;
14766 if (IsAddressOf)
14767 DiagType = AddressOf;
14768 else if (IsFunction)
14769 DiagType = FunctionPointer;
14770 else if (IsArray)
14771 DiagType = ArrayPointer;
14772 else
14773 llvm_unreachable("Could not determine diagnostic.");
14774 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
14775 << Range << IsEqual;
14776
14777 if (!IsFunction)
14778 return;
14779
14780 // Suggest '&' to silence the function warning.
14781 Diag(E->getExprLoc(), diag::note_function_warning_silence)
14782 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
14783
14784 // Check to see if '()' fixit should be emitted.
14785 QualType ReturnType;
14786 UnresolvedSet<4> NonTemplateOverloads;
14787 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
14788 if (ReturnType.isNull())
14789 return;
14790
14791 if (IsCompare) {
14792 // There are two cases here. If the null is a null pointer constant, only
14793 // suggest the fixit for a pointer return type. If the null is 0, then
14794 // suggest it if the return type is a pointer or an integer type.
14795 if (!ReturnType->isPointerType()) {
14796 if (NullKind == Expr::NPCK_ZeroExpression ||
14797 NullKind == Expr::NPCK_ZeroLiteral) {
14798 if (!ReturnType->isIntegerType())
14799 return;
14800 } else {
14801 return;
14802 }
14803 }
14804 } else { // !IsCompare
14805 // For function to bool, only suggest if the function pointer has bool
14806 // return type.
14807 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
14808 return;
14809 }
14810 Diag(E->getExprLoc(), diag::note_function_to_function_call)
14811 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
14812 }
14813
14814 /// Diagnoses "dangerous" implicit conversions within the given
14815 /// expression (which is a full expression). Implements -Wconversion
14816 /// and -Wsign-compare.
14817 ///
14818 /// \param CC the "context" location of the implicit conversion, i.e.
14819 /// the location of the syntactic entity requiring the implicit
14820 /// conversion
14821 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
14822 // Don't diagnose in unevaluated contexts.
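// (e.g. the operand of sizeof, decltype, or noexcept; conversions there are
// never actually performed.)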
14823 if (isUnevaluatedContext()) 14824 return; 14825 14826 // Don't diagnose for value- or type-dependent expressions. 14827 if (E->isTypeDependent() || E->isValueDependent()) 14828 return; 14829 14830 // Check for array bounds violations in cases where the check isn't triggered 14831 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 14832 // ArraySubscriptExpr is on the RHS of a variable initialization. 14833 CheckArrayAccess(E); 14834 14835 // This is not the right CC for (e.g.) a variable initialization. 14836 AnalyzeImplicitConversions(*this, E, CC); 14837 } 14838 14839 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 14840 /// Input argument E is a logical expression. 14841 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 14842 ::CheckBoolLikeConversion(*this, E, CC); 14843 } 14844 14845 /// Diagnose when expression is an integer constant expression and its evaluation 14846 /// results in integer overflow 14847 void Sema::CheckForIntOverflow (Expr *E) { 14848 // Use a work list to deal with nested struct initializers. 14849 SmallVector<Expr *, 2> Exprs(1, E); 14850 14851 do { 14852 Expr *OriginalE = Exprs.pop_back_val(); 14853 Expr *E = OriginalE->IgnoreParenCasts(); 14854 14855 if (isa<BinaryOperator>(E)) { 14856 E->EvaluateForOverflow(Context); 14857 continue; 14858 } 14859 14860 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 14861 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 14862 else if (isa<ObjCBoxedExpr>(OriginalE)) 14863 E->EvaluateForOverflow(Context); 14864 else if (auto Call = dyn_cast<CallExpr>(E)) 14865 Exprs.append(Call->arg_begin(), Call->arg_end()); 14866 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 14867 Exprs.append(Message->arg_begin(), Message->arg_end()); 14868 else if (auto Construct = dyn_cast<CXXConstructExpr>(E)) 14869 Exprs.append(Construct->arg_begin(), Construct->arg_end()); 14870 else if (auto Array = dyn_cast<ArraySubscriptExpr>(E)) 14871 Exprs.push_back(Array->getIdx()); 14872 else if (auto Compound = dyn_cast<CompoundLiteralExpr>(E)) 14873 Exprs.push_back(Compound->getInitializer()); 14874 else if (auto New = dyn_cast<CXXNewExpr>(E)) { 14875 if (New->isArray()) 14876 if (auto ArraySize = New->getArraySize()) 14877 Exprs.push_back(*ArraySize); 14878 } 14879 } while (!Exprs.empty()); 14880 } 14881 14882 namespace { 14883 14884 /// Visitor for expressions which looks for unsequenced operations on the 14885 /// same object. 14886 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 14887 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 14888 14889 /// A tree of sequenced regions within an expression. Two regions are 14890 /// unsequenced if one is an ancestor or a descendent of the other. When we 14891 /// finish processing an expression with sequencing, such as a comma 14892 /// expression, we fold its tree nodes into its parent, since they are 14893 /// unsequenced with respect to nodes we will visit later. 14894 class SequenceTree { 14895 struct Value { 14896 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 14897 unsigned Parent : 31; 14898 unsigned Merged : 1; 14899 }; 14900 SmallVector<Value, 8> Values; 14901 14902 public: 14903 /// A region within an expression which may be sequenced with respect 14904 /// to some other region. 
14905 class Seq { 14906 friend class SequenceTree; 14907 14908 unsigned Index; 14909 14910 explicit Seq(unsigned N) : Index(N) {} 14911 14912 public: 14913 Seq() : Index(0) {} 14914 }; 14915 14916 SequenceTree() { Values.push_back(Value(0)); } 14917 Seq root() const { return Seq(0); } 14918 14919 /// Create a new sequence of operations, which is an unsequenced 14920 /// subset of \p Parent. This sequence of operations is sequenced with 14921 /// respect to other children of \p Parent. 14922 Seq allocate(Seq Parent) { 14923 Values.push_back(Value(Parent.Index)); 14924 return Seq(Values.size() - 1); 14925 } 14926 14927 /// Merge a sequence of operations into its parent. 14928 void merge(Seq S) { 14929 Values[S.Index].Merged = true; 14930 } 14931 14932 /// Determine whether two operations are unsequenced. This operation 14933 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 14934 /// should have been merged into its parent as appropriate. 14935 bool isUnsequenced(Seq Cur, Seq Old) { 14936 unsigned C = representative(Cur.Index); 14937 unsigned Target = representative(Old.Index); 14938 while (C >= Target) { 14939 if (C == Target) 14940 return true; 14941 C = Values[C].Parent; 14942 } 14943 return false; 14944 } 14945 14946 private: 14947 /// Pick a representative for a sequence. 14948 unsigned representative(unsigned K) { 14949 if (Values[K].Merged) 14950 // Perform path compression as we go. 14951 return Values[K].Parent = representative(Values[K].Parent); 14952 return K; 14953 } 14954 }; 14955 14956 /// An object for which we can track unsequenced uses. 14957 using Object = const NamedDecl *; 14958 14959 /// Different flavors of object usage which we track. We only track the 14960 /// least-sequenced usage of each kind. 14961 enum UsageKind { 14962 /// A read of an object. Multiple unsequenced reads are OK. 14963 UK_Use, 14964 14965 /// A modification of an object which is sequenced before the value 14966 /// computation of the expression, such as ++n in C++. 14967 UK_ModAsValue, 14968 14969 /// A modification of an object which is not sequenced before the value 14970 /// computation of the expression, such as n++. 14971 UK_ModAsSideEffect, 14972 14973 UK_Count = UK_ModAsSideEffect + 1 14974 }; 14975 14976 /// Bundle together a sequencing region and the expression corresponding 14977 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 14978 struct Usage { 14979 const Expr *UsageExpr; 14980 SequenceTree::Seq Seq; 14981 14982 Usage() : UsageExpr(nullptr) {} 14983 }; 14984 14985 struct UsageInfo { 14986 Usage Uses[UK_Count]; 14987 14988 /// Have we issued a diagnostic for this object already? 14989 bool Diagnosed; 14990 14991 UsageInfo() : Diagnosed(false) {} 14992 }; 14993 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 14994 14995 Sema &SemaRef; 14996 14997 /// Sequenced regions within the expression. 14998 SequenceTree Tree; 14999 15000 /// Declaration modifications and references which we have seen. 15001 UsageInfoMap UsageMap; 15002 15003 /// The region we are currently within. 15004 SequenceTree::Seq Region; 15005 15006 /// Filled in with declarations which were modified as a side-effect 15007 /// (that is, post-increment operations). 15008 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 15009 15010 /// Expressions to check later. We defer checking these to reduce 15011 /// stack usage. 
15012 SmallVectorImpl<const Expr *> &WorkList; 15013 15014 /// RAII object wrapping the visitation of a sequenced subexpression of an 15015 /// expression. At the end of this process, the side-effects of the evaluation 15016 /// become sequenced with respect to the value computation of the result, so 15017 /// we downgrade any UK_ModAsSideEffect within the evaluation to 15018 /// UK_ModAsValue. 15019 struct SequencedSubexpression { 15020 SequencedSubexpression(SequenceChecker &Self) 15021 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 15022 Self.ModAsSideEffect = &ModAsSideEffect; 15023 } 15024 15025 ~SequencedSubexpression() { 15026 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 15027 // Add a new usage with usage kind UK_ModAsValue, and then restore 15028 // the previous usage with UK_ModAsSideEffect (thus clearing it if 15029 // the previous one was empty). 15030 UsageInfo &UI = Self.UsageMap[M.first]; 15031 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 15032 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 15033 SideEffectUsage = M.second; 15034 } 15035 Self.ModAsSideEffect = OldModAsSideEffect; 15036 } 15037 15038 SequenceChecker &Self; 15039 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 15040 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 15041 }; 15042 15043 /// RAII object wrapping the visitation of a subexpression which we might 15044 /// choose to evaluate as a constant. If any subexpression is evaluated and 15045 /// found to be non-constant, this allows us to suppress the evaluation of 15046 /// the outer expression. 15047 class EvaluationTracker { 15048 public: 15049 EvaluationTracker(SequenceChecker &Self) 15050 : Self(Self), Prev(Self.EvalTracker) { 15051 Self.EvalTracker = this; 15052 } 15053 15054 ~EvaluationTracker() { 15055 Self.EvalTracker = Prev; 15056 if (Prev) 15057 Prev->EvalOK &= EvalOK; 15058 } 15059 15060 bool evaluate(const Expr *E, bool &Result) { 15061 if (!EvalOK || E->isValueDependent()) 15062 return false; 15063 EvalOK = E->EvaluateAsBooleanCondition( 15064 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 15065 return EvalOK; 15066 } 15067 15068 private: 15069 SequenceChecker &Self; 15070 EvaluationTracker *Prev; 15071 bool EvalOK = true; 15072 } *EvalTracker = nullptr; 15073 15074 /// Find the object which is produced by the specified expression, 15075 /// if any. 15076 Object getObject(const Expr *E, bool Mod) const { 15077 E = E->IgnoreParenCasts(); 15078 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 15079 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 15080 return getObject(UO->getSubExpr(), Mod); 15081 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 15082 if (BO->getOpcode() == BO_Comma) 15083 return getObject(BO->getRHS(), Mod); 15084 if (Mod && BO->isAssignmentOp()) 15085 return getObject(BO->getLHS(), Mod); 15086 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 15087 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 15088 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 15089 return ME->getMemberDecl(); 15090 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 15091 // FIXME: If this is a reference, map through to its value. 15092 return DRE->getDecl(); 15093 return nullptr; 15094 } 15095 15096 /// Note that an object \p O was modified or used by an expression 15097 /// \p UsageExpr with usage kind \p UK. 
\p UI is the \p UsageInfo for
15098 /// the object \p O as obtained via the \p UsageMap.
15099 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
15100 // Get the old usage for the given object and usage kind.
15101 Usage &U = UI.Uses[UK];
15102 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
15103 // If we have a modification as side effect and are in a sequenced
15104 // subexpression, save the old Usage so that we can restore it later
15105 // in SequencedSubexpression::~SequencedSubexpression.
15106 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
15107 ModAsSideEffect->push_back(std::make_pair(O, U));
15108 // Then record the new usage with the current sequencing region.
15109 U.UsageExpr = UsageExpr;
15110 U.Seq = Region;
15111 }
15112 }
15113 
15114 /// Check whether a modification or use of an object \p O in an expression
15115 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
15116 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
15117 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
15118 /// usage and false when we are checking for a mod-use unsequenced usage.
15119 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
15120 UsageKind OtherKind, bool IsModMod) {
15121 if (UI.Diagnosed)
15122 return;
15123 
15124 const Usage &U = UI.Uses[OtherKind];
15125 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
15126 return;
15127 
15128 const Expr *Mod = U.UsageExpr;
15129 const Expr *ModOrUse = UsageExpr;
15130 if (OtherKind == UK_Use)
15131 std::swap(Mod, ModOrUse);
15132 
15133 SemaRef.DiagRuntimeBehavior(
15134 Mod->getExprLoc(), {Mod, ModOrUse},
15135 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
15136 : diag::warn_unsequenced_mod_use)
15137 << O << SourceRange(ModOrUse->getExprLoc()));
15138 UI.Diagnosed = true;
15139 }
15140 
15141 // A note on note{Pre, Post}{Use, Mod}:
15142 //
15143 // (It helps to follow the algorithm with an expression such as
15144 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
15145 // operations before C++17 and both are well-defined in C++17).
15146 //
15147 // When visiting a node which uses/modifies an object we first call notePreUse
15148 // or notePreMod before visiting its sub-expression(s). At this point the
15149 // children of the current node have not yet been visited and so the eventual
15150 // uses/modifications resulting from the children of the current node have not
15151 // been recorded yet.
15152 //
15153 // We then visit the children of the current node. After that notePostUse or
15154 // notePostMod is called. These will 1) detect an unsequenced modification
15155 // as side effect (as in "k++ + k") and 2) add a new usage with the
15156 // appropriate usage kind.
15157 //
15158 // We also have to be careful that some operations sequence modifications as
15159 // side effects as well (for example: || or ,). To account for this we wrap
15160 // the visitation of such a sub-expression (for example: the LHS of || or ,)
15161 // with SequencedSubexpression. SequencedSubexpression is an RAII object
15162 // which records usages which are modifications as side effects, and then
15163 // downgrades them (or more accurately restores the previous usage which was
15164 // a modification as side effect) when exiting the scope of the sequenced
15165 // subexpression.
15166 
15167 void notePreUse(Object O, const Expr *UseExpr) {
15168 UsageInfo &UI = UsageMap[O];
15169 // Uses conflict with other modifications.
15170 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 15171 } 15172 15173 void notePostUse(Object O, const Expr *UseExpr) { 15174 UsageInfo &UI = UsageMap[O]; 15175 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 15176 /*IsModMod=*/false); 15177 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 15178 } 15179 15180 void notePreMod(Object O, const Expr *ModExpr) { 15181 UsageInfo &UI = UsageMap[O]; 15182 // Modifications conflict with other modifications and with uses. 15183 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 15184 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 15185 } 15186 15187 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 15188 UsageInfo &UI = UsageMap[O]; 15189 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 15190 /*IsModMod=*/true); 15191 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 15192 } 15193 15194 public: 15195 SequenceChecker(Sema &S, const Expr *E, 15196 SmallVectorImpl<const Expr *> &WorkList) 15197 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 15198 Visit(E); 15199 // Silence a -Wunused-private-field since WorkList is now unused. 15200 // TODO: Evaluate if it can be used, and if not remove it. 15201 (void)this->WorkList; 15202 } 15203 15204 void VisitStmt(const Stmt *S) { 15205 // Skip all statements which aren't expressions for now. 15206 } 15207 15208 void VisitExpr(const Expr *E) { 15209 // By default, just recurse to evaluated subexpressions. 15210 Base::VisitStmt(E); 15211 } 15212 15213 void VisitCastExpr(const CastExpr *E) { 15214 Object O = Object(); 15215 if (E->getCastKind() == CK_LValueToRValue) 15216 O = getObject(E->getSubExpr(), false); 15217 15218 if (O) 15219 notePreUse(O, E); 15220 VisitExpr(E); 15221 if (O) 15222 notePostUse(O, E); 15223 } 15224 15225 void VisitSequencedExpressions(const Expr *SequencedBefore, 15226 const Expr *SequencedAfter) { 15227 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 15228 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 15229 SequenceTree::Seq OldRegion = Region; 15230 15231 { 15232 SequencedSubexpression SeqBefore(*this); 15233 Region = BeforeRegion; 15234 Visit(SequencedBefore); 15235 } 15236 15237 Region = AfterRegion; 15238 Visit(SequencedAfter); 15239 15240 Region = OldRegion; 15241 15242 Tree.merge(BeforeRegion); 15243 Tree.merge(AfterRegion); 15244 } 15245 15246 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 15247 // C++17 [expr.sub]p1: 15248 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 15249 // expression E1 is sequenced before the expression E2. 15250 if (SemaRef.getLangOpts().CPlusPlus17) 15251 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 15252 else { 15253 Visit(ASE->getLHS()); 15254 Visit(ASE->getRHS()); 15255 } 15256 } 15257 15258 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 15259 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 15260 void VisitBinPtrMem(const BinaryOperator *BO) { 15261 // C++17 [expr.mptr.oper]p4: 15262 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 15263 // the expression E1 is sequenced before the expression E2. 
15264 if (SemaRef.getLangOpts().CPlusPlus17) 15265 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 15266 else { 15267 Visit(BO->getLHS()); 15268 Visit(BO->getRHS()); 15269 } 15270 } 15271 15272 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 15273 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 15274 void VisitBinShlShr(const BinaryOperator *BO) { 15275 // C++17 [expr.shift]p4: 15276 // The expression E1 is sequenced before the expression E2. 15277 if (SemaRef.getLangOpts().CPlusPlus17) 15278 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 15279 else { 15280 Visit(BO->getLHS()); 15281 Visit(BO->getRHS()); 15282 } 15283 } 15284 15285 void VisitBinComma(const BinaryOperator *BO) { 15286 // C++11 [expr.comma]p1: 15287 // Every value computation and side effect associated with the left 15288 // expression is sequenced before every value computation and side 15289 // effect associated with the right expression. 15290 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 15291 } 15292 15293 void VisitBinAssign(const BinaryOperator *BO) { 15294 SequenceTree::Seq RHSRegion; 15295 SequenceTree::Seq LHSRegion; 15296 if (SemaRef.getLangOpts().CPlusPlus17) { 15297 RHSRegion = Tree.allocate(Region); 15298 LHSRegion = Tree.allocate(Region); 15299 } else { 15300 RHSRegion = Region; 15301 LHSRegion = Region; 15302 } 15303 SequenceTree::Seq OldRegion = Region; 15304 15305 // C++11 [expr.ass]p1: 15306 // [...] the assignment is sequenced after the value computation 15307 // of the right and left operands, [...] 15308 // 15309 // so check it before inspecting the operands and update the 15310 // map afterwards. 15311 Object O = getObject(BO->getLHS(), /*Mod=*/true); 15312 if (O) 15313 notePreMod(O, BO); 15314 15315 if (SemaRef.getLangOpts().CPlusPlus17) { 15316 // C++17 [expr.ass]p1: 15317 // [...] The right operand is sequenced before the left operand. [...] 15318 { 15319 SequencedSubexpression SeqBefore(*this); 15320 Region = RHSRegion; 15321 Visit(BO->getRHS()); 15322 } 15323 15324 Region = LHSRegion; 15325 Visit(BO->getLHS()); 15326 15327 if (O && isa<CompoundAssignOperator>(BO)) 15328 notePostUse(O, BO); 15329 15330 } else { 15331 // C++11 does not specify any sequencing between the LHS and RHS. 15332 Region = LHSRegion; 15333 Visit(BO->getLHS()); 15334 15335 if (O && isa<CompoundAssignOperator>(BO)) 15336 notePostUse(O, BO); 15337 15338 Region = RHSRegion; 15339 Visit(BO->getRHS()); 15340 } 15341 15342 // C++11 [expr.ass]p1: 15343 // the assignment is sequenced [...] before the value computation of the 15344 // assignment expression. 15345 // C11 6.5.16/3 has no such rule. 15346 Region = OldRegion; 15347 if (O) 15348 notePostMod(O, BO, 15349 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 15350 : UK_ModAsSideEffect); 15351 if (SemaRef.getLangOpts().CPlusPlus17) { 15352 Tree.merge(RHSRegion); 15353 Tree.merge(LHSRegion); 15354 } 15355 } 15356 15357 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 15358 VisitBinAssign(CAO); 15359 } 15360 15361 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 15362 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 15363 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 15364 Object O = getObject(UO->getSubExpr(), true); 15365 if (!O) 15366 return VisitExpr(UO); 15367 15368 notePreMod(O, UO); 15369 Visit(UO->getSubExpr()); 15370 // C++11 [expr.pre.incr]p1: 15371 // the expression ++x is equivalent to x+=1 15372 notePostMod(O, UO, 15373 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 15374 : UK_ModAsSideEffect); 15375 } 15376 15377 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 15378 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 15379 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 15380 Object O = getObject(UO->getSubExpr(), true); 15381 if (!O) 15382 return VisitExpr(UO); 15383 15384 notePreMod(O, UO); 15385 Visit(UO->getSubExpr()); 15386 notePostMod(O, UO, UK_ModAsSideEffect); 15387 } 15388 15389 void VisitBinLOr(const BinaryOperator *BO) { 15390 // C++11 [expr.log.or]p2: 15391 // If the second expression is evaluated, every value computation and 15392 // side effect associated with the first expression is sequenced before 15393 // every value computation and side effect associated with the 15394 // second expression. 15395 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 15396 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 15397 SequenceTree::Seq OldRegion = Region; 15398 15399 EvaluationTracker Eval(*this); 15400 { 15401 SequencedSubexpression Sequenced(*this); 15402 Region = LHSRegion; 15403 Visit(BO->getLHS()); 15404 } 15405 15406 // C++11 [expr.log.or]p1: 15407 // [...] the second operand is not evaluated if the first operand 15408 // evaluates to true. 15409 bool EvalResult = false; 15410 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 15411 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 15412 if (ShouldVisitRHS) { 15413 Region = RHSRegion; 15414 Visit(BO->getRHS()); 15415 } 15416 15417 Region = OldRegion; 15418 Tree.merge(LHSRegion); 15419 Tree.merge(RHSRegion); 15420 } 15421 15422 void VisitBinLAnd(const BinaryOperator *BO) { 15423 // C++11 [expr.log.and]p2: 15424 // If the second expression is evaluated, every value computation and 15425 // side effect associated with the first expression is sequenced before 15426 // every value computation and side effect associated with the 15427 // second expression. 15428 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 15429 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 15430 SequenceTree::Seq OldRegion = Region; 15431 15432 EvaluationTracker Eval(*this); 15433 { 15434 SequencedSubexpression Sequenced(*this); 15435 Region = LHSRegion; 15436 Visit(BO->getLHS()); 15437 } 15438 15439 // C++11 [expr.log.and]p1: 15440 // [...] the second operand is not evaluated if the first operand is false. 
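    // Illustrative example (not from the original source): if the LHS folds
    // to the constant false, as in
    //   (false && i++) + i++;
    // the RHS of the "&&" is skipped below, so the "i++" inside it is never
    // recorded and cannot take part in an -Wunsequenced diagnostic.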
15441 bool EvalResult = false;
15442 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
15443 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
15444 if (ShouldVisitRHS) {
15445 Region = RHSRegion;
15446 Visit(BO->getRHS());
15447 }
15448 
15449 Region = OldRegion;
15450 Tree.merge(LHSRegion);
15451 Tree.merge(RHSRegion);
15452 }
15453 
15454 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
15455 // C++11 [expr.cond]p1:
15456 // [...] Every value computation and side effect associated with the first
15457 // expression is sequenced before every value computation and side effect
15458 // associated with the second or third expression.
15459 SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
15460 
15461 // No sequencing is specified between the true and false expressions.
15462 // However, since exactly one of the two is going to be evaluated, we can
15463 // consider them to be sequenced. This is needed to avoid warning on
15464 // something like "x ? y += 1 : y += 2;" in the case where we will visit
15465 // both the true and false expressions because we can't evaluate x.
15466 // This will still allow us to detect an expression like (pre C++17)
15467 // "(x ? y += 1 : y += 2) = y".
15468 //
15469 // We don't wrap the visitation of the true and false expressions with
15470 // SequencedSubexpression because we don't want to downgrade modifications
15471 // as side effects in the true and false expressions after the visitation
15472 // is done. (For example, in the expression "(x ? y++ : y++) + y" we should
15473 // not warn between the two "y++", but we should warn between the "y++"
15474 // and the "y".)
15475 SequenceTree::Seq TrueRegion = Tree.allocate(Region);
15476 SequenceTree::Seq FalseRegion = Tree.allocate(Region);
15477 SequenceTree::Seq OldRegion = Region;
15478 
15479 EvaluationTracker Eval(*this);
15480 {
15481 SequencedSubexpression Sequenced(*this);
15482 Region = ConditionRegion;
15483 Visit(CO->getCond());
15484 }
15485 
15486 // C++11 [expr.cond]p1:
15487 // [...] The first expression is contextually converted to bool (Clause 4).
15488 // It is evaluated and if it is true, the result of the conditional
15489 // expression is the value of the second expression, otherwise that of the
15490 // third expression. Only one of the second and third expressions is
15491 // evaluated. [...]
15492 bool EvalResult = false;
15493 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
15494 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
15495 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
15496 if (ShouldVisitTrueExpr) {
15497 Region = TrueRegion;
15498 Visit(CO->getTrueExpr());
15499 }
15500 if (ShouldVisitFalseExpr) {
15501 Region = FalseRegion;
15502 Visit(CO->getFalseExpr());
15503 }
15504 
15505 Region = OldRegion;
15506 Tree.merge(ConditionRegion);
15507 Tree.merge(TrueRegion);
15508 Tree.merge(FalseRegion);
15509 }
15510 
15511 void VisitCallExpr(const CallExpr *CE) {
15512 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
15513 
15514 if (CE->isUnevaluatedBuiltinCall(Context))
15515 return;
15516 
15517 // C++11 [intro.execution]p15:
15518 // When calling a function [...], every value computation and side effect
15519 // associated with any argument expression, or with the postfix expression
15520 // designating the called function, is sequenced before execution of every
15521 // expression or statement in the body of the function [and thus before
15522 // the value computation of its result].
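    // Illustrative restatement (not from the original source): for a call
    // such as "g(i++)" the increment of "i" is complete before any statement
    // in the body of "g" executes; the SequencedSubexpression below models
    // the fact that, once the call has been visited, its side effects are
    // sequenced before the value computation of the call's result.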
15523 SequencedSubexpression Sequenced(*this); 15524 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 15525 // C++17 [expr.call]p5 15526 // The postfix-expression is sequenced before each expression in the 15527 // expression-list and any default argument. [...] 15528 SequenceTree::Seq CalleeRegion; 15529 SequenceTree::Seq OtherRegion; 15530 if (SemaRef.getLangOpts().CPlusPlus17) { 15531 CalleeRegion = Tree.allocate(Region); 15532 OtherRegion = Tree.allocate(Region); 15533 } else { 15534 CalleeRegion = Region; 15535 OtherRegion = Region; 15536 } 15537 SequenceTree::Seq OldRegion = Region; 15538 15539 // Visit the callee expression first. 15540 Region = CalleeRegion; 15541 if (SemaRef.getLangOpts().CPlusPlus17) { 15542 SequencedSubexpression Sequenced(*this); 15543 Visit(CE->getCallee()); 15544 } else { 15545 Visit(CE->getCallee()); 15546 } 15547 15548 // Then visit the argument expressions. 15549 Region = OtherRegion; 15550 for (const Expr *Argument : CE->arguments()) 15551 Visit(Argument); 15552 15553 Region = OldRegion; 15554 if (SemaRef.getLangOpts().CPlusPlus17) { 15555 Tree.merge(CalleeRegion); 15556 Tree.merge(OtherRegion); 15557 } 15558 }); 15559 } 15560 15561 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 15562 // C++17 [over.match.oper]p2: 15563 // [...] the operator notation is first transformed to the equivalent 15564 // function-call notation as summarized in Table 12 (where @ denotes one 15565 // of the operators covered in the specified subclause). However, the 15566 // operands are sequenced in the order prescribed for the built-in 15567 // operator (Clause 8). 15568 // 15569 // From the above only overloaded binary operators and overloaded call 15570 // operators have sequencing rules in C++17 that we need to handle 15571 // separately. 15572 if (!SemaRef.getLangOpts().CPlusPlus17 || 15573 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 15574 return VisitCallExpr(CXXOCE); 15575 15576 enum { 15577 NoSequencing, 15578 LHSBeforeRHS, 15579 RHSBeforeLHS, 15580 LHSBeforeRest 15581 } SequencingKind; 15582 switch (CXXOCE->getOperator()) { 15583 case OO_Equal: 15584 case OO_PlusEqual: 15585 case OO_MinusEqual: 15586 case OO_StarEqual: 15587 case OO_SlashEqual: 15588 case OO_PercentEqual: 15589 case OO_CaretEqual: 15590 case OO_AmpEqual: 15591 case OO_PipeEqual: 15592 case OO_LessLessEqual: 15593 case OO_GreaterGreaterEqual: 15594 SequencingKind = RHSBeforeLHS; 15595 break; 15596 15597 case OO_LessLess: 15598 case OO_GreaterGreater: 15599 case OO_AmpAmp: 15600 case OO_PipePipe: 15601 case OO_Comma: 15602 case OO_ArrowStar: 15603 case OO_Subscript: 15604 SequencingKind = LHSBeforeRHS; 15605 break; 15606 15607 case OO_Call: 15608 SequencingKind = LHSBeforeRest; 15609 break; 15610 15611 default: 15612 SequencingKind = NoSequencing; 15613 break; 15614 } 15615 15616 if (SequencingKind == NoSequencing) 15617 return VisitCallExpr(CXXOCE); 15618 15619 // This is a call, so all subexpressions are sequenced before the result. 
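    // Illustrative example (not from the original source): for an overloaded
    // stream operator in C++17, e.g.
    //   os << i++ << i;
    // the LHSBeforeRHS case above visits the two operands in sequenced
    // regions, matching the built-in left-to-right sequencing; before C++17
    // the expression falls back to VisitCallExpr above.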
15620 SequencedSubexpression Sequenced(*this); 15621 15622 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 15623 assert(SemaRef.getLangOpts().CPlusPlus17 && 15624 "Should only get there with C++17 and above!"); 15625 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 15626 "Should only get there with an overloaded binary operator" 15627 " or an overloaded call operator!"); 15628 15629 if (SequencingKind == LHSBeforeRest) { 15630 assert(CXXOCE->getOperator() == OO_Call && 15631 "We should only have an overloaded call operator here!"); 15632 15633 // This is very similar to VisitCallExpr, except that we only have the 15634 // C++17 case. The postfix-expression is the first argument of the 15635 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 15636 // are in the following arguments. 15637 // 15638 // Note that we intentionally do not visit the callee expression since 15639 // it is just a decayed reference to a function. 15640 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 15641 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 15642 SequenceTree::Seq OldRegion = Region; 15643 15644 assert(CXXOCE->getNumArgs() >= 1 && 15645 "An overloaded call operator must have at least one argument" 15646 " for the postfix-expression!"); 15647 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 15648 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 15649 CXXOCE->getNumArgs() - 1); 15650 15651 // Visit the postfix-expression first. 15652 { 15653 Region = PostfixExprRegion; 15654 SequencedSubexpression Sequenced(*this); 15655 Visit(PostfixExpr); 15656 } 15657 15658 // Then visit the argument expressions. 15659 Region = ArgsRegion; 15660 for (const Expr *Arg : Args) 15661 Visit(Arg); 15662 15663 Region = OldRegion; 15664 Tree.merge(PostfixExprRegion); 15665 Tree.merge(ArgsRegion); 15666 } else { 15667 assert(CXXOCE->getNumArgs() == 2 && 15668 "Should only have two arguments here!"); 15669 assert((SequencingKind == LHSBeforeRHS || 15670 SequencingKind == RHSBeforeLHS) && 15671 "Unexpected sequencing kind!"); 15672 15673 // We do not visit the callee expression since it is just a decayed 15674 // reference to a function. 15675 const Expr *E1 = CXXOCE->getArg(0); 15676 const Expr *E2 = CXXOCE->getArg(1); 15677 if (SequencingKind == RHSBeforeLHS) 15678 std::swap(E1, E2); 15679 15680 return VisitSequencedExpressions(E1, E2); 15681 } 15682 }); 15683 } 15684 15685 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 15686 // This is a call, so all subexpressions are sequenced before the result. 15687 SequencedSubexpression Sequenced(*this); 15688 15689 if (!CCE->isListInitialization()) 15690 return VisitExpr(CCE); 15691 15692 // In C++11, list initializations are sequenced. 15693 SmallVector<SequenceTree::Seq, 32> Elts; 15694 SequenceTree::Seq Parent = Region; 15695 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 15696 E = CCE->arg_end(); 15697 I != E; ++I) { 15698 Region = Tree.allocate(Parent); 15699 Elts.push_back(Region); 15700 Visit(*I); 15701 } 15702 15703 // Forget that the initializers are sequenced. 15704 Region = Parent; 15705 for (unsigned I = 0; I < Elts.size(); ++I) 15706 Tree.merge(Elts[I]); 15707 } 15708 15709 void VisitInitListExpr(const InitListExpr *ILE) { 15710 if (!SemaRef.getLangOpts().CPlusPlus11) 15711 return VisitExpr(ILE); 15712 15713 // In C++11, list initializations are sequenced. 
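    // Illustrative example (not from the original source): in C++11 the
    // elements of a braced initializer list are evaluated in order, so in
    //   int a[2] = {i++, i++};
    // the two increments are sequenced; each initializer is therefore visited
    // below in its own child region and no -Wunsequenced warning is issued.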
15714 SmallVector<SequenceTree::Seq, 32> Elts; 15715 SequenceTree::Seq Parent = Region; 15716 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 15717 const Expr *E = ILE->getInit(I); 15718 if (!E) 15719 continue; 15720 Region = Tree.allocate(Parent); 15721 Elts.push_back(Region); 15722 Visit(E); 15723 } 15724 15725 // Forget that the initializers are sequenced. 15726 Region = Parent; 15727 for (unsigned I = 0; I < Elts.size(); ++I) 15728 Tree.merge(Elts[I]); 15729 } 15730 }; 15731 15732 } // namespace 15733 15734 void Sema::CheckUnsequencedOperations(const Expr *E) { 15735 SmallVector<const Expr *, 8> WorkList; 15736 WorkList.push_back(E); 15737 while (!WorkList.empty()) { 15738 const Expr *Item = WorkList.pop_back_val(); 15739 SequenceChecker(*this, Item, WorkList); 15740 } 15741 } 15742 15743 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 15744 bool IsConstexpr) { 15745 llvm::SaveAndRestore ConstantContext(isConstantEvaluatedOverride, 15746 IsConstexpr || isa<ConstantExpr>(E)); 15747 CheckImplicitConversions(E, CheckLoc); 15748 if (!E->isInstantiationDependent()) 15749 CheckUnsequencedOperations(E); 15750 if (!IsConstexpr && !E->isValueDependent()) 15751 CheckForIntOverflow(E); 15752 DiagnoseMisalignedMembers(); 15753 } 15754 15755 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 15756 FieldDecl *BitField, 15757 Expr *Init) { 15758 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 15759 } 15760 15761 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 15762 SourceLocation Loc) { 15763 if (!PType->isVariablyModifiedType()) 15764 return; 15765 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 15766 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 15767 return; 15768 } 15769 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 15770 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 15771 return; 15772 } 15773 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 15774 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 15775 return; 15776 } 15777 15778 const ArrayType *AT = S.Context.getAsArrayType(PType); 15779 if (!AT) 15780 return; 15781 15782 if (AT->getSizeModifier() != ArrayType::Star) { 15783 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 15784 return; 15785 } 15786 15787 S.Diag(Loc, diag::err_array_star_in_function_definition); 15788 } 15789 15790 /// CheckParmsForFunctionDef - Check that the parameters of the given 15791 /// function are appropriate for the definition of a function. This 15792 /// takes care of any checks that cannot be performed on the 15793 /// declaration itself, e.g., that the types of each of the function 15794 /// parameters are complete. 15795 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 15796 bool CheckParameterNames) { 15797 bool HasInvalidParm = false; 15798 for (ParmVarDecl *Param : Parameters) { 15799 // C99 6.7.5.3p4: the parameters in a parameter type list in a 15800 // function declarator that is part of a function definition of 15801 // that function shall not have incomplete type. 15802 // 15803 // This is also C++ [dcl.fct]p6. 
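    // Illustrative example (not from the original source):
    //   struct S;               // incomplete type
    //   void f(struct S);       // fine: declaration only
    //   void f(struct S s) {}   // error: parameter has incomplete type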
15804 if (!Param->isInvalidDecl() && 15805 RequireCompleteType(Param->getLocation(), Param->getType(), 15806 diag::err_typecheck_decl_incomplete_type)) { 15807 Param->setInvalidDecl(); 15808 HasInvalidParm = true; 15809 } 15810 15811 // C99 6.9.1p5: If the declarator includes a parameter type list, the 15812 // declaration of each parameter shall include an identifier. 15813 if (CheckParameterNames && Param->getIdentifier() == nullptr && 15814 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 15815 // Diagnose this as an extension in C17 and earlier. 15816 if (!getLangOpts().C2x) 15817 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 15818 } 15819 15820 // C99 6.7.5.3p12: 15821 // If the function declarator is not part of a definition of that 15822 // function, parameters may have incomplete type and may use the [*] 15823 // notation in their sequences of declarator specifiers to specify 15824 // variable length array types. 15825 QualType PType = Param->getOriginalType(); 15826 // FIXME: This diagnostic should point the '[*]' if source-location 15827 // information is added for it. 15828 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 15829 15830 // If the parameter is a c++ class type and it has to be destructed in the 15831 // callee function, declare the destructor so that it can be called by the 15832 // callee function. Do not perform any direct access check on the dtor here. 15833 if (!Param->isInvalidDecl()) { 15834 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 15835 if (!ClassDecl->isInvalidDecl() && 15836 !ClassDecl->hasIrrelevantDestructor() && 15837 !ClassDecl->isDependentContext() && 15838 ClassDecl->isParamDestroyedInCallee()) { 15839 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 15840 MarkFunctionReferenced(Param->getLocation(), Destructor); 15841 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 15842 } 15843 } 15844 } 15845 15846 // Parameters with the pass_object_size attribute only need to be marked 15847 // constant at function definitions. Because we lack information about 15848 // whether we're on a declaration or definition when we're instantiating the 15849 // attribute, we need to check for constness here. 15850 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 15851 if (!Param->getType().isConstQualified()) 15852 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 15853 << Attr->getSpelling() << 1; 15854 15855 // Check for parameter names shadowing fields from the class. 15856 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 15857 // The owning context for the parameter should be the function, but we 15858 // want to see if this function's declaration context is a record. 15859 DeclContext *DC = Param->getDeclContext(); 15860 if (DC && DC->isFunctionOrMethod()) { 15861 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 15862 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 15863 RD, /*DeclIsField*/ false); 15864 } 15865 } 15866 } 15867 15868 return HasInvalidParm; 15869 } 15870 15871 std::optional<std::pair< 15872 CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr 15873 *E, 15874 ASTContext 15875 &Ctx); 15876 15877 /// Compute the alignment and offset of the base class object given the 15878 /// derived-to-base cast expression and the alignment and offset of the derived 15879 /// class object. 
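/// (Illustrative example, not from the original source, assuming a typical
/// target with 4-byte int: for
///   struct A { int a; }; struct B { int b; }; struct D : A, B {};
/// a D object known to have alignment 4 and offset 0 that is cast to B
/// yields alignment 4 and offset 4, because B is laid out after A within D.)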
15880 static std::pair<CharUnits, CharUnits> 15881 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 15882 CharUnits BaseAlignment, CharUnits Offset, 15883 ASTContext &Ctx) { 15884 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 15885 ++PathI) { 15886 const CXXBaseSpecifier *Base = *PathI; 15887 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 15888 if (Base->isVirtual()) { 15889 // The complete object may have a lower alignment than the non-virtual 15890 // alignment of the base, in which case the base may be misaligned. Choose 15891 // the smaller of the non-virtual alignment and BaseAlignment, which is a 15892 // conservative lower bound of the complete object alignment. 15893 CharUnits NonVirtualAlignment = 15894 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 15895 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 15896 Offset = CharUnits::Zero(); 15897 } else { 15898 const ASTRecordLayout &RL = 15899 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 15900 Offset += RL.getBaseClassOffset(BaseDecl); 15901 } 15902 DerivedType = Base->getType(); 15903 } 15904 15905 return std::make_pair(BaseAlignment, Offset); 15906 } 15907 15908 /// Compute the alignment and offset of a binary additive operator. 15909 static std::optional<std::pair<CharUnits, CharUnits>> 15910 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 15911 bool IsSub, ASTContext &Ctx) { 15912 QualType PointeeType = PtrE->getType()->getPointeeType(); 15913 15914 if (!PointeeType->isConstantSizeType()) 15915 return std::nullopt; 15916 15917 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 15918 15919 if (!P) 15920 return std::nullopt; 15921 15922 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 15923 if (std::optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 15924 CharUnits Offset = EltSize * IdxRes->getExtValue(); 15925 if (IsSub) 15926 Offset = -Offset; 15927 return std::make_pair(P->first, P->second + Offset); 15928 } 15929 15930 // If the integer expression isn't a constant expression, compute the lower 15931 // bound of the alignment using the alignment and offset of the pointer 15932 // expression and the element size. 15933 return std::make_pair( 15934 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 15935 CharUnits::Zero()); 15936 } 15937 15938 /// This helper function takes an lvalue expression and returns the alignment of 15939 /// a VarDecl and a constant offset from the VarDecl. 
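/// (Illustrative example, not from the original source, assuming 4-byte int:
/// for
///   alignas(16) int Arr[4];
/// the lvalue "Arr[1]" is reported as alignment 16 with offset 4, i.e. the
/// declared alignment of the VarDecl plus the constant element offset.)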
15940 std::optional<std::pair< 15941 CharUnits, 15942 CharUnits>> static getBaseAlignmentAndOffsetFromLValue(const Expr *E, 15943 ASTContext &Ctx) { 15944 E = E->IgnoreParens(); 15945 switch (E->getStmtClass()) { 15946 default: 15947 break; 15948 case Stmt::CStyleCastExprClass: 15949 case Stmt::CXXStaticCastExprClass: 15950 case Stmt::ImplicitCastExprClass: { 15951 auto *CE = cast<CastExpr>(E); 15952 const Expr *From = CE->getSubExpr(); 15953 switch (CE->getCastKind()) { 15954 default: 15955 break; 15956 case CK_NoOp: 15957 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15958 case CK_UncheckedDerivedToBase: 15959 case CK_DerivedToBase: { 15960 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15961 if (!P) 15962 break; 15963 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 15964 P->second, Ctx); 15965 } 15966 } 15967 break; 15968 } 15969 case Stmt::ArraySubscriptExprClass: { 15970 auto *ASE = cast<ArraySubscriptExpr>(E); 15971 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 15972 false, Ctx); 15973 } 15974 case Stmt::DeclRefExprClass: { 15975 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 15976 // FIXME: If VD is captured by copy or is an escaping __block variable, 15977 // use the alignment of VD's type. 15978 if (!VD->getType()->isReferenceType()) 15979 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 15980 if (VD->hasInit()) 15981 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 15982 } 15983 break; 15984 } 15985 case Stmt::MemberExprClass: { 15986 auto *ME = cast<MemberExpr>(E); 15987 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 15988 if (!FD || FD->getType()->isReferenceType() || 15989 FD->getParent()->isInvalidDecl()) 15990 break; 15991 std::optional<std::pair<CharUnits, CharUnits>> P; 15992 if (ME->isArrow()) 15993 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 15994 else 15995 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 15996 if (!P) 15997 break; 15998 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 15999 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 16000 return std::make_pair(P->first, 16001 P->second + CharUnits::fromQuantity(Offset)); 16002 } 16003 case Stmt::UnaryOperatorClass: { 16004 auto *UO = cast<UnaryOperator>(E); 16005 switch (UO->getOpcode()) { 16006 default: 16007 break; 16008 case UO_Deref: 16009 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 16010 } 16011 break; 16012 } 16013 case Stmt::BinaryOperatorClass: { 16014 auto *BO = cast<BinaryOperator>(E); 16015 auto Opcode = BO->getOpcode(); 16016 switch (Opcode) { 16017 default: 16018 break; 16019 case BO_Comma: 16020 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 16021 } 16022 break; 16023 } 16024 } 16025 return std::nullopt; 16026 } 16027 16028 /// This helper function takes a pointer expression and returns the alignment of 16029 /// a VarDecl and a constant offset from the VarDecl. 
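/// (Illustrative example, not from the original source, assuming a typical
/// layout with 4-byte int: for
///   struct alignas(8) S { char C; int I; } Obj;
/// the pointer expression "&Obj.I" is reported as alignment 8 with offset 4.)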
16030 std::optional<std::pair< 16031 CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr 16032 *E, 16033 ASTContext 16034 &Ctx) { 16035 E = E->IgnoreParens(); 16036 switch (E->getStmtClass()) { 16037 default: 16038 break; 16039 case Stmt::CStyleCastExprClass: 16040 case Stmt::CXXStaticCastExprClass: 16041 case Stmt::ImplicitCastExprClass: { 16042 auto *CE = cast<CastExpr>(E); 16043 const Expr *From = CE->getSubExpr(); 16044 switch (CE->getCastKind()) { 16045 default: 16046 break; 16047 case CK_NoOp: 16048 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 16049 case CK_ArrayToPointerDecay: 16050 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 16051 case CK_UncheckedDerivedToBase: 16052 case CK_DerivedToBase: { 16053 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 16054 if (!P) 16055 break; 16056 return getDerivedToBaseAlignmentAndOffset( 16057 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 16058 } 16059 } 16060 break; 16061 } 16062 case Stmt::CXXThisExprClass: { 16063 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 16064 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 16065 return std::make_pair(Alignment, CharUnits::Zero()); 16066 } 16067 case Stmt::UnaryOperatorClass: { 16068 auto *UO = cast<UnaryOperator>(E); 16069 if (UO->getOpcode() == UO_AddrOf) 16070 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 16071 break; 16072 } 16073 case Stmt::BinaryOperatorClass: { 16074 auto *BO = cast<BinaryOperator>(E); 16075 auto Opcode = BO->getOpcode(); 16076 switch (Opcode) { 16077 default: 16078 break; 16079 case BO_Add: 16080 case BO_Sub: { 16081 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 16082 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 16083 std::swap(LHS, RHS); 16084 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 16085 Ctx); 16086 } 16087 case BO_Comma: 16088 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 16089 } 16090 break; 16091 } 16092 } 16093 return std::nullopt; 16094 } 16095 16096 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 16097 // See if we can compute the alignment of a VarDecl and an offset from it. 16098 std::optional<std::pair<CharUnits, CharUnits>> P = 16099 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 16100 16101 if (P) 16102 return P->first.alignmentAtOffset(P->second); 16103 16104 // If that failed, return the type's alignment. 16105 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 16106 } 16107 16108 /// CheckCastAlign - Implements -Wcast-align, which warns when a 16109 /// pointer cast increases the alignment requirements. 16110 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 16111 // This is actually a lot of work to potentially be doing on every 16112 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 16113 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 16114 return; 16115 16116 // Ignore dependent types. 16117 if (T->isDependentType() || Op->getType()->isDependentType()) 16118 return; 16119 16120 // Require that the destination be a pointer type. 16121 const PointerType *DestPtr = T->getAs<PointerType>(); 16122 if (!DestPtr) return; 16123 16124 // If the destination has alignment 1, we're done. 
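  // Illustrative example (not from the original source) of what this check
  // flags when -Wcast-align is enabled, assuming 4-byte, 4-aligned int:
  //   char Buf[16];
  //   int *P = (int *)Buf;  // cast increases required alignment from 1 to 4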
16125 QualType DestPointee = DestPtr->getPointeeType(); 16126 if (DestPointee->isIncompleteType()) return; 16127 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 16128 if (DestAlign.isOne()) return; 16129 16130 // Require that the source be a pointer type. 16131 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 16132 if (!SrcPtr) return; 16133 QualType SrcPointee = SrcPtr->getPointeeType(); 16134 16135 // Explicitly allow casts from cv void*. We already implicitly 16136 // allowed casts to cv void*, since they have alignment 1. 16137 // Also allow casts involving incomplete types, which implicitly 16138 // includes 'void'. 16139 if (SrcPointee->isIncompleteType()) return; 16140 16141 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 16142 16143 if (SrcAlign >= DestAlign) return; 16144 16145 Diag(TRange.getBegin(), diag::warn_cast_align) 16146 << Op->getType() << T 16147 << static_cast<unsigned>(SrcAlign.getQuantity()) 16148 << static_cast<unsigned>(DestAlign.getQuantity()) 16149 << TRange << Op->getSourceRange(); 16150 } 16151 16152 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 16153 const ArraySubscriptExpr *ASE, 16154 bool AllowOnePastEnd, bool IndexNegated) { 16155 // Already diagnosed by the constant evaluator. 16156 if (isConstantEvaluated()) 16157 return; 16158 16159 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 16160 if (IndexExpr->isValueDependent()) 16161 return; 16162 16163 const Type *EffectiveType = 16164 BaseExpr->getType()->getPointeeOrArrayElementType(); 16165 BaseExpr = BaseExpr->IgnoreParenCasts(); 16166 const ConstantArrayType *ArrayTy = 16167 Context.getAsConstantArrayType(BaseExpr->getType()); 16168 16169 LangOptions::StrictFlexArraysLevelKind 16170 StrictFlexArraysLevel = getLangOpts().getStrictFlexArraysLevel(); 16171 16172 const Type *BaseType = 16173 ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr(); 16174 bool IsUnboundedArray = 16175 BaseType == nullptr || BaseExpr->isFlexibleArrayMemberLike( 16176 Context, StrictFlexArraysLevel, 16177 /*IgnoreTemplateOrMacroSubstitution=*/true); 16178 if (EffectiveType->isDependentType() || 16179 (!IsUnboundedArray && BaseType->isDependentType())) 16180 return; 16181 16182 Expr::EvalResult Result; 16183 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 16184 return; 16185 16186 llvm::APSInt index = Result.Val.getInt(); 16187 if (IndexNegated) { 16188 index.setIsUnsigned(false); 16189 index = -index; 16190 } 16191 16192 if (IsUnboundedArray) { 16193 if (EffectiveType->isFunctionType()) 16194 return; 16195 if (index.isUnsigned() || !index.isNegative()) { 16196 const auto &ASTC = getASTContext(); 16197 unsigned AddrBits = ASTC.getTargetInfo().getPointerWidth( 16198 EffectiveType->getCanonicalTypeInternal().getAddressSpace()); 16199 if (index.getBitWidth() < AddrBits) 16200 index = index.zext(AddrBits); 16201 std::optional<CharUnits> ElemCharUnits = 16202 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 16203 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 16204 // pointer) bounds-checking isn't meaningful. 16205 if (!ElemCharUnits) 16206 return; 16207 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 16208 // If index has more active bits than address space, we already know 16209 // we have a bounds violation to warn about. Otherwise, compute 16210 // address of (index + 1)th element, and warn about bounds violation 16211 // only if that address exceeds address space. 
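      // Illustrative worked example (not from the original source): with
      // 32-bit pointers and 4-byte elements, an index of 0x40000000 gives
      // (0x40000000 + 1) * 4 == 0x100000004, which does not fit in 32 bits,
      // so the access is diagnosed; at most 0x40000000 such elements fit in
      // the address space.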
16212 if (index.getActiveBits() <= AddrBits) { 16213 bool Overflow; 16214 llvm::APInt Product(index); 16215 Product += 1; 16216 Product = Product.umul_ov(ElemBytes, Overflow); 16217 if (!Overflow && Product.getActiveBits() <= AddrBits) 16218 return; 16219 } 16220 16221 // Need to compute max possible elements in address space, since that 16222 // is included in diag message. 16223 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 16224 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 16225 MaxElems += 1; 16226 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 16227 MaxElems = MaxElems.udiv(ElemBytes); 16228 16229 unsigned DiagID = 16230 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds 16231 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 16232 16233 // Diag message shows element size in bits and in "bytes" (platform- 16234 // dependent CharUnits) 16235 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 16236 PDiag(DiagID) 16237 << toString(index, 10, true) << AddrBits 16238 << (unsigned)ASTC.toBits(*ElemCharUnits) 16239 << toString(ElemBytes, 10, false) 16240 << toString(MaxElems, 10, false) 16241 << (unsigned)MaxElems.getLimitedValue(~0U) 16242 << IndexExpr->getSourceRange()); 16243 16244 const NamedDecl *ND = nullptr; 16245 // Try harder to find a NamedDecl to point at in the note. 16246 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 16247 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 16248 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 16249 ND = DRE->getDecl(); 16250 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 16251 ND = ME->getMemberDecl(); 16252 16253 if (ND) 16254 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 16255 PDiag(diag::note_array_declared_here) << ND); 16256 } 16257 return; 16258 } 16259 16260 if (index.isUnsigned() || !index.isNegative()) { 16261 // It is possible that the type of the base expression after 16262 // IgnoreParenCasts is incomplete, even though the type of the base 16263 // expression before IgnoreParenCasts is complete (see PR39746 for an 16264 // example). In this case we have no information about whether the array 16265 // access exceeds the array bounds. However we can still diagnose an array 16266 // access which precedes the array bounds. 16267 if (BaseType->isIncompleteType()) 16268 return; 16269 16270 llvm::APInt size = ArrayTy->getSize(); 16271 16272 if (BaseType != EffectiveType) { 16273 // Make sure we're comparing apples to apples when comparing index to 16274 // size. 16275 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 16276 uint64_t array_typesize = Context.getTypeSize(BaseType); 16277 16278 // Handle ptrarith_typesize being zero, such as when casting to void*. 16279 // Use the size in bits (what "getTypeSize()" returns) rather than bytes. 16280 if (!ptrarith_typesize) 16281 ptrarith_typesize = Context.getCharWidth(); 16282 16283 if (ptrarith_typesize != array_typesize) { 16284 // There's a cast to a different size type involved. 16285 uint64_t ratio = array_typesize / ptrarith_typesize; 16286 16287 // TODO: Be smarter about handling cases where array_typesize is not a 16288 // multiple of ptrarith_typesize. 
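      // Illustrative worked example (not from the original source): for
      //   int A[2];
      //   ((char *)A)[7];  // in bounds
      //   ((char *)A)[8];  // diagnosed
      // array_typesize is 32 and ptrarith_typesize is 8, so the declared size
      // of 2 elements is scaled by the ratio 4 to 8 addressable elements.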
16289 if (ptrarith_typesize * ratio == array_typesize) 16290 size *= llvm::APInt(size.getBitWidth(), ratio); 16291 } 16292 } 16293 16294 if (size.getBitWidth() > index.getBitWidth()) 16295 index = index.zext(size.getBitWidth()); 16296 else if (size.getBitWidth() < index.getBitWidth()) 16297 size = size.zext(index.getBitWidth()); 16298 16299 // For array subscripting the index must be less than size, but for pointer 16300 // arithmetic also allow the index (offset) to be equal to size since 16301 // computing the next address after the end of the array is legal and 16302 // commonly done e.g. in C++ iterators and range-based for loops. 16303 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 16304 return; 16305 16306 // Suppress the warning if the subscript expression (as identified by the 16307 // ']' location) and the index expression are both from macro expansions 16308 // within a system header. 16309 if (ASE) { 16310 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 16311 ASE->getRBracketLoc()); 16312 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 16313 SourceLocation IndexLoc = 16314 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 16315 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 16316 return; 16317 } 16318 } 16319 16320 unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds 16321 : diag::warn_ptr_arith_exceeds_bounds; 16322 unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1; 16323 QualType CastMsgTy = ASE ? ASE->getLHS()->getType() : QualType(); 16324 16325 DiagRuntimeBehavior( 16326 BaseExpr->getBeginLoc(), BaseExpr, 16327 PDiag(DiagID) << toString(index, 10, true) << ArrayTy->desugar() 16328 << CastMsg << CastMsgTy << IndexExpr->getSourceRange()); 16329 } else { 16330 unsigned DiagID = diag::warn_array_index_precedes_bounds; 16331 if (!ASE) { 16332 DiagID = diag::warn_ptr_arith_precedes_bounds; 16333 if (index.isNegative()) index = -index; 16334 } 16335 16336 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 16337 PDiag(DiagID) << toString(index, 10, true) 16338 << IndexExpr->getSourceRange()); 16339 } 16340 16341 const NamedDecl *ND = nullptr; 16342 // Try harder to find a NamedDecl to point at in the note. 
16343 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 16344 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 16345 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 16346 ND = DRE->getDecl(); 16347 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 16348 ND = ME->getMemberDecl(); 16349 16350 if (ND) 16351 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 16352 PDiag(diag::note_array_declared_here) << ND); 16353 } 16354 16355 void Sema::CheckArrayAccess(const Expr *expr) { 16356 int AllowOnePastEnd = 0; 16357 while (expr) { 16358 expr = expr->IgnoreParenImpCasts(); 16359 switch (expr->getStmtClass()) { 16360 case Stmt::ArraySubscriptExprClass: { 16361 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 16362 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 16363 AllowOnePastEnd > 0); 16364 expr = ASE->getBase(); 16365 break; 16366 } 16367 case Stmt::MemberExprClass: { 16368 expr = cast<MemberExpr>(expr)->getBase(); 16369 break; 16370 } 16371 case Stmt::OMPArraySectionExprClass: { 16372 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 16373 if (ASE->getLowerBound()) 16374 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 16375 /*ASE=*/nullptr, AllowOnePastEnd > 0); 16376 return; 16377 } 16378 case Stmt::UnaryOperatorClass: { 16379 // Only unwrap the * and & unary operators 16380 const UnaryOperator *UO = cast<UnaryOperator>(expr); 16381 expr = UO->getSubExpr(); 16382 switch (UO->getOpcode()) { 16383 case UO_AddrOf: 16384 AllowOnePastEnd++; 16385 break; 16386 case UO_Deref: 16387 AllowOnePastEnd--; 16388 break; 16389 default: 16390 return; 16391 } 16392 break; 16393 } 16394 case Stmt::ConditionalOperatorClass: { 16395 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 16396 if (const Expr *lhs = cond->getLHS()) 16397 CheckArrayAccess(lhs); 16398 if (const Expr *rhs = cond->getRHS()) 16399 CheckArrayAccess(rhs); 16400 return; 16401 } 16402 case Stmt::CXXOperatorCallExprClass: { 16403 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 16404 for (const auto *Arg : OCE->arguments()) 16405 CheckArrayAccess(Arg); 16406 return; 16407 } 16408 default: 16409 return; 16410 } 16411 } 16412 } 16413 16414 //===--- CHECK: Objective-C retain cycles ----------------------------------// 16415 16416 namespace { 16417 16418 struct RetainCycleOwner { 16419 VarDecl *Variable = nullptr; 16420 SourceRange Range; 16421 SourceLocation Loc; 16422 bool Indirect = false; 16423 16424 RetainCycleOwner() = default; 16425 16426 void setLocsFrom(Expr *e) { 16427 Loc = e->getExprLoc(); 16428 Range = e->getSourceRange(); 16429 } 16430 }; 16431 16432 } // namespace 16433 16434 /// Consider whether capturing the given variable can possibly lead to 16435 /// a retain cycle. 16436 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 16437 // In ARC, it's captured strongly iff the variable has __strong 16438 // lifetime. In MRR, it's captured strongly if the variable is 16439 // __block and has an appropriate type. 
16440 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 16441 return false; 16442 16443 owner.Variable = var; 16444 if (ref) 16445 owner.setLocsFrom(ref); 16446 return true; 16447 } 16448 16449 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 16450 while (true) { 16451 e = e->IgnoreParens(); 16452 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 16453 switch (cast->getCastKind()) { 16454 case CK_BitCast: 16455 case CK_LValueBitCast: 16456 case CK_LValueToRValue: 16457 case CK_ARCReclaimReturnedObject: 16458 e = cast->getSubExpr(); 16459 continue; 16460 16461 default: 16462 return false; 16463 } 16464 } 16465 16466 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 16467 ObjCIvarDecl *ivar = ref->getDecl(); 16468 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 16469 return false; 16470 16471 // Try to find a retain cycle in the base. 16472 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 16473 return false; 16474 16475 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 16476 owner.Indirect = true; 16477 return true; 16478 } 16479 16480 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 16481 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 16482 if (!var) return false; 16483 return considerVariable(var, ref, owner); 16484 } 16485 16486 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 16487 if (member->isArrow()) return false; 16488 16489 // Don't count this as an indirect ownership. 16490 e = member->getBase(); 16491 continue; 16492 } 16493 16494 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 16495 // Only pay attention to pseudo-objects on property references. 16496 ObjCPropertyRefExpr *pre 16497 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 16498 ->IgnoreParens()); 16499 if (!pre) return false; 16500 if (pre->isImplicitProperty()) return false; 16501 ObjCPropertyDecl *property = pre->getExplicitProperty(); 16502 if (!property->isRetaining() && 16503 !(property->getPropertyIvarDecl() && 16504 property->getPropertyIvarDecl()->getType() 16505 .getObjCLifetime() == Qualifiers::OCL_Strong)) 16506 return false; 16507 16508 owner.Indirect = true; 16509 if (pre->isSuperReceiver()) { 16510 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 16511 if (!owner.Variable) 16512 return false; 16513 owner.Loc = pre->getLocation(); 16514 owner.Range = pre->getSourceRange(); 16515 return true; 16516 } 16517 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 16518 ->getSourceExpr()); 16519 continue; 16520 } 16521 16522 // Array ivars? 
16523 16524 return false; 16525 } 16526 } 16527 16528 namespace { 16529 16530 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 16531 ASTContext &Context; 16532 VarDecl *Variable; 16533 Expr *Capturer = nullptr; 16534 bool VarWillBeReased = false; 16535 16536 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 16537 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 16538 Context(Context), Variable(variable) {} 16539 16540 void VisitDeclRefExpr(DeclRefExpr *ref) { 16541 if (ref->getDecl() == Variable && !Capturer) 16542 Capturer = ref; 16543 } 16544 16545 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 16546 if (Capturer) return; 16547 Visit(ref->getBase()); 16548 if (Capturer && ref->isFreeIvar()) 16549 Capturer = ref; 16550 } 16551 16552 void VisitBlockExpr(BlockExpr *block) { 16553 // Look inside nested blocks 16554 if (block->getBlockDecl()->capturesVariable(Variable)) 16555 Visit(block->getBlockDecl()->getBody()); 16556 } 16557 16558 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 16559 if (Capturer) return; 16560 if (OVE->getSourceExpr()) 16561 Visit(OVE->getSourceExpr()); 16562 } 16563 16564 void VisitBinaryOperator(BinaryOperator *BinOp) { 16565 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 16566 return; 16567 Expr *LHS = BinOp->getLHS(); 16568 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 16569 if (DRE->getDecl() != Variable) 16570 return; 16571 if (Expr *RHS = BinOp->getRHS()) { 16572 RHS = RHS->IgnoreParenCasts(); 16573 std::optional<llvm::APSInt> Value; 16574 VarWillBeReased = 16575 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 16576 *Value == 0); 16577 } 16578 } 16579 } 16580 }; 16581 16582 } // namespace 16583 16584 /// Check whether the given argument is a block which captures a 16585 /// variable. 16586 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 16587 assert(owner.Variable && owner.Loc.isValid()); 16588 16589 e = e->IgnoreParenCasts(); 16590 16591 // Look through [^{...} copy] and Block_copy(^{...}). 16592 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 16593 Selector Cmd = ME->getSelector(); 16594 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 16595 e = ME->getInstanceReceiver(); 16596 if (!e) 16597 return nullptr; 16598 e = e->IgnoreParenCasts(); 16599 } 16600 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 16601 if (CE->getNumArgs() == 1) { 16602 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 16603 if (Fn) { 16604 const IdentifierInfo *FnI = Fn->getIdentifier(); 16605 if (FnI && FnI->isStr("_Block_copy")) { 16606 e = CE->getArg(0)->IgnoreParenCasts(); 16607 } 16608 } 16609 } 16610 } 16611 16612 BlockExpr *block = dyn_cast<BlockExpr>(e); 16613 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 16614 return nullptr; 16615 16616 FindCaptureVisitor visitor(S.Context, owner.Variable); 16617 visitor.Visit(block->getBlockDecl()->getBody()); 16618 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 16619 } 16620 16621 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 16622 RetainCycleOwner &owner) { 16623 assert(capturer); 16624 assert(owner.Variable && owner.Loc.isValid()); 16625 16626 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 16627 << owner.Variable << capturer->getSourceRange(); 16628 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 16629 << owner.Indirect << owner.Range; 16630 } 16631 16632 /// Check for a keyword selector that starts with the word 'add' or 16633 /// 'set'. 16634 static bool isSetterLikeSelector(Selector sel) { 16635 if (sel.isUnarySelector()) return false; 16636 16637 StringRef str = sel.getNameForSlot(0); 16638 while (!str.empty() && str.front() == '_') str = str.substr(1); 16639 if (str.startswith("set")) 16640 str = str.substr(3); 16641 else if (str.startswith("add")) { 16642 // Specially allow 'addOperationWithBlock:'. 16643 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 16644 return false; 16645 str = str.substr(3); 16646 } 16647 else 16648 return false; 16649 16650 if (str.empty()) return true; 16651 return !isLowercase(str.front()); 16652 } 16653 16654 static std::optional<int> 16655 GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 16656 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 16657 Message->getReceiverInterface(), 16658 NSAPI::ClassId_NSMutableArray); 16659 if (!IsMutableArray) { 16660 return std::nullopt; 16661 } 16662 16663 Selector Sel = Message->getSelector(); 16664 16665 std::optional<NSAPI::NSArrayMethodKind> MKOpt = 16666 S.NSAPIObj->getNSArrayMethodKind(Sel); 16667 if (!MKOpt) { 16668 return std::nullopt; 16669 } 16670 16671 NSAPI::NSArrayMethodKind MK = *MKOpt; 16672 16673 switch (MK) { 16674 case NSAPI::NSMutableArr_addObject: 16675 case NSAPI::NSMutableArr_insertObjectAtIndex: 16676 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 16677 return 0; 16678 case NSAPI::NSMutableArr_replaceObjectAtIndex: 16679 return 1; 16680 16681 default: 16682 return std::nullopt; 16683 } 16684 16685 return std::nullopt; 16686 } 16687 16688 static std::optional<int> 16689 GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 16690 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 16691 Message->getReceiverInterface(), 16692 NSAPI::ClassId_NSMutableDictionary); 16693 if (!IsMutableDictionary) { 16694 return std::nullopt; 16695 } 16696 16697 Selector Sel = Message->getSelector(); 16698 16699 std::optional<NSAPI::NSDictionaryMethodKind> MKOpt = 16700 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 16701 if (!MKOpt) { 16702 return std::nullopt; 16703 } 16704 16705 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 16706 16707 switch (MK) { 16708 case NSAPI::NSMutableDict_setObjectForKey: 16709 case NSAPI::NSMutableDict_setValueForKey: 16710 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 16711 return 0; 16712 16713 default: 16714 return std::nullopt; 16715 } 16716 16717 return std::nullopt; 16718 } 16719 16720 static std::optional<int> GetNSSetArgumentIndex(Sema &S, 16721 ObjCMessageExpr *Message) { 16722 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 16723 Message->getReceiverInterface(), 16724 NSAPI::ClassId_NSMutableSet); 16725 16726 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 16727 Message->getReceiverInterface(), 16728 NSAPI::ClassId_NSMutableOrderedSet); 16729 if (!IsMutableSet && !IsMutableOrderedSet) { 16730 return std::nullopt; 16731 } 16732 16733 Selector Sel = Message->getSelector(); 16734 16735 
std::optional<NSAPI::NSSetMethodKind> MKOpt = 16736 S.NSAPIObj->getNSSetMethodKind(Sel); 16737 if (!MKOpt) { 16738 return std::nullopt; 16739 } 16740 16741 NSAPI::NSSetMethodKind MK = *MKOpt; 16742 16743 switch (MK) { 16744 case NSAPI::NSMutableSet_addObject: 16745 case NSAPI::NSOrderedSet_setObjectAtIndex: 16746 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 16747 case NSAPI::NSOrderedSet_insertObjectAtIndex: 16748 return 0; 16749 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 16750 return 1; 16751 } 16752 16753 return std::nullopt; 16754 } 16755 16756 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 16757 if (!Message->isInstanceMessage()) { 16758 return; 16759 } 16760 16761 std::optional<int> ArgOpt; 16762 16763 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 16764 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 16765 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 16766 return; 16767 } 16768 16769 int ArgIndex = *ArgOpt; 16770 16771 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 16772 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 16773 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 16774 } 16775 16776 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 16777 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 16778 if (ArgRE->isObjCSelfExpr()) { 16779 Diag(Message->getSourceRange().getBegin(), 16780 diag::warn_objc_circular_container) 16781 << ArgRE->getDecl() << StringRef("'super'"); 16782 } 16783 } 16784 } else { 16785 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 16786 16787 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 16788 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 16789 } 16790 16791 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 16792 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 16793 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 16794 ValueDecl *Decl = ReceiverRE->getDecl(); 16795 Diag(Message->getSourceRange().getBegin(), 16796 diag::warn_objc_circular_container) 16797 << Decl << Decl; 16798 if (!ArgRE->isObjCSelfExpr()) { 16799 Diag(Decl->getLocation(), 16800 diag::note_objc_circular_container_declared_here) 16801 << Decl; 16802 } 16803 } 16804 } 16805 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 16806 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 16807 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 16808 ObjCIvarDecl *Decl = IvarRE->getDecl(); 16809 Diag(Message->getSourceRange().getBegin(), 16810 diag::warn_objc_circular_container) 16811 << Decl << Decl; 16812 Diag(Decl->getLocation(), 16813 diag::note_objc_circular_container_declared_here) 16814 << Decl; 16815 } 16816 } 16817 } 16818 } 16819 } 16820 16821 /// Check a message send to see if it's likely to cause a retain cycle. 16822 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 16823 // Only check instance methods whose selector looks like a setter. 16824 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 16825 return; 16826 16827 // Try to find a variable that the receiver is strongly owned by. 
16828 RetainCycleOwner owner; 16829 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 16830 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 16831 return; 16832 } else { 16833 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 16834 owner.Variable = getCurMethodDecl()->getSelfDecl(); 16835 owner.Loc = msg->getSuperLoc(); 16836 owner.Range = msg->getSuperLoc(); 16837 } 16838 16839 // Check whether the receiver is captured by any of the arguments. 16840 const ObjCMethodDecl *MD = msg->getMethodDecl(); 16841 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 16842 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 16843 // noescape blocks should not be retained by the method. 16844 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 16845 continue; 16846 return diagnoseRetainCycle(*this, capturer, owner); 16847 } 16848 } 16849 } 16850 16851 /// Check a property assign to see if it's likely to cause a retain cycle. 16852 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 16853 RetainCycleOwner owner; 16854 if (!findRetainCycleOwner(*this, receiver, owner)) 16855 return; 16856 16857 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 16858 diagnoseRetainCycle(*this, capturer, owner); 16859 } 16860 16861 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 16862 RetainCycleOwner Owner; 16863 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 16864 return; 16865 16866 // Because we don't have an expression for the variable, we have to set the 16867 // location explicitly here. 16868 Owner.Loc = Var->getLocation(); 16869 Owner.Range = Var->getSourceRange(); 16870 16871 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 16872 diagnoseRetainCycle(*this, Capturer, Owner); 16873 } 16874 16875 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 16876 Expr *RHS, bool isProperty) { 16877 // Check if RHS is an Objective-C object literal, which also can get 16878 // immediately zapped in a weak reference. Note that we explicitly 16879 // allow ObjCStringLiterals, since those are designed to never really die. 16880 RHS = RHS->IgnoreParenImpCasts(); 16881 16882 // This enum needs to match with the 'select' in 16883 // warn_objc_arc_literal_assign (off-by-1). 16884 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 16885 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 16886 return false; 16887 16888 S.Diag(Loc, diag::warn_arc_literal_assign) 16889 << (unsigned) Kind 16890 << (isProperty ? 0 : 1) 16891 << RHS->getSourceRange(); 16892 16893 return true; 16894 } 16895 16896 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 16897 Qualifiers::ObjCLifetime LT, 16898 Expr *RHS, bool isProperty) { 16899 // Strip off any implicit cast added to get to the one ARC-specific. 16900 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16901 if (cast->getCastKind() == CK_ARCConsumeObject) { 16902 S.Diag(Loc, diag::warn_arc_retained_assign) 16903 << (LT == Qualifiers::OCL_ExplicitNone) 16904 << (isProperty ? 
0 : 1) 16905 << RHS->getSourceRange(); 16906 return true; 16907 } 16908 RHS = cast->getSubExpr(); 16909 } 16910 16911 if (LT == Qualifiers::OCL_Weak && 16912 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 16913 return true; 16914 16915 return false; 16916 } 16917 16918 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 16919 QualType LHS, Expr *RHS) { 16920 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 16921 16922 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 16923 return false; 16924 16925 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 16926 return true; 16927 16928 return false; 16929 } 16930 16931 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 16932 Expr *LHS, Expr *RHS) { 16933 QualType LHSType; 16934 // PropertyRef on LHS type need be directly obtained from 16935 // its declaration as it has a PseudoType. 16936 ObjCPropertyRefExpr *PRE 16937 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 16938 if (PRE && !PRE->isImplicitProperty()) { 16939 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16940 if (PD) 16941 LHSType = PD->getType(); 16942 } 16943 16944 if (LHSType.isNull()) 16945 LHSType = LHS->getType(); 16946 16947 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 16948 16949 if (LT == Qualifiers::OCL_Weak) { 16950 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 16951 getCurFunction()->markSafeWeakUse(LHS); 16952 } 16953 16954 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 16955 return; 16956 16957 // FIXME. Check for other life times. 16958 if (LT != Qualifiers::OCL_None) 16959 return; 16960 16961 if (PRE) { 16962 if (PRE->isImplicitProperty()) 16963 return; 16964 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16965 if (!PD) 16966 return; 16967 16968 unsigned Attributes = PD->getPropertyAttributes(); 16969 if (Attributes & ObjCPropertyAttribute::kind_assign) { 16970 // when 'assign' attribute was not explicitly specified 16971 // by user, ignore it and rely on property type itself 16972 // for lifetime info. 16973 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 16974 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 16975 LHSType->isObjCRetainableType()) 16976 return; 16977 16978 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16979 if (cast->getCastKind() == CK_ARCConsumeObject) { 16980 Diag(Loc, diag::warn_arc_retained_property_assign) 16981 << RHS->getSourceRange(); 16982 return; 16983 } 16984 RHS = cast->getSubExpr(); 16985 } 16986 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 16987 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 16988 return; 16989 } 16990 } 16991 } 16992 16993 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 16994 16995 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 16996 SourceLocation StmtLoc, 16997 const NullStmt *Body) { 16998 // Do not warn if the body is a macro that expands to nothing, e.g: 16999 // 17000 // #define CALL(x) 17001 // if (condition) 17002 // CALL(0); 17003 if (Body->hasLeadingEmptyMacro()) 17004 return false; 17005 17006 // Get line numbers of statement and body. 
17007 bool StmtLineInvalid; 17008 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 17009 &StmtLineInvalid); 17010 if (StmtLineInvalid) 17011 return false; 17012 17013 bool BodyLineInvalid; 17014 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 17015 &BodyLineInvalid); 17016 if (BodyLineInvalid) 17017 return false; 17018 17019 // Warn if null statement and body are on the same line. 17020 if (StmtLine != BodyLine) 17021 return false; 17022 17023 return true; 17024 } 17025 17026 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 17027 const Stmt *Body, 17028 unsigned DiagID) { 17029 // Since this is a syntactic check, don't emit diagnostic for template 17030 // instantiations, this just adds noise. 17031 if (CurrentInstantiationScope) 17032 return; 17033 17034 // The body should be a null statement. 17035 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 17036 if (!NBody) 17037 return; 17038 17039 // Do the usual checks. 17040 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 17041 return; 17042 17043 Diag(NBody->getSemiLoc(), DiagID); 17044 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 17045 } 17046 17047 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 17048 const Stmt *PossibleBody) { 17049 assert(!CurrentInstantiationScope); // Ensured by caller 17050 17051 SourceLocation StmtLoc; 17052 const Stmt *Body; 17053 unsigned DiagID; 17054 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 17055 StmtLoc = FS->getRParenLoc(); 17056 Body = FS->getBody(); 17057 DiagID = diag::warn_empty_for_body; 17058 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 17059 StmtLoc = WS->getRParenLoc(); 17060 Body = WS->getBody(); 17061 DiagID = diag::warn_empty_while_body; 17062 } else 17063 return; // Neither `for' nor `while'. 17064 17065 // The body should be a null statement. 17066 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 17067 if (!NBody) 17068 return; 17069 17070 // Skip expensive checks if diagnostic is disabled. 17071 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 17072 return; 17073 17074 // Do the usual checks. 17075 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 17076 return; 17077 17078 // `for(...);' and `while(...);' are popular idioms, so in order to keep 17079 // noise level low, emit diagnostics only if for/while is followed by a 17080 // CompoundStmt, e.g.: 17081 // for (int i = 0; i < n; i++); 17082 // { 17083 // a(i); 17084 // } 17085 // or if for/while is followed by a statement with more indentation 17086 // than for/while itself: 17087 // for (int i = 0; i < n; i++); 17088 // a(i); 17089 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 17090 if (!ProbableTypo) { 17091 bool BodyColInvalid; 17092 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 17093 PossibleBody->getBeginLoc(), &BodyColInvalid); 17094 if (BodyColInvalid) 17095 return; 17096 17097 bool StmtColInvalid; 17098 unsigned StmtCol = 17099 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 17100 if (StmtColInvalid) 17101 return; 17102 17103 if (BodyCol > StmtCol) 17104 ProbableTypo = true; 17105 } 17106 17107 if (ProbableTypo) { 17108 Diag(NBody->getSemiLoc(), DiagID); 17109 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 17110 } 17111 } 17112 17113 //===--- CHECK: Warn on self move with std::move. -------------------------===// 17114 17115 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
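/// A minimal illustrative example (not taken from this file) of the pattern
/// this check is intended to catch:
/// \code
///   #include <utility>
///   #include <vector>
///   void reset(std::vector<int> &v) {
///     v = std::move(v); // warning: explicitly moving variable 'v' to itself
///   }
/// \endcode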
void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                            SourceLocation OpLoc) {
  if (Diags.isIgnored(diag::warn_self_move, OpLoc))
    return;

  if (inTemplateInstantiation())
    return;

  // Strip parens and casts away.
  LHSExpr = LHSExpr->IgnoreParenImpCasts();
  RHSExpr = RHSExpr->IgnoreParenImpCasts();

  // Check for a call expression
  const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
  if (!CE || CE->getNumArgs() != 1)
    return;

  // Check for a call to std::move
  if (!CE->isCallToStdMove())
    return;

  // Get argument from std::move
  RHSExpr = CE->getArg(0);

  const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
  const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);

  // Two DeclRefExpr's, check that the decls are the same.
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    auto D = Diag(OpLoc, diag::warn_self_move)
             << LHSExpr->getType() << LHSExpr->getSourceRange()
             << RHSExpr->getSourceRange();
    if (const FieldDecl *F =
            getSelfAssignmentClassMemberCandidate(RHSDeclRef->getDecl()))
      D << 1 << F
        << FixItHint::CreateInsertion(LHSDeclRef->getBeginLoc(), "this->");
    else
      D << 0;
    return;
  }

  // Member variables require a different approach to check for self moves.
  // MemberExpr's are the same if every nested MemberExpr refers to the same
  // Decl and that the base Expr's are DeclRefExpr's with the same Decl or
  // the base Expr's are CXXThisExpr's.
  const Expr *LHSBase = LHSExpr;
  const Expr *RHSBase = RHSExpr;
  const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
  const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
  if (!LHSME || !RHSME)
    return;

  while (LHSME && RHSME) {
    if (LHSME->getMemberDecl()->getCanonicalDecl() !=
        RHSME->getMemberDecl()->getCanonicalDecl())
      return;

    LHSBase = LHSME->getBase();
    RHSBase = RHSME->getBase();
    LHSME = dyn_cast<MemberExpr>(LHSBase);
    RHSME = dyn_cast<MemberExpr>(RHSBase);
  }

  LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
  RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move)
        << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
        << RHSExpr->getSourceRange();
    return;
  }

  if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
    Diag(OpLoc, diag::warn_self_move)
        << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
        << RHSExpr->getSourceRange();
}

//===--- Layout compatibility ----------------------------------------------//

static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);

/// Check if two enumeration types are layout-compatible.
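/// An illustrative (non-normative) example: the following two enumerations
/// are layout-compatible under this rule, since both are complete and share
/// the underlying type 'int'.
/// \code
///   enum E1 : int { A };
///   enum E2 : int { B };
/// \endcode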
17211 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 17212 // C++11 [dcl.enum] p8: 17213 // Two enumeration types are layout-compatible if they have the same 17214 // underlying type. 17215 return ED1->isComplete() && ED2->isComplete() && 17216 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 17217 } 17218 17219 /// Check if two fields are layout-compatible. 17220 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 17221 FieldDecl *Field2) { 17222 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 17223 return false; 17224 17225 if (Field1->isBitField() != Field2->isBitField()) 17226 return false; 17227 17228 if (Field1->isBitField()) { 17229 // Make sure that the bit-fields are the same length. 17230 unsigned Bits1 = Field1->getBitWidthValue(C); 17231 unsigned Bits2 = Field2->getBitWidthValue(C); 17232 17233 if (Bits1 != Bits2) 17234 return false; 17235 } 17236 17237 return true; 17238 } 17239 17240 /// Check if two standard-layout structs are layout-compatible. 17241 /// (C++11 [class.mem] p17) 17242 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 17243 RecordDecl *RD2) { 17244 // If both records are C++ classes, check that base classes match. 17245 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 17246 // If one of records is a CXXRecordDecl we are in C++ mode, 17247 // thus the other one is a CXXRecordDecl, too. 17248 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 17249 // Check number of base classes. 17250 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 17251 return false; 17252 17253 // Check the base classes. 17254 for (CXXRecordDecl::base_class_const_iterator 17255 Base1 = D1CXX->bases_begin(), 17256 BaseEnd1 = D1CXX->bases_end(), 17257 Base2 = D2CXX->bases_begin(); 17258 Base1 != BaseEnd1; 17259 ++Base1, ++Base2) { 17260 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 17261 return false; 17262 } 17263 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 17264 // If only RD2 is a C++ class, it should have zero base classes. 17265 if (D2CXX->getNumBases() > 0) 17266 return false; 17267 } 17268 17269 // Check the fields. 17270 RecordDecl::field_iterator Field2 = RD2->field_begin(), 17271 Field2End = RD2->field_end(), 17272 Field1 = RD1->field_begin(), 17273 Field1End = RD1->field_end(); 17274 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 17275 if (!isLayoutCompatible(C, *Field1, *Field2)) 17276 return false; 17277 } 17278 if (Field1 != Field1End || Field2 != Field2End) 17279 return false; 17280 17281 return true; 17282 } 17283 17284 /// Check if two standard-layout unions are layout-compatible. 
17285 /// (C++11 [class.mem] p18) 17286 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 17287 RecordDecl *RD2) { 17288 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 17289 for (auto *Field2 : RD2->fields()) 17290 UnmatchedFields.insert(Field2); 17291 17292 for (auto *Field1 : RD1->fields()) { 17293 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 17294 I = UnmatchedFields.begin(), 17295 E = UnmatchedFields.end(); 17296 17297 for ( ; I != E; ++I) { 17298 if (isLayoutCompatible(C, Field1, *I)) { 17299 bool Result = UnmatchedFields.erase(*I); 17300 (void) Result; 17301 assert(Result); 17302 break; 17303 } 17304 } 17305 if (I == E) 17306 return false; 17307 } 17308 17309 return UnmatchedFields.empty(); 17310 } 17311 17312 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 17313 RecordDecl *RD2) { 17314 if (RD1->isUnion() != RD2->isUnion()) 17315 return false; 17316 17317 if (RD1->isUnion()) 17318 return isLayoutCompatibleUnion(C, RD1, RD2); 17319 else 17320 return isLayoutCompatibleStruct(C, RD1, RD2); 17321 } 17322 17323 /// Check if two types are layout-compatible in C++11 sense. 17324 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 17325 if (T1.isNull() || T2.isNull()) 17326 return false; 17327 17328 // C++11 [basic.types] p11: 17329 // If two types T1 and T2 are the same type, then T1 and T2 are 17330 // layout-compatible types. 17331 if (C.hasSameType(T1, T2)) 17332 return true; 17333 17334 T1 = T1.getCanonicalType().getUnqualifiedType(); 17335 T2 = T2.getCanonicalType().getUnqualifiedType(); 17336 17337 const Type::TypeClass TC1 = T1->getTypeClass(); 17338 const Type::TypeClass TC2 = T2->getTypeClass(); 17339 17340 if (TC1 != TC2) 17341 return false; 17342 17343 if (TC1 == Type::Enum) { 17344 return isLayoutCompatible(C, 17345 cast<EnumType>(T1)->getDecl(), 17346 cast<EnumType>(T2)->getDecl()); 17347 } else if (TC1 == Type::Record) { 17348 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 17349 return false; 17350 17351 return isLayoutCompatible(C, 17352 cast<RecordType>(T1)->getDecl(), 17353 cast<RecordType>(T2)->getDecl()); 17354 } 17355 17356 return false; 17357 } 17358 17359 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 17360 17361 /// Given a type tag expression find the type tag itself. 17362 /// 17363 /// \param TypeExpr Type tag expression, as it appears in user's code. 17364 /// 17365 /// \param VD Declaration of an identifier that appears in a type tag. 17366 /// 17367 /// \param MagicValue Type tag magic value. 17368 /// 17369 /// \param isConstantEvaluated whether the evalaution should be performed in 17370 17371 /// constant context. 
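/// An illustrative sketch (the declaration below is hypothetical, not from
/// this file) of a type tag as it typically appears in user code: the address
/// of a variable carrying the type_tag_for_datatype attribute, or a
/// registered integer "magic value".
/// \code
///   static const int mpi_int_tag
///       __attribute__((type_tag_for_datatype(mpi, int)));
///   // Call sites then pass '&mpi_int_tag' (or a registered magic value)
///   // as the type tag argument.
/// \endcode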
17372 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 17373 const ValueDecl **VD, uint64_t *MagicValue, 17374 bool isConstantEvaluated) { 17375 while(true) { 17376 if (!TypeExpr) 17377 return false; 17378 17379 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 17380 17381 switch (TypeExpr->getStmtClass()) { 17382 case Stmt::UnaryOperatorClass: { 17383 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 17384 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 17385 TypeExpr = UO->getSubExpr(); 17386 continue; 17387 } 17388 return false; 17389 } 17390 17391 case Stmt::DeclRefExprClass: { 17392 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 17393 *VD = DRE->getDecl(); 17394 return true; 17395 } 17396 17397 case Stmt::IntegerLiteralClass: { 17398 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 17399 llvm::APInt MagicValueAPInt = IL->getValue(); 17400 if (MagicValueAPInt.getActiveBits() <= 64) { 17401 *MagicValue = MagicValueAPInt.getZExtValue(); 17402 return true; 17403 } else 17404 return false; 17405 } 17406 17407 case Stmt::BinaryConditionalOperatorClass: 17408 case Stmt::ConditionalOperatorClass: { 17409 const AbstractConditionalOperator *ACO = 17410 cast<AbstractConditionalOperator>(TypeExpr); 17411 bool Result; 17412 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 17413 isConstantEvaluated)) { 17414 if (Result) 17415 TypeExpr = ACO->getTrueExpr(); 17416 else 17417 TypeExpr = ACO->getFalseExpr(); 17418 continue; 17419 } 17420 return false; 17421 } 17422 17423 case Stmt::BinaryOperatorClass: { 17424 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 17425 if (BO->getOpcode() == BO_Comma) { 17426 TypeExpr = BO->getRHS(); 17427 continue; 17428 } 17429 return false; 17430 } 17431 17432 default: 17433 return false; 17434 } 17435 } 17436 } 17437 17438 /// Retrieve the C type corresponding to type tag TypeExpr. 17439 /// 17440 /// \param TypeExpr Expression that specifies a type tag. 17441 /// 17442 /// \param MagicValues Registered magic values. 17443 /// 17444 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 17445 /// kind. 17446 /// 17447 /// \param TypeInfo Information about the corresponding C type. 17448 /// 17449 /// \param isConstantEvaluated whether the evalaution should be performed in 17450 /// constant context. 17451 /// 17452 /// \returns true if the corresponding C type was found. 17453 static bool GetMatchingCType( 17454 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 17455 const ASTContext &Ctx, 17456 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 17457 *MagicValues, 17458 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 17459 bool isConstantEvaluated) { 17460 FoundWrongKind = false; 17461 17462 // Variable declaration that has type_tag_for_datatype attribute. 
17463 const ValueDecl *VD = nullptr; 17464 17465 uint64_t MagicValue; 17466 17467 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 17468 return false; 17469 17470 if (VD) { 17471 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 17472 if (I->getArgumentKind() != ArgumentKind) { 17473 FoundWrongKind = true; 17474 return false; 17475 } 17476 TypeInfo.Type = I->getMatchingCType(); 17477 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 17478 TypeInfo.MustBeNull = I->getMustBeNull(); 17479 return true; 17480 } 17481 return false; 17482 } 17483 17484 if (!MagicValues) 17485 return false; 17486 17487 llvm::DenseMap<Sema::TypeTagMagicValue, 17488 Sema::TypeTagData>::const_iterator I = 17489 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 17490 if (I == MagicValues->end()) 17491 return false; 17492 17493 TypeInfo = I->second; 17494 return true; 17495 } 17496 17497 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 17498 uint64_t MagicValue, QualType Type, 17499 bool LayoutCompatible, 17500 bool MustBeNull) { 17501 if (!TypeTagForDatatypeMagicValues) 17502 TypeTagForDatatypeMagicValues.reset( 17503 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 17504 17505 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 17506 (*TypeTagForDatatypeMagicValues)[Magic] = 17507 TypeTagData(Type, LayoutCompatible, MustBeNull); 17508 } 17509 17510 static bool IsSameCharType(QualType T1, QualType T2) { 17511 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 17512 if (!BT1) 17513 return false; 17514 17515 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 17516 if (!BT2) 17517 return false; 17518 17519 BuiltinType::Kind T1Kind = BT1->getKind(); 17520 BuiltinType::Kind T2Kind = BT2->getKind(); 17521 17522 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 17523 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 17524 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 17525 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 17526 } 17527 17528 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 17529 const ArrayRef<const Expr *> ExprArgs, 17530 SourceLocation CallSiteLoc) { 17531 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 17532 bool IsPointerAttr = Attr->getIsPointer(); 17533 17534 // Retrieve the argument representing the 'type_tag'. 17535 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 17536 if (TypeTagIdxAST >= ExprArgs.size()) { 17537 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 17538 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 17539 return; 17540 } 17541 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 17542 bool FoundWrongKind; 17543 TypeTagData TypeInfo; 17544 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 17545 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 17546 TypeInfo, isConstantEvaluated())) { 17547 if (FoundWrongKind) 17548 Diag(TypeTagExpr->getExprLoc(), 17549 diag::warn_type_tag_for_datatype_wrong_kind) 17550 << TypeTagExpr->getSourceRange(); 17551 return; 17552 } 17553 17554 // Retrieve the argument representing the 'arg_idx'. 
17555 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 17556 if (ArgumentIdxAST >= ExprArgs.size()) { 17557 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 17558 << 1 << Attr->getArgumentIdx().getSourceIndex(); 17559 return; 17560 } 17561 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 17562 if (IsPointerAttr) { 17563 // Skip implicit cast of pointer to `void *' (as a function argument). 17564 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 17565 if (ICE->getType()->isVoidPointerType() && 17566 ICE->getCastKind() == CK_BitCast) 17567 ArgumentExpr = ICE->getSubExpr(); 17568 } 17569 QualType ArgumentType = ArgumentExpr->getType(); 17570 17571 // Passing a `void*' pointer shouldn't trigger a warning. 17572 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 17573 return; 17574 17575 if (TypeInfo.MustBeNull) { 17576 // Type tag with matching void type requires a null pointer. 17577 if (!ArgumentExpr->isNullPointerConstant(Context, 17578 Expr::NPC_ValueDependentIsNotNull)) { 17579 Diag(ArgumentExpr->getExprLoc(), 17580 diag::warn_type_safety_null_pointer_required) 17581 << ArgumentKind->getName() 17582 << ArgumentExpr->getSourceRange() 17583 << TypeTagExpr->getSourceRange(); 17584 } 17585 return; 17586 } 17587 17588 QualType RequiredType = TypeInfo.Type; 17589 if (IsPointerAttr) 17590 RequiredType = Context.getPointerType(RequiredType); 17591 17592 bool mismatch = false; 17593 if (!TypeInfo.LayoutCompatible) { 17594 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 17595 17596 // C++11 [basic.fundamental] p1: 17597 // Plain char, signed char, and unsigned char are three distinct types. 17598 // 17599 // But we treat plain `char' as equivalent to `signed char' or `unsigned 17600 // char' depending on the current char signedness mode. 
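    // For example (illustrative): when plain 'char' is signed, an argument of
    // type 'char *' is accepted where the registered tag type is
    // 'signed char *', and vice versa.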
17601 if (mismatch) 17602 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 17603 RequiredType->getPointeeType())) || 17604 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 17605 mismatch = false; 17606 } else 17607 if (IsPointerAttr) 17608 mismatch = !isLayoutCompatible(Context, 17609 ArgumentType->getPointeeType(), 17610 RequiredType->getPointeeType()); 17611 else 17612 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 17613 17614 if (mismatch) 17615 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 17616 << ArgumentType << ArgumentKind 17617 << TypeInfo.LayoutCompatible << RequiredType 17618 << ArgumentExpr->getSourceRange() 17619 << TypeTagExpr->getSourceRange(); 17620 } 17621 17622 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 17623 CharUnits Alignment) { 17624 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 17625 } 17626 17627 void Sema::DiagnoseMisalignedMembers() { 17628 for (MisalignedMember &m : MisalignedMembers) { 17629 const NamedDecl *ND = m.RD; 17630 if (ND->getName().empty()) { 17631 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 17632 ND = TD; 17633 } 17634 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 17635 << m.MD << ND << m.E->getSourceRange(); 17636 } 17637 MisalignedMembers.clear(); 17638 } 17639 17640 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 17641 E = E->IgnoreParens(); 17642 if (!T->isPointerType() && !T->isIntegerType() && !T->isDependentType()) 17643 return; 17644 if (isa<UnaryOperator>(E) && 17645 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 17646 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 17647 if (isa<MemberExpr>(Op)) { 17648 auto *MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 17649 if (MA != MisalignedMembers.end() && 17650 (T->isDependentType() || T->isIntegerType() || 17651 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 17652 Context.getTypeAlignInChars( 17653 T->getPointeeType()) <= MA->Alignment)))) 17654 MisalignedMembers.erase(MA); 17655 } 17656 } 17657 } 17658 17659 void Sema::RefersToMemberWithReducedAlignment( 17660 Expr *E, 17661 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 17662 Action) { 17663 const auto *ME = dyn_cast<MemberExpr>(E); 17664 if (!ME) 17665 return; 17666 17667 // No need to check expressions with an __unaligned-qualified type. 17668 if (E->getType().getQualifiers().hasUnaligned()) 17669 return; 17670 17671 // For a chain of MemberExpr like "a.b.c.d" this list 17672 // will keep FieldDecl's like [d, c, b]. 17673 SmallVector<FieldDecl *, 4> ReverseMemberChain; 17674 const MemberExpr *TopME = nullptr; 17675 bool AnyIsPacked = false; 17676 do { 17677 QualType BaseType = ME->getBase()->getType(); 17678 if (BaseType->isDependentType()) 17679 return; 17680 if (ME->isArrow()) 17681 BaseType = BaseType->getPointeeType(); 17682 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 17683 if (RD->isInvalidDecl()) 17684 return; 17685 17686 ValueDecl *MD = ME->getMemberDecl(); 17687 auto *FD = dyn_cast<FieldDecl>(MD); 17688 // We do not care about non-data members. 
17689 if (!FD || FD->isInvalidDecl()) 17690 return; 17691 17692 AnyIsPacked = 17693 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>()); 17694 ReverseMemberChain.push_back(FD); 17695 17696 TopME = ME; 17697 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens()); 17698 } while (ME); 17699 assert(TopME && "We did not compute a topmost MemberExpr!"); 17700 17701 // Not the scope of this diagnostic. 17702 if (!AnyIsPacked) 17703 return; 17704 17705 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts(); 17706 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase); 17707 // TODO: The innermost base of the member expression may be too complicated. 17708 // For now, just disregard these cases. This is left for future 17709 // improvement. 17710 if (!DRE && !isa<CXXThisExpr>(TopBase)) 17711 return; 17712 17713 // Alignment expected by the whole expression. 17714 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType()); 17715 17716 // No need to do anything else with this case. 17717 if (ExpectedAlignment.isOne()) 17718 return; 17719 17720 // Synthesize offset of the whole access. 17721 CharUnits Offset; 17722 for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain)) 17723 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD)); 17724 17725 // Compute the CompleteObjectAlignment as the alignment of the whole chain. 17726 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars( 17727 ReverseMemberChain.back()->getParent()->getTypeForDecl()); 17728 17729 // The base expression of the innermost MemberExpr may give 17730 // stronger guarantees than the class containing the member. 17731 if (DRE && !TopME->isArrow()) { 17732 const ValueDecl *VD = DRE->getDecl(); 17733 if (!VD->getType()->isReferenceType()) 17734 CompleteObjectAlignment = 17735 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD)); 17736 } 17737 17738 // Check if the synthesized offset fulfills the alignment. 17739 if (Offset % ExpectedAlignment != 0 || 17740 // It may fulfill the offset it but the effective alignment may still be 17741 // lower than the expected expression alignment. 17742 CompleteObjectAlignment < ExpectedAlignment) { 17743 // If this happens, we want to determine a sensible culprit of this. 17744 // Intuitively, watching the chain of member expressions from right to 17745 // left, we start with the required alignment (as required by the field 17746 // type) but some packed attribute in that chain has reduced the alignment. 17747 // It may happen that another packed structure increases it again. But if 17748 // we are here such increase has not been enough. So pointing the first 17749 // FieldDecl that either is packed or else its RecordDecl is, 17750 // seems reasonable. 
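    // For example (illustrative, hypothetical types): given
    //   struct __attribute__((packed)) Inner { char c; int i; };
    //   struct Outer { Inner in; } o;
    // the chain for '&o.in.i' is [i, in], and 'i' is chosen because its
    // parent RecordDecl ('Inner') is packed.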
17751 FieldDecl *FD = nullptr; 17752 CharUnits Alignment; 17753 for (FieldDecl *FDI : ReverseMemberChain) { 17754 if (FDI->hasAttr<PackedAttr>() || 17755 FDI->getParent()->hasAttr<PackedAttr>()) { 17756 FD = FDI; 17757 Alignment = std::min( 17758 Context.getTypeAlignInChars(FD->getType()), 17759 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 17760 break; 17761 } 17762 } 17763 assert(FD && "We did not find a packed FieldDecl!"); 17764 Action(E, FD->getParent(), FD, Alignment); 17765 } 17766 } 17767 17768 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 17769 using namespace std::placeholders; 17770 17771 RefersToMemberWithReducedAlignment( 17772 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 17773 _2, _3, _4)); 17774 } 17775 17776 bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) { 17777 if (checkArgCount(*this, TheCall, 1)) 17778 return true; 17779 17780 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 17781 if (A.isInvalid()) 17782 return true; 17783 17784 TheCall->setArg(0, A.get()); 17785 QualType TyA = A.get()->getType(); 17786 17787 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 17788 return true; 17789 17790 TheCall->setType(TyA); 17791 return false; 17792 } 17793 17794 bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) { 17795 if (checkArgCount(*this, TheCall, 2)) 17796 return true; 17797 17798 ExprResult A = TheCall->getArg(0); 17799 ExprResult B = TheCall->getArg(1); 17800 // Do standard promotions between the two arguments, returning their common 17801 // type. 17802 QualType Res = 17803 UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison); 17804 if (A.isInvalid() || B.isInvalid()) 17805 return true; 17806 17807 QualType TyA = A.get()->getType(); 17808 QualType TyB = B.get()->getType(); 17809 17810 if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType()) 17811 return Diag(A.get()->getBeginLoc(), 17812 diag::err_typecheck_call_different_arg_types) 17813 << TyA << TyB; 17814 17815 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 17816 return true; 17817 17818 TheCall->setArg(0, A.get()); 17819 TheCall->setArg(1, B.get()); 17820 TheCall->setType(Res); 17821 return false; 17822 } 17823 17824 bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) { 17825 if (checkArgCount(*this, TheCall, 1)) 17826 return true; 17827 17828 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 17829 if (A.isInvalid()) 17830 return true; 17831 17832 TheCall->setArg(0, A.get()); 17833 return false; 17834 } 17835 17836 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 17837 ExprResult CallResult) { 17838 if (checkArgCount(*this, TheCall, 1)) 17839 return ExprError(); 17840 17841 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 17842 if (MatrixArg.isInvalid()) 17843 return MatrixArg; 17844 Expr *Matrix = MatrixArg.get(); 17845 17846 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 17847 if (!MType) { 17848 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17849 << 1 << /* matrix ty*/ 1 << Matrix->getType(); 17850 return ExprError(); 17851 } 17852 17853 // Create returned matrix type by swapping rows and columns of the argument 17854 // matrix type. 17855 QualType ResultType = Context.getConstantMatrixType( 17856 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 17857 17858 // Change the return type to the type of the returned matrix. 
17859 TheCall->setType(ResultType); 17860 17861 // Update call argument to use the possibly converted matrix argument. 17862 TheCall->setArg(0, Matrix); 17863 return CallResult; 17864 } 17865 17866 // Get and verify the matrix dimensions. 17867 static std::optional<unsigned> 17868 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 17869 SourceLocation ErrorPos; 17870 std::optional<llvm::APSInt> Value = 17871 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 17872 if (!Value) { 17873 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 17874 << Name; 17875 return {}; 17876 } 17877 uint64_t Dim = Value->getZExtValue(); 17878 if (!ConstantMatrixType::isDimensionValid(Dim)) { 17879 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 17880 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 17881 return {}; 17882 } 17883 return Dim; 17884 } 17885 17886 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 17887 ExprResult CallResult) { 17888 if (!getLangOpts().MatrixTypes) { 17889 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 17890 return ExprError(); 17891 } 17892 17893 if (checkArgCount(*this, TheCall, 4)) 17894 return ExprError(); 17895 17896 unsigned PtrArgIdx = 0; 17897 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17898 Expr *RowsExpr = TheCall->getArg(1); 17899 Expr *ColumnsExpr = TheCall->getArg(2); 17900 Expr *StrideExpr = TheCall->getArg(3); 17901 17902 bool ArgError = false; 17903 17904 // Check pointer argument. 17905 { 17906 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 17907 if (PtrConv.isInvalid()) 17908 return PtrConv; 17909 PtrExpr = PtrConv.get(); 17910 TheCall->setArg(0, PtrExpr); 17911 if (PtrExpr->isTypeDependent()) { 17912 TheCall->setType(Context.DependentTy); 17913 return TheCall; 17914 } 17915 } 17916 17917 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 17918 QualType ElementTy; 17919 if (!PtrTy) { 17920 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17921 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 17922 ArgError = true; 17923 } else { 17924 ElementTy = PtrTy->getPointeeType().getUnqualifiedType(); 17925 17926 if (!ConstantMatrixType::isValidElementType(ElementTy)) { 17927 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17928 << PtrArgIdx + 1 << /* pointer to element ty*/ 2 17929 << PtrExpr->getType(); 17930 ArgError = true; 17931 } 17932 } 17933 17934 // Apply default Lvalue conversions and convert the expression to size_t. 17935 auto ApplyArgumentConversions = [this](Expr *E) { 17936 ExprResult Conv = DefaultLvalueConversion(E); 17937 if (Conv.isInvalid()) 17938 return Conv; 17939 17940 return tryConvertExprToType(Conv.get(), Context.getSizeType()); 17941 }; 17942 17943 // Apply conversion to row and column expressions. 17944 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr); 17945 if (!RowsConv.isInvalid()) { 17946 RowsExpr = RowsConv.get(); 17947 TheCall->setArg(1, RowsExpr); 17948 } else 17949 RowsExpr = nullptr; 17950 17951 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr); 17952 if (!ColumnsConv.isInvalid()) { 17953 ColumnsExpr = ColumnsConv.get(); 17954 TheCall->setArg(2, ColumnsExpr); 17955 } else 17956 ColumnsExpr = nullptr; 17957 17958 // If any part of the result matrix type is still pending, just use 17959 // Context.DependentTy, until all parts are resolved. 
17960 if ((RowsExpr && RowsExpr->isTypeDependent()) || 17961 (ColumnsExpr && ColumnsExpr->isTypeDependent())) { 17962 TheCall->setType(Context.DependentTy); 17963 return CallResult; 17964 } 17965 17966 // Check row and column dimensions. 17967 std::optional<unsigned> MaybeRows; 17968 if (RowsExpr) 17969 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this); 17970 17971 std::optional<unsigned> MaybeColumns; 17972 if (ColumnsExpr) 17973 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this); 17974 17975 // Check stride argument. 17976 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 17977 if (StrideConv.isInvalid()) 17978 return ExprError(); 17979 StrideExpr = StrideConv.get(); 17980 TheCall->setArg(3, StrideExpr); 17981 17982 if (MaybeRows) { 17983 if (std::optional<llvm::APSInt> Value = 17984 StrideExpr->getIntegerConstantExpr(Context)) { 17985 uint64_t Stride = Value->getZExtValue(); 17986 if (Stride < *MaybeRows) { 17987 Diag(StrideExpr->getBeginLoc(), 17988 diag::err_builtin_matrix_stride_too_small); 17989 ArgError = true; 17990 } 17991 } 17992 } 17993 17994 if (ArgError || !MaybeRows || !MaybeColumns) 17995 return ExprError(); 17996 17997 TheCall->setType( 17998 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 17999 return CallResult; 18000 } 18001 18002 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 18003 ExprResult CallResult) { 18004 if (checkArgCount(*this, TheCall, 3)) 18005 return ExprError(); 18006 18007 unsigned PtrArgIdx = 1; 18008 Expr *MatrixExpr = TheCall->getArg(0); 18009 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 18010 Expr *StrideExpr = TheCall->getArg(2); 18011 18012 bool ArgError = false; 18013 18014 { 18015 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 18016 if (MatrixConv.isInvalid()) 18017 return MatrixConv; 18018 MatrixExpr = MatrixConv.get(); 18019 TheCall->setArg(0, MatrixExpr); 18020 } 18021 if (MatrixExpr->isTypeDependent()) { 18022 TheCall->setType(Context.DependentTy); 18023 return TheCall; 18024 } 18025 18026 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 18027 if (!MatrixTy) { 18028 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18029 << 1 << /*matrix ty */ 1 << MatrixExpr->getType(); 18030 ArgError = true; 18031 } 18032 18033 { 18034 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 18035 if (PtrConv.isInvalid()) 18036 return PtrConv; 18037 PtrExpr = PtrConv.get(); 18038 TheCall->setArg(1, PtrExpr); 18039 if (PtrExpr->isTypeDependent()) { 18040 TheCall->setType(Context.DependentTy); 18041 return TheCall; 18042 } 18043 } 18044 18045 // Check pointer argument. 
18046 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 18047 if (!PtrTy) { 18048 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18049 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 18050 ArgError = true; 18051 } else { 18052 QualType ElementTy = PtrTy->getPointeeType(); 18053 if (ElementTy.isConstQualified()) { 18054 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 18055 ArgError = true; 18056 } 18057 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 18058 if (MatrixTy && 18059 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 18060 Diag(PtrExpr->getBeginLoc(), 18061 diag::err_builtin_matrix_pointer_arg_mismatch) 18062 << ElementTy << MatrixTy->getElementType(); 18063 ArgError = true; 18064 } 18065 } 18066 18067 // Apply default Lvalue conversions and convert the stride expression to 18068 // size_t. 18069 { 18070 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 18071 if (StrideConv.isInvalid()) 18072 return StrideConv; 18073 18074 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 18075 if (StrideConv.isInvalid()) 18076 return StrideConv; 18077 StrideExpr = StrideConv.get(); 18078 TheCall->setArg(2, StrideExpr); 18079 } 18080 18081 // Check stride argument. 18082 if (MatrixTy) { 18083 if (std::optional<llvm::APSInt> Value = 18084 StrideExpr->getIntegerConstantExpr(Context)) { 18085 uint64_t Stride = Value->getZExtValue(); 18086 if (Stride < MatrixTy->getNumRows()) { 18087 Diag(StrideExpr->getBeginLoc(), 18088 diag::err_builtin_matrix_stride_too_small); 18089 ArgError = true; 18090 } 18091 } 18092 } 18093 18094 if (ArgError) 18095 return ExprError(); 18096 18097 return CallResult; 18098 } 18099 18100 /// \brief Enforce the bounds of a TCB 18101 /// CheckTCBEnforcement - Enforces that every function in a named TCB only 18102 /// directly calls other functions in the same TCB as marked by the enforce_tcb 18103 /// and enforce_tcb_leaf attributes. 18104 void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc, 18105 const NamedDecl *Callee) { 18106 const NamedDecl *Caller = getCurFunctionOrMethodDecl(); 18107 18108 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>()) 18109 return; 18110 18111 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 18112 // all TCBs the callee is a part of. 18113 llvm::StringSet<> CalleeTCBs; 18114 for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>()) 18115 CalleeTCBs.insert(A->getTCBName()); 18116 for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>()) 18117 CalleeTCBs.insert(A->getTCBName()); 18118 18119 // Go through the TCBs the caller is a part of and emit warnings if Caller 18120 // is in a TCB that the Callee is not. 18121 for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) { 18122 StringRef CallerTCB = A->getTCBName(); 18123 if (CalleeTCBs.count(CallerTCB) == 0) { 18124 this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation) 18125 << Callee << CallerTCB; 18126 } 18127 } 18128 } 18129
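
// Illustrative example (hypothetical code, not part of this file) of the
// situation diagnosed above:
//   void helper();                                   // not in any TCB
//   __attribute__((enforce_tcb("ssl"))) void f() {
//     helper(); // warns: the call to 'helper' violates trusted computing
//               // base 'ssl'
//   }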