//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
                                            Sema::FormatArgumentPassingKind B) {
  return (A << 8) | B;
}

/// Checks that a call expression's argument count is at least the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
                                 unsigned MinArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount >= MinArgCount)
    return false;

  return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
         << 0 /*function call*/ << MinArgCount << ArgCount
         << Call->getSourceRange();
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount == DesiredArgCount)
    return false;

  if (checkArgCountAtLeast(S, Call, DesiredArgCount))
    return true;
  assert(ArgCount > DesiredArgCount && "should have diagnosed this");

  // Highlight all the excess arguments.
  SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
                    Call->getArg(ArgCount - 1)->getEndLoc());

  return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << DesiredArgCount << ArgCount
         << Call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
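/// Illustrative only (not from the original source): a call this check is
/// meant to accept looks like
///   int X = __builtin_annotation(Value, "my annotation");
/// while a wide string literal such as L"bad" as the second argument is
/// rejected below.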
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isOrdinary()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check that the argument to __builtin_function_start is a function.
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment)
/// and __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
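/// Illustrative only (not from the original source): calls the checks below
/// are intended to accept include
///   void *Aligned = __builtin_align_up(Ptr, 64);  // pointer value
///   bool  IsOk    = __builtin_is_aligned(N, 16);  // integer value
/// whereas an alignment of 0 or 3 (not a power of two), or a function-pointer
/// value argument, is diagnosed.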
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
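  // Illustrative only (not from the original source): the expected shape of a
  // call is
  //   bool Overflowed = __builtin_add_overflow(A, B, &Result);
  // where A and B are integers and Result is a non-const integer lvalue.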
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}

namespace {
struct BuiltinDumpStructGenerator {
  Sema &S;
  CallExpr *TheCall;
  SourceLocation Loc = TheCall->getBeginLoc();
  SmallVector<Expr *, 32> Actions;
  DiagnosticErrorTrap ErrorTracker;
  PrintingPolicy Policy;

  BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
      : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
        Policy(S.Context.getPrintingPolicy()) {
    Policy.AnonymousTagLocations = false;
  }

  Expr *makeOpaqueValueExpr(Expr *Inner) {
    auto *OVE = new (S.Context)
        OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
                        Inner->getObjectKind(), Inner);
    Actions.push_back(OVE);
    return OVE;
  }

  Expr *getStringLiteral(llvm::StringRef Str) {
    Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str);
    // Wrap the literal in parentheses to attach a source location.
    return new (S.Context) ParenExpr(Loc, Loc, Lit);
  }

  bool callPrintFunction(llvm::StringRef Format,
                         llvm::ArrayRef<Expr *> Exprs = {}) {
    SmallVector<Expr *, 8> Args;
    assert(TheCall->getNumArgs() >= 2);
    Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
    Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
    Args.push_back(getStringLiteral(Format));
    Args.insert(Args.end(), Exprs.begin(), Exprs.end());

    // Register a note to explain why we're performing the call.
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
    Ctx.PointOfInstantiation = Loc;
    Ctx.CallArgs = Args.data();
    Ctx.NumCallArgs = Args.size();
    S.pushCodeSynthesisContext(Ctx);

    ExprResult RealCall =
        S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
                        TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());

    S.popCodeSynthesisContext();
    if (!RealCall.isInvalid())
      Actions.push_back(RealCall.get());
    // Bail out if we've hit any errors, even if we managed to build the
    // call. We don't want to produce more than one error.
    return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
  }

  Expr *getIndentString(unsigned Depth) {
    if (!Depth)
      return nullptr;

    llvm::SmallString<32> Indent;
    Indent.resize(Depth * Policy.Indentation, ' ');
    return getStringLiteral(Indent);
  }

  Expr *getTypeString(QualType T) {
    return getStringLiteral(T.getAsString(Policy));
  }

  bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
    llvm::raw_svector_ostream OS(Str);

    // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
    // than trying to print a single character.
    if (auto *BT = T->getAs<BuiltinType>()) {
      switch (BT->getKind()) {
      case BuiltinType::Bool:
        OS << "%d";
        return true;
      case BuiltinType::Char_U:
      case BuiltinType::UChar:
        OS << "%hhu";
        return true;
      case BuiltinType::Char_S:
      case BuiltinType::SChar:
        OS << "%hhd";
        return true;
      default:
        break;
      }
    }

    analyze_printf::PrintfSpecifier Specifier;
    if (Specifier.fixType(T, S.getLangOpts(), S.Context,
                          /*IsObjCLiteral=*/false)) {
      // We were able to guess how to format this.
      if (Specifier.getConversionSpecifier().getKind() ==
          analyze_printf::PrintfConversionSpecifier::sArg) {
        // Wrap double-quotes around a '%s' specifier and limit its maximum
        // length. Ideally we'd also somehow escape special characters in the
        // contents but printf doesn't support that.
        // FIXME: '%s' formatting is not safe in general.
        OS << '"';
        Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
        Specifier.toString(OS);
        OS << '"';
        // FIXME: It would be nice to include a '...' if the string doesn't fit
        // in the length limit.
      } else {
        Specifier.toString(OS);
      }
      return true;
    }

    if (T->isPointerType()) {
      // Format all pointers with '%p'.
      OS << "%p";
      return true;
    }

    return false;
  }

  bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
    Expr *IndentLit = getIndentString(Depth);
    Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
    if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
                  : callPrintFunction("%s", {TypeLit}))
      return true;

    return dumpRecordValue(RD, E, IndentLit, Depth);
  }

  // Dump a record value. E should be a pointer or lvalue referring to an RD.
  bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
                       unsigned Depth) {
    // FIXME: Decide what to do if RD is a union. At least we should probably
    // turn off printing `const char*` members with `%s`, because that is very
    // likely to crash if that's not the active member. Whatever we decide, we
    // should document it.

    // Build an OpaqueValueExpr so we can refer to E more than once without
    // triggering re-evaluation.
    Expr *RecordArg = makeOpaqueValueExpr(E);
    bool RecordArgIsPtr = RecordArg->getType()->isPointerType();

    if (callPrintFunction(" {\n"))
      return true;

    // Dump each base class, regardless of whether they're aggregates.
    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &Base : CXXRD->bases()) {
        QualType BaseType =
            RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
                           : S.Context.getLValueReferenceType(Base.getType());
        ExprResult BasePtr = S.BuildCStyleCastExpr(
            Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
            RecordArg);
        if (BasePtr.isInvalid() ||
            dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
                              Depth + 1))
          return true;
      }
    }

    Expr *FieldIndentArg = getIndentString(Depth + 1);

    // Dump each field.
    for (auto *D : RD->decls()) {
      auto *IFD = dyn_cast<IndirectFieldDecl>(D);
      auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
      if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
        continue;

      llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
      llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
                                           getTypeString(FD->getType()),
                                           getStringLiteral(FD->getName())};

      if (FD->isBitField()) {
        Format += ": %zu ";
        QualType SizeT = S.Context.getSizeType();
        llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
                             FD->getBitWidthValue(S.Context));
        Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
      }

      Format += "=";

      ExprResult Field =
          IFD ? S.BuildAnonymousStructUnionMemberReference(
                    CXXScopeSpec(), Loc, IFD,
                    DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
              : S.BuildFieldReferenceExpr(
                    RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
                    DeclAccessPair::make(FD, AS_public),
                    DeclarationNameInfo(FD->getDeclName(), Loc));
      if (Field.isInvalid())
        return true;

      auto *InnerRD = FD->getType()->getAsRecordDecl();
      auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
      if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
        // Recursively print the values of members of aggregate record type.
        if (callPrintFunction(Format, Args) ||
            dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
          return true;
      } else {
        Format += " ";
        if (appendFormatSpecifier(FD->getType(), Format)) {
          // We know how to print this field.
          Args.push_back(Field.get());
        } else {
          // We don't know how to print this field. Print out its address
          // with a format specifier that a smart tool will be able to
          // recognize and treat specially.
          Format += "*%p";
          ExprResult FieldAddr =
              S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
          if (FieldAddr.isInvalid())
            return true;
          Args.push_back(FieldAddr.get());
        }
        Format += "\n";
        if (callPrintFunction(Format, Args))
          return true;
      }
    }

    return RecordIndent ?
                              callPrintFunction("%s}\n", RecordIndent)
                            : callPrintFunction("}\n");
  }

  Expr *buildWrapper() {
    auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
                                             PseudoObjectExpr::NoResult);
    TheCall->setType(Wrapper->getType());
    TheCall->setValueKind(Wrapper->getValueKind());
    return Wrapper;
  }
};
} // namespace

static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
  if (checkArgCountAtLeast(S, TheCall, 2))
    return ExprError();

  ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
  if (PtrArgResult.isInvalid())
    return ExprError();
  TheCall->setArg(0, PtrArgResult.get());

  // First argument should be a pointer to a struct.
  QualType PtrArgType = PtrArgResult.get()->getType();
  if (!PtrArgType->isPointerType() ||
      !PtrArgType->getPointeeType()->isRecordType()) {
    S.Diag(PtrArgResult.get()->getBeginLoc(),
           diag::err_expected_struct_pointer_argument)
        << 1 << TheCall->getDirectCallee() << PtrArgType;
    return ExprError();
  }
  const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl();

  // Second argument is a callable, but we can't fully validate it until we try
  // calling it.
  QualType FnArgType = TheCall->getArg(1)->getType();
  if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
      !FnArgType->isBlockPointerType() &&
      !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
    auto *BT = FnArgType->getAs<BuiltinType>();
    switch (BT ? BT->getKind() : BuiltinType::Void) {
    case BuiltinType::Dependent:
    case BuiltinType::Overload:
    case BuiltinType::BoundMember:
    case BuiltinType::PseudoObject:
    case BuiltinType::UnknownAny:
    case BuiltinType::BuiltinFn:
      // This might be a callable.
      break;

    default:
      S.Diag(TheCall->getArg(1)->getBeginLoc(),
             diag::err_expected_callable_argument)
          << 2 << TheCall->getDirectCallee() << FnArgType;
      return ExprError();
    }
  }

  BuiltinDumpStructGenerator Generator(S, TheCall);

  // Wrap parentheses around the given pointer. This is not necessary for
  // correct code generation, but it means that when we pretty-print the call
  // arguments in our diagnostics we will produce '(&s)->n' instead of the
  // incorrect '&s->n'.
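  // (Illustrative only, not from the original source: a typical call being
  // checked here is __builtin_dump_struct(&s, printf), where 's' is a struct
  // object and the second argument is any printf-like callable.)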
  Expr *PtrArg = PtrArgResult.get();
  PtrArg = new (S.Context)
      ParenExpr(PtrArg->getBeginLoc(),
                S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
  if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
    return ExprError();

  return Generator.buildWrapper();
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of
  // the argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<Optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size.
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    Optional<llvm::APSInt> DestSizeAPS = ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
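    // Illustrative only (not from the original source): with no explicit
    // precision, integer conversions such as "%d" print at least one digit,
    // while "%f" defaults to six fractional digits, so e.g. printf("%f", 0.0)
    // produces "0.000000".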
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto TranslateIndex = [&](unsigned Index) -> Optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ? DABAttr->argIndices_begin()[Index]
                            : Index - DABIndices + FD->getNumParams();
    if (NewIndex >= TheCall->getNumArgs())
      return llvm::None;
    return NewIndex;
  };

  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> Optional<llvm::APSInt> {
    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = *IndexOptional;
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(NewIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return llvm::None;
    llvm::APSInt Integer = Result.Val.getInt();
    Integer.setIsUnsigned(true);
    return Integer;
  };

  auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use
    // its (potentially) more strict checking mode. Otherwise, conservatively
    // assume type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = *IndexOptional;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return llvm::None;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = *IndexOptional;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return llvm::None;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  Optional<llvm::APSInt> SourceSize;
  Optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else if (FunctionName.startswith("__builtin_")) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
    }
    return FunctionName;
  };

  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
    if (!Format)
      return;

    if (!Format->isOrdinary() && !Format->isUTF8())
      return;

    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    StringRef FormatStrRef = Format->getString();
    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    const ConstantArrayType *T =
        Context.getAsConstantArrayType(Format->getType());
    assert(T && "String literal not of constant array type!");
    size_t TypeSize = T->getSize().getZExtValue();

    // In case there's a null byte somewhere.
    size_t StrLen =
        std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));

    analyze_format_string::ParseScanfString(H, FormatBytes,
                                            FormatBytes + StrLen, getLangOpts(),
                                            Context.getTargetInfo());

    // Unlike the other cases, in this one we have already issued the
    // diagnostic here, so no need to continue (because unlike the other cases,
    // here the diagnostic refers to the argument number).
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf
                             ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isOrdinary() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
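    // Illustrative only (not from the original source): a call such as
    //   char Buf[8]; strncpy(Buf, Src, 16);
    // has a size argument larger than the destination and is the kind of
    // mismatch diagnosed below.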
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  }

  if (!SourceSize || !DestinationSize ||
      llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0)
    return;

  StringRef FunctionName = GetFunctionName();

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through
  // template instantiation. Therefore checking once during the parse is
  // sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error.
      // If a block literal has been passed (BlockExpr) then we can point
      // straight to the offending argument, else we just point to the variable
      // reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  // OpenCL device can support extension but not the feature as extension
  // requires subgroup independent forward progress, but subgroup independent
  // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
      !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
                                        S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee()
        << "cl_khr_subgroups or __opencl_c_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
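/// Illustrative only (not from the original source): for a block taking two
/// 'local void*' parameters, a well-formed call passes one size per pointer,
/// e.g.
///   enqueue_kernel(Q, Flags, NDR,
///                  ^(local void *A, local void *B){ /* ... */ },
///                  SizeA, SizeB);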
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() >
        0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases was detected; give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Checks that the first argument is a pipe and that its access qualifier is
/// compatible with the builtin being called. Returns true on error.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
1622 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be 1623 // read_only and write_only, and assumed to be read_only if no qualifier is 1624 // specified. 1625 switch (Call->getDirectCallee()->getBuiltinID()) { 1626 case Builtin::BIread_pipe: 1627 case Builtin::BIreserve_read_pipe: 1628 case Builtin::BIcommit_read_pipe: 1629 case Builtin::BIwork_group_reserve_read_pipe: 1630 case Builtin::BIsub_group_reserve_read_pipe: 1631 case Builtin::BIwork_group_commit_read_pipe: 1632 case Builtin::BIsub_group_commit_read_pipe: 1633 if (!(!AccessQual || AccessQual->isReadOnly())) { 1634 S.Diag(Arg0->getBeginLoc(), 1635 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1636 << "read_only" << Arg0->getSourceRange(); 1637 return true; 1638 } 1639 break; 1640 case Builtin::BIwrite_pipe: 1641 case Builtin::BIreserve_write_pipe: 1642 case Builtin::BIcommit_write_pipe: 1643 case Builtin::BIwork_group_reserve_write_pipe: 1644 case Builtin::BIsub_group_reserve_write_pipe: 1645 case Builtin::BIwork_group_commit_write_pipe: 1646 case Builtin::BIsub_group_commit_write_pipe: 1647 if (!(AccessQual && AccessQual->isWriteOnly())) { 1648 S.Diag(Arg0->getBeginLoc(), 1649 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1650 << "write_only" << Arg0->getSourceRange(); 1651 return true; 1652 } 1653 break; 1654 default: 1655 break; 1656 } 1657 return false; 1658 } 1659 1660 /// Returns true if pipe element type is different from the pointer. 1661 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) { 1662 const Expr *Arg0 = Call->getArg(0); 1663 const Expr *ArgIdx = Call->getArg(Idx); 1664 const PipeType *PipeTy = cast<PipeType>(Arg0->getType()); 1665 const QualType EltTy = PipeTy->getElementType(); 1666 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>(); 1667 // The Idx argument should be a pointer and the type of the pointer and 1668 // the type of pipe element should also be the same. 1669 if (!ArgTy || 1670 !S.Context.hasSameType( 1671 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) { 1672 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1673 << Call->getDirectCallee() << S.Context.getPointerType(EltTy) 1674 << ArgIdx->getType() << ArgIdx->getSourceRange(); 1675 return true; 1676 } 1677 return false; 1678 } 1679 1680 // Performs semantic analysis for the read/write_pipe call. 1681 // \param S Reference to the semantic analyzer. 1682 // \param Call A pointer to the builtin call. 1683 // \return True if a semantic error has been found, false otherwise. 1684 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) { 1685 // OpenCL v2.0 s6.13.16.2 - The built-in read/write 1686 // functions have two forms. 1687 switch (Call->getNumArgs()) { 1688 case 2: 1689 if (checkOpenCLPipeArg(S, Call)) 1690 return true; 1691 // The call with 2 arguments should be 1692 // read/write_pipe(pipe T, T*). 1693 // Check packet type T. 1694 if (checkOpenCLPipePacketType(S, Call, 1)) 1695 return true; 1696 break; 1697 1698 case 4: { 1699 if (checkOpenCLPipeArg(S, Call)) 1700 return true; 1701 // The call with 4 arguments should be 1702 // read/write_pipe(pipe T, reserve_id_t, uint, T*). 1703 // Check reserve_id_t. 1704 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1705 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1706 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1707 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1708 return true; 1709 } 1710 1711 // Check the index. 
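    // For illustration (assumed usage, placeholder names): in the 4-argument
    // form
    //   read_pipe(p, rid, idx, &val);
    // 'idx' is the packet index within the reservation and must be an integer.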
1712     const Expr *Arg2 = Call->getArg(2);
1713     if (!Arg2->getType()->isIntegerType() &&
1714         !Arg2->getType()->isUnsignedIntegerType()) {
1715       S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1716           << Call->getDirectCallee() << S.Context.UnsignedIntTy
1717           << Arg2->getType() << Arg2->getSourceRange();
1718       return true;
1719     }
1720 
1721     // Check packet type T.
1722     if (checkOpenCLPipePacketType(S, Call, 3))
1723       return true;
1724   } break;
1725   default:
1726     S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
1727         << Call->getDirectCallee() << Call->getSourceRange();
1728     return true;
1729   }
1730 
1731   return false;
1732 }
1733 
1734 // Performs semantic analysis on the (optionally work_group_/sub_group_
1735 // prefixed) reserve_read_pipe and reserve_write_pipe builtin calls.
1736 // \param S Reference to the semantic analyzer.
1737 // \param Call The call to the builtin function to be analyzed.
1738 // \return True if a semantic error was found, false otherwise.
1739 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
1740   if (checkArgCount(S, Call, 2))
1741     return true;
1742 
1743   if (checkOpenCLPipeArg(S, Call))
1744     return true;
1745 
1746   // Check the reserve size.
1747   if (!Call->getArg(1)->getType()->isIntegerType() &&
1748       !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
1749     S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1750         << Call->getDirectCallee() << S.Context.UnsignedIntTy
1751         << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1752     return true;
1753   }
1754 
1755   // Since the return type of the reserve_read/write_pipe built-in functions
1756   // is reserve_id_t, which is not defined in the builtin def file, we use int
1757   // as the return type and need to override it here.
1758   Call->setType(S.Context.OCLReserveIDTy);
1759 
1760   return false;
1761 }
1762 
1763 // Performs semantic analysis on the (optionally work_group_/sub_group_
1764 // prefixed) commit_read_pipe and commit_write_pipe builtin calls.
1765 // \param S Reference to the semantic analyzer.
1766 // \param Call The call to the builtin function to be analyzed.
1767 // \return True if a semantic error was found, false otherwise.
1768 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
1769   if (checkArgCount(S, Call, 2))
1770     return true;
1771 
1772   if (checkOpenCLPipeArg(S, Call))
1773     return true;
1774 
1775   // Check reserve_id_t.
1776   if (!Call->getArg(1)->getType()->isReserveIDT()) {
1777     S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1778         << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1779         << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1780     return true;
1781   }
1782 
1783   return false;
1784 }
1785 
1786 // Performs semantic analysis on calls to the built-in pipe query
1787 // functions (get_pipe_num_packets and get_pipe_max_packets).
1788 // \param S Reference to the semantic analyzer.
1789 // \param Call The call to the builtin function to be analyzed.
1790 // \return True if a semantic error was found, false otherwise.
1791 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
1792   if (checkArgCount(S, Call, 1))
1793     return true;
1794 
1795   if (!Call->getArg(0)->getType()->isPipeType()) {
1796     S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1797         << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1798     return true;
1799   }
1800 
1801   return false;
1802 }
1803 
1804 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
1805 // Performs semantic analysis for the to_global/local/private call.
1806 // \param S Reference to the semantic analyzer.
1807 // \param BuiltinID ID of the builtin function. 1808 // \param Call A pointer to the builtin call. 1809 // \return True if a semantic error has been found, false otherwise. 1810 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, 1811 CallExpr *Call) { 1812 if (checkArgCount(S, Call, 1)) 1813 return true; 1814 1815 auto RT = Call->getArg(0)->getType(); 1816 if (!RT->isPointerType() || RT->getPointeeType() 1817 .getAddressSpace() == LangAS::opencl_constant) { 1818 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg) 1819 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); 1820 return true; 1821 } 1822 1823 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) { 1824 S.Diag(Call->getArg(0)->getBeginLoc(), 1825 diag::warn_opencl_generic_address_space_arg) 1826 << Call->getDirectCallee()->getNameInfo().getAsString() 1827 << Call->getArg(0)->getSourceRange(); 1828 } 1829 1830 RT = RT->getPointeeType(); 1831 auto Qual = RT.getQualifiers(); 1832 switch (BuiltinID) { 1833 case Builtin::BIto_global: 1834 Qual.setAddressSpace(LangAS::opencl_global); 1835 break; 1836 case Builtin::BIto_local: 1837 Qual.setAddressSpace(LangAS::opencl_local); 1838 break; 1839 case Builtin::BIto_private: 1840 Qual.setAddressSpace(LangAS::opencl_private); 1841 break; 1842 default: 1843 llvm_unreachable("Invalid builtin function"); 1844 } 1845 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( 1846 RT.getUnqualifiedType(), Qual))); 1847 1848 return false; 1849 } 1850 1851 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) { 1852 if (checkArgCount(S, TheCall, 1)) 1853 return ExprError(); 1854 1855 // Compute __builtin_launder's parameter type from the argument. 1856 // The parameter type is: 1857 // * The type of the argument if it's not an array or function type, 1858 // Otherwise, 1859 // * The decayed argument type. 1860 QualType ParamTy = [&]() { 1861 QualType ArgTy = TheCall->getArg(0)->getType(); 1862 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe()) 1863 return S.Context.getPointerType(Ty->getElementType()); 1864 if (ArgTy->isFunctionType()) { 1865 return S.Context.getPointerType(ArgTy); 1866 } 1867 return ArgTy; 1868 }(); 1869 1870 TheCall->setType(ParamTy); 1871 1872 auto DiagSelect = [&]() -> llvm::Optional<unsigned> { 1873 if (!ParamTy->isPointerType()) 1874 return 0; 1875 if (ParamTy->isFunctionPointerType()) 1876 return 1; 1877 if (ParamTy->isVoidPointerType()) 1878 return 2; 1879 return llvm::Optional<unsigned>{}; 1880 }(); 1881 if (DiagSelect) { 1882 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg) 1883 << DiagSelect.value() << TheCall->getSourceRange(); 1884 return ExprError(); 1885 } 1886 1887 // We either have an incomplete class type, or we have a class template 1888 // whose instantiation has not been forced. 
Example: 1889 // 1890 // template <class T> struct Foo { T value; }; 1891 // Foo<int> *p = nullptr; 1892 // auto *d = __builtin_launder(p); 1893 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(), 1894 diag::err_incomplete_type)) 1895 return ExprError(); 1896 1897 assert(ParamTy->getPointeeType()->isObjectType() && 1898 "Unhandled non-object pointer case"); 1899 1900 InitializedEntity Entity = 1901 InitializedEntity::InitializeParameter(S.Context, ParamTy, false); 1902 ExprResult Arg = 1903 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0)); 1904 if (Arg.isInvalid()) 1905 return ExprError(); 1906 TheCall->setArg(0, Arg.get()); 1907 1908 return TheCall; 1909 } 1910 1911 // Emit an error and return true if the current object format type is in the 1912 // list of unsupported types. 1913 static bool CheckBuiltinTargetNotInUnsupported( 1914 Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1915 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) { 1916 llvm::Triple::ObjectFormatType CurObjFormat = 1917 S.getASTContext().getTargetInfo().getTriple().getObjectFormat(); 1918 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) { 1919 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1920 << TheCall->getSourceRange(); 1921 return true; 1922 } 1923 return false; 1924 } 1925 1926 // Emit an error and return true if the current architecture is not in the list 1927 // of supported architectures. 1928 static bool 1929 CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1930 ArrayRef<llvm::Triple::ArchType> SupportedArchs) { 1931 llvm::Triple::ArchType CurArch = 1932 S.getASTContext().getTargetInfo().getTriple().getArch(); 1933 if (llvm::is_contained(SupportedArchs, CurArch)) 1934 return false; 1935 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1936 << TheCall->getSourceRange(); 1937 return true; 1938 } 1939 1940 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, 1941 SourceLocation CallSiteLoc); 1942 1943 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 1944 CallExpr *TheCall) { 1945 switch (TI.getTriple().getArch()) { 1946 default: 1947 // Some builtins don't require additional checking, so just consider these 1948 // acceptable. 
1949 return false; 1950 case llvm::Triple::arm: 1951 case llvm::Triple::armeb: 1952 case llvm::Triple::thumb: 1953 case llvm::Triple::thumbeb: 1954 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 1955 case llvm::Triple::aarch64: 1956 case llvm::Triple::aarch64_32: 1957 case llvm::Triple::aarch64_be: 1958 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 1959 case llvm::Triple::bpfeb: 1960 case llvm::Triple::bpfel: 1961 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 1962 case llvm::Triple::hexagon: 1963 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 1964 case llvm::Triple::mips: 1965 case llvm::Triple::mipsel: 1966 case llvm::Triple::mips64: 1967 case llvm::Triple::mips64el: 1968 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 1969 case llvm::Triple::systemz: 1970 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 1971 case llvm::Triple::x86: 1972 case llvm::Triple::x86_64: 1973 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 1974 case llvm::Triple::ppc: 1975 case llvm::Triple::ppcle: 1976 case llvm::Triple::ppc64: 1977 case llvm::Triple::ppc64le: 1978 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 1979 case llvm::Triple::amdgcn: 1980 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 1981 case llvm::Triple::riscv32: 1982 case llvm::Triple::riscv64: 1983 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall); 1984 } 1985 } 1986 1987 ExprResult 1988 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, 1989 CallExpr *TheCall) { 1990 ExprResult TheCallResult(TheCall); 1991 1992 // Find out if any arguments are required to be integer constant expressions. 1993 unsigned ICEArguments = 0; 1994 ASTContext::GetBuiltinTypeError Error; 1995 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); 1996 if (Error != ASTContext::GE_None) 1997 ICEArguments = 0; // Don't diagnose previously diagnosed errors. 1998 1999 // If any arguments are required to be ICE's, check and diagnose. 2000 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { 2001 // Skip arguments not required to be ICE's. 2002 if ((ICEArguments & (1 << ArgNo)) == 0) continue; 2003 2004 llvm::APSInt Result; 2005 // If we don't have enough arguments, continue so we can issue better 2006 // diagnostic in checkArgCount(...) 2007 if (ArgNo < TheCall->getNumArgs() && 2008 SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 2009 return true; 2010 ICEArguments &= ~(1 << ArgNo); 2011 } 2012 2013 switch (BuiltinID) { 2014 case Builtin::BI__builtin___CFStringMakeConstantString: 2015 // CFStringMakeConstantString is currently not implemented for GOFF (i.e., 2016 // on z/OS) and for XCOFF (i.e., on AIX). 
Emit unsupported 2017 if (CheckBuiltinTargetNotInUnsupported( 2018 *this, BuiltinID, TheCall, 2019 {llvm::Triple::GOFF, llvm::Triple::XCOFF})) 2020 return ExprError(); 2021 assert(TheCall->getNumArgs() == 1 && 2022 "Wrong # arguments to builtin CFStringMakeConstantString"); 2023 if (CheckObjCString(TheCall->getArg(0))) 2024 return ExprError(); 2025 break; 2026 case Builtin::BI__builtin_ms_va_start: 2027 case Builtin::BI__builtin_stdarg_start: 2028 case Builtin::BI__builtin_va_start: 2029 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2030 return ExprError(); 2031 break; 2032 case Builtin::BI__va_start: { 2033 switch (Context.getTargetInfo().getTriple().getArch()) { 2034 case llvm::Triple::aarch64: 2035 case llvm::Triple::arm: 2036 case llvm::Triple::thumb: 2037 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 2038 return ExprError(); 2039 break; 2040 default: 2041 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 2042 return ExprError(); 2043 break; 2044 } 2045 break; 2046 } 2047 2048 // The acquire, release, and no fence variants are ARM and AArch64 only. 2049 case Builtin::BI_interlockedbittestandset_acq: 2050 case Builtin::BI_interlockedbittestandset_rel: 2051 case Builtin::BI_interlockedbittestandset_nf: 2052 case Builtin::BI_interlockedbittestandreset_acq: 2053 case Builtin::BI_interlockedbittestandreset_rel: 2054 case Builtin::BI_interlockedbittestandreset_nf: 2055 if (CheckBuiltinTargetInSupported( 2056 *this, BuiltinID, TheCall, 2057 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 2058 return ExprError(); 2059 break; 2060 2061 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 2062 case Builtin::BI_bittest64: 2063 case Builtin::BI_bittestandcomplement64: 2064 case Builtin::BI_bittestandreset64: 2065 case Builtin::BI_bittestandset64: 2066 case Builtin::BI_interlockedbittestandreset64: 2067 case Builtin::BI_interlockedbittestandset64: 2068 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 2069 {llvm::Triple::x86_64, llvm::Triple::arm, 2070 llvm::Triple::thumb, 2071 llvm::Triple::aarch64})) 2072 return ExprError(); 2073 break; 2074 2075 case Builtin::BI__builtin_isgreater: 2076 case Builtin::BI__builtin_isgreaterequal: 2077 case Builtin::BI__builtin_isless: 2078 case Builtin::BI__builtin_islessequal: 2079 case Builtin::BI__builtin_islessgreater: 2080 case Builtin::BI__builtin_isunordered: 2081 if (SemaBuiltinUnorderedCompare(TheCall)) 2082 return ExprError(); 2083 break; 2084 case Builtin::BI__builtin_fpclassify: 2085 if (SemaBuiltinFPClassification(TheCall, 6)) 2086 return ExprError(); 2087 break; 2088 case Builtin::BI__builtin_isfinite: 2089 case Builtin::BI__builtin_isinf: 2090 case Builtin::BI__builtin_isinf_sign: 2091 case Builtin::BI__builtin_isnan: 2092 case Builtin::BI__builtin_isnormal: 2093 case Builtin::BI__builtin_signbit: 2094 case Builtin::BI__builtin_signbitf: 2095 case Builtin::BI__builtin_signbitl: 2096 if (SemaBuiltinFPClassification(TheCall, 1)) 2097 return ExprError(); 2098 break; 2099 case Builtin::BI__builtin_shufflevector: 2100 return SemaBuiltinShuffleVector(TheCall); 2101 // TheCall will be freed by the smart pointer here, but that's fine, since 2102 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 
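  // Illustrative usage of the builtin checked next (placeholder names):
  //   __builtin_prefetch(ptr, /*rw=*/0, /*locality=*/3);
  // SemaBuiltinPrefetch verifies that the optional trailing arguments are
  // constants in the supported range.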
2103 case Builtin::BI__builtin_prefetch: 2104 if (SemaBuiltinPrefetch(TheCall)) 2105 return ExprError(); 2106 break; 2107 case Builtin::BI__builtin_alloca_with_align: 2108 case Builtin::BI__builtin_alloca_with_align_uninitialized: 2109 if (SemaBuiltinAllocaWithAlign(TheCall)) 2110 return ExprError(); 2111 LLVM_FALLTHROUGH; 2112 case Builtin::BI__builtin_alloca: 2113 case Builtin::BI__builtin_alloca_uninitialized: 2114 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 2115 << TheCall->getDirectCallee(); 2116 break; 2117 case Builtin::BI__arithmetic_fence: 2118 if (SemaBuiltinArithmeticFence(TheCall)) 2119 return ExprError(); 2120 break; 2121 case Builtin::BI__assume: 2122 case Builtin::BI__builtin_assume: 2123 if (SemaBuiltinAssume(TheCall)) 2124 return ExprError(); 2125 break; 2126 case Builtin::BI__builtin_assume_aligned: 2127 if (SemaBuiltinAssumeAligned(TheCall)) 2128 return ExprError(); 2129 break; 2130 case Builtin::BI__builtin_dynamic_object_size: 2131 case Builtin::BI__builtin_object_size: 2132 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 2133 return ExprError(); 2134 break; 2135 case Builtin::BI__builtin_longjmp: 2136 if (SemaBuiltinLongjmp(TheCall)) 2137 return ExprError(); 2138 break; 2139 case Builtin::BI__builtin_setjmp: 2140 if (SemaBuiltinSetjmp(TheCall)) 2141 return ExprError(); 2142 break; 2143 case Builtin::BI__builtin_classify_type: 2144 if (checkArgCount(*this, TheCall, 1)) return true; 2145 TheCall->setType(Context.IntTy); 2146 break; 2147 case Builtin::BI__builtin_complex: 2148 if (SemaBuiltinComplex(TheCall)) 2149 return ExprError(); 2150 break; 2151 case Builtin::BI__builtin_constant_p: { 2152 if (checkArgCount(*this, TheCall, 1)) return true; 2153 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 2154 if (Arg.isInvalid()) return true; 2155 TheCall->setArg(0, Arg.get()); 2156 TheCall->setType(Context.IntTy); 2157 break; 2158 } 2159 case Builtin::BI__builtin_launder: 2160 return SemaBuiltinLaunder(*this, TheCall); 2161 case Builtin::BI__sync_fetch_and_add: 2162 case Builtin::BI__sync_fetch_and_add_1: 2163 case Builtin::BI__sync_fetch_and_add_2: 2164 case Builtin::BI__sync_fetch_and_add_4: 2165 case Builtin::BI__sync_fetch_and_add_8: 2166 case Builtin::BI__sync_fetch_and_add_16: 2167 case Builtin::BI__sync_fetch_and_sub: 2168 case Builtin::BI__sync_fetch_and_sub_1: 2169 case Builtin::BI__sync_fetch_and_sub_2: 2170 case Builtin::BI__sync_fetch_and_sub_4: 2171 case Builtin::BI__sync_fetch_and_sub_8: 2172 case Builtin::BI__sync_fetch_and_sub_16: 2173 case Builtin::BI__sync_fetch_and_or: 2174 case Builtin::BI__sync_fetch_and_or_1: 2175 case Builtin::BI__sync_fetch_and_or_2: 2176 case Builtin::BI__sync_fetch_and_or_4: 2177 case Builtin::BI__sync_fetch_and_or_8: 2178 case Builtin::BI__sync_fetch_and_or_16: 2179 case Builtin::BI__sync_fetch_and_and: 2180 case Builtin::BI__sync_fetch_and_and_1: 2181 case Builtin::BI__sync_fetch_and_and_2: 2182 case Builtin::BI__sync_fetch_and_and_4: 2183 case Builtin::BI__sync_fetch_and_and_8: 2184 case Builtin::BI__sync_fetch_and_and_16: 2185 case Builtin::BI__sync_fetch_and_xor: 2186 case Builtin::BI__sync_fetch_and_xor_1: 2187 case Builtin::BI__sync_fetch_and_xor_2: 2188 case Builtin::BI__sync_fetch_and_xor_4: 2189 case Builtin::BI__sync_fetch_and_xor_8: 2190 case Builtin::BI__sync_fetch_and_xor_16: 2191 case Builtin::BI__sync_fetch_and_nand: 2192 case Builtin::BI__sync_fetch_and_nand_1: 2193 case Builtin::BI__sync_fetch_and_nand_2: 2194 case Builtin::BI__sync_fetch_and_nand_4: 2195 case 
Builtin::BI__sync_fetch_and_nand_8: 2196 case Builtin::BI__sync_fetch_and_nand_16: 2197 case Builtin::BI__sync_add_and_fetch: 2198 case Builtin::BI__sync_add_and_fetch_1: 2199 case Builtin::BI__sync_add_and_fetch_2: 2200 case Builtin::BI__sync_add_and_fetch_4: 2201 case Builtin::BI__sync_add_and_fetch_8: 2202 case Builtin::BI__sync_add_and_fetch_16: 2203 case Builtin::BI__sync_sub_and_fetch: 2204 case Builtin::BI__sync_sub_and_fetch_1: 2205 case Builtin::BI__sync_sub_and_fetch_2: 2206 case Builtin::BI__sync_sub_and_fetch_4: 2207 case Builtin::BI__sync_sub_and_fetch_8: 2208 case Builtin::BI__sync_sub_and_fetch_16: 2209 case Builtin::BI__sync_and_and_fetch: 2210 case Builtin::BI__sync_and_and_fetch_1: 2211 case Builtin::BI__sync_and_and_fetch_2: 2212 case Builtin::BI__sync_and_and_fetch_4: 2213 case Builtin::BI__sync_and_and_fetch_8: 2214 case Builtin::BI__sync_and_and_fetch_16: 2215 case Builtin::BI__sync_or_and_fetch: 2216 case Builtin::BI__sync_or_and_fetch_1: 2217 case Builtin::BI__sync_or_and_fetch_2: 2218 case Builtin::BI__sync_or_and_fetch_4: 2219 case Builtin::BI__sync_or_and_fetch_8: 2220 case Builtin::BI__sync_or_and_fetch_16: 2221 case Builtin::BI__sync_xor_and_fetch: 2222 case Builtin::BI__sync_xor_and_fetch_1: 2223 case Builtin::BI__sync_xor_and_fetch_2: 2224 case Builtin::BI__sync_xor_and_fetch_4: 2225 case Builtin::BI__sync_xor_and_fetch_8: 2226 case Builtin::BI__sync_xor_and_fetch_16: 2227 case Builtin::BI__sync_nand_and_fetch: 2228 case Builtin::BI__sync_nand_and_fetch_1: 2229 case Builtin::BI__sync_nand_and_fetch_2: 2230 case Builtin::BI__sync_nand_and_fetch_4: 2231 case Builtin::BI__sync_nand_and_fetch_8: 2232 case Builtin::BI__sync_nand_and_fetch_16: 2233 case Builtin::BI__sync_val_compare_and_swap: 2234 case Builtin::BI__sync_val_compare_and_swap_1: 2235 case Builtin::BI__sync_val_compare_and_swap_2: 2236 case Builtin::BI__sync_val_compare_and_swap_4: 2237 case Builtin::BI__sync_val_compare_and_swap_8: 2238 case Builtin::BI__sync_val_compare_and_swap_16: 2239 case Builtin::BI__sync_bool_compare_and_swap: 2240 case Builtin::BI__sync_bool_compare_and_swap_1: 2241 case Builtin::BI__sync_bool_compare_and_swap_2: 2242 case Builtin::BI__sync_bool_compare_and_swap_4: 2243 case Builtin::BI__sync_bool_compare_and_swap_8: 2244 case Builtin::BI__sync_bool_compare_and_swap_16: 2245 case Builtin::BI__sync_lock_test_and_set: 2246 case Builtin::BI__sync_lock_test_and_set_1: 2247 case Builtin::BI__sync_lock_test_and_set_2: 2248 case Builtin::BI__sync_lock_test_and_set_4: 2249 case Builtin::BI__sync_lock_test_and_set_8: 2250 case Builtin::BI__sync_lock_test_and_set_16: 2251 case Builtin::BI__sync_lock_release: 2252 case Builtin::BI__sync_lock_release_1: 2253 case Builtin::BI__sync_lock_release_2: 2254 case Builtin::BI__sync_lock_release_4: 2255 case Builtin::BI__sync_lock_release_8: 2256 case Builtin::BI__sync_lock_release_16: 2257 case Builtin::BI__sync_swap: 2258 case Builtin::BI__sync_swap_1: 2259 case Builtin::BI__sync_swap_2: 2260 case Builtin::BI__sync_swap_4: 2261 case Builtin::BI__sync_swap_8: 2262 case Builtin::BI__sync_swap_16: 2263 return SemaBuiltinAtomicOverloaded(TheCallResult); 2264 case Builtin::BI__sync_synchronize: 2265 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 2266 << TheCall->getCallee()->getSourceRange(); 2267 break; 2268 case Builtin::BI__builtin_nontemporal_load: 2269 case Builtin::BI__builtin_nontemporal_store: 2270 return SemaBuiltinNontemporalOverloaded(TheCallResult); 2271 case Builtin::BI__builtin_memcpy_inline: { 2272 clang::Expr 
*SizeOp = TheCall->getArg(2); 2273 // We warn about copying to or from `nullptr` pointers when `size` is 2274 // greater than 0. When `size` is value dependent we cannot evaluate its 2275 // value so we bail out. 2276 if (SizeOp->isValueDependent()) 2277 break; 2278 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 2279 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2280 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 2281 } 2282 break; 2283 } 2284 case Builtin::BI__builtin_memset_inline: { 2285 clang::Expr *SizeOp = TheCall->getArg(2); 2286 // We warn about filling to `nullptr` pointers when `size` is greater than 2287 // 0. When `size` is value dependent we cannot evaluate its value so we bail 2288 // out. 2289 if (SizeOp->isValueDependent()) 2290 break; 2291 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) 2292 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 2293 break; 2294 } 2295 #define BUILTIN(ID, TYPE, ATTRS) 2296 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 2297 case Builtin::BI##ID: \ 2298 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 2299 #include "clang/Basic/Builtins.def" 2300 case Builtin::BI__annotation: 2301 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 2302 return ExprError(); 2303 break; 2304 case Builtin::BI__builtin_annotation: 2305 if (SemaBuiltinAnnotation(*this, TheCall)) 2306 return ExprError(); 2307 break; 2308 case Builtin::BI__builtin_addressof: 2309 if (SemaBuiltinAddressof(*this, TheCall)) 2310 return ExprError(); 2311 break; 2312 case Builtin::BI__builtin_function_start: 2313 if (SemaBuiltinFunctionStart(*this, TheCall)) 2314 return ExprError(); 2315 break; 2316 case Builtin::BI__builtin_is_aligned: 2317 case Builtin::BI__builtin_align_up: 2318 case Builtin::BI__builtin_align_down: 2319 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 2320 return ExprError(); 2321 break; 2322 case Builtin::BI__builtin_add_overflow: 2323 case Builtin::BI__builtin_sub_overflow: 2324 case Builtin::BI__builtin_mul_overflow: 2325 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 2326 return ExprError(); 2327 break; 2328 case Builtin::BI__builtin_operator_new: 2329 case Builtin::BI__builtin_operator_delete: { 2330 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 2331 ExprResult Res = 2332 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 2333 if (Res.isInvalid()) 2334 CorrectDelayedTyposInExpr(TheCallResult.get()); 2335 return Res; 2336 } 2337 case Builtin::BI__builtin_dump_struct: 2338 return SemaBuiltinDumpStruct(*this, TheCall); 2339 case Builtin::BI__builtin_expect_with_probability: { 2340 // We first want to ensure we are called with 3 arguments 2341 if (checkArgCount(*this, TheCall, 3)) 2342 return ExprError(); 2343 // then check probability is constant float in range [0.0, 1.0] 2344 const Expr *ProbArg = TheCall->getArg(2); 2345 SmallVector<PartialDiagnosticAt, 8> Notes; 2346 Expr::EvalResult Eval; 2347 Eval.Diag = &Notes; 2348 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 2349 !Eval.Val.isFloat()) { 2350 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 2351 << ProbArg->getSourceRange(); 2352 for (const PartialDiagnosticAt &PDiag : Notes) 2353 Diag(PDiag.first, PDiag.second); 2354 return ExprError(); 2355 } 2356 llvm::APFloat Probability = Eval.Val.getFloat(); 2357 bool LoseInfo = false; 2358 Probability.convert(llvm::APFloat::IEEEdouble(), 2359 llvm::RoundingMode::Dynamic, &LoseInfo); 2360 if 
(!(Probability >= llvm::APFloat(0.0) && 2361 Probability <= llvm::APFloat(1.0))) { 2362 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 2363 << ProbArg->getSourceRange(); 2364 return ExprError(); 2365 } 2366 break; 2367 } 2368 case Builtin::BI__builtin_preserve_access_index: 2369 if (SemaBuiltinPreserveAI(*this, TheCall)) 2370 return ExprError(); 2371 break; 2372 case Builtin::BI__builtin_call_with_static_chain: 2373 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 2374 return ExprError(); 2375 break; 2376 case Builtin::BI__exception_code: 2377 case Builtin::BI_exception_code: 2378 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 2379 diag::err_seh___except_block)) 2380 return ExprError(); 2381 break; 2382 case Builtin::BI__exception_info: 2383 case Builtin::BI_exception_info: 2384 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2385 diag::err_seh___except_filter)) 2386 return ExprError(); 2387 break; 2388 case Builtin::BI__GetExceptionInfo: 2389 if (checkArgCount(*this, TheCall, 1)) 2390 return ExprError(); 2391 2392 if (CheckCXXThrowOperand( 2393 TheCall->getBeginLoc(), 2394 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2395 TheCall)) 2396 return ExprError(); 2397 2398 TheCall->setType(Context.VoidPtrTy); 2399 break; 2400 case Builtin::BIaddressof: 2401 case Builtin::BI__addressof: 2402 case Builtin::BIforward: 2403 case Builtin::BImove: 2404 case Builtin::BImove_if_noexcept: 2405 case Builtin::BIas_const: { 2406 // These are all expected to be of the form 2407 // T &/&&/* f(U &/&&) 2408 // where T and U only differ in qualification. 2409 if (checkArgCount(*this, TheCall, 1)) 2410 return ExprError(); 2411 QualType Param = FDecl->getParamDecl(0)->getType(); 2412 QualType Result = FDecl->getReturnType(); 2413 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof || 2414 BuiltinID == Builtin::BI__addressof; 2415 if (!(Param->isReferenceType() && 2416 (ReturnsPointer ? Result->isAnyPointerType() 2417 : Result->isReferenceType()) && 2418 Context.hasSameUnqualifiedType(Param->getPointeeType(), 2419 Result->getPointeeType()))) { 2420 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported) 2421 << FDecl; 2422 return ExprError(); 2423 } 2424 break; 2425 } 2426 // OpenCL v2.0, s6.13.16 - Pipe functions 2427 case Builtin::BIread_pipe: 2428 case Builtin::BIwrite_pipe: 2429 // Since those two functions are declared with var args, we need a semantic 2430 // check for the argument. 
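    // For example, both accepted forms (illustrative names):
    //   read_pipe(p, &val);            // pipe + packet pointer
    //   read_pipe(p, rid, idx, &val);  // pipe + reserve_id_t + index + pointer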
2431 if (SemaBuiltinRWPipe(*this, TheCall)) 2432 return ExprError(); 2433 break; 2434 case Builtin::BIreserve_read_pipe: 2435 case Builtin::BIreserve_write_pipe: 2436 case Builtin::BIwork_group_reserve_read_pipe: 2437 case Builtin::BIwork_group_reserve_write_pipe: 2438 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2439 return ExprError(); 2440 break; 2441 case Builtin::BIsub_group_reserve_read_pipe: 2442 case Builtin::BIsub_group_reserve_write_pipe: 2443 if (checkOpenCLSubgroupExt(*this, TheCall) || 2444 SemaBuiltinReserveRWPipe(*this, TheCall)) 2445 return ExprError(); 2446 break; 2447 case Builtin::BIcommit_read_pipe: 2448 case Builtin::BIcommit_write_pipe: 2449 case Builtin::BIwork_group_commit_read_pipe: 2450 case Builtin::BIwork_group_commit_write_pipe: 2451 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2452 return ExprError(); 2453 break; 2454 case Builtin::BIsub_group_commit_read_pipe: 2455 case Builtin::BIsub_group_commit_write_pipe: 2456 if (checkOpenCLSubgroupExt(*this, TheCall) || 2457 SemaBuiltinCommitRWPipe(*this, TheCall)) 2458 return ExprError(); 2459 break; 2460 case Builtin::BIget_pipe_num_packets: 2461 case Builtin::BIget_pipe_max_packets: 2462 if (SemaBuiltinPipePackets(*this, TheCall)) 2463 return ExprError(); 2464 break; 2465 case Builtin::BIto_global: 2466 case Builtin::BIto_local: 2467 case Builtin::BIto_private: 2468 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2469 return ExprError(); 2470 break; 2471 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 2472 case Builtin::BIenqueue_kernel: 2473 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 2474 return ExprError(); 2475 break; 2476 case Builtin::BIget_kernel_work_group_size: 2477 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 2478 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 2479 return ExprError(); 2480 break; 2481 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 2482 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 2483 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 2484 return ExprError(); 2485 break; 2486 case Builtin::BI__builtin_os_log_format: 2487 Cleanup.setExprNeedsCleanups(true); 2488 LLVM_FALLTHROUGH; 2489 case Builtin::BI__builtin_os_log_format_buffer_size: 2490 if (SemaBuiltinOSLogFormat(TheCall)) 2491 return ExprError(); 2492 break; 2493 case Builtin::BI__builtin_frame_address: 2494 case Builtin::BI__builtin_return_address: { 2495 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 2496 return ExprError(); 2497 2498 // -Wframe-address warning if non-zero passed to builtin 2499 // return/frame address. 2500 Expr::EvalResult Result; 2501 if (!TheCall->getArg(0)->isValueDependent() && 2502 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 2503 Result.Val.getInt() != 0) 2504 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 2505 << ((BuiltinID == Builtin::BI__builtin_return_address) 2506 ? "__builtin_return_address" 2507 : "__builtin_frame_address") 2508 << TheCall->getSourceRange(); 2509 break; 2510 } 2511 2512 // __builtin_elementwise_abs restricts the element type to signed integers or 2513 // floating point types only. 
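  // For instance (illustrative types), __builtin_elementwise_abs is accepted
  // on 'int4' or 'float4' operands but rejected on a vector of 'unsigned int'.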
2514 case Builtin::BI__builtin_elementwise_abs: { 2515 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2516 return ExprError(); 2517 2518 QualType ArgTy = TheCall->getArg(0)->getType(); 2519 QualType EltTy = ArgTy; 2520 2521 if (auto *VecTy = EltTy->getAs<VectorType>()) 2522 EltTy = VecTy->getElementType(); 2523 if (EltTy->isUnsignedIntegerType()) { 2524 Diag(TheCall->getArg(0)->getBeginLoc(), 2525 diag::err_builtin_invalid_arg_type) 2526 << 1 << /* signed integer or float ty*/ 3 << ArgTy; 2527 return ExprError(); 2528 } 2529 break; 2530 } 2531 2532 // These builtins restrict the element type to floating point 2533 // types only. 2534 case Builtin::BI__builtin_elementwise_ceil: 2535 case Builtin::BI__builtin_elementwise_floor: 2536 case Builtin::BI__builtin_elementwise_roundeven: 2537 case Builtin::BI__builtin_elementwise_trunc: { 2538 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2539 return ExprError(); 2540 2541 QualType ArgTy = TheCall->getArg(0)->getType(); 2542 QualType EltTy = ArgTy; 2543 2544 if (auto *VecTy = EltTy->getAs<VectorType>()) 2545 EltTy = VecTy->getElementType(); 2546 if (!EltTy->isFloatingType()) { 2547 Diag(TheCall->getArg(0)->getBeginLoc(), 2548 diag::err_builtin_invalid_arg_type) 2549 << 1 << /* float ty*/ 5 << ArgTy; 2550 2551 return ExprError(); 2552 } 2553 break; 2554 } 2555 2556 // These builtins restrict the element type to integer 2557 // types only. 2558 case Builtin::BI__builtin_elementwise_add_sat: 2559 case Builtin::BI__builtin_elementwise_sub_sat: { 2560 if (SemaBuiltinElementwiseMath(TheCall)) 2561 return ExprError(); 2562 2563 const Expr *Arg = TheCall->getArg(0); 2564 QualType ArgTy = Arg->getType(); 2565 QualType EltTy = ArgTy; 2566 2567 if (auto *VecTy = EltTy->getAs<VectorType>()) 2568 EltTy = VecTy->getElementType(); 2569 2570 if (!EltTy->isIntegerType()) { 2571 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2572 << 1 << /* integer ty */ 6 << ArgTy; 2573 return ExprError(); 2574 } 2575 break; 2576 } 2577 2578 case Builtin::BI__builtin_elementwise_min: 2579 case Builtin::BI__builtin_elementwise_max: 2580 if (SemaBuiltinElementwiseMath(TheCall)) 2581 return ExprError(); 2582 break; 2583 case Builtin::BI__builtin_reduce_max: 2584 case Builtin::BI__builtin_reduce_min: { 2585 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2586 return ExprError(); 2587 2588 const Expr *Arg = TheCall->getArg(0); 2589 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2590 if (!TyA) { 2591 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2592 << 1 << /* vector ty*/ 4 << Arg->getType(); 2593 return ExprError(); 2594 } 2595 2596 TheCall->setType(TyA->getElementType()); 2597 break; 2598 } 2599 2600 // These builtins support vectors of integers only. 2601 // TODO: ADD/MUL should support floating-point types. 
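  // e.g. (illustrative) __builtin_reduce_add((int4){1, 2, 3, 4}) is accepted
  // and yields an 'int', while a 'float4' operand is diagnosed below.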
2602 case Builtin::BI__builtin_reduce_add: 2603 case Builtin::BI__builtin_reduce_mul: 2604 case Builtin::BI__builtin_reduce_xor: 2605 case Builtin::BI__builtin_reduce_or: 2606 case Builtin::BI__builtin_reduce_and: { 2607 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2608 return ExprError(); 2609 2610 const Expr *Arg = TheCall->getArg(0); 2611 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2612 if (!TyA || !TyA->getElementType()->isIntegerType()) { 2613 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2614 << 1 << /* vector of integers */ 6 << Arg->getType(); 2615 return ExprError(); 2616 } 2617 TheCall->setType(TyA->getElementType()); 2618 break; 2619 } 2620 2621 case Builtin::BI__builtin_matrix_transpose: 2622 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 2623 2624 case Builtin::BI__builtin_matrix_column_major_load: 2625 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 2626 2627 case Builtin::BI__builtin_matrix_column_major_store: 2628 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 2629 2630 case Builtin::BI__builtin_get_device_side_mangled_name: { 2631 auto Check = [](CallExpr *TheCall) { 2632 if (TheCall->getNumArgs() != 1) 2633 return false; 2634 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 2635 if (!DRE) 2636 return false; 2637 auto *D = DRE->getDecl(); 2638 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 2639 return false; 2640 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 2641 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2642 }; 2643 if (!Check(TheCall)) { 2644 Diag(TheCall->getBeginLoc(), 2645 diag::err_hip_invalid_args_builtin_mangled_name); 2646 return ExprError(); 2647 } 2648 } 2649 } 2650 2651 // Since the target specific builtins for each arch overlap, only check those 2652 // of the arch we are compiling for. 2653 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 2654 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 2655 assert(Context.getAuxTargetInfo() && 2656 "Aux Target Builtin, but not an aux target?"); 2657 2658 if (CheckTSBuiltinFunctionCall( 2659 *Context.getAuxTargetInfo(), 2660 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2661 return ExprError(); 2662 } else { 2663 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2664 TheCall)) 2665 return ExprError(); 2666 } 2667 } 2668 2669 return TheCallResult; 2670 } 2671 2672 // Get the valid immediate range for the specified NEON type code. 2673 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2674 NeonTypeFlags Type(t); 2675 int IsQuad = ForceQuad ? true : Type.isQuad(); 2676 switch (Type.getEltType()) { 2677 case NeonTypeFlags::Int8: 2678 case NeonTypeFlags::Poly8: 2679 return shift ? 7 : (8 << IsQuad) - 1; 2680 case NeonTypeFlags::Int16: 2681 case NeonTypeFlags::Poly16: 2682 return shift ? 15 : (4 << IsQuad) - 1; 2683 case NeonTypeFlags::Int32: 2684 return shift ? 31 : (2 << IsQuad) - 1; 2685 case NeonTypeFlags::Int64: 2686 case NeonTypeFlags::Poly64: 2687 return shift ? 63 : (1 << IsQuad) - 1; 2688 case NeonTypeFlags::Poly128: 2689 return shift ? 
127 : (1 << IsQuad) - 1; 2690 case NeonTypeFlags::Float16: 2691 assert(!shift && "cannot shift float types!"); 2692 return (4 << IsQuad) - 1; 2693 case NeonTypeFlags::Float32: 2694 assert(!shift && "cannot shift float types!"); 2695 return (2 << IsQuad) - 1; 2696 case NeonTypeFlags::Float64: 2697 assert(!shift && "cannot shift float types!"); 2698 return (1 << IsQuad) - 1; 2699 case NeonTypeFlags::BFloat16: 2700 assert(!shift && "cannot shift float types!"); 2701 return (4 << IsQuad) - 1; 2702 } 2703 llvm_unreachable("Invalid NeonTypeFlag!"); 2704 } 2705 2706 /// getNeonEltType - Return the QualType corresponding to the elements of 2707 /// the vector type specified by the NeonTypeFlags. This is used to check 2708 /// the pointer arguments for Neon load/store intrinsics. 2709 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2710 bool IsPolyUnsigned, bool IsInt64Long) { 2711 switch (Flags.getEltType()) { 2712 case NeonTypeFlags::Int8: 2713 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2714 case NeonTypeFlags::Int16: 2715 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2716 case NeonTypeFlags::Int32: 2717 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2718 case NeonTypeFlags::Int64: 2719 if (IsInt64Long) 2720 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2721 else 2722 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2723 : Context.LongLongTy; 2724 case NeonTypeFlags::Poly8: 2725 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2726 case NeonTypeFlags::Poly16: 2727 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 2728 case NeonTypeFlags::Poly64: 2729 if (IsInt64Long) 2730 return Context.UnsignedLongTy; 2731 else 2732 return Context.UnsignedLongLongTy; 2733 case NeonTypeFlags::Poly128: 2734 break; 2735 case NeonTypeFlags::Float16: 2736 return Context.HalfTy; 2737 case NeonTypeFlags::Float32: 2738 return Context.FloatTy; 2739 case NeonTypeFlags::Float64: 2740 return Context.DoubleTy; 2741 case NeonTypeFlags::BFloat16: 2742 return Context.BFloat16Ty; 2743 } 2744 llvm_unreachable("Invalid NeonTypeFlag!"); 2745 } 2746 2747 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2748 // Range check SVE intrinsics that take immediate values. 2749 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2750 2751 switch (BuiltinID) { 2752 default: 2753 return false; 2754 #define GET_SVE_IMMEDIATE_CHECK 2755 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2756 #undef GET_SVE_IMMEDIATE_CHECK 2757 } 2758 2759 // Perform all the immediate checks for this builtin call. 2760 bool HasError = false; 2761 for (auto &I : ImmChecks) { 2762 int ArgNum, CheckTy, ElementSizeInBits; 2763 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2764 2765 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2766 2767 // Function that checks whether the operand (ArgNum) is an immediate 2768 // that is one of the predefined values. 2769 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2770 int ErrDiag) -> bool { 2771 // We can't check the value of a dependent argument. 2772 Expr *Arg = TheCall->getArg(ArgNum); 2773 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2774 return false; 2775 2776 // Check constant-ness first. 
2777 llvm::APSInt Imm; 2778 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2779 return true; 2780 2781 if (!CheckImm(Imm.getSExtValue())) 2782 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2783 return false; 2784 }; 2785 2786 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2787 case SVETypeFlags::ImmCheck0_31: 2788 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2789 HasError = true; 2790 break; 2791 case SVETypeFlags::ImmCheck0_13: 2792 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2793 HasError = true; 2794 break; 2795 case SVETypeFlags::ImmCheck1_16: 2796 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2797 HasError = true; 2798 break; 2799 case SVETypeFlags::ImmCheck0_7: 2800 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2801 HasError = true; 2802 break; 2803 case SVETypeFlags::ImmCheckExtract: 2804 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2805 (2048 / ElementSizeInBits) - 1)) 2806 HasError = true; 2807 break; 2808 case SVETypeFlags::ImmCheckShiftRight: 2809 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2810 HasError = true; 2811 break; 2812 case SVETypeFlags::ImmCheckShiftRightNarrow: 2813 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2814 ElementSizeInBits / 2)) 2815 HasError = true; 2816 break; 2817 case SVETypeFlags::ImmCheckShiftLeft: 2818 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2819 ElementSizeInBits - 1)) 2820 HasError = true; 2821 break; 2822 case SVETypeFlags::ImmCheckLaneIndex: 2823 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2824 (128 / (1 * ElementSizeInBits)) - 1)) 2825 HasError = true; 2826 break; 2827 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2828 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2829 (128 / (2 * ElementSizeInBits)) - 1)) 2830 HasError = true; 2831 break; 2832 case SVETypeFlags::ImmCheckLaneIndexDot: 2833 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2834 (128 / (4 * ElementSizeInBits)) - 1)) 2835 HasError = true; 2836 break; 2837 case SVETypeFlags::ImmCheckComplexRot90_270: 2838 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2839 diag::err_rotation_argument_to_cadd)) 2840 HasError = true; 2841 break; 2842 case SVETypeFlags::ImmCheckComplexRotAll90: 2843 if (CheckImmediateInSet( 2844 [](int64_t V) { 2845 return V == 0 || V == 90 || V == 180 || V == 270; 2846 }, 2847 diag::err_rotation_argument_to_cmla)) 2848 HasError = true; 2849 break; 2850 case SVETypeFlags::ImmCheck0_1: 2851 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2852 HasError = true; 2853 break; 2854 case SVETypeFlags::ImmCheck0_2: 2855 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2856 HasError = true; 2857 break; 2858 case SVETypeFlags::ImmCheck0_3: 2859 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2860 HasError = true; 2861 break; 2862 } 2863 } 2864 2865 return HasError; 2866 } 2867 2868 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2869 unsigned BuiltinID, CallExpr *TheCall) { 2870 llvm::APSInt Result; 2871 uint64_t mask = 0; 2872 unsigned TV = 0; 2873 int PtrArgNum = -1; 2874 bool HasConstPtr = false; 2875 switch (BuiltinID) { 2876 #define GET_NEON_OVERLOAD_CHECK 2877 #include "clang/Basic/arm_neon.inc" 2878 #include "clang/Basic/arm_fp16.inc" 2879 #undef GET_NEON_OVERLOAD_CHECK 2880 } 2881 2882 // For NEON intrinsics which are overloaded on vector element type, validate 2883 // the immediate which specifies which variant to emit. 
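  // The type-selecting immediate is the trailing argument of the overloaded
  // builtin; e.g. the arm_neon.h wrapper for vld1_s32 passes a NeonTypeFlags
  // constant as its last argument (illustrative; the exact wrappers live in
  // the generated header).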
2884 unsigned ImmArg = TheCall->getNumArgs()-1; 2885 if (mask) { 2886 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2887 return true; 2888 2889 TV = Result.getLimitedValue(64); 2890 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2891 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2892 << TheCall->getArg(ImmArg)->getSourceRange(); 2893 } 2894 2895 if (PtrArgNum >= 0) { 2896 // Check that pointer arguments have the specified type. 2897 Expr *Arg = TheCall->getArg(PtrArgNum); 2898 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2899 Arg = ICE->getSubExpr(); 2900 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2901 QualType RHSTy = RHS.get()->getType(); 2902 2903 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2904 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2905 Arch == llvm::Triple::aarch64_32 || 2906 Arch == llvm::Triple::aarch64_be; 2907 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2908 QualType EltTy = 2909 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2910 if (HasConstPtr) 2911 EltTy = EltTy.withConst(); 2912 QualType LHSTy = Context.getPointerType(EltTy); 2913 AssignConvertType ConvTy; 2914 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2915 if (RHS.isInvalid()) 2916 return true; 2917 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2918 RHS.get(), AA_Assigning)) 2919 return true; 2920 } 2921 2922 // For NEON intrinsics which take an immediate value as part of the 2923 // instruction, range check them here. 2924 unsigned i = 0, l = 0, u = 0; 2925 switch (BuiltinID) { 2926 default: 2927 return false; 2928 #define GET_NEON_IMMEDIATE_CHECK 2929 #include "clang/Basic/arm_neon.inc" 2930 #include "clang/Basic/arm_fp16.inc" 2931 #undef GET_NEON_IMMEDIATE_CHECK 2932 } 2933 2934 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2935 } 2936 2937 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2938 switch (BuiltinID) { 2939 default: 2940 return false; 2941 #include "clang/Basic/arm_mve_builtin_sema.inc" 2942 } 2943 } 2944 2945 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2946 CallExpr *TheCall) { 2947 bool Err = false; 2948 switch (BuiltinID) { 2949 default: 2950 return false; 2951 #include "clang/Basic/arm_cde_builtin_sema.inc" 2952 } 2953 2954 if (Err) 2955 return true; 2956 2957 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2958 } 2959 2960 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2961 const Expr *CoprocArg, bool WantCDE) { 2962 if (isConstantEvaluated()) 2963 return false; 2964 2965 // We can't check the value of a dependent argument. 
2966 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2967 return false; 2968 2969 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2970 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2971 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2972 2973 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2974 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2975 2976 if (IsCDECoproc != WantCDE) 2977 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2978 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2979 2980 return false; 2981 } 2982 2983 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2984 unsigned MaxWidth) { 2985 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2986 BuiltinID == ARM::BI__builtin_arm_ldaex || 2987 BuiltinID == ARM::BI__builtin_arm_strex || 2988 BuiltinID == ARM::BI__builtin_arm_stlex || 2989 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2990 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2991 BuiltinID == AArch64::BI__builtin_arm_strex || 2992 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2993 "unexpected ARM builtin"); 2994 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2995 BuiltinID == ARM::BI__builtin_arm_ldaex || 2996 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2997 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2998 2999 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 3000 3001 // Ensure that we have the proper number of arguments. 3002 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 3003 return true; 3004 3005 // Inspect the pointer argument of the atomic builtin. This should always be 3006 // a pointer type, whose element is an integral scalar or pointer type. 3007 // Because it is a pointer type, we don't have to worry about any implicit 3008 // casts here. 3009 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 3010 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 3011 if (PointerArgRes.isInvalid()) 3012 return true; 3013 PointerArg = PointerArgRes.get(); 3014 3015 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 3016 if (!pointerType) { 3017 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 3018 << PointerArg->getType() << PointerArg->getSourceRange(); 3019 return true; 3020 } 3021 3022 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 3023 // task is to insert the appropriate casts into the AST. First work out just 3024 // what the appropriate type is. 3025 QualType ValType = pointerType->getPointeeType(); 3026 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 3027 if (IsLdrex) 3028 AddrType.addConst(); 3029 3030 // Issue a warning if the cast is dodgy. 3031 CastKind CastNeeded = CK_NoOp; 3032 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 3033 CastNeeded = CK_BitCast; 3034 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 3035 << PointerArg->getType() << Context.getPointerType(AddrType) 3036 << AA_Passing << PointerArg->getSourceRange(); 3037 } 3038 3039 // Finally, do the cast and replace the argument with the corrected version. 3040 AddrType = Context.getPointerType(AddrType); 3041 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 3042 if (PointerArgRes.isInvalid()) 3043 return true; 3044 PointerArg = PointerArgRes.get(); 3045 3046 TheCall->setArg(IsLdrex ? 
0 : 1, PointerArg); 3047 3048 // In general, we allow ints, floats and pointers to be loaded and stored. 3049 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 3050 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 3051 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 3052 << PointerArg->getType() << PointerArg->getSourceRange(); 3053 return true; 3054 } 3055 3056 // But ARM doesn't have instructions to deal with 128-bit versions. 3057 if (Context.getTypeSize(ValType) > MaxWidth) { 3058 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 3059 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 3060 << PointerArg->getType() << PointerArg->getSourceRange(); 3061 return true; 3062 } 3063 3064 switch (ValType.getObjCLifetime()) { 3065 case Qualifiers::OCL_None: 3066 case Qualifiers::OCL_ExplicitNone: 3067 // okay 3068 break; 3069 3070 case Qualifiers::OCL_Weak: 3071 case Qualifiers::OCL_Strong: 3072 case Qualifiers::OCL_Autoreleasing: 3073 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 3074 << ValType << PointerArg->getSourceRange(); 3075 return true; 3076 } 3077 3078 if (IsLdrex) { 3079 TheCall->setType(ValType); 3080 return false; 3081 } 3082 3083 // Initialize the argument to be stored. 3084 ExprResult ValArg = TheCall->getArg(0); 3085 InitializedEntity Entity = InitializedEntity::InitializeParameter( 3086 Context, ValType, /*consume*/ false); 3087 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 3088 if (ValArg.isInvalid()) 3089 return true; 3090 TheCall->setArg(0, ValArg.get()); 3091 3092 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 3093 // but the custom checker bypasses all default analysis. 3094 TheCall->setType(Context.IntTy); 3095 return false; 3096 } 3097 3098 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3099 CallExpr *TheCall) { 3100 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 3101 BuiltinID == ARM::BI__builtin_arm_ldaex || 3102 BuiltinID == ARM::BI__builtin_arm_strex || 3103 BuiltinID == ARM::BI__builtin_arm_stlex) { 3104 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 3105 } 3106 3107 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 3108 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3109 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 3110 } 3111 3112 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 3113 BuiltinID == ARM::BI__builtin_arm_wsr64) 3114 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 3115 3116 if (BuiltinID == ARM::BI__builtin_arm_rsr || 3117 BuiltinID == ARM::BI__builtin_arm_rsrp || 3118 BuiltinID == ARM::BI__builtin_arm_wsr || 3119 BuiltinID == ARM::BI__builtin_arm_wsrp) 3120 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3121 3122 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3123 return true; 3124 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 3125 return true; 3126 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3127 return true; 3128 3129 // For intrinsics which take an immediate value as part of the instruction, 3130 // range check them here. 3131 // FIXME: VFP Intrinsics should error if VFP not present. 
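  // e.g. __builtin_arm_ssat(x, 33) is rejected because the saturation bit
  // position must be a constant in [1, 32] (see the range check below).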
3132 switch (BuiltinID) { 3133 default: return false; 3134 case ARM::BI__builtin_arm_ssat: 3135 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 3136 case ARM::BI__builtin_arm_usat: 3137 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 3138 case ARM::BI__builtin_arm_ssat16: 3139 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 3140 case ARM::BI__builtin_arm_usat16: 3141 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 3142 case ARM::BI__builtin_arm_vcvtr_f: 3143 case ARM::BI__builtin_arm_vcvtr_d: 3144 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3145 case ARM::BI__builtin_arm_dmb: 3146 case ARM::BI__builtin_arm_dsb: 3147 case ARM::BI__builtin_arm_isb: 3148 case ARM::BI__builtin_arm_dbg: 3149 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 3150 case ARM::BI__builtin_arm_cdp: 3151 case ARM::BI__builtin_arm_cdp2: 3152 case ARM::BI__builtin_arm_mcr: 3153 case ARM::BI__builtin_arm_mcr2: 3154 case ARM::BI__builtin_arm_mrc: 3155 case ARM::BI__builtin_arm_mrc2: 3156 case ARM::BI__builtin_arm_mcrr: 3157 case ARM::BI__builtin_arm_mcrr2: 3158 case ARM::BI__builtin_arm_mrrc: 3159 case ARM::BI__builtin_arm_mrrc2: 3160 case ARM::BI__builtin_arm_ldc: 3161 case ARM::BI__builtin_arm_ldcl: 3162 case ARM::BI__builtin_arm_ldc2: 3163 case ARM::BI__builtin_arm_ldc2l: 3164 case ARM::BI__builtin_arm_stc: 3165 case ARM::BI__builtin_arm_stcl: 3166 case ARM::BI__builtin_arm_stc2: 3167 case ARM::BI__builtin_arm_stc2l: 3168 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 3169 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 3170 /*WantCDE*/ false); 3171 } 3172 } 3173 3174 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 3175 unsigned BuiltinID, 3176 CallExpr *TheCall) { 3177 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 3178 BuiltinID == AArch64::BI__builtin_arm_ldaex || 3179 BuiltinID == AArch64::BI__builtin_arm_strex || 3180 BuiltinID == AArch64::BI__builtin_arm_stlex) { 3181 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 3182 } 3183 3184 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 3185 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3186 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 3187 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 3188 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 3189 } 3190 3191 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 3192 BuiltinID == AArch64::BI__builtin_arm_wsr64) 3193 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3194 3195 // Memory Tagging Extensions (MTE) Intrinsics 3196 if (BuiltinID == AArch64::BI__builtin_arm_irg || 3197 BuiltinID == AArch64::BI__builtin_arm_addg || 3198 BuiltinID == AArch64::BI__builtin_arm_gmi || 3199 BuiltinID == AArch64::BI__builtin_arm_ldg || 3200 BuiltinID == AArch64::BI__builtin_arm_stg || 3201 BuiltinID == AArch64::BI__builtin_arm_subp) { 3202 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 3203 } 3204 3205 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 3206 BuiltinID == AArch64::BI__builtin_arm_rsrp || 3207 BuiltinID == AArch64::BI__builtin_arm_wsr || 3208 BuiltinID == AArch64::BI__builtin_arm_wsrp) 3209 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 3210 3211 // Only check the valid encoding range. Any constant in this range would be 3212 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 3213 // an exception for incorrect registers. This matches MSVC behavior. 
3214 if (BuiltinID == AArch64::BI_ReadStatusReg || 3215 BuiltinID == AArch64::BI_WriteStatusReg) 3216 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 3217 3218 if (BuiltinID == AArch64::BI__getReg) 3219 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3220 3221 if (BuiltinID == AArch64::BI__break) 3222 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff); 3223 3224 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3225 return true; 3226 3227 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 3228 return true; 3229 3230 // For intrinsics which take an immediate value as part of the instruction, 3231 // range check them here. 3232 unsigned i = 0, l = 0, u = 0; 3233 switch (BuiltinID) { 3234 default: return false; 3235 case AArch64::BI__builtin_arm_dmb: 3236 case AArch64::BI__builtin_arm_dsb: 3237 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 3238 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 3239 } 3240 3241 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 3242 } 3243 3244 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 3245 if (Arg->getType()->getAsPlaceholderType()) 3246 return false; 3247 3248 // The first argument needs to be a record field access. 3249 // If it is an array element access, we delay decision 3250 // to BPF backend to check whether the access is a 3251 // field access or not. 3252 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 3253 isa<MemberExpr>(Arg->IgnoreParens()) || 3254 isa<ArraySubscriptExpr>(Arg->IgnoreParens())); 3255 } 3256 3257 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 3258 QualType ArgType = Arg->getType(); 3259 if (ArgType->getAsPlaceholderType()) 3260 return false; 3261 3262 // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type 3263 // format: 3264 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 3265 // 2. <type> var; 3266 // __builtin_preserve_type_info(var, flag); 3267 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) && 3268 !isa<UnaryOperator>(Arg->IgnoreParens())) 3269 return false; 3270 3271 // Typedef type. 3272 if (ArgType->getAs<TypedefType>()) 3273 return true; 3274 3275 // Record type or Enum type. 3276 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3277 if (const auto *RT = Ty->getAs<RecordType>()) { 3278 if (!RT->getDecl()->getDeclName().isEmpty()) 3279 return true; 3280 } else if (const auto *ET = Ty->getAs<EnumType>()) { 3281 if (!ET->getDecl()->getDeclName().isEmpty()) 3282 return true; 3283 } 3284 3285 return false; 3286 } 3287 3288 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 3289 QualType ArgType = Arg->getType(); 3290 if (ArgType->getAsPlaceholderType()) 3291 return false; 3292 3293 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 3294 // format: 3295 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 3296 // flag); 3297 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 3298 if (!UO) 3299 return false; 3300 3301 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 3302 if (!CE) 3303 return false; 3304 if (CE->getCastKind() != CK_IntegralToPointer && 3305 CE->getCastKind() != CK_NullToPointer) 3306 return false; 3307 3308 // The integer must be from an EnumConstantDecl. 3309 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 3310 if (!DR) 3311 return false; 3312 3313 const EnumConstantDecl *Enumerator = 3314 dyn_cast<EnumConstantDecl>(DR->getDecl()); 3315 if (!Enumerator) 3316 return false; 3317 3318 // The type must be EnumType. 
3319 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3320 const auto *ET = Ty->getAs<EnumType>(); 3321 if (!ET) 3322 return false; 3323 3324 // The enum value must be supported. 3325 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); 3326 } 3327 3328 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 3329 CallExpr *TheCall) { 3330 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 3331 BuiltinID == BPF::BI__builtin_btf_type_id || 3332 BuiltinID == BPF::BI__builtin_preserve_type_info || 3333 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 3334 "unexpected BPF builtin"); 3335 3336 if (checkArgCount(*this, TheCall, 2)) 3337 return true; 3338 3339 // The second argument needs to be a constant int 3340 Expr *Arg = TheCall->getArg(1); 3341 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 3342 diag::kind kind; 3343 if (!Value) { 3344 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 3345 kind = diag::err_preserve_field_info_not_const; 3346 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 3347 kind = diag::err_btf_type_id_not_const; 3348 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 3349 kind = diag::err_preserve_type_info_not_const; 3350 else 3351 kind = diag::err_preserve_enum_value_not_const; 3352 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 3353 return true; 3354 } 3355 3356 // The first argument 3357 Arg = TheCall->getArg(0); 3358 bool InvalidArg = false; 3359 bool ReturnUnsignedInt = true; 3360 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 3361 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 3362 InvalidArg = true; 3363 kind = diag::err_preserve_field_info_not_field; 3364 } 3365 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 3366 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 3367 InvalidArg = true; 3368 kind = diag::err_preserve_type_info_invalid; 3369 } 3370 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 3371 if (!isValidBPFPreserveEnumValueArg(Arg)) { 3372 InvalidArg = true; 3373 kind = diag::err_preserve_enum_value_invalid; 3374 } 3375 ReturnUnsignedInt = false; 3376 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) { 3377 ReturnUnsignedInt = false; 3378 } 3379 3380 if (InvalidArg) { 3381 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3382 return true; 3383 } 3384 3385 if (ReturnUnsignedInt) 3386 TheCall->setType(Context.UnsignedIntTy); 3387 else 3388 TheCall->setType(Context.UnsignedLongTy); 3389 return false; 3390 } 3391 3392 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3393 struct ArgInfo { 3394 uint8_t OpNum; 3395 bool IsSigned; 3396 uint8_t BitWidth; 3397 uint8_t Align; 3398 }; 3399 struct BuiltinInfo { 3400 unsigned BuiltinID; 3401 ArgInfo Infos[2]; 3402 }; 3403 3404 static BuiltinInfo Infos[] = { 3405 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3406 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3407 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3408 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3409 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3410 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3411 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3412 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3413 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3414 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3415 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3416 3417 { 
Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3418 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3419 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3420 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3421 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3422 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3423 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3424 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3425 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3426 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3427 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3428 3429 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3430 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3431 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3432 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3433 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3434 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3435 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3436 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3437 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3438 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3439 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3440 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3441 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3442 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3443 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3444 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3445 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3446 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3447 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3448 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3449 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3450 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3451 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3452 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3453 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3454 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3455 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3456 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3457 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3458 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3459 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3460 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3461 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3462 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3463 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3464 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3465 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3466 { 
Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3467 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3468 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3469 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3470 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3471 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3472 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3473 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3474 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3475 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3476 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3477 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3478 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3479 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 3480 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3481 {{ 1, false, 6, 0 }} }, 3482 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3483 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3484 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3485 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3486 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3487 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3488 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3489 {{ 1, false, 5, 0 }} }, 3490 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3491 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3492 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3493 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3494 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3495 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3496 { 2, false, 5, 0 }} }, 3497 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 3498 { 2, false, 6, 0 }} }, 3499 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3500 { 3, false, 5, 0 }} }, 3501 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3502 { 3, false, 6, 0 }} }, 3503 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3504 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3505 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3506 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3507 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3508 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3509 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3510 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 3511 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3512 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3513 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3514 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3515 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3516 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3517 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 
5, 0 }} }, 3518 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3519 {{ 2, false, 4, 0 }, 3520 { 3, false, 5, 0 }} }, 3521 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3522 {{ 2, false, 4, 0 }, 3523 { 3, false, 5, 0 }} }, 3524 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3525 {{ 2, false, 4, 0 }, 3526 { 3, false, 5, 0 }} }, 3527 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3528 {{ 2, false, 4, 0 }, 3529 { 3, false, 5, 0 }} }, 3530 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3531 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3532 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3533 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3534 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3535 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3536 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3537 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3538 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3539 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3540 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3541 { 2, false, 5, 0 }} }, 3542 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3543 { 2, false, 6, 0 }} }, 3544 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 3545 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3546 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3547 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3548 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3549 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3550 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3551 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3552 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3553 {{ 1, false, 4, 0 }} }, 3554 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3555 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3556 {{ 1, false, 4, 0 }} }, 3557 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3558 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3559 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3560 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3561 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3562 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3563 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3564 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3565 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3566 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3567 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3568 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3569 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3570 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3571 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3572 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 3573 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3574 { 
Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3575 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3576 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3577 {{ 3, false, 1, 0 }} }, 3578 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3579 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3580 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3581 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3582 {{ 3, false, 1, 0 }} }, 3583 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3584 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3585 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3586 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3587 {{ 3, false, 1, 0 }} }, 3588 }; 3589 3590 // Use a dynamically initialized static to sort the table exactly once on 3591 // first run. 3592 static const bool SortOnce = 3593 (llvm::sort(Infos, 3594 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3595 return LHS.BuiltinID < RHS.BuiltinID; 3596 }), 3597 true); 3598 (void)SortOnce; 3599 3600 const BuiltinInfo *F = llvm::partition_point( 3601 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3602 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3603 return false; 3604 3605 bool Error = false; 3606 3607 for (const ArgInfo &A : F->Infos) { 3608 // Ignore empty ArgInfo elements. 3609 if (A.BitWidth == 0) 3610 continue; 3611 3612 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3613 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1; 3614 if (!A.Align) { 3615 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3616 } else { 3617 unsigned M = 1 << A.Align; 3618 Min *= M; 3619 Max *= M; 3620 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3621 Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 3622 } 3623 } 3624 return Error; 3625 } 3626 3627 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 3628 CallExpr *TheCall) { 3629 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3630 } 3631 3632 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 3633 unsigned BuiltinID, CallExpr *TheCall) { 3634 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 3635 CheckMipsBuiltinArgument(BuiltinID, TheCall); 3636 } 3637 3638 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 3639 CallExpr *TheCall) { 3640 3641 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 3642 BuiltinID <= Mips::BI__builtin_mips_lwx) { 3643 if (!TI.hasFeature("dsp")) 3644 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 3645 } 3646 3647 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 3648 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 3649 if (!TI.hasFeature("dspr2")) 3650 return Diag(TheCall->getBeginLoc(), 3651 diag::err_mips_builtin_requires_dspr2); 3652 } 3653 3654 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 3655 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 3656 if (!TI.hasFeature("msa")) 3657 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 3658 } 3659 3660 return false; 3661 } 3662 3663 // CheckMipsBuiltinArgument - Checks the constant value passed to the 3664 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 3665 // ordering for DSP is unspecified. 
MSA is ordered by the data format used 3666 // by the underlying instruction i.e., df/m, df/n and then by size. 3667 // 3668 // FIXME: The size tests here should instead be tablegen'd along with the 3669 // definitions from include/clang/Basic/BuiltinsMips.def. 3670 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 3671 // be too. 3672 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3673 unsigned i = 0, l = 0, u = 0, m = 0; 3674 switch (BuiltinID) { 3675 default: return false; 3676 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 3677 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 3678 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 3679 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 3680 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 3681 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 3682 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 3683 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the 3684 // df/m field. 3685 // These intrinsics take an unsigned 3 bit immediate. 3686 case Mips::BI__builtin_msa_bclri_b: 3687 case Mips::BI__builtin_msa_bnegi_b: 3688 case Mips::BI__builtin_msa_bseti_b: 3689 case Mips::BI__builtin_msa_sat_s_b: 3690 case Mips::BI__builtin_msa_sat_u_b: 3691 case Mips::BI__builtin_msa_slli_b: 3692 case Mips::BI__builtin_msa_srai_b: 3693 case Mips::BI__builtin_msa_srari_b: 3694 case Mips::BI__builtin_msa_srli_b: 3695 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3696 case Mips::BI__builtin_msa_binsli_b: 3697 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3698 // These intrinsics take an unsigned 4 bit immediate. 3699 case Mips::BI__builtin_msa_bclri_h: 3700 case Mips::BI__builtin_msa_bnegi_h: 3701 case Mips::BI__builtin_msa_bseti_h: 3702 case Mips::BI__builtin_msa_sat_s_h: 3703 case Mips::BI__builtin_msa_sat_u_h: 3704 case Mips::BI__builtin_msa_slli_h: 3705 case Mips::BI__builtin_msa_srai_h: 3706 case Mips::BI__builtin_msa_srari_h: 3707 case Mips::BI__builtin_msa_srli_h: 3708 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3709 case Mips::BI__builtin_msa_binsli_h: 3710 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3711 // These intrinsics take an unsigned 5 bit immediate. 3712 // The first block of intrinsics actually have an unsigned 5 bit field, 3713 // not a df/n field. 
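// (Added, illustrative: cfcmsa/ctcmsa name an MSA control register by a 5-bit index, hence the plain [0, 31] range below rather than one derived from a df/n field.)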
3714 case Mips::BI__builtin_msa_cfcmsa: 3715 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3716 case Mips::BI__builtin_msa_clei_u_b: 3717 case Mips::BI__builtin_msa_clei_u_h: 3718 case Mips::BI__builtin_msa_clei_u_w: 3719 case Mips::BI__builtin_msa_clei_u_d: 3720 case Mips::BI__builtin_msa_clti_u_b: 3721 case Mips::BI__builtin_msa_clti_u_h: 3722 case Mips::BI__builtin_msa_clti_u_w: 3723 case Mips::BI__builtin_msa_clti_u_d: 3724 case Mips::BI__builtin_msa_maxi_u_b: 3725 case Mips::BI__builtin_msa_maxi_u_h: 3726 case Mips::BI__builtin_msa_maxi_u_w: 3727 case Mips::BI__builtin_msa_maxi_u_d: 3728 case Mips::BI__builtin_msa_mini_u_b: 3729 case Mips::BI__builtin_msa_mini_u_h: 3730 case Mips::BI__builtin_msa_mini_u_w: 3731 case Mips::BI__builtin_msa_mini_u_d: 3732 case Mips::BI__builtin_msa_addvi_b: 3733 case Mips::BI__builtin_msa_addvi_h: 3734 case Mips::BI__builtin_msa_addvi_w: 3735 case Mips::BI__builtin_msa_addvi_d: 3736 case Mips::BI__builtin_msa_bclri_w: 3737 case Mips::BI__builtin_msa_bnegi_w: 3738 case Mips::BI__builtin_msa_bseti_w: 3739 case Mips::BI__builtin_msa_sat_s_w: 3740 case Mips::BI__builtin_msa_sat_u_w: 3741 case Mips::BI__builtin_msa_slli_w: 3742 case Mips::BI__builtin_msa_srai_w: 3743 case Mips::BI__builtin_msa_srari_w: 3744 case Mips::BI__builtin_msa_srli_w: 3745 case Mips::BI__builtin_msa_srlri_w: 3746 case Mips::BI__builtin_msa_subvi_b: 3747 case Mips::BI__builtin_msa_subvi_h: 3748 case Mips::BI__builtin_msa_subvi_w: 3749 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3750 case Mips::BI__builtin_msa_binsli_w: 3751 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3752 // These intrinsics take an unsigned 6 bit immediate. 3753 case Mips::BI__builtin_msa_bclri_d: 3754 case Mips::BI__builtin_msa_bnegi_d: 3755 case Mips::BI__builtin_msa_bseti_d: 3756 case Mips::BI__builtin_msa_sat_s_d: 3757 case Mips::BI__builtin_msa_sat_u_d: 3758 case Mips::BI__builtin_msa_slli_d: 3759 case Mips::BI__builtin_msa_srai_d: 3760 case Mips::BI__builtin_msa_srari_d: 3761 case Mips::BI__builtin_msa_srli_d: 3762 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3763 case Mips::BI__builtin_msa_binsli_d: 3764 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3765 // These intrinsics take a signed 5 bit immediate. 3766 case Mips::BI__builtin_msa_ceqi_b: 3767 case Mips::BI__builtin_msa_ceqi_h: 3768 case Mips::BI__builtin_msa_ceqi_w: 3769 case Mips::BI__builtin_msa_ceqi_d: 3770 case Mips::BI__builtin_msa_clti_s_b: 3771 case Mips::BI__builtin_msa_clti_s_h: 3772 case Mips::BI__builtin_msa_clti_s_w: 3773 case Mips::BI__builtin_msa_clti_s_d: 3774 case Mips::BI__builtin_msa_clei_s_b: 3775 case Mips::BI__builtin_msa_clei_s_h: 3776 case Mips::BI__builtin_msa_clei_s_w: 3777 case Mips::BI__builtin_msa_clei_s_d: 3778 case Mips::BI__builtin_msa_maxi_s_b: 3779 case Mips::BI__builtin_msa_maxi_s_h: 3780 case Mips::BI__builtin_msa_maxi_s_w: 3781 case Mips::BI__builtin_msa_maxi_s_d: 3782 case Mips::BI__builtin_msa_mini_s_b: 3783 case Mips::BI__builtin_msa_mini_s_h: 3784 case Mips::BI__builtin_msa_mini_s_w: 3785 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3786 // These intrinsics take an unsigned 8 bit immediate. 
3787 case Mips::BI__builtin_msa_andi_b: 3788 case Mips::BI__builtin_msa_nori_b: 3789 case Mips::BI__builtin_msa_ori_b: 3790 case Mips::BI__builtin_msa_shf_b: 3791 case Mips::BI__builtin_msa_shf_h: 3792 case Mips::BI__builtin_msa_shf_w: 3793 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3794 case Mips::BI__builtin_msa_bseli_b: 3795 case Mips::BI__builtin_msa_bmnzi_b: 3796 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3797 // df/n format 3798 // These intrinsics take an unsigned 4 bit immediate. 3799 case Mips::BI__builtin_msa_copy_s_b: 3800 case Mips::BI__builtin_msa_copy_u_b: 3801 case Mips::BI__builtin_msa_insve_b: 3802 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3803 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3804 // These intrinsics take an unsigned 3 bit immediate. 3805 case Mips::BI__builtin_msa_copy_s_h: 3806 case Mips::BI__builtin_msa_copy_u_h: 3807 case Mips::BI__builtin_msa_insve_h: 3808 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3809 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3810 // These intrinsics take an unsigned 2 bit immediate. 3811 case Mips::BI__builtin_msa_copy_s_w: 3812 case Mips::BI__builtin_msa_copy_u_w: 3813 case Mips::BI__builtin_msa_insve_w: 3814 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3815 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3816 // These intrinsics take an unsigned 1 bit immediate. 3817 case Mips::BI__builtin_msa_copy_s_d: 3818 case Mips::BI__builtin_msa_copy_u_d: 3819 case Mips::BI__builtin_msa_insve_d: 3820 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3821 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3822 // Memory offsets and immediate loads. 3823 // These intrinsics take a signed 10 bit immediate. 3824 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3825 case Mips::BI__builtin_msa_ldi_h: 3826 case Mips::BI__builtin_msa_ldi_w: 3827 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3828 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3829 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3830 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3831 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3832 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3833 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3834 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3835 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3836 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3837 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3838 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3839 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3840 } 3841 3842 if (!m) 3843 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3844 3845 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3846 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3847 } 3848 3849 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 3850 /// advancing the pointer over the consumed characters. The decoded type is 3851 /// returned. 
If the decoded type represents a constant integer with a 3852 /// constraint on its value then Mask is set to that value. The type descriptors 3853 /// used in Str are specific to PPC MMA builtins and are documented in the file 3854 /// defining the PPC builtins. 3855 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3856 unsigned &Mask) { 3857 bool RequireICE = false; 3858 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3859 switch (*Str++) { 3860 case 'V': 3861 return Context.getVectorType(Context.UnsignedCharTy, 16, 3862 VectorType::VectorKind::AltiVecVector); 3863 case 'i': { 3864 char *End; 3865 unsigned size = strtoul(Str, &End, 10); 3866 assert(End != Str && "Missing constant parameter constraint"); 3867 Str = End; 3868 Mask = size; 3869 return Context.IntTy; 3870 } 3871 case 'W': { 3872 char *End; 3873 unsigned size = strtoul(Str, &End, 10); 3874 assert(End != Str && "Missing PowerPC MMA type size"); 3875 Str = End; 3876 QualType Type; 3877 switch (size) { 3878 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3879 case size: Type = Context.Id##Ty; break; 3880 #include "clang/Basic/PPCTypes.def" 3881 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3882 } 3883 bool CheckVectorArgs = false; 3884 while (!CheckVectorArgs) { 3885 switch (*Str++) { 3886 case '*': 3887 Type = Context.getPointerType(Type); 3888 break; 3889 case 'C': 3890 Type = Type.withConst(); 3891 break; 3892 default: 3893 CheckVectorArgs = true; 3894 --Str; 3895 break; 3896 } 3897 } 3898 return Type; 3899 } 3900 default: 3901 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3902 } 3903 } 3904 3905 static bool isPPC_64Builtin(unsigned BuiltinID) { 3906 // These builtins only work on PPC 64bit targets. 3907 switch (BuiltinID) { 3908 case PPC::BI__builtin_divde: 3909 case PPC::BI__builtin_divdeu: 3910 case PPC::BI__builtin_bpermd: 3911 case PPC::BI__builtin_pdepd: 3912 case PPC::BI__builtin_pextd: 3913 case PPC::BI__builtin_ppc_ldarx: 3914 case PPC::BI__builtin_ppc_stdcx: 3915 case PPC::BI__builtin_ppc_tdw: 3916 case PPC::BI__builtin_ppc_trapd: 3917 case PPC::BI__builtin_ppc_cmpeqb: 3918 case PPC::BI__builtin_ppc_setb: 3919 case PPC::BI__builtin_ppc_mulhd: 3920 case PPC::BI__builtin_ppc_mulhdu: 3921 case PPC::BI__builtin_ppc_maddhd: 3922 case PPC::BI__builtin_ppc_maddhdu: 3923 case PPC::BI__builtin_ppc_maddld: 3924 case PPC::BI__builtin_ppc_load8r: 3925 case PPC::BI__builtin_ppc_store8r: 3926 case PPC::BI__builtin_ppc_insert_exp: 3927 case PPC::BI__builtin_ppc_extract_sig: 3928 case PPC::BI__builtin_ppc_addex: 3929 case PPC::BI__builtin_darn: 3930 case PPC::BI__builtin_darn_raw: 3931 case PPC::BI__builtin_ppc_compare_and_swaplp: 3932 case PPC::BI__builtin_ppc_fetch_and_addlp: 3933 case PPC::BI__builtin_ppc_fetch_and_andlp: 3934 case PPC::BI__builtin_ppc_fetch_and_orlp: 3935 case PPC::BI__builtin_ppc_fetch_and_swaplp: 3936 return true; 3937 } 3938 return false; 3939 } 3940 3941 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall, 3942 StringRef FeatureToCheck, unsigned DiagID, 3943 StringRef DiagArg = "") { 3944 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck)) 3945 return false; 3946 3947 if (DiagArg.empty()) 3948 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange(); 3949 else 3950 S.Diag(TheCall->getBeginLoc(), DiagID) 3951 << DiagArg << TheCall->getSourceRange(); 3952 3953 return true; 3954 } 3955 3956 /// Returns true if the argument consists of one contiguous run of 1s with any 3957 /// number of 0s on either side. 
The 1s are allowed to wrap from LSB to MSB, so 3958 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 3959 /// since all 1s are not contiguous. 3960 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) { 3961 llvm::APSInt Result; 3962 // We can't check the value of a dependent argument. 3963 Expr *Arg = TheCall->getArg(ArgNum); 3964 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3965 return false; 3966 3967 // Check constant-ness first. 3968 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3969 return true; 3970 3971 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s. 3972 if (Result.isShiftedMask() || (~Result).isShiftedMask()) 3973 return false; 3974 3975 return Diag(TheCall->getBeginLoc(), 3976 diag::err_argument_not_contiguous_bit_field) 3977 << ArgNum << Arg->getSourceRange(); 3978 } 3979 3980 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3981 CallExpr *TheCall) { 3982 unsigned i = 0, l = 0, u = 0; 3983 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3984 llvm::APSInt Result; 3985 3986 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit) 3987 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3988 << TheCall->getSourceRange(); 3989 3990 switch (BuiltinID) { 3991 default: return false; 3992 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3993 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3994 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3995 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3996 case PPC::BI__builtin_altivec_dss: 3997 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3998 case PPC::BI__builtin_tbegin: 3999 case PPC::BI__builtin_tend: 4000 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) || 4001 SemaFeatureCheck(*this, TheCall, "htm", 4002 diag::err_ppc_builtin_requires_htm); 4003 case PPC::BI__builtin_tsr: 4004 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 4005 SemaFeatureCheck(*this, TheCall, "htm", 4006 diag::err_ppc_builtin_requires_htm); 4007 case PPC::BI__builtin_tabortwc: 4008 case PPC::BI__builtin_tabortdc: 4009 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 4010 SemaFeatureCheck(*this, TheCall, "htm", 4011 diag::err_ppc_builtin_requires_htm); 4012 case PPC::BI__builtin_tabortwci: 4013 case PPC::BI__builtin_tabortdci: 4014 return SemaFeatureCheck(*this, TheCall, "htm", 4015 diag::err_ppc_builtin_requires_htm) || 4016 (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 4017 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31)); 4018 case PPC::BI__builtin_tabort: 4019 case PPC::BI__builtin_tcheck: 4020 case PPC::BI__builtin_treclaim: 4021 case PPC::BI__builtin_trechkpt: 4022 case PPC::BI__builtin_tendall: 4023 case PPC::BI__builtin_tresume: 4024 case PPC::BI__builtin_tsuspend: 4025 case PPC::BI__builtin_get_texasr: 4026 case PPC::BI__builtin_get_texasru: 4027 case PPC::BI__builtin_get_tfhar: 4028 case PPC::BI__builtin_get_tfiar: 4029 case PPC::BI__builtin_set_texasr: 4030 case PPC::BI__builtin_set_texasru: 4031 case PPC::BI__builtin_set_tfhar: 4032 case PPC::BI__builtin_set_tfiar: 4033 case PPC::BI__builtin_ttest: 4034 return SemaFeatureCheck(*this, TheCall, "htm", 4035 diag::err_ppc_builtin_requires_htm); 4036 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05', 4037 // __builtin_(un)pack_longdouble are available only if long double uses IBM 4038 // extended double representation. 
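// Illustrative note (added comment): when long double is not the IBM double-double format (e.g. under -mabi=ieeelongdouble or -mlong-double-64), the check below rejects these builtins with the "ibmlongdouble" ABI diagnostic.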
4039 case PPC::BI__builtin_unpack_longdouble: 4040 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1)) 4041 return true; 4042 LLVM_FALLTHROUGH; 4043 case PPC::BI__builtin_pack_longdouble: 4044 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble()) 4045 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi) 4046 << "ibmlongdouble"; 4047 return false; 4048 case PPC::BI__builtin_altivec_dst: 4049 case PPC::BI__builtin_altivec_dstt: 4050 case PPC::BI__builtin_altivec_dstst: 4051 case PPC::BI__builtin_altivec_dststt: 4052 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4053 case PPC::BI__builtin_vsx_xxpermdi: 4054 case PPC::BI__builtin_vsx_xxsldwi: 4055 return SemaBuiltinVSX(TheCall); 4056 case PPC::BI__builtin_divwe: 4057 case PPC::BI__builtin_divweu: 4058 case PPC::BI__builtin_divde: 4059 case PPC::BI__builtin_divdeu: 4060 return SemaFeatureCheck(*this, TheCall, "extdiv", 4061 diag::err_ppc_builtin_only_on_arch, "7"); 4062 case PPC::BI__builtin_bpermd: 4063 return SemaFeatureCheck(*this, TheCall, "bpermd", 4064 diag::err_ppc_builtin_only_on_arch, "7"); 4065 case PPC::BI__builtin_unpack_vector_int128: 4066 return SemaFeatureCheck(*this, TheCall, "vsx", 4067 diag::err_ppc_builtin_only_on_arch, "7") || 4068 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4069 case PPC::BI__builtin_pack_vector_int128: 4070 return SemaFeatureCheck(*this, TheCall, "vsx", 4071 diag::err_ppc_builtin_only_on_arch, "7"); 4072 case PPC::BI__builtin_pdepd: 4073 case PPC::BI__builtin_pextd: 4074 return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions", 4075 diag::err_ppc_builtin_only_on_arch, "10"); 4076 case PPC::BI__builtin_altivec_vgnb: 4077 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 4078 case PPC::BI__builtin_vsx_xxeval: 4079 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 4080 case PPC::BI__builtin_altivec_vsldbi: 4081 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4082 case PPC::BI__builtin_altivec_vsrdbi: 4083 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4084 case PPC::BI__builtin_vsx_xxpermx: 4085 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 4086 case PPC::BI__builtin_ppc_tw: 4087 case PPC::BI__builtin_ppc_tdw: 4088 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 4089 case PPC::BI__builtin_ppc_cmpeqb: 4090 case PPC::BI__builtin_ppc_setb: 4091 case PPC::BI__builtin_ppc_maddhd: 4092 case PPC::BI__builtin_ppc_maddhdu: 4093 case PPC::BI__builtin_ppc_maddld: 4094 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4095 diag::err_ppc_builtin_only_on_arch, "9"); 4096 case PPC::BI__builtin_ppc_cmprb: 4097 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4098 diag::err_ppc_builtin_only_on_arch, "9") || 4099 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 4100 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 4101 // be a constant that represents a contiguous bit field. 
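// Illustrative examples (added comment): per SemaValueIsRunOfOnes above, a mask such as 0x00FFFF00, or a wrapped run like 0xFF0000FF, is accepted, while 0x0F0F0000 is rejected as non-contiguous.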
4102 case PPC::BI__builtin_ppc_rlwnm: 4103 return SemaValueIsRunOfOnes(TheCall, 2); 4104 case PPC::BI__builtin_ppc_rlwimi: 4105 case PPC::BI__builtin_ppc_rldimi: 4106 return SemaBuiltinConstantArg(TheCall, 2, Result) || 4107 SemaValueIsRunOfOnes(TheCall, 3); 4108 case PPC::BI__builtin_ppc_extract_exp: 4109 case PPC::BI__builtin_ppc_extract_sig: 4110 case PPC::BI__builtin_ppc_insert_exp: 4111 return SemaFeatureCheck(*this, TheCall, "power9-vector", 4112 diag::err_ppc_builtin_only_on_arch, "9"); 4113 case PPC::BI__builtin_ppc_addex: { 4114 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4115 diag::err_ppc_builtin_only_on_arch, "9") || 4116 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 4117 return true; 4118 // Output warning for reserved values 1 to 3. 4119 int ArgValue = 4120 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 4121 if (ArgValue != 0) 4122 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 4123 << ArgValue; 4124 return false; 4125 } 4126 case PPC::BI__builtin_ppc_mtfsb0: 4127 case PPC::BI__builtin_ppc_mtfsb1: 4128 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 4129 case PPC::BI__builtin_ppc_mtfsf: 4130 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 4131 case PPC::BI__builtin_ppc_mtfsfi: 4132 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 4133 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 4134 case PPC::BI__builtin_ppc_alignx: 4135 return SemaBuiltinConstantArgPower2(TheCall, 0); 4136 case PPC::BI__builtin_ppc_rdlam: 4137 return SemaValueIsRunOfOnes(TheCall, 2); 4138 case PPC::BI__builtin_ppc_icbt: 4139 case PPC::BI__builtin_ppc_sthcx: 4140 case PPC::BI__builtin_ppc_stbcx: 4141 case PPC::BI__builtin_ppc_lharx: 4142 case PPC::BI__builtin_ppc_lbarx: 4143 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 4144 diag::err_ppc_builtin_only_on_arch, "8"); 4145 case PPC::BI__builtin_vsx_ldrmb: 4146 case PPC::BI__builtin_vsx_strmb: 4147 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 4148 diag::err_ppc_builtin_only_on_arch, "8") || 4149 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 4150 case PPC::BI__builtin_altivec_vcntmbb: 4151 case PPC::BI__builtin_altivec_vcntmbh: 4152 case PPC::BI__builtin_altivec_vcntmbw: 4153 case PPC::BI__builtin_altivec_vcntmbd: 4154 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4155 case PPC::BI__builtin_darn: 4156 case PPC::BI__builtin_darn_raw: 4157 case PPC::BI__builtin_darn_32: 4158 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4159 diag::err_ppc_builtin_only_on_arch, "9"); 4160 case PPC::BI__builtin_vsx_xxgenpcvbm: 4161 case PPC::BI__builtin_vsx_xxgenpcvhm: 4162 case PPC::BI__builtin_vsx_xxgenpcvwm: 4163 case PPC::BI__builtin_vsx_xxgenpcvdm: 4164 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 4165 case PPC::BI__builtin_ppc_compare_exp_uo: 4166 case PPC::BI__builtin_ppc_compare_exp_lt: 4167 case PPC::BI__builtin_ppc_compare_exp_gt: 4168 case PPC::BI__builtin_ppc_compare_exp_eq: 4169 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4170 diag::err_ppc_builtin_only_on_arch, "9") || 4171 SemaFeatureCheck(*this, TheCall, "vsx", 4172 diag::err_ppc_builtin_requires_vsx); 4173 case PPC::BI__builtin_ppc_test_data_class: { 4174 // Check if the first argument of the __builtin_ppc_test_data_class call is 4175 // valid. The argument must be either a 'float' or a 'double'. 
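// (Added, illustrative: a call such as __builtin_ppc_test_data_class(1.0f, 127) has a 'float' first argument and an in-range mask, so it passes the checks below, assuming the required ISA 3.0 and VSX features are available.)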
4176 QualType ArgType = TheCall->getArg(0)->getType(); 4177 if (ArgType != QualType(Context.FloatTy) && 4178 ArgType != QualType(Context.DoubleTy)) 4179 return Diag(TheCall->getBeginLoc(), 4180 diag::err_ppc_invalid_test_data_class_type); 4181 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4182 diag::err_ppc_builtin_only_on_arch, "9") || 4183 SemaFeatureCheck(*this, TheCall, "vsx", 4184 diag::err_ppc_builtin_requires_vsx) || 4185 SemaBuiltinConstantArgRange(TheCall, 1, 0, 127); 4186 } 4187 case PPC::BI__builtin_ppc_maxfe: 4188 case PPC::BI__builtin_ppc_minfe: 4189 case PPC::BI__builtin_ppc_maxfl: 4190 case PPC::BI__builtin_ppc_minfl: 4191 case PPC::BI__builtin_ppc_maxfs: 4192 case PPC::BI__builtin_ppc_minfs: { 4193 if (Context.getTargetInfo().getTriple().isOSAIX() && 4194 (BuiltinID == PPC::BI__builtin_ppc_maxfe || 4195 BuiltinID == PPC::BI__builtin_ppc_minfe)) 4196 return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type) 4197 << "builtin" << true << 128 << QualType(Context.LongDoubleTy) 4198 << false << Context.getTargetInfo().getTriple().str(); 4199 // Argument type should be exact. 4200 QualType ArgType = QualType(Context.LongDoubleTy); 4201 if (BuiltinID == PPC::BI__builtin_ppc_maxfl || 4202 BuiltinID == PPC::BI__builtin_ppc_minfl) 4203 ArgType = QualType(Context.DoubleTy); 4204 else if (BuiltinID == PPC::BI__builtin_ppc_maxfs || 4205 BuiltinID == PPC::BI__builtin_ppc_minfs) 4206 ArgType = QualType(Context.FloatTy); 4207 for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I) 4208 if (TheCall->getArg(I)->getType() != ArgType) 4209 return Diag(TheCall->getBeginLoc(), 4210 diag::err_typecheck_convert_incompatible) 4211 << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0; 4212 return false; 4213 } 4214 case PPC::BI__builtin_ppc_load8r: 4215 case PPC::BI__builtin_ppc_store8r: 4216 return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions", 4217 diag::err_ppc_builtin_only_on_arch, "7"); 4218 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ 4219 case PPC::BI__builtin_##Name: \ 4220 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types); 4221 #include "clang/Basic/BuiltinsPPC.def" 4222 } 4223 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4224 } 4225 4226 // Check if the given type is a non-pointer PPC MMA type. This function is used 4227 // in Sema to prevent invalid uses of restricted PPC MMA types. 
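// Illustrative note (added comment): pointers to and arrays of MMA types pass through below, so e.g. a '__vector_quad *' value is not flagged by this check, while a bare '__vector_quad' in a restricted position is diagnosed with err_ppc_invalid_use_mma_type.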
4228 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) { 4229 if (Type->isPointerType() || Type->isArrayType()) 4230 return false; 4231 4232 QualType CoreType = Type.getCanonicalType().getUnqualifiedType(); 4233 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty 4234 if (false 4235 #include "clang/Basic/PPCTypes.def" 4236 ) { 4237 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type); 4238 return true; 4239 } 4240 return false; 4241 } 4242 4243 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, 4244 CallExpr *TheCall) { 4245 // position of memory order and scope arguments in the builtin 4246 unsigned OrderIndex, ScopeIndex; 4247 switch (BuiltinID) { 4248 case AMDGPU::BI__builtin_amdgcn_atomic_inc32: 4249 case AMDGPU::BI__builtin_amdgcn_atomic_inc64: 4250 case AMDGPU::BI__builtin_amdgcn_atomic_dec32: 4251 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: 4252 OrderIndex = 2; 4253 ScopeIndex = 3; 4254 break; 4255 case AMDGPU::BI__builtin_amdgcn_fence: 4256 OrderIndex = 0; 4257 ScopeIndex = 1; 4258 break; 4259 default: 4260 return false; 4261 } 4262 4263 ExprResult Arg = TheCall->getArg(OrderIndex); 4264 auto ArgExpr = Arg.get(); 4265 Expr::EvalResult ArgResult; 4266 4267 if (!ArgExpr->EvaluateAsInt(ArgResult, Context)) 4268 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int) 4269 << ArgExpr->getType(); 4270 auto Ord = ArgResult.Val.getInt().getZExtValue(); 4271 4272 // Check validity of memory ordering as per C11 / C++11's memory model. 4273 // Only fence needs this check. Atomic dec/inc allow all memory orders. 4274 if (!llvm::isValidAtomicOrderingCABI(Ord)) 4275 return Diag(ArgExpr->getBeginLoc(), 4276 diag::warn_atomic_op_has_invalid_memory_order) 4277 << ArgExpr->getSourceRange(); 4278 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 4279 case llvm::AtomicOrderingCABI::relaxed: 4280 case llvm::AtomicOrderingCABI::consume: 4281 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 4282 return Diag(ArgExpr->getBeginLoc(), 4283 diag::warn_atomic_op_has_invalid_memory_order) 4284 << ArgExpr->getSourceRange(); 4285 break; 4286 case llvm::AtomicOrderingCABI::acquire: 4287 case llvm::AtomicOrderingCABI::release: 4288 case llvm::AtomicOrderingCABI::acq_rel: 4289 case llvm::AtomicOrderingCABI::seq_cst: 4290 break; 4291 } 4292 4293 Arg = TheCall->getArg(ScopeIndex); 4294 ArgExpr = Arg.get(); 4295 Expr::EvalResult ArgResult1; 4296 // Check that sync scope is a constant literal 4297 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 4298 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 4299 << ArgExpr->getType(); 4300 4301 return false; 4302 } 4303 4304 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 4305 llvm::APSInt Result; 4306 4307 // We can't check the value of a dependent argument. 4308 Expr *Arg = TheCall->getArg(ArgNum); 4309 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4310 return false; 4311 4312 // Check constant-ness first. 4313 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4314 return true; 4315 4316 int64_t Val = Result.getSExtValue(); 4317 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 4318 return false; 4319 4320 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 4321 << Arg->getSourceRange(); 4322 } 4323 4324 static bool isRISCV32Builtin(unsigned BuiltinID) { 4325 // These builtins only work on riscv32 targets.
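// (Added, illustrative: e.g. __builtin_riscv_zip_32 used on an rv64 target is rejected by the caller below with err_32_bit_builtin_64_bit_tgt.)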
4326 switch (BuiltinID) { 4327 case RISCV::BI__builtin_riscv_zip_32: 4328 case RISCV::BI__builtin_riscv_unzip_32: 4329 case RISCV::BI__builtin_riscv_aes32dsi_32: 4330 case RISCV::BI__builtin_riscv_aes32dsmi_32: 4331 case RISCV::BI__builtin_riscv_aes32esi_32: 4332 case RISCV::BI__builtin_riscv_aes32esmi_32: 4333 case RISCV::BI__builtin_riscv_sha512sig0h_32: 4334 case RISCV::BI__builtin_riscv_sha512sig0l_32: 4335 case RISCV::BI__builtin_riscv_sha512sig1h_32: 4336 case RISCV::BI__builtin_riscv_sha512sig1l_32: 4337 case RISCV::BI__builtin_riscv_sha512sum0r_32: 4338 case RISCV::BI__builtin_riscv_sha512sum1r_32: 4339 return true; 4340 } 4341 4342 return false; 4343 } 4344 4345 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 4346 unsigned BuiltinID, 4347 CallExpr *TheCall) { 4348 // CodeGenFunction can also detect this, but this gives a better error 4349 // message. 4350 bool FeatureMissing = false; 4351 SmallVector<StringRef> ReqFeatures; 4352 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 4353 Features.split(ReqFeatures, ','); 4354 4355 // Check for 32-bit only builtins on a 64-bit target. 4356 const llvm::Triple &TT = TI.getTriple(); 4357 if (TT.getArch() != llvm::Triple::riscv32 && isRISCV32Builtin(BuiltinID)) 4358 return Diag(TheCall->getCallee()->getBeginLoc(), 4359 diag::err_32_bit_builtin_64_bit_tgt); 4360 4361 // Check if each required feature is included 4362 for (StringRef F : ReqFeatures) { 4363 SmallVector<StringRef> ReqOpFeatures; 4364 F.split(ReqOpFeatures, '|'); 4365 bool HasFeature = false; 4366 for (StringRef OF : ReqOpFeatures) { 4367 if (TI.hasFeature(OF)) { 4368 HasFeature = true; 4369 continue; 4370 } 4371 } 4372 4373 if (!HasFeature) { 4374 std::string FeatureStrs; 4375 for (StringRef OF : ReqOpFeatures) { 4376 // If the feature is 64bit, alter the string so it will print better in 4377 // the diagnostic. 4378 if (OF == "64bit") 4379 OF = "RV64"; 4380 4381 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 4382 OF.consume_front("experimental-"); 4383 std::string FeatureStr = OF.str(); 4384 FeatureStr[0] = std::toupper(FeatureStr[0]); 4385 // Combine strings. 4386 FeatureStrs += FeatureStrs == "" ? 
"" : ", "; 4387 FeatureStrs += "'"; 4388 FeatureStrs += FeatureStr; 4389 FeatureStrs += "'"; 4390 } 4391 // Error message 4392 FeatureMissing = true; 4393 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 4394 << TheCall->getSourceRange() << StringRef(FeatureStrs); 4395 } 4396 } 4397 4398 if (FeatureMissing) 4399 return true; 4400 4401 switch (BuiltinID) { 4402 case RISCVVector::BI__builtin_rvv_vsetvli: 4403 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 4404 CheckRISCVLMUL(TheCall, 2); 4405 case RISCVVector::BI__builtin_rvv_vsetvlimax: 4406 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4407 CheckRISCVLMUL(TheCall, 1); 4408 case RISCVVector::BI__builtin_rvv_vget_v: { 4409 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4410 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4411 TheCall->getType().getCanonicalType().getTypePtr())); 4412 ASTContext::BuiltinVectorTypeInfo VecInfo = 4413 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4414 TheCall->getArg(0)->getType().getCanonicalType().getTypePtr())); 4415 unsigned MaxIndex = 4416 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) / 4417 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors); 4418 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4419 } 4420 case RISCVVector::BI__builtin_rvv_vset_v: { 4421 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4422 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4423 TheCall->getType().getCanonicalType().getTypePtr())); 4424 ASTContext::BuiltinVectorTypeInfo VecInfo = 4425 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4426 TheCall->getArg(2)->getType().getCanonicalType().getTypePtr())); 4427 unsigned MaxIndex = 4428 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) / 4429 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors); 4430 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4431 } 4432 // Check if byteselect is in [0, 3] 4433 case RISCV::BI__builtin_riscv_aes32dsi_32: 4434 case RISCV::BI__builtin_riscv_aes32dsmi_32: 4435 case RISCV::BI__builtin_riscv_aes32esi_32: 4436 case RISCV::BI__builtin_riscv_aes32esmi_32: 4437 case RISCV::BI__builtin_riscv_sm4ks: 4438 case RISCV::BI__builtin_riscv_sm4ed: 4439 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4440 // Check if rnum is in [0, 10] 4441 case RISCV::BI__builtin_riscv_aes64ks1i_64: 4442 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10); 4443 } 4444 4445 return false; 4446 } 4447 4448 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 4449 CallExpr *TheCall) { 4450 if (BuiltinID == SystemZ::BI__builtin_tabort) { 4451 Expr *Arg = TheCall->getArg(0); 4452 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 4453 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 4454 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 4455 << Arg->getSourceRange(); 4456 } 4457 4458 // For intrinsics which take an immediate value as part of the instruction, 4459 // range check them here. 
4460 unsigned i = 0, l = 0, u = 0; 4461 switch (BuiltinID) { 4462 default: return false; 4463 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 4464 case SystemZ::BI__builtin_s390_verimb: 4465 case SystemZ::BI__builtin_s390_verimh: 4466 case SystemZ::BI__builtin_s390_verimf: 4467 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 4468 case SystemZ::BI__builtin_s390_vfaeb: 4469 case SystemZ::BI__builtin_s390_vfaeh: 4470 case SystemZ::BI__builtin_s390_vfaef: 4471 case SystemZ::BI__builtin_s390_vfaebs: 4472 case SystemZ::BI__builtin_s390_vfaehs: 4473 case SystemZ::BI__builtin_s390_vfaefs: 4474 case SystemZ::BI__builtin_s390_vfaezb: 4475 case SystemZ::BI__builtin_s390_vfaezh: 4476 case SystemZ::BI__builtin_s390_vfaezf: 4477 case SystemZ::BI__builtin_s390_vfaezbs: 4478 case SystemZ::BI__builtin_s390_vfaezhs: 4479 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 4480 case SystemZ::BI__builtin_s390_vfisb: 4481 case SystemZ::BI__builtin_s390_vfidb: 4482 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 4483 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4484 case SystemZ::BI__builtin_s390_vftcisb: 4485 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 4486 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 4487 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 4488 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 4489 case SystemZ::BI__builtin_s390_vstrcb: 4490 case SystemZ::BI__builtin_s390_vstrch: 4491 case SystemZ::BI__builtin_s390_vstrcf: 4492 case SystemZ::BI__builtin_s390_vstrczb: 4493 case SystemZ::BI__builtin_s390_vstrczh: 4494 case SystemZ::BI__builtin_s390_vstrczf: 4495 case SystemZ::BI__builtin_s390_vstrcbs: 4496 case SystemZ::BI__builtin_s390_vstrchs: 4497 case SystemZ::BI__builtin_s390_vstrcfs: 4498 case SystemZ::BI__builtin_s390_vstrczbs: 4499 case SystemZ::BI__builtin_s390_vstrczhs: 4500 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 4501 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 4502 case SystemZ::BI__builtin_s390_vfminsb: 4503 case SystemZ::BI__builtin_s390_vfmaxsb: 4504 case SystemZ::BI__builtin_s390_vfmindb: 4505 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 4506 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 4507 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 4508 case SystemZ::BI__builtin_s390_vclfnhs: 4509 case SystemZ::BI__builtin_s390_vclfnls: 4510 case SystemZ::BI__builtin_s390_vcfn: 4511 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 4512 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 4513 } 4514 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4515 } 4516 4517 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 4518 /// This checks that the target supports __builtin_cpu_supports and 4519 /// that the string argument is constant and valid. 4520 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 4521 CallExpr *TheCall) { 4522 Expr *Arg = TheCall->getArg(0); 4523 4524 // Check if the argument is a string literal. 4525 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4526 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4527 << Arg->getSourceRange(); 4528 4529 // Check the contents of the string. 
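  // For example (illustrative only), __builtin_cpu_supports("avx2") is
  // accepted on x86 targets, while a non-literal argument or an unrecognized
  // feature string such as "not-a-feature" is rejected by the checks here.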
4530 StringRef Feature = 4531 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4532 if (!TI.validateCpuSupports(Feature)) 4533 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 4534 << Arg->getSourceRange(); 4535 return false; 4536 } 4537 4538 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 4539 /// This checks that the target supports __builtin_cpu_is and 4540 /// that the string argument is constant and valid. 4541 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 4542 Expr *Arg = TheCall->getArg(0); 4543 4544 // Check if the argument is a string literal. 4545 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4546 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4547 << Arg->getSourceRange(); 4548 4549 // Check the contents of the string. 4550 StringRef Feature = 4551 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4552 if (!TI.validateCpuIs(Feature)) 4553 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 4554 << Arg->getSourceRange(); 4555 return false; 4556 } 4557 4558 // Check if the rounding mode is legal. 4559 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 4560 // Indicates if this instruction has rounding control or just SAE. 4561 bool HasRC = false; 4562 4563 unsigned ArgNum = 0; 4564 switch (BuiltinID) { 4565 default: 4566 return false; 4567 case X86::BI__builtin_ia32_vcvttsd2si32: 4568 case X86::BI__builtin_ia32_vcvttsd2si64: 4569 case X86::BI__builtin_ia32_vcvttsd2usi32: 4570 case X86::BI__builtin_ia32_vcvttsd2usi64: 4571 case X86::BI__builtin_ia32_vcvttss2si32: 4572 case X86::BI__builtin_ia32_vcvttss2si64: 4573 case X86::BI__builtin_ia32_vcvttss2usi32: 4574 case X86::BI__builtin_ia32_vcvttss2usi64: 4575 case X86::BI__builtin_ia32_vcvttsh2si32: 4576 case X86::BI__builtin_ia32_vcvttsh2si64: 4577 case X86::BI__builtin_ia32_vcvttsh2usi32: 4578 case X86::BI__builtin_ia32_vcvttsh2usi64: 4579 ArgNum = 1; 4580 break; 4581 case X86::BI__builtin_ia32_maxpd512: 4582 case X86::BI__builtin_ia32_maxps512: 4583 case X86::BI__builtin_ia32_minpd512: 4584 case X86::BI__builtin_ia32_minps512: 4585 case X86::BI__builtin_ia32_maxph512: 4586 case X86::BI__builtin_ia32_minph512: 4587 ArgNum = 2; 4588 break; 4589 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 4590 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 4591 case X86::BI__builtin_ia32_cvtps2pd512_mask: 4592 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 4593 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 4594 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 4595 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 4596 case X86::BI__builtin_ia32_cvttps2dq512_mask: 4597 case X86::BI__builtin_ia32_cvttps2qq512_mask: 4598 case X86::BI__builtin_ia32_cvttps2udq512_mask: 4599 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 4600 case X86::BI__builtin_ia32_vcvttph2w512_mask: 4601 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 4602 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 4603 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 4604 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 4605 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 4606 case X86::BI__builtin_ia32_exp2pd_mask: 4607 case X86::BI__builtin_ia32_exp2ps_mask: 4608 case X86::BI__builtin_ia32_getexppd512_mask: 4609 case X86::BI__builtin_ia32_getexpps512_mask: 4610 case X86::BI__builtin_ia32_getexpph512_mask: 4611 case X86::BI__builtin_ia32_rcp28pd_mask: 4612 case X86::BI__builtin_ia32_rcp28ps_mask: 4613 case X86::BI__builtin_ia32_rsqrt28pd_mask: 
4614 case X86::BI__builtin_ia32_rsqrt28ps_mask: 4615 case X86::BI__builtin_ia32_vcomisd: 4616 case X86::BI__builtin_ia32_vcomiss: 4617 case X86::BI__builtin_ia32_vcomish: 4618 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 4619 ArgNum = 3; 4620 break; 4621 case X86::BI__builtin_ia32_cmppd512_mask: 4622 case X86::BI__builtin_ia32_cmpps512_mask: 4623 case X86::BI__builtin_ia32_cmpsd_mask: 4624 case X86::BI__builtin_ia32_cmpss_mask: 4625 case X86::BI__builtin_ia32_cmpsh_mask: 4626 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 4627 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 4628 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 4629 case X86::BI__builtin_ia32_getexpsd128_round_mask: 4630 case X86::BI__builtin_ia32_getexpss128_round_mask: 4631 case X86::BI__builtin_ia32_getexpsh128_round_mask: 4632 case X86::BI__builtin_ia32_getmantpd512_mask: 4633 case X86::BI__builtin_ia32_getmantps512_mask: 4634 case X86::BI__builtin_ia32_getmantph512_mask: 4635 case X86::BI__builtin_ia32_maxsd_round_mask: 4636 case X86::BI__builtin_ia32_maxss_round_mask: 4637 case X86::BI__builtin_ia32_maxsh_round_mask: 4638 case X86::BI__builtin_ia32_minsd_round_mask: 4639 case X86::BI__builtin_ia32_minss_round_mask: 4640 case X86::BI__builtin_ia32_minsh_round_mask: 4641 case X86::BI__builtin_ia32_rcp28sd_round_mask: 4642 case X86::BI__builtin_ia32_rcp28ss_round_mask: 4643 case X86::BI__builtin_ia32_reducepd512_mask: 4644 case X86::BI__builtin_ia32_reduceps512_mask: 4645 case X86::BI__builtin_ia32_reduceph512_mask: 4646 case X86::BI__builtin_ia32_rndscalepd_mask: 4647 case X86::BI__builtin_ia32_rndscaleps_mask: 4648 case X86::BI__builtin_ia32_rndscaleph_mask: 4649 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 4650 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 4651 ArgNum = 4; 4652 break; 4653 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4654 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4655 case X86::BI__builtin_ia32_fixupimmps512_mask: 4656 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4657 case X86::BI__builtin_ia32_fixupimmsd_mask: 4658 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4659 case X86::BI__builtin_ia32_fixupimmss_mask: 4660 case X86::BI__builtin_ia32_fixupimmss_maskz: 4661 case X86::BI__builtin_ia32_getmantsd_round_mask: 4662 case X86::BI__builtin_ia32_getmantss_round_mask: 4663 case X86::BI__builtin_ia32_getmantsh_round_mask: 4664 case X86::BI__builtin_ia32_rangepd512_mask: 4665 case X86::BI__builtin_ia32_rangeps512_mask: 4666 case X86::BI__builtin_ia32_rangesd128_round_mask: 4667 case X86::BI__builtin_ia32_rangess128_round_mask: 4668 case X86::BI__builtin_ia32_reducesd_mask: 4669 case X86::BI__builtin_ia32_reducess_mask: 4670 case X86::BI__builtin_ia32_reducesh_mask: 4671 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4672 case X86::BI__builtin_ia32_rndscaless_round_mask: 4673 case X86::BI__builtin_ia32_rndscalesh_round_mask: 4674 ArgNum = 5; 4675 break; 4676 case X86::BI__builtin_ia32_vcvtsd2si64: 4677 case X86::BI__builtin_ia32_vcvtsd2si32: 4678 case X86::BI__builtin_ia32_vcvtsd2usi32: 4679 case X86::BI__builtin_ia32_vcvtsd2usi64: 4680 case X86::BI__builtin_ia32_vcvtss2si32: 4681 case X86::BI__builtin_ia32_vcvtss2si64: 4682 case X86::BI__builtin_ia32_vcvtss2usi32: 4683 case X86::BI__builtin_ia32_vcvtss2usi64: 4684 case X86::BI__builtin_ia32_vcvtsh2si32: 4685 case X86::BI__builtin_ia32_vcvtsh2si64: 4686 case X86::BI__builtin_ia32_vcvtsh2usi32: 4687 case X86::BI__builtin_ia32_vcvtsh2usi64: 4688 case X86::BI__builtin_ia32_sqrtpd512: 4689 case X86::BI__builtin_ia32_sqrtps512: 4690 case 
X86::BI__builtin_ia32_sqrtph512: 4691 ArgNum = 1; 4692 HasRC = true; 4693 break; 4694 case X86::BI__builtin_ia32_addph512: 4695 case X86::BI__builtin_ia32_divph512: 4696 case X86::BI__builtin_ia32_mulph512: 4697 case X86::BI__builtin_ia32_subph512: 4698 case X86::BI__builtin_ia32_addpd512: 4699 case X86::BI__builtin_ia32_addps512: 4700 case X86::BI__builtin_ia32_divpd512: 4701 case X86::BI__builtin_ia32_divps512: 4702 case X86::BI__builtin_ia32_mulpd512: 4703 case X86::BI__builtin_ia32_mulps512: 4704 case X86::BI__builtin_ia32_subpd512: 4705 case X86::BI__builtin_ia32_subps512: 4706 case X86::BI__builtin_ia32_cvtsi2sd64: 4707 case X86::BI__builtin_ia32_cvtsi2ss32: 4708 case X86::BI__builtin_ia32_cvtsi2ss64: 4709 case X86::BI__builtin_ia32_cvtusi2sd64: 4710 case X86::BI__builtin_ia32_cvtusi2ss32: 4711 case X86::BI__builtin_ia32_cvtusi2ss64: 4712 case X86::BI__builtin_ia32_vcvtusi2sh: 4713 case X86::BI__builtin_ia32_vcvtusi642sh: 4714 case X86::BI__builtin_ia32_vcvtsi2sh: 4715 case X86::BI__builtin_ia32_vcvtsi642sh: 4716 ArgNum = 2; 4717 HasRC = true; 4718 break; 4719 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 4720 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 4721 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 4722 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 4723 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 4724 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 4725 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 4726 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4727 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4728 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4729 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4730 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4731 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4732 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4733 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4734 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4735 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4736 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 4737 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 4738 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 4739 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 4740 case X86::BI__builtin_ia32_vcvtph2w512_mask: 4741 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 4742 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 4743 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 4744 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 4745 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 4746 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 4747 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 4748 ArgNum = 3; 4749 HasRC = true; 4750 break; 4751 case X86::BI__builtin_ia32_addsh_round_mask: 4752 case X86::BI__builtin_ia32_addss_round_mask: 4753 case X86::BI__builtin_ia32_addsd_round_mask: 4754 case X86::BI__builtin_ia32_divsh_round_mask: 4755 case X86::BI__builtin_ia32_divss_round_mask: 4756 case X86::BI__builtin_ia32_divsd_round_mask: 4757 case X86::BI__builtin_ia32_mulsh_round_mask: 4758 case X86::BI__builtin_ia32_mulss_round_mask: 4759 case X86::BI__builtin_ia32_mulsd_round_mask: 4760 case X86::BI__builtin_ia32_subsh_round_mask: 4761 case X86::BI__builtin_ia32_subss_round_mask: 4762 case X86::BI__builtin_ia32_subsd_round_mask: 4763 case X86::BI__builtin_ia32_scalefph512_mask: 4764 case X86::BI__builtin_ia32_scalefpd512_mask: 4765 case X86::BI__builtin_ia32_scalefps512_mask: 4766 case X86::BI__builtin_ia32_scalefsd_round_mask: 4767 case X86::BI__builtin_ia32_scalefss_round_mask: 4768 case X86::BI__builtin_ia32_scalefsh_round_mask: 4769 case 
X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4770 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 4771 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 4772 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4773 case X86::BI__builtin_ia32_sqrtss_round_mask: 4774 case X86::BI__builtin_ia32_sqrtsh_round_mask: 4775 case X86::BI__builtin_ia32_vfmaddsd3_mask: 4776 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 4777 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 4778 case X86::BI__builtin_ia32_vfmaddss3_mask: 4779 case X86::BI__builtin_ia32_vfmaddss3_maskz: 4780 case X86::BI__builtin_ia32_vfmaddss3_mask3: 4781 case X86::BI__builtin_ia32_vfmaddsh3_mask: 4782 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 4783 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 4784 case X86::BI__builtin_ia32_vfmaddpd512_mask: 4785 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 4786 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 4787 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 4788 case X86::BI__builtin_ia32_vfmaddps512_mask: 4789 case X86::BI__builtin_ia32_vfmaddps512_maskz: 4790 case X86::BI__builtin_ia32_vfmaddps512_mask3: 4791 case X86::BI__builtin_ia32_vfmsubps512_mask3: 4792 case X86::BI__builtin_ia32_vfmaddph512_mask: 4793 case X86::BI__builtin_ia32_vfmaddph512_maskz: 4794 case X86::BI__builtin_ia32_vfmaddph512_mask3: 4795 case X86::BI__builtin_ia32_vfmsubph512_mask3: 4796 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 4797 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 4798 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 4799 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 4800 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 4801 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 4802 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 4803 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 4804 case X86::BI__builtin_ia32_vfmaddsubph512_mask: 4805 case X86::BI__builtin_ia32_vfmaddsubph512_maskz: 4806 case X86::BI__builtin_ia32_vfmaddsubph512_mask3: 4807 case X86::BI__builtin_ia32_vfmsubaddph512_mask3: 4808 case X86::BI__builtin_ia32_vfmaddcsh_mask: 4809 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: 4810 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: 4811 case X86::BI__builtin_ia32_vfmaddcph512_mask: 4812 case X86::BI__builtin_ia32_vfmaddcph512_maskz: 4813 case X86::BI__builtin_ia32_vfmaddcph512_mask3: 4814 case X86::BI__builtin_ia32_vfcmaddcsh_mask: 4815 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask: 4816 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3: 4817 case X86::BI__builtin_ia32_vfcmaddcph512_mask: 4818 case X86::BI__builtin_ia32_vfcmaddcph512_maskz: 4819 case X86::BI__builtin_ia32_vfcmaddcph512_mask3: 4820 case X86::BI__builtin_ia32_vfmulcsh_mask: 4821 case X86::BI__builtin_ia32_vfmulcph512_mask: 4822 case X86::BI__builtin_ia32_vfcmulcsh_mask: 4823 case X86::BI__builtin_ia32_vfcmulcph512_mask: 4824 ArgNum = 4; 4825 HasRC = true; 4826 break; 4827 } 4828 4829 llvm::APSInt Result; 4830 4831 // We can't check the value of a dependent argument. 4832 Expr *Arg = TheCall->getArg(ArgNum); 4833 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4834 return false; 4835 4836 // Check constant-ness first. 4837 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4838 return true; 4839 4840 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 4841 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 4842 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 4843 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 
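  // For reference (these values mirror the <immintrin.h> rounding macros and
  // are stated here only as an aside): _MM_FROUND_CUR_DIRECTION == 4,
  // _MM_FROUND_NO_EXC == 8, and the explicit rounding modes
  // TO_NEAREST_INT/TO_NEG_INF/TO_POS_INF/TO_ZERO are 0/1/2/3. So a
  // rounding-control intrinsic accepts 8-11, e.g. (illustrative only)
  //   _mm512_sqrt_round_pd(A, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // == 9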
4844 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 4845 Result == 8/*ROUND_NO_EXC*/ || 4846 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 4847 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 4848 return false; 4849 4850 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 4851 << Arg->getSourceRange(); 4852 } 4853 4854 // Check if the gather/scatter scale is legal. 4855 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 4856 CallExpr *TheCall) { 4857 unsigned ArgNum = 0; 4858 switch (BuiltinID) { 4859 default: 4860 return false; 4861 case X86::BI__builtin_ia32_gatherpfdpd: 4862 case X86::BI__builtin_ia32_gatherpfdps: 4863 case X86::BI__builtin_ia32_gatherpfqpd: 4864 case X86::BI__builtin_ia32_gatherpfqps: 4865 case X86::BI__builtin_ia32_scatterpfdpd: 4866 case X86::BI__builtin_ia32_scatterpfdps: 4867 case X86::BI__builtin_ia32_scatterpfqpd: 4868 case X86::BI__builtin_ia32_scatterpfqps: 4869 ArgNum = 3; 4870 break; 4871 case X86::BI__builtin_ia32_gatherd_pd: 4872 case X86::BI__builtin_ia32_gatherd_pd256: 4873 case X86::BI__builtin_ia32_gatherq_pd: 4874 case X86::BI__builtin_ia32_gatherq_pd256: 4875 case X86::BI__builtin_ia32_gatherd_ps: 4876 case X86::BI__builtin_ia32_gatherd_ps256: 4877 case X86::BI__builtin_ia32_gatherq_ps: 4878 case X86::BI__builtin_ia32_gatherq_ps256: 4879 case X86::BI__builtin_ia32_gatherd_q: 4880 case X86::BI__builtin_ia32_gatherd_q256: 4881 case X86::BI__builtin_ia32_gatherq_q: 4882 case X86::BI__builtin_ia32_gatherq_q256: 4883 case X86::BI__builtin_ia32_gatherd_d: 4884 case X86::BI__builtin_ia32_gatherd_d256: 4885 case X86::BI__builtin_ia32_gatherq_d: 4886 case X86::BI__builtin_ia32_gatherq_d256: 4887 case X86::BI__builtin_ia32_gather3div2df: 4888 case X86::BI__builtin_ia32_gather3div2di: 4889 case X86::BI__builtin_ia32_gather3div4df: 4890 case X86::BI__builtin_ia32_gather3div4di: 4891 case X86::BI__builtin_ia32_gather3div4sf: 4892 case X86::BI__builtin_ia32_gather3div4si: 4893 case X86::BI__builtin_ia32_gather3div8sf: 4894 case X86::BI__builtin_ia32_gather3div8si: 4895 case X86::BI__builtin_ia32_gather3siv2df: 4896 case X86::BI__builtin_ia32_gather3siv2di: 4897 case X86::BI__builtin_ia32_gather3siv4df: 4898 case X86::BI__builtin_ia32_gather3siv4di: 4899 case X86::BI__builtin_ia32_gather3siv4sf: 4900 case X86::BI__builtin_ia32_gather3siv4si: 4901 case X86::BI__builtin_ia32_gather3siv8sf: 4902 case X86::BI__builtin_ia32_gather3siv8si: 4903 case X86::BI__builtin_ia32_gathersiv8df: 4904 case X86::BI__builtin_ia32_gathersiv16sf: 4905 case X86::BI__builtin_ia32_gatherdiv8df: 4906 case X86::BI__builtin_ia32_gatherdiv16sf: 4907 case X86::BI__builtin_ia32_gathersiv8di: 4908 case X86::BI__builtin_ia32_gathersiv16si: 4909 case X86::BI__builtin_ia32_gatherdiv8di: 4910 case X86::BI__builtin_ia32_gatherdiv16si: 4911 case X86::BI__builtin_ia32_scatterdiv2df: 4912 case X86::BI__builtin_ia32_scatterdiv2di: 4913 case X86::BI__builtin_ia32_scatterdiv4df: 4914 case X86::BI__builtin_ia32_scatterdiv4di: 4915 case X86::BI__builtin_ia32_scatterdiv4sf: 4916 case X86::BI__builtin_ia32_scatterdiv4si: 4917 case X86::BI__builtin_ia32_scatterdiv8sf: 4918 case X86::BI__builtin_ia32_scatterdiv8si: 4919 case X86::BI__builtin_ia32_scattersiv2df: 4920 case X86::BI__builtin_ia32_scattersiv2di: 4921 case X86::BI__builtin_ia32_scattersiv4df: 4922 case X86::BI__builtin_ia32_scattersiv4di: 4923 case X86::BI__builtin_ia32_scattersiv4sf: 4924 case X86::BI__builtin_ia32_scattersiv4si: 4925 case X86::BI__builtin_ia32_scattersiv8sf: 4926 
case X86::BI__builtin_ia32_scattersiv8si: 4927 case X86::BI__builtin_ia32_scattersiv8df: 4928 case X86::BI__builtin_ia32_scattersiv16sf: 4929 case X86::BI__builtin_ia32_scatterdiv8df: 4930 case X86::BI__builtin_ia32_scatterdiv16sf: 4931 case X86::BI__builtin_ia32_scattersiv8di: 4932 case X86::BI__builtin_ia32_scattersiv16si: 4933 case X86::BI__builtin_ia32_scatterdiv8di: 4934 case X86::BI__builtin_ia32_scatterdiv16si: 4935 ArgNum = 4; 4936 break; 4937 } 4938 4939 llvm::APSInt Result; 4940 4941 // We can't check the value of a dependent argument. 4942 Expr *Arg = TheCall->getArg(ArgNum); 4943 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4944 return false; 4945 4946 // Check constant-ness first. 4947 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4948 return true; 4949 4950 if (Result == 1 || Result == 2 || Result == 4 || Result == 8) 4951 return false; 4952 4953 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale) 4954 << Arg->getSourceRange(); 4955 } 4956 4957 enum { TileRegLow = 0, TileRegHigh = 7 }; 4958 4959 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, 4960 ArrayRef<int> ArgNums) { 4961 for (int ArgNum : ArgNums) { 4962 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh)) 4963 return true; 4964 } 4965 return false; 4966 } 4967 4968 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall, 4969 ArrayRef<int> ArgNums) { 4970 // The maximum number of tile registers is TileRegHigh + 1, so use one bit per 4971 // register in the bitset to track which registers have already been used. 4972 std::bitset<TileRegHigh + 1> ArgValues; 4973 for (int ArgNum : ArgNums) { 4974 Expr *Arg = TheCall->getArg(ArgNum); 4975 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4976 continue; 4977 4978 llvm::APSInt Result; 4979 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4980 return true; 4981 int ArgExtValue = Result.getExtValue(); 4982 assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) && 4983 "Incorrect tile register num."); 4984 if (ArgValues.test(ArgExtValue)) 4985 return Diag(TheCall->getBeginLoc(), 4986 diag::err_x86_builtin_tile_arg_duplicate) 4987 << TheCall->getArg(ArgNum)->getSourceRange(); 4988 ArgValues.set(ArgExtValue); 4989 } 4990 return false; 4991 } 4992 4993 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, 4994 ArrayRef<int> ArgNums) { 4995 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) || 4996 CheckX86BuiltinTileDuplicate(TheCall, ArgNums); 4997 } 4998 4999 bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) { 5000 switch (BuiltinID) { 5001 default: 5002 return false; 5003 case X86::BI__builtin_ia32_tileloadd64: 5004 case X86::BI__builtin_ia32_tileloaddt164: 5005 case X86::BI__builtin_ia32_tilestored64: 5006 case X86::BI__builtin_ia32_tilezero: 5007 return CheckX86BuiltinTileArgumentsRange(TheCall, 0); 5008 case X86::BI__builtin_ia32_tdpbssd: 5009 case X86::BI__builtin_ia32_tdpbsud: 5010 case X86::BI__builtin_ia32_tdpbusd: 5011 case X86::BI__builtin_ia32_tdpbuud: 5012 case X86::BI__builtin_ia32_tdpbf16ps: 5013 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2}); 5014 } 5015 } 5016 static bool isX86_32Builtin(unsigned BuiltinID) { 5017 // These builtins only work on x86-32 targets.
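  // For example, __builtin_ia32_readeflags_u32 reads the 32-bit EFLAGS
  // register and is only meaningful when targeting 32-bit x86; on an x86-64
  // target the call is diagnosed with err_32_bit_builtin_64_bit_tgt in
  // CheckX86BuiltinFunctionCall below.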
5018 switch (BuiltinID) { 5019 case X86::BI__builtin_ia32_readeflags_u32: 5020 case X86::BI__builtin_ia32_writeeflags_u32: 5021 return true; 5022 } 5023 5024 return false; 5025 } 5026 5027 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 5028 CallExpr *TheCall) { 5029 if (BuiltinID == X86::BI__builtin_cpu_supports) 5030 return SemaBuiltinCpuSupports(*this, TI, TheCall); 5031 5032 if (BuiltinID == X86::BI__builtin_cpu_is) 5033 return SemaBuiltinCpuIs(*this, TI, TheCall); 5034 5035 // Check for 32-bit only builtins on a 64-bit target. 5036 const llvm::Triple &TT = TI.getTriple(); 5037 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 5038 return Diag(TheCall->getCallee()->getBeginLoc(), 5039 diag::err_32_bit_builtin_64_bit_tgt); 5040 5041 // If the intrinsic has rounding or SAE make sure its valid. 5042 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 5043 return true; 5044 5045 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 5046 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 5047 return true; 5048 5049 // If the intrinsic has a tile arguments, make sure they are valid. 5050 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall)) 5051 return true; 5052 5053 // For intrinsics which take an immediate value as part of the instruction, 5054 // range check them here. 5055 int i = 0, l = 0, u = 0; 5056 switch (BuiltinID) { 5057 default: 5058 return false; 5059 case X86::BI__builtin_ia32_vec_ext_v2si: 5060 case X86::BI__builtin_ia32_vec_ext_v2di: 5061 case X86::BI__builtin_ia32_vextractf128_pd256: 5062 case X86::BI__builtin_ia32_vextractf128_ps256: 5063 case X86::BI__builtin_ia32_vextractf128_si256: 5064 case X86::BI__builtin_ia32_extract128i256: 5065 case X86::BI__builtin_ia32_extractf64x4_mask: 5066 case X86::BI__builtin_ia32_extracti64x4_mask: 5067 case X86::BI__builtin_ia32_extractf32x8_mask: 5068 case X86::BI__builtin_ia32_extracti32x8_mask: 5069 case X86::BI__builtin_ia32_extractf64x2_256_mask: 5070 case X86::BI__builtin_ia32_extracti64x2_256_mask: 5071 case X86::BI__builtin_ia32_extractf32x4_256_mask: 5072 case X86::BI__builtin_ia32_extracti32x4_256_mask: 5073 i = 1; l = 0; u = 1; 5074 break; 5075 case X86::BI__builtin_ia32_vec_set_v2di: 5076 case X86::BI__builtin_ia32_vinsertf128_pd256: 5077 case X86::BI__builtin_ia32_vinsertf128_ps256: 5078 case X86::BI__builtin_ia32_vinsertf128_si256: 5079 case X86::BI__builtin_ia32_insert128i256: 5080 case X86::BI__builtin_ia32_insertf32x8: 5081 case X86::BI__builtin_ia32_inserti32x8: 5082 case X86::BI__builtin_ia32_insertf64x4: 5083 case X86::BI__builtin_ia32_inserti64x4: 5084 case X86::BI__builtin_ia32_insertf64x2_256: 5085 case X86::BI__builtin_ia32_inserti64x2_256: 5086 case X86::BI__builtin_ia32_insertf32x4_256: 5087 case X86::BI__builtin_ia32_inserti32x4_256: 5088 i = 2; l = 0; u = 1; 5089 break; 5090 case X86::BI__builtin_ia32_vpermilpd: 5091 case X86::BI__builtin_ia32_vec_ext_v4hi: 5092 case X86::BI__builtin_ia32_vec_ext_v4si: 5093 case X86::BI__builtin_ia32_vec_ext_v4sf: 5094 case X86::BI__builtin_ia32_vec_ext_v4di: 5095 case X86::BI__builtin_ia32_extractf32x4_mask: 5096 case X86::BI__builtin_ia32_extracti32x4_mask: 5097 case X86::BI__builtin_ia32_extractf64x2_512_mask: 5098 case X86::BI__builtin_ia32_extracti64x2_512_mask: 5099 i = 1; l = 0; u = 3; 5100 break; 5101 case X86::BI_mm_prefetch: 5102 case X86::BI__builtin_ia32_vec_ext_v8hi: 5103 case X86::BI__builtin_ia32_vec_ext_v8si: 5104 i = 1; l = 0; u = 7; 5105 break; 5106 case 
X86::BI__builtin_ia32_sha1rnds4: 5107 case X86::BI__builtin_ia32_blendpd: 5108 case X86::BI__builtin_ia32_shufpd: 5109 case X86::BI__builtin_ia32_vec_set_v4hi: 5110 case X86::BI__builtin_ia32_vec_set_v4si: 5111 case X86::BI__builtin_ia32_vec_set_v4di: 5112 case X86::BI__builtin_ia32_shuf_f32x4_256: 5113 case X86::BI__builtin_ia32_shuf_f64x2_256: 5114 case X86::BI__builtin_ia32_shuf_i32x4_256: 5115 case X86::BI__builtin_ia32_shuf_i64x2_256: 5116 case X86::BI__builtin_ia32_insertf64x2_512: 5117 case X86::BI__builtin_ia32_inserti64x2_512: 5118 case X86::BI__builtin_ia32_insertf32x4: 5119 case X86::BI__builtin_ia32_inserti32x4: 5120 i = 2; l = 0; u = 3; 5121 break; 5122 case X86::BI__builtin_ia32_vpermil2pd: 5123 case X86::BI__builtin_ia32_vpermil2pd256: 5124 case X86::BI__builtin_ia32_vpermil2ps: 5125 case X86::BI__builtin_ia32_vpermil2ps256: 5126 i = 3; l = 0; u = 3; 5127 break; 5128 case X86::BI__builtin_ia32_cmpb128_mask: 5129 case X86::BI__builtin_ia32_cmpw128_mask: 5130 case X86::BI__builtin_ia32_cmpd128_mask: 5131 case X86::BI__builtin_ia32_cmpq128_mask: 5132 case X86::BI__builtin_ia32_cmpb256_mask: 5133 case X86::BI__builtin_ia32_cmpw256_mask: 5134 case X86::BI__builtin_ia32_cmpd256_mask: 5135 case X86::BI__builtin_ia32_cmpq256_mask: 5136 case X86::BI__builtin_ia32_cmpb512_mask: 5137 case X86::BI__builtin_ia32_cmpw512_mask: 5138 case X86::BI__builtin_ia32_cmpd512_mask: 5139 case X86::BI__builtin_ia32_cmpq512_mask: 5140 case X86::BI__builtin_ia32_ucmpb128_mask: 5141 case X86::BI__builtin_ia32_ucmpw128_mask: 5142 case X86::BI__builtin_ia32_ucmpd128_mask: 5143 case X86::BI__builtin_ia32_ucmpq128_mask: 5144 case X86::BI__builtin_ia32_ucmpb256_mask: 5145 case X86::BI__builtin_ia32_ucmpw256_mask: 5146 case X86::BI__builtin_ia32_ucmpd256_mask: 5147 case X86::BI__builtin_ia32_ucmpq256_mask: 5148 case X86::BI__builtin_ia32_ucmpb512_mask: 5149 case X86::BI__builtin_ia32_ucmpw512_mask: 5150 case X86::BI__builtin_ia32_ucmpd512_mask: 5151 case X86::BI__builtin_ia32_ucmpq512_mask: 5152 case X86::BI__builtin_ia32_vpcomub: 5153 case X86::BI__builtin_ia32_vpcomuw: 5154 case X86::BI__builtin_ia32_vpcomud: 5155 case X86::BI__builtin_ia32_vpcomuq: 5156 case X86::BI__builtin_ia32_vpcomb: 5157 case X86::BI__builtin_ia32_vpcomw: 5158 case X86::BI__builtin_ia32_vpcomd: 5159 case X86::BI__builtin_ia32_vpcomq: 5160 case X86::BI__builtin_ia32_vec_set_v8hi: 5161 case X86::BI__builtin_ia32_vec_set_v8si: 5162 i = 2; l = 0; u = 7; 5163 break; 5164 case X86::BI__builtin_ia32_vpermilpd256: 5165 case X86::BI__builtin_ia32_roundps: 5166 case X86::BI__builtin_ia32_roundpd: 5167 case X86::BI__builtin_ia32_roundps256: 5168 case X86::BI__builtin_ia32_roundpd256: 5169 case X86::BI__builtin_ia32_getmantpd128_mask: 5170 case X86::BI__builtin_ia32_getmantpd256_mask: 5171 case X86::BI__builtin_ia32_getmantps128_mask: 5172 case X86::BI__builtin_ia32_getmantps256_mask: 5173 case X86::BI__builtin_ia32_getmantpd512_mask: 5174 case X86::BI__builtin_ia32_getmantps512_mask: 5175 case X86::BI__builtin_ia32_getmantph128_mask: 5176 case X86::BI__builtin_ia32_getmantph256_mask: 5177 case X86::BI__builtin_ia32_getmantph512_mask: 5178 case X86::BI__builtin_ia32_vec_ext_v16qi: 5179 case X86::BI__builtin_ia32_vec_ext_v16hi: 5180 i = 1; l = 0; u = 15; 5181 break; 5182 case X86::BI__builtin_ia32_pblendd128: 5183 case X86::BI__builtin_ia32_blendps: 5184 case X86::BI__builtin_ia32_blendpd256: 5185 case X86::BI__builtin_ia32_shufpd256: 5186 case X86::BI__builtin_ia32_roundss: 5187 case X86::BI__builtin_ia32_roundsd: 5188 case 
X86::BI__builtin_ia32_rangepd128_mask: 5189 case X86::BI__builtin_ia32_rangepd256_mask: 5190 case X86::BI__builtin_ia32_rangepd512_mask: 5191 case X86::BI__builtin_ia32_rangeps128_mask: 5192 case X86::BI__builtin_ia32_rangeps256_mask: 5193 case X86::BI__builtin_ia32_rangeps512_mask: 5194 case X86::BI__builtin_ia32_getmantsd_round_mask: 5195 case X86::BI__builtin_ia32_getmantss_round_mask: 5196 case X86::BI__builtin_ia32_getmantsh_round_mask: 5197 case X86::BI__builtin_ia32_vec_set_v16qi: 5198 case X86::BI__builtin_ia32_vec_set_v16hi: 5199 i = 2; l = 0; u = 15; 5200 break; 5201 case X86::BI__builtin_ia32_vec_ext_v32qi: 5202 i = 1; l = 0; u = 31; 5203 break; 5204 case X86::BI__builtin_ia32_cmpps: 5205 case X86::BI__builtin_ia32_cmpss: 5206 case X86::BI__builtin_ia32_cmppd: 5207 case X86::BI__builtin_ia32_cmpsd: 5208 case X86::BI__builtin_ia32_cmpps256: 5209 case X86::BI__builtin_ia32_cmppd256: 5210 case X86::BI__builtin_ia32_cmpps128_mask: 5211 case X86::BI__builtin_ia32_cmppd128_mask: 5212 case X86::BI__builtin_ia32_cmpps256_mask: 5213 case X86::BI__builtin_ia32_cmppd256_mask: 5214 case X86::BI__builtin_ia32_cmpps512_mask: 5215 case X86::BI__builtin_ia32_cmppd512_mask: 5216 case X86::BI__builtin_ia32_cmpsd_mask: 5217 case X86::BI__builtin_ia32_cmpss_mask: 5218 case X86::BI__builtin_ia32_vec_set_v32qi: 5219 i = 2; l = 0; u = 31; 5220 break; 5221 case X86::BI__builtin_ia32_permdf256: 5222 case X86::BI__builtin_ia32_permdi256: 5223 case X86::BI__builtin_ia32_permdf512: 5224 case X86::BI__builtin_ia32_permdi512: 5225 case X86::BI__builtin_ia32_vpermilps: 5226 case X86::BI__builtin_ia32_vpermilps256: 5227 case X86::BI__builtin_ia32_vpermilpd512: 5228 case X86::BI__builtin_ia32_vpermilps512: 5229 case X86::BI__builtin_ia32_pshufd: 5230 case X86::BI__builtin_ia32_pshufd256: 5231 case X86::BI__builtin_ia32_pshufd512: 5232 case X86::BI__builtin_ia32_pshufhw: 5233 case X86::BI__builtin_ia32_pshufhw256: 5234 case X86::BI__builtin_ia32_pshufhw512: 5235 case X86::BI__builtin_ia32_pshuflw: 5236 case X86::BI__builtin_ia32_pshuflw256: 5237 case X86::BI__builtin_ia32_pshuflw512: 5238 case X86::BI__builtin_ia32_vcvtps2ph: 5239 case X86::BI__builtin_ia32_vcvtps2ph_mask: 5240 case X86::BI__builtin_ia32_vcvtps2ph256: 5241 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 5242 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 5243 case X86::BI__builtin_ia32_rndscaleps_128_mask: 5244 case X86::BI__builtin_ia32_rndscalepd_128_mask: 5245 case X86::BI__builtin_ia32_rndscaleps_256_mask: 5246 case X86::BI__builtin_ia32_rndscalepd_256_mask: 5247 case X86::BI__builtin_ia32_rndscaleps_mask: 5248 case X86::BI__builtin_ia32_rndscalepd_mask: 5249 case X86::BI__builtin_ia32_rndscaleph_mask: 5250 case X86::BI__builtin_ia32_reducepd128_mask: 5251 case X86::BI__builtin_ia32_reducepd256_mask: 5252 case X86::BI__builtin_ia32_reducepd512_mask: 5253 case X86::BI__builtin_ia32_reduceps128_mask: 5254 case X86::BI__builtin_ia32_reduceps256_mask: 5255 case X86::BI__builtin_ia32_reduceps512_mask: 5256 case X86::BI__builtin_ia32_reduceph128_mask: 5257 case X86::BI__builtin_ia32_reduceph256_mask: 5258 case X86::BI__builtin_ia32_reduceph512_mask: 5259 case X86::BI__builtin_ia32_prold512: 5260 case X86::BI__builtin_ia32_prolq512: 5261 case X86::BI__builtin_ia32_prold128: 5262 case X86::BI__builtin_ia32_prold256: 5263 case X86::BI__builtin_ia32_prolq128: 5264 case X86::BI__builtin_ia32_prolq256: 5265 case X86::BI__builtin_ia32_prord512: 5266 case X86::BI__builtin_ia32_prorq512: 5267 case X86::BI__builtin_ia32_prord128: 5268 case 
X86::BI__builtin_ia32_prord256: 5269 case X86::BI__builtin_ia32_prorq128: 5270 case X86::BI__builtin_ia32_prorq256: 5271 case X86::BI__builtin_ia32_fpclasspd128_mask: 5272 case X86::BI__builtin_ia32_fpclasspd256_mask: 5273 case X86::BI__builtin_ia32_fpclassps128_mask: 5274 case X86::BI__builtin_ia32_fpclassps256_mask: 5275 case X86::BI__builtin_ia32_fpclassps512_mask: 5276 case X86::BI__builtin_ia32_fpclasspd512_mask: 5277 case X86::BI__builtin_ia32_fpclassph128_mask: 5278 case X86::BI__builtin_ia32_fpclassph256_mask: 5279 case X86::BI__builtin_ia32_fpclassph512_mask: 5280 case X86::BI__builtin_ia32_fpclasssd_mask: 5281 case X86::BI__builtin_ia32_fpclassss_mask: 5282 case X86::BI__builtin_ia32_fpclasssh_mask: 5283 case X86::BI__builtin_ia32_pslldqi128_byteshift: 5284 case X86::BI__builtin_ia32_pslldqi256_byteshift: 5285 case X86::BI__builtin_ia32_pslldqi512_byteshift: 5286 case X86::BI__builtin_ia32_psrldqi128_byteshift: 5287 case X86::BI__builtin_ia32_psrldqi256_byteshift: 5288 case X86::BI__builtin_ia32_psrldqi512_byteshift: 5289 case X86::BI__builtin_ia32_kshiftliqi: 5290 case X86::BI__builtin_ia32_kshiftlihi: 5291 case X86::BI__builtin_ia32_kshiftlisi: 5292 case X86::BI__builtin_ia32_kshiftlidi: 5293 case X86::BI__builtin_ia32_kshiftriqi: 5294 case X86::BI__builtin_ia32_kshiftrihi: 5295 case X86::BI__builtin_ia32_kshiftrisi: 5296 case X86::BI__builtin_ia32_kshiftridi: 5297 i = 1; l = 0; u = 255; 5298 break; 5299 case X86::BI__builtin_ia32_vperm2f128_pd256: 5300 case X86::BI__builtin_ia32_vperm2f128_ps256: 5301 case X86::BI__builtin_ia32_vperm2f128_si256: 5302 case X86::BI__builtin_ia32_permti256: 5303 case X86::BI__builtin_ia32_pblendw128: 5304 case X86::BI__builtin_ia32_pblendw256: 5305 case X86::BI__builtin_ia32_blendps256: 5306 case X86::BI__builtin_ia32_pblendd256: 5307 case X86::BI__builtin_ia32_palignr128: 5308 case X86::BI__builtin_ia32_palignr256: 5309 case X86::BI__builtin_ia32_palignr512: 5310 case X86::BI__builtin_ia32_alignq512: 5311 case X86::BI__builtin_ia32_alignd512: 5312 case X86::BI__builtin_ia32_alignd128: 5313 case X86::BI__builtin_ia32_alignd256: 5314 case X86::BI__builtin_ia32_alignq128: 5315 case X86::BI__builtin_ia32_alignq256: 5316 case X86::BI__builtin_ia32_vcomisd: 5317 case X86::BI__builtin_ia32_vcomiss: 5318 case X86::BI__builtin_ia32_shuf_f32x4: 5319 case X86::BI__builtin_ia32_shuf_f64x2: 5320 case X86::BI__builtin_ia32_shuf_i32x4: 5321 case X86::BI__builtin_ia32_shuf_i64x2: 5322 case X86::BI__builtin_ia32_shufpd512: 5323 case X86::BI__builtin_ia32_shufps: 5324 case X86::BI__builtin_ia32_shufps256: 5325 case X86::BI__builtin_ia32_shufps512: 5326 case X86::BI__builtin_ia32_dbpsadbw128: 5327 case X86::BI__builtin_ia32_dbpsadbw256: 5328 case X86::BI__builtin_ia32_dbpsadbw512: 5329 case X86::BI__builtin_ia32_vpshldd128: 5330 case X86::BI__builtin_ia32_vpshldd256: 5331 case X86::BI__builtin_ia32_vpshldd512: 5332 case X86::BI__builtin_ia32_vpshldq128: 5333 case X86::BI__builtin_ia32_vpshldq256: 5334 case X86::BI__builtin_ia32_vpshldq512: 5335 case X86::BI__builtin_ia32_vpshldw128: 5336 case X86::BI__builtin_ia32_vpshldw256: 5337 case X86::BI__builtin_ia32_vpshldw512: 5338 case X86::BI__builtin_ia32_vpshrdd128: 5339 case X86::BI__builtin_ia32_vpshrdd256: 5340 case X86::BI__builtin_ia32_vpshrdd512: 5341 case X86::BI__builtin_ia32_vpshrdq128: 5342 case X86::BI__builtin_ia32_vpshrdq256: 5343 case X86::BI__builtin_ia32_vpshrdq512: 5344 case X86::BI__builtin_ia32_vpshrdw128: 5345 case X86::BI__builtin_ia32_vpshrdw256: 5346 case X86::BI__builtin_ia32_vpshrdw512: 5347 i 
= 2; l = 0; u = 255; 5348 break; 5349 case X86::BI__builtin_ia32_fixupimmpd512_mask: 5350 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 5351 case X86::BI__builtin_ia32_fixupimmps512_mask: 5352 case X86::BI__builtin_ia32_fixupimmps512_maskz: 5353 case X86::BI__builtin_ia32_fixupimmsd_mask: 5354 case X86::BI__builtin_ia32_fixupimmsd_maskz: 5355 case X86::BI__builtin_ia32_fixupimmss_mask: 5356 case X86::BI__builtin_ia32_fixupimmss_maskz: 5357 case X86::BI__builtin_ia32_fixupimmpd128_mask: 5358 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 5359 case X86::BI__builtin_ia32_fixupimmpd256_mask: 5360 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 5361 case X86::BI__builtin_ia32_fixupimmps128_mask: 5362 case X86::BI__builtin_ia32_fixupimmps128_maskz: 5363 case X86::BI__builtin_ia32_fixupimmps256_mask: 5364 case X86::BI__builtin_ia32_fixupimmps256_maskz: 5365 case X86::BI__builtin_ia32_pternlogd512_mask: 5366 case X86::BI__builtin_ia32_pternlogd512_maskz: 5367 case X86::BI__builtin_ia32_pternlogq512_mask: 5368 case X86::BI__builtin_ia32_pternlogq512_maskz: 5369 case X86::BI__builtin_ia32_pternlogd128_mask: 5370 case X86::BI__builtin_ia32_pternlogd128_maskz: 5371 case X86::BI__builtin_ia32_pternlogd256_mask: 5372 case X86::BI__builtin_ia32_pternlogd256_maskz: 5373 case X86::BI__builtin_ia32_pternlogq128_mask: 5374 case X86::BI__builtin_ia32_pternlogq128_maskz: 5375 case X86::BI__builtin_ia32_pternlogq256_mask: 5376 case X86::BI__builtin_ia32_pternlogq256_maskz: 5377 i = 3; l = 0; u = 255; 5378 break; 5379 case X86::BI__builtin_ia32_gatherpfdpd: 5380 case X86::BI__builtin_ia32_gatherpfdps: 5381 case X86::BI__builtin_ia32_gatherpfqpd: 5382 case X86::BI__builtin_ia32_gatherpfqps: 5383 case X86::BI__builtin_ia32_scatterpfdpd: 5384 case X86::BI__builtin_ia32_scatterpfdps: 5385 case X86::BI__builtin_ia32_scatterpfqpd: 5386 case X86::BI__builtin_ia32_scatterpfqps: 5387 i = 4; l = 2; u = 3; 5388 break; 5389 case X86::BI__builtin_ia32_reducesd_mask: 5390 case X86::BI__builtin_ia32_reducess_mask: 5391 case X86::BI__builtin_ia32_rndscalesd_round_mask: 5392 case X86::BI__builtin_ia32_rndscaless_round_mask: 5393 case X86::BI__builtin_ia32_rndscalesh_round_mask: 5394 case X86::BI__builtin_ia32_reducesh_mask: 5395 i = 4; l = 0; u = 255; 5396 break; 5397 } 5398 5399 // Note that we don't force a hard error on the range check here, allowing 5400 // template-generated or macro-generated dead code to potentially have out-of- 5401 // range values. These need to code generate, but don't need to necessarily 5402 // make any sense. We use a warning that defaults to an error. 5403 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false); 5404 } 5405 5406 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo 5407 /// parameter with the FormatAttr's correct format_idx and firstDataArg. 5408 /// Returns true when the format fits the function and the FormatStringInfo has 5409 /// been populated. 5410 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, 5411 bool IsVariadic, FormatStringInfo *FSI) { 5412 if (Format->getFirstArg() == 0) 5413 FSI->ArgPassingKind = FAPK_VAList; 5414 else if (IsVariadic) 5415 FSI->ArgPassingKind = FAPK_Variadic; 5416 else 5417 FSI->ArgPassingKind = FAPK_Fixed; 5418 FSI->FormatIdx = Format->getFormatIdx() - 1; 5419 FSI->FirstDataArg = 5420 FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1; 5421 5422 // The way the format attribute works in GCC, the implicit this argument 5423 // of member functions is counted. 
However, it doesn't appear in our own 5424 // lists, so decrement format_idx in that case. 5425 if (IsCXXMember) { 5426 if(FSI->FormatIdx == 0) 5427 return false; 5428 --FSI->FormatIdx; 5429 if (FSI->FirstDataArg != 0) 5430 --FSI->FirstDataArg; 5431 } 5432 return true; 5433 } 5434 5435 /// Checks if a the given expression evaluates to null. 5436 /// 5437 /// Returns true if the value evaluates to null. 5438 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { 5439 // If the expression has non-null type, it doesn't evaluate to null. 5440 if (auto nullability 5441 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { 5442 if (*nullability == NullabilityKind::NonNull) 5443 return false; 5444 } 5445 5446 // As a special case, transparent unions initialized with zero are 5447 // considered null for the purposes of the nonnull attribute. 5448 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 5449 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 5450 if (const CompoundLiteralExpr *CLE = 5451 dyn_cast<CompoundLiteralExpr>(Expr)) 5452 if (const InitListExpr *ILE = 5453 dyn_cast<InitListExpr>(CLE->getInitializer())) 5454 Expr = ILE->getInit(0); 5455 } 5456 5457 bool Result; 5458 return (!Expr->isValueDependent() && 5459 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 5460 !Result); 5461 } 5462 5463 static void CheckNonNullArgument(Sema &S, 5464 const Expr *ArgExpr, 5465 SourceLocation CallSiteLoc) { 5466 if (CheckNonNullExpr(S, ArgExpr)) 5467 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 5468 S.PDiag(diag::warn_null_arg) 5469 << ArgExpr->getSourceRange()); 5470 } 5471 5472 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 5473 FormatStringInfo FSI; 5474 if ((GetFormatStringType(Format) == FST_NSString) && 5475 getFormatStringInfo(Format, false, true, &FSI)) { 5476 Idx = FSI.FormatIdx; 5477 return true; 5478 } 5479 return false; 5480 } 5481 5482 /// Diagnose use of %s directive in an NSString which is being passed 5483 /// as formatting string to formatting method. 5484 static void 5485 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 5486 const NamedDecl *FDecl, 5487 Expr **Args, 5488 unsigned NumArgs) { 5489 unsigned Idx = 0; 5490 bool Format = false; 5491 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 5492 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 5493 Idx = 2; 5494 Format = true; 5495 } 5496 else 5497 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5498 if (S.GetFormatNSStringIdx(I, Idx)) { 5499 Format = true; 5500 break; 5501 } 5502 } 5503 if (!Format || NumArgs <= Idx) 5504 return; 5505 const Expr *FormatExpr = Args[Idx]; 5506 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 5507 FormatExpr = CSCE->getSubExpr(); 5508 const StringLiteral *FormatString; 5509 if (const ObjCStringLiteral *OSL = 5510 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 5511 FormatString = OSL->getString(); 5512 else 5513 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 5514 if (!FormatString) 5515 return; 5516 if (S.FormatStringHasSArg(FormatString)) { 5517 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 5518 << "%s" << 1 << 1; 5519 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 5520 << FDecl->getDeclName(); 5521 } 5522 } 5523 5524 /// Determine whether the given type has a non-null nullability annotation. 
5525 static bool isNonNullType(ASTContext &ctx, QualType type) { 5526 if (auto nullability = type->getNullability(ctx)) 5527 return *nullability == NullabilityKind::NonNull; 5528 5529 return false; 5530 } 5531 5532 static void CheckNonNullArguments(Sema &S, 5533 const NamedDecl *FDecl, 5534 const FunctionProtoType *Proto, 5535 ArrayRef<const Expr *> Args, 5536 SourceLocation CallSiteLoc) { 5537 assert((FDecl || Proto) && "Need a function declaration or prototype"); 5538 5539 // Already checked by by constant evaluator. 5540 if (S.isConstantEvaluated()) 5541 return; 5542 // Check the attributes attached to the method/function itself. 5543 llvm::SmallBitVector NonNullArgs; 5544 if (FDecl) { 5545 // Handle the nonnull attribute on the function/method declaration itself. 5546 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 5547 if (!NonNull->args_size()) { 5548 // Easy case: all pointer arguments are nonnull. 5549 for (const auto *Arg : Args) 5550 if (S.isValidPointerAttrType(Arg->getType())) 5551 CheckNonNullArgument(S, Arg, CallSiteLoc); 5552 return; 5553 } 5554 5555 for (const ParamIdx &Idx : NonNull->args()) { 5556 unsigned IdxAST = Idx.getASTIndex(); 5557 if (IdxAST >= Args.size()) 5558 continue; 5559 if (NonNullArgs.empty()) 5560 NonNullArgs.resize(Args.size()); 5561 NonNullArgs.set(IdxAST); 5562 } 5563 } 5564 } 5565 5566 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 5567 // Handle the nonnull attribute on the parameters of the 5568 // function/method. 5569 ArrayRef<ParmVarDecl*> parms; 5570 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 5571 parms = FD->parameters(); 5572 else 5573 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 5574 5575 unsigned ParamIndex = 0; 5576 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 5577 I != E; ++I, ++ParamIndex) { 5578 const ParmVarDecl *PVD = *I; 5579 if (PVD->hasAttr<NonNullAttr>() || 5580 isNonNullType(S.Context, PVD->getType())) { 5581 if (NonNullArgs.empty()) 5582 NonNullArgs.resize(Args.size()); 5583 5584 NonNullArgs.set(ParamIndex); 5585 } 5586 } 5587 } else { 5588 // If we have a non-function, non-method declaration but no 5589 // function prototype, try to dig out the function prototype. 5590 if (!Proto) { 5591 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 5592 QualType type = VD->getType().getNonReferenceType(); 5593 if (auto pointerType = type->getAs<PointerType>()) 5594 type = pointerType->getPointeeType(); 5595 else if (auto blockType = type->getAs<BlockPointerType>()) 5596 type = blockType->getPointeeType(); 5597 // FIXME: data member pointers? 5598 5599 // Dig out the function prototype, if there is one. 5600 Proto = type->getAs<FunctionProtoType>(); 5601 } 5602 } 5603 5604 // Fill in non-null argument information from the nullability 5605 // information on the parameter types (if we have them). 5606 if (Proto) { 5607 unsigned Index = 0; 5608 for (auto paramType : Proto->getParamTypes()) { 5609 if (isNonNullType(S.Context, paramType)) { 5610 if (NonNullArgs.empty()) 5611 NonNullArgs.resize(Args.size()); 5612 5613 NonNullArgs.set(Index); 5614 } 5615 5616 ++Index; 5617 } 5618 } 5619 } 5620 5621 // Check for non-null arguments. 5622 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 5623 ArgIndex != ArgIndexEnd; ++ArgIndex) { 5624 if (NonNullArgs[ArgIndex]) 5625 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 5626 } 5627 } 5628 5629 // 16 byte ByVal alignment not due to a vector member is not honoured by XL 5630 // on AIX. 
To be safe, emit a warning here that users are generating binary-incompatible 5631 // code. 5632 // Here we try to get information about the alignment of the struct member 5633 // from the struct passed to the caller function. We only warn when the struct 5634 // is passed byval, hence the series of checks and early returns if we are not 5635 // passing a struct byval. 5636 void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) { 5637 const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens()); 5638 if (!ICE) 5639 return; 5640 5641 const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr()); 5642 if (!DR) 5643 return; 5644 5645 const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl()); 5646 if (!PD || !PD->getType()->isRecordType()) 5647 return; 5648 5649 QualType ArgType = Arg->getType(); 5650 for (const FieldDecl *FD : 5651 ArgType->castAs<RecordType>()->getDecl()->fields()) { 5652 if (const auto *AA = FD->getAttr<AlignedAttr>()) { 5653 CharUnits Alignment = 5654 Context.toCharUnitsFromBits(AA->getAlignment(Context)); 5655 if (Alignment.getQuantity() == 16) { 5656 Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD; 5657 Diag(Loc, diag::note_misaligned_member_used_here) << PD; 5658 } 5659 } 5660 } 5661 } 5662 5663 /// Warn if a pointer or reference argument passed to a function points to an 5664 /// object that is less aligned than the parameter. This can happen when 5665 /// creating a typedef with a lower alignment than the original type and then 5666 /// calling functions defined in terms of the original type. 5667 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 5668 StringRef ParamName, QualType ArgTy, 5669 QualType ParamTy) { 5670 5671 // If a function accepts a pointer or reference type 5672 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 5673 return; 5674 5675 // If the parameter is a pointer type, get the pointee type for the 5676 // argument too. If the parameter is a reference type, don't try to get 5677 // the pointee type for the argument. 5678 if (ParamTy->isPointerType()) 5679 ArgTy = ArgTy->getPointeeType(); 5680 5681 // Remove reference or pointer 5682 ParamTy = ParamTy->getPointeeType(); 5683 5684 // Find expected alignment, and the actual alignment of the passed object. 5685 // getTypeAlignInChars requires complete types 5686 if (ArgTy.isNull() || ParamTy->isIncompleteType() || 5687 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() || 5688 ArgTy->isUndeducedType()) 5689 return; 5690 5691 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 5692 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 5693 5694 // If the argument is less aligned than the parameter, there is a 5695 // potential alignment issue. 5696 if (ArgAlign < ParamAlign) 5697 Diag(Loc, diag::warn_param_mismatched_alignment) 5698 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 5699 << ParamName << (FDecl != nullptr) << FDecl; 5700 } 5701 5702 /// Handles the checks for format strings, non-POD arguments to vararg 5703 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 5704 /// attributes. 5705 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 5706 const Expr *ThisArg, ArrayRef<const Expr *> Args, 5707 bool IsMemberFunction, SourceLocation Loc, 5708 SourceRange Range, VariadicCallType CallType) { 5709 // FIXME: We should check as much as we can in the template definition. 5710 if (CurContext->isDependentContext()) 5711 return; 5712 5713 // Printf and scanf checking.
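  // For example (illustrative only), a call such as printf("%s\n", 42) is
  // diagnosed by CheckFormatArguments below; CheckedVarArgs records which
  // variadic arguments a format string already covered so they are not
  // re-checked by the generic variadic-argument check further down.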
5714 llvm::SmallBitVector CheckedVarArgs; 5715 if (FDecl) { 5716 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5717 // Only create vector if there are format attributes. 5718 CheckedVarArgs.resize(Args.size()); 5719 5720 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 5721 CheckedVarArgs); 5722 } 5723 } 5724 5725 // Refuse POD arguments that weren't caught by the format string 5726 // checks above. 5727 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 5728 if (CallType != VariadicDoesNotApply && 5729 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 5730 unsigned NumParams = Proto ? Proto->getNumParams() 5731 : FDecl && isa<FunctionDecl>(FDecl) 5732 ? cast<FunctionDecl>(FDecl)->getNumParams() 5733 : FDecl && isa<ObjCMethodDecl>(FDecl) 5734 ? cast<ObjCMethodDecl>(FDecl)->param_size() 5735 : 0; 5736 5737 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 5738 // Args[ArgIdx] can be null in malformed code. 5739 if (const Expr *Arg = Args[ArgIdx]) { 5740 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 5741 checkVariadicArgument(Arg, CallType); 5742 } 5743 } 5744 } 5745 5746 if (FDecl || Proto) { 5747 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 5748 5749 // Type safety checking. 5750 if (FDecl) { 5751 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 5752 CheckArgumentWithTypeTag(I, Args, Loc); 5753 } 5754 } 5755 5756 // Check that passed arguments match the alignment of original arguments. 5757 // Try to get the missing prototype from the declaration. 5758 if (!Proto && FDecl) { 5759 const auto *FT = FDecl->getFunctionType(); 5760 if (isa_and_nonnull<FunctionProtoType>(FT)) 5761 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 5762 } 5763 if (Proto) { 5764 // For variadic functions, we may have more args than parameters. 5765 // For some K&R functions, we may have less args than parameters. 5766 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 5767 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 5768 // Args[ArgIdx] can be null in malformed code. 5769 if (const Expr *Arg = Args[ArgIdx]) { 5770 if (Arg->containsErrors()) 5771 continue; 5772 5773 if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg && 5774 FDecl->hasLinkage() && 5775 FDecl->getFormalLinkage() != InternalLinkage && 5776 CallType == VariadicDoesNotApply) 5777 checkAIXMemberAlignment((Arg->getExprLoc()), Arg); 5778 5779 QualType ParamTy = Proto->getParamType(ArgIdx); 5780 QualType ArgTy = Arg->getType(); 5781 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 5782 ArgTy, ParamTy); 5783 } 5784 } 5785 } 5786 5787 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 5788 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 5789 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 5790 if (!Arg->isValueDependent()) { 5791 Expr::EvalResult Align; 5792 if (Arg->EvaluateAsInt(Align, Context)) { 5793 const llvm::APSInt &I = Align.Val.getInt(); 5794 if (!I.isPowerOf2()) 5795 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 5796 << Arg->getSourceRange(); 5797 5798 if (I > Sema::MaximumAlignment) 5799 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 5800 << Arg->getSourceRange() << Sema::MaximumAlignment; 5801 } 5802 } 5803 } 5804 5805 if (FD) 5806 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 5807 } 5808 5809 /// CheckConstructorCall - Check a constructor call for correctness and safety 5810 /// properties not enforced by the C type system. 
5811 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 5812 ArrayRef<const Expr *> Args, 5813 const FunctionProtoType *Proto, 5814 SourceLocation Loc) { 5815 VariadicCallType CallType = 5816 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 5817 5818 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 5819 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 5820 Context.getPointerType(Ctor->getThisObjectType())); 5821 5822 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 5823 Loc, SourceRange(), CallType); 5824 } 5825 5826 /// CheckFunctionCall - Check a direct function call for various correctness 5827 /// and safety properties not strictly enforced by the C type system. 5828 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 5829 const FunctionProtoType *Proto) { 5830 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 5831 isa<CXXMethodDecl>(FDecl); 5832 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 5833 IsMemberOperatorCall; 5834 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 5835 TheCall->getCallee()); 5836 Expr** Args = TheCall->getArgs(); 5837 unsigned NumArgs = TheCall->getNumArgs(); 5838 5839 Expr *ImplicitThis = nullptr; 5840 if (IsMemberOperatorCall) { 5841 // If this is a call to a member operator, hide the first argument 5842 // from checkCall. 5843 // FIXME: Our choice of AST representation here is less than ideal. 5844 ImplicitThis = Args[0]; 5845 ++Args; 5846 --NumArgs; 5847 } else if (IsMemberFunction) 5848 ImplicitThis = 5849 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 5850 5851 if (ImplicitThis) { 5852 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 5853 // used. 5854 QualType ThisType = ImplicitThis->getType(); 5855 if (!ThisType->isPointerType()) { 5856 assert(!ThisType->isReferenceType()); 5857 ThisType = Context.getPointerType(ThisType); 5858 } 5859 5860 QualType ThisTypeFromDecl = 5861 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 5862 5863 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 5864 ThisTypeFromDecl); 5865 } 5866 5867 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 5868 IsMemberFunction, TheCall->getRParenLoc(), 5869 TheCall->getCallee()->getSourceRange(), CallType); 5870 5871 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 5872 // None of the checks below are needed for functions that don't have 5873 // simple names (e.g., C++ conversion functions). 5874 if (!FnInfo) 5875 return false; 5876 5877 // Enforce TCB except for builtin calls, which are always allowed. 5878 if (FDecl->getBuiltinID() == 0) 5879 CheckTCBEnforcement(TheCall->getExprLoc(), FDecl); 5880 5881 CheckAbsoluteValueFunction(TheCall, FDecl); 5882 CheckMaxUnsignedZero(TheCall, FDecl); 5883 5884 if (getLangOpts().ObjC) 5885 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 5886 5887 unsigned CMId = FDecl->getMemoryFunctionKind(); 5888 5889 // Handle memory setting and copying functions. 
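  // For example (illustrative only), strncat(dst, src, sizeof(dst)) is a
  // common misuse flagged by CheckStrncatArguments, and memset(p, 0, sizeof(p))
  // with a pointer 'p' is flagged by CheckMemaccessArguments below.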
5890 switch (CMId) { 5891 case 0: 5892 return false; 5893 case Builtin::BIstrlcpy: // fallthrough 5894 case Builtin::BIstrlcat: 5895 CheckStrlcpycatArguments(TheCall, FnInfo); 5896 break; 5897 case Builtin::BIstrncat: 5898 CheckStrncatArguments(TheCall, FnInfo); 5899 break; 5900 case Builtin::BIfree: 5901 CheckFreeArguments(TheCall); 5902 break; 5903 default: 5904 CheckMemaccessArguments(TheCall, CMId, FnInfo); 5905 } 5906 5907 return false; 5908 } 5909 5910 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 5911 ArrayRef<const Expr *> Args) { 5912 VariadicCallType CallType = 5913 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 5914 5915 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 5916 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 5917 CallType); 5918 5919 CheckTCBEnforcement(lbrac, Method); 5920 5921 return false; 5922 } 5923 5924 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 5925 const FunctionProtoType *Proto) { 5926 QualType Ty; 5927 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 5928 Ty = V->getType().getNonReferenceType(); 5929 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 5930 Ty = F->getType().getNonReferenceType(); 5931 else 5932 return false; 5933 5934 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 5935 !Ty->isFunctionProtoType()) 5936 return false; 5937 5938 VariadicCallType CallType; 5939 if (!Proto || !Proto->isVariadic()) { 5940 CallType = VariadicDoesNotApply; 5941 } else if (Ty->isBlockPointerType()) { 5942 CallType = VariadicBlock; 5943 } else { // Ty->isFunctionPointerType() 5944 CallType = VariadicFunction; 5945 } 5946 5947 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 5948 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5949 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5950 TheCall->getCallee()->getSourceRange(), CallType); 5951 5952 return false; 5953 } 5954 5955 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 5956 /// such as function pointers returned from functions. 
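///
/// Minimal sketch of such a call (hypothetical declaration): the callee is an
/// arbitrary expression, so only the prototype is available for checking.
///
///   void (*getHandler(void))(int, ...);
///   getHandler()(1, 2.0f);   // checked against the prototype alone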
5957 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 5958 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 5959 TheCall->getCallee()); 5960 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 5961 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5962 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5963 TheCall->getCallee()->getSourceRange(), CallType); 5964 5965 return false; 5966 } 5967 5968 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 5969 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 5970 return false; 5971 5972 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 5973 switch (Op) { 5974 case AtomicExpr::AO__c11_atomic_init: 5975 case AtomicExpr::AO__opencl_atomic_init: 5976 llvm_unreachable("There is no ordering argument for an init"); 5977 5978 case AtomicExpr::AO__c11_atomic_load: 5979 case AtomicExpr::AO__opencl_atomic_load: 5980 case AtomicExpr::AO__hip_atomic_load: 5981 case AtomicExpr::AO__atomic_load_n: 5982 case AtomicExpr::AO__atomic_load: 5983 return OrderingCABI != llvm::AtomicOrderingCABI::release && 5984 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5985 5986 case AtomicExpr::AO__c11_atomic_store: 5987 case AtomicExpr::AO__opencl_atomic_store: 5988 case AtomicExpr::AO__hip_atomic_store: 5989 case AtomicExpr::AO__atomic_store: 5990 case AtomicExpr::AO__atomic_store_n: 5991 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 5992 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 5993 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5994 5995 default: 5996 return true; 5997 } 5998 } 5999 6000 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 6001 AtomicExpr::AtomicOp Op) { 6002 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 6003 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6004 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 6005 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 6006 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 6007 Op); 6008 } 6009 6010 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 6011 SourceLocation RParenLoc, MultiExprArg Args, 6012 AtomicExpr::AtomicOp Op, 6013 AtomicArgumentOrder ArgOrder) { 6014 // All the non-OpenCL operations take one of the following forms. 6015 // The OpenCL operations take the __c11 forms with one extra argument for 6016 // synchronization scope. 
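  // For example (illustrative; 'P' is a pointer to a suitable _Atomic type):
  //   __c11_atomic_load(P, __ATOMIC_ACQUIRE);
  //   __opencl_atomic_load(P, memory_order_acquire, memory_scope_device);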
6017 enum { 6018 // C __c11_atomic_init(A *, C) 6019 Init, 6020 6021 // C __c11_atomic_load(A *, int) 6022 Load, 6023 6024 // void __atomic_load(A *, CP, int) 6025 LoadCopy, 6026 6027 // void __atomic_store(A *, CP, int) 6028 Copy, 6029 6030 // C __c11_atomic_add(A *, M, int) 6031 Arithmetic, 6032 6033 // C __atomic_exchange_n(A *, CP, int) 6034 Xchg, 6035 6036 // void __atomic_exchange(A *, C *, CP, int) 6037 GNUXchg, 6038 6039 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 6040 C11CmpXchg, 6041 6042 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 6043 GNUCmpXchg 6044 } Form = Init; 6045 6046 const unsigned NumForm = GNUCmpXchg + 1; 6047 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 6048 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 6049 // where: 6050 // C is an appropriate type, 6051 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 6052 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 6053 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 6054 // the int parameters are for orderings. 6055 6056 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 6057 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 6058 "need to update code for modified forms"); 6059 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 6060 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 6061 AtomicExpr::AO__atomic_load, 6062 "need to update code for modified C11 atomics"); 6063 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 6064 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 6065 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 6066 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 6067 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 6068 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 6069 IsOpenCL; 6070 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 6071 Op == AtomicExpr::AO__atomic_store_n || 6072 Op == AtomicExpr::AO__atomic_exchange_n || 6073 Op == AtomicExpr::AO__atomic_compare_exchange_n; 6074 bool IsAddSub = false; 6075 6076 switch (Op) { 6077 case AtomicExpr::AO__c11_atomic_init: 6078 case AtomicExpr::AO__opencl_atomic_init: 6079 Form = Init; 6080 break; 6081 6082 case AtomicExpr::AO__c11_atomic_load: 6083 case AtomicExpr::AO__opencl_atomic_load: 6084 case AtomicExpr::AO__hip_atomic_load: 6085 case AtomicExpr::AO__atomic_load_n: 6086 Form = Load; 6087 break; 6088 6089 case AtomicExpr::AO__atomic_load: 6090 Form = LoadCopy; 6091 break; 6092 6093 case AtomicExpr::AO__c11_atomic_store: 6094 case AtomicExpr::AO__opencl_atomic_store: 6095 case AtomicExpr::AO__hip_atomic_store: 6096 case AtomicExpr::AO__atomic_store: 6097 case AtomicExpr::AO__atomic_store_n: 6098 Form = Copy; 6099 break; 6100 case AtomicExpr::AO__hip_atomic_fetch_add: 6101 case AtomicExpr::AO__hip_atomic_fetch_min: 6102 case AtomicExpr::AO__hip_atomic_fetch_max: 6103 case AtomicExpr::AO__c11_atomic_fetch_add: 6104 case AtomicExpr::AO__c11_atomic_fetch_sub: 6105 case AtomicExpr::AO__opencl_atomic_fetch_add: 6106 case AtomicExpr::AO__opencl_atomic_fetch_sub: 6107 case AtomicExpr::AO__atomic_fetch_add: 6108 case AtomicExpr::AO__atomic_fetch_sub: 6109 case AtomicExpr::AO__atomic_add_fetch: 6110 case AtomicExpr::AO__atomic_sub_fetch: 6111 IsAddSub = true; 6112 Form = Arithmetic; 6113 break; 6114 case AtomicExpr::AO__c11_atomic_fetch_and: 6115 case AtomicExpr::AO__c11_atomic_fetch_or: 6116 case AtomicExpr::AO__c11_atomic_fetch_xor: 6117 case AtomicExpr::AO__hip_atomic_fetch_and: 6118 case 
AtomicExpr::AO__hip_atomic_fetch_or: 6119 case AtomicExpr::AO__hip_atomic_fetch_xor: 6120 case AtomicExpr::AO__c11_atomic_fetch_nand: 6121 case AtomicExpr::AO__opencl_atomic_fetch_and: 6122 case AtomicExpr::AO__opencl_atomic_fetch_or: 6123 case AtomicExpr::AO__opencl_atomic_fetch_xor: 6124 case AtomicExpr::AO__atomic_fetch_and: 6125 case AtomicExpr::AO__atomic_fetch_or: 6126 case AtomicExpr::AO__atomic_fetch_xor: 6127 case AtomicExpr::AO__atomic_fetch_nand: 6128 case AtomicExpr::AO__atomic_and_fetch: 6129 case AtomicExpr::AO__atomic_or_fetch: 6130 case AtomicExpr::AO__atomic_xor_fetch: 6131 case AtomicExpr::AO__atomic_nand_fetch: 6132 Form = Arithmetic; 6133 break; 6134 case AtomicExpr::AO__c11_atomic_fetch_min: 6135 case AtomicExpr::AO__c11_atomic_fetch_max: 6136 case AtomicExpr::AO__opencl_atomic_fetch_min: 6137 case AtomicExpr::AO__opencl_atomic_fetch_max: 6138 case AtomicExpr::AO__atomic_min_fetch: 6139 case AtomicExpr::AO__atomic_max_fetch: 6140 case AtomicExpr::AO__atomic_fetch_min: 6141 case AtomicExpr::AO__atomic_fetch_max: 6142 Form = Arithmetic; 6143 break; 6144 6145 case AtomicExpr::AO__c11_atomic_exchange: 6146 case AtomicExpr::AO__hip_atomic_exchange: 6147 case AtomicExpr::AO__opencl_atomic_exchange: 6148 case AtomicExpr::AO__atomic_exchange_n: 6149 Form = Xchg; 6150 break; 6151 6152 case AtomicExpr::AO__atomic_exchange: 6153 Form = GNUXchg; 6154 break; 6155 6156 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 6157 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 6158 case AtomicExpr::AO__hip_atomic_compare_exchange_strong: 6159 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 6160 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 6161 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 6162 Form = C11CmpXchg; 6163 break; 6164 6165 case AtomicExpr::AO__atomic_compare_exchange: 6166 case AtomicExpr::AO__atomic_compare_exchange_n: 6167 Form = GNUCmpXchg; 6168 break; 6169 } 6170 6171 unsigned AdjustedNumArgs = NumArgs[Form]; 6172 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init) 6173 ++AdjustedNumArgs; 6174 // Check we have the right number of arguments. 6175 if (Args.size() < AdjustedNumArgs) { 6176 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 6177 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 6178 << ExprRange; 6179 return ExprError(); 6180 } else if (Args.size() > AdjustedNumArgs) { 6181 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 6182 diag::err_typecheck_call_too_many_args) 6183 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 6184 << ExprRange; 6185 return ExprError(); 6186 } 6187 6188 // Inspect the first argument of the atomic operation. 6189 Expr *Ptr = Args[0]; 6190 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 6191 if (ConvertedPtr.isInvalid()) 6192 return ExprError(); 6193 6194 Ptr = ConvertedPtr.get(); 6195 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 6196 if (!pointerType) { 6197 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 6198 << Ptr->getType() << Ptr->getSourceRange(); 6199 return ExprError(); 6200 } 6201 6202 // For a __c11 builtin, this should be a pointer to an _Atomic type. 
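  // e.g. (illustrative): __c11_atomic_fetch_add expects '_Atomic(int) *',
  // whereas the GNU __atomic_fetch_add form accepts a plain 'int *':
  //   _Atomic(int) *AP;  __c11_atomic_fetch_add(AP, 1, __ATOMIC_RELAXED);
  //   int *GP;           __atomic_fetch_add(GP, 1, __ATOMIC_RELAXED);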
6203 QualType AtomTy = pointerType->getPointeeType(); // 'A' 6204 QualType ValType = AtomTy; // 'C' 6205 if (IsC11) { 6206 if (!AtomTy->isAtomicType()) { 6207 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 6208 << Ptr->getType() << Ptr->getSourceRange(); 6209 return ExprError(); 6210 } 6211 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 6212 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 6213 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 6214 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 6215 << Ptr->getSourceRange(); 6216 return ExprError(); 6217 } 6218 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 6219 } else if (Form != Load && Form != LoadCopy) { 6220 if (ValType.isConstQualified()) { 6221 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 6222 << Ptr->getType() << Ptr->getSourceRange(); 6223 return ExprError(); 6224 } 6225 } 6226 6227 // For an arithmetic operation, the implied arithmetic must be well-formed. 6228 if (Form == Arithmetic) { 6229 // GCC does not enforce these rules for GNU atomics, but we do to help catch 6230 // trivial type errors. 6231 auto IsAllowedValueType = [&](QualType ValType) { 6232 if (ValType->isIntegerType()) 6233 return true; 6234 if (ValType->isPointerType()) 6235 return true; 6236 if (!ValType->isFloatingType()) 6237 return false; 6238 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 6239 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 6240 &Context.getTargetInfo().getLongDoubleFormat() == 6241 &llvm::APFloat::x87DoubleExtended()) 6242 return false; 6243 return true; 6244 }; 6245 if (IsAddSub && !IsAllowedValueType(ValType)) { 6246 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 6247 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6248 return ExprError(); 6249 } 6250 if (!IsAddSub && !ValType->isIntegerType()) { 6251 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 6252 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6253 return ExprError(); 6254 } 6255 if (IsC11 && ValType->isPointerType() && 6256 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 6257 diag::err_incomplete_type)) { 6258 return ExprError(); 6259 } 6260 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 6261 // For __atomic_*_n operations, the value type must be a scalar integral or 6262 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 6263 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 6264 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 6265 return ExprError(); 6266 } 6267 6268 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 6269 !AtomTy->isScalarType()) { 6270 // For GNU atomics, require a trivially-copyable type. This is not part of 6271 // the GNU atomics specification but we enforce it for consistency with 6272 // other atomics which generally all require a trivially-copyable type. This 6273 // is because atomics just copy bits. 6274 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 6275 << Ptr->getType() << Ptr->getSourceRange(); 6276 return ExprError(); 6277 } 6278 6279 switch (ValType.getObjCLifetime()) { 6280 case Qualifiers::OCL_None: 6281 case Qualifiers::OCL_ExplicitNone: 6282 // okay 6283 break; 6284 6285 case Qualifiers::OCL_Weak: 6286 case Qualifiers::OCL_Strong: 6287 case Qualifiers::OCL_Autoreleasing: 6288 // FIXME: Can this happen? 
By this point, ValType should be known 6289 // to be trivially copyable. 6290 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 6291 << ValType << Ptr->getSourceRange(); 6292 return ExprError(); 6293 } 6294 6295 // All atomic operations have an overload which takes a pointer to a volatile 6296 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 6297 // into the result or the other operands. Similarly atomic_load takes a 6298 // pointer to a const 'A'. 6299 ValType.removeLocalVolatile(); 6300 ValType.removeLocalConst(); 6301 QualType ResultType = ValType; 6302 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 6303 Form == Init) 6304 ResultType = Context.VoidTy; 6305 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 6306 ResultType = Context.BoolTy; 6307 6308 // The type of a parameter passed 'by value'. In the GNU atomics, such 6309 // arguments are actually passed as pointers. 6310 QualType ByValType = ValType; // 'CP' 6311 bool IsPassedByAddress = false; 6312 if (!IsC11 && !IsHIP && !IsN) { 6313 ByValType = Ptr->getType(); 6314 IsPassedByAddress = true; 6315 } 6316 6317 SmallVector<Expr *, 5> APIOrderedArgs; 6318 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 6319 APIOrderedArgs.push_back(Args[0]); 6320 switch (Form) { 6321 case Init: 6322 case Load: 6323 APIOrderedArgs.push_back(Args[1]); // Val1/Order 6324 break; 6325 case LoadCopy: 6326 case Copy: 6327 case Arithmetic: 6328 case Xchg: 6329 APIOrderedArgs.push_back(Args[2]); // Val1 6330 APIOrderedArgs.push_back(Args[1]); // Order 6331 break; 6332 case GNUXchg: 6333 APIOrderedArgs.push_back(Args[2]); // Val1 6334 APIOrderedArgs.push_back(Args[3]); // Val2 6335 APIOrderedArgs.push_back(Args[1]); // Order 6336 break; 6337 case C11CmpXchg: 6338 APIOrderedArgs.push_back(Args[2]); // Val1 6339 APIOrderedArgs.push_back(Args[4]); // Val2 6340 APIOrderedArgs.push_back(Args[1]); // Order 6341 APIOrderedArgs.push_back(Args[3]); // OrderFail 6342 break; 6343 case GNUCmpXchg: 6344 APIOrderedArgs.push_back(Args[2]); // Val1 6345 APIOrderedArgs.push_back(Args[4]); // Val2 6346 APIOrderedArgs.push_back(Args[5]); // Weak 6347 APIOrderedArgs.push_back(Args[1]); // Order 6348 APIOrderedArgs.push_back(Args[3]); // OrderFail 6349 break; 6350 } 6351 } else 6352 APIOrderedArgs.append(Args.begin(), Args.end()); 6353 6354 // The first argument's non-CV pointer type is used to deduce the type of 6355 // subsequent arguments, except for: 6356 // - weak flag (always converted to bool) 6357 // - memory order (always converted to int) 6358 // - scope (always converted to int) 6359 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 6360 QualType Ty; 6361 if (i < NumVals[Form] + 1) { 6362 switch (i) { 6363 case 0: 6364 // The first argument is always a pointer. It has a fixed type. 6365 // It is always dereferenced, a nullptr is undefined. 6366 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 6367 // Nothing else to do: we already know all we want about this pointer. 6368 continue; 6369 case 1: 6370 // The second argument is the non-atomic operand. For arithmetic, this 6371 // is always passed by value, and for a compare_exchange it is always 6372 // passed by address. For the rest, GNU uses by-address and C11 uses 6373 // by-value. 
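        // Illustrative contrast (hypothetical operands): the C11/_n forms take
        // the operand by value, the GNU non-_n forms take it through a pointer:
        //   __c11_atomic_store(AP, 42, __ATOMIC_RELEASE);   // 42 by value
        //   __atomic_store(GP, &Val, __ATOMIC_RELEASE);     // Val by address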
6374 assert(Form != Load); 6375 if (Form == Arithmetic && ValType->isPointerType()) 6376 Ty = Context.getPointerDiffType(); 6377 else if (Form == Init || Form == Arithmetic) 6378 Ty = ValType; 6379 else if (Form == Copy || Form == Xchg) { 6380 if (IsPassedByAddress) { 6381 // The value pointer is always dereferenced, a nullptr is undefined. 6382 CheckNonNullArgument(*this, APIOrderedArgs[i], 6383 ExprRange.getBegin()); 6384 } 6385 Ty = ByValType; 6386 } else { 6387 Expr *ValArg = APIOrderedArgs[i]; 6388 // The value pointer is always dereferenced, a nullptr is undefined. 6389 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 6390 LangAS AS = LangAS::Default; 6391 // Keep address space of non-atomic pointer type. 6392 if (const PointerType *PtrTy = 6393 ValArg->getType()->getAs<PointerType>()) { 6394 AS = PtrTy->getPointeeType().getAddressSpace(); 6395 } 6396 Ty = Context.getPointerType( 6397 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 6398 } 6399 break; 6400 case 2: 6401 // The third argument to compare_exchange / GNU exchange is the desired 6402 // value, either by-value (for the C11 and *_n variant) or as a pointer. 6403 if (IsPassedByAddress) 6404 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 6405 Ty = ByValType; 6406 break; 6407 case 3: 6408 // The fourth argument to GNU compare_exchange is a 'weak' flag. 6409 Ty = Context.BoolTy; 6410 break; 6411 } 6412 } else { 6413 // The order(s) and scope are always converted to int. 6414 Ty = Context.IntTy; 6415 } 6416 6417 InitializedEntity Entity = 6418 InitializedEntity::InitializeParameter(Context, Ty, false); 6419 ExprResult Arg = APIOrderedArgs[i]; 6420 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6421 if (Arg.isInvalid()) 6422 return true; 6423 APIOrderedArgs[i] = Arg.get(); 6424 } 6425 6426 // Permute the arguments into a 'consistent' order. 6427 SmallVector<Expr*, 5> SubExprs; 6428 SubExprs.push_back(Ptr); 6429 switch (Form) { 6430 case Init: 6431 // Note, AtomicExpr::getVal1() has a special case for this atomic. 6432 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6433 break; 6434 case Load: 6435 SubExprs.push_back(APIOrderedArgs[1]); // Order 6436 break; 6437 case LoadCopy: 6438 case Copy: 6439 case Arithmetic: 6440 case Xchg: 6441 SubExprs.push_back(APIOrderedArgs[2]); // Order 6442 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6443 break; 6444 case GNUXchg: 6445 // Note, AtomicExpr::getVal2() has a special case for this atomic. 
6446 SubExprs.push_back(APIOrderedArgs[3]); // Order 6447 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6448 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6449 break; 6450 case C11CmpXchg: 6451 SubExprs.push_back(APIOrderedArgs[3]); // Order 6452 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6453 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 6454 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6455 break; 6456 case GNUCmpXchg: 6457 SubExprs.push_back(APIOrderedArgs[4]); // Order 6458 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6459 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 6460 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6461 SubExprs.push_back(APIOrderedArgs[3]); // Weak 6462 break; 6463 } 6464 6465 if (SubExprs.size() >= 2 && Form != Init) { 6466 if (Optional<llvm::APSInt> Result = 6467 SubExprs[1]->getIntegerConstantExpr(Context)) 6468 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 6469 Diag(SubExprs[1]->getBeginLoc(), 6470 diag::warn_atomic_op_has_invalid_memory_order) 6471 << SubExprs[1]->getSourceRange(); 6472 } 6473 6474 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 6475 auto *Scope = Args[Args.size() - 1]; 6476 if (Optional<llvm::APSInt> Result = 6477 Scope->getIntegerConstantExpr(Context)) { 6478 if (!ScopeModel->isValid(Result->getZExtValue())) 6479 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 6480 << Scope->getSourceRange(); 6481 } 6482 SubExprs.push_back(Scope); 6483 } 6484 6485 AtomicExpr *AE = new (Context) 6486 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 6487 6488 if ((Op == AtomicExpr::AO__c11_atomic_load || 6489 Op == AtomicExpr::AO__c11_atomic_store || 6490 Op == AtomicExpr::AO__opencl_atomic_load || 6491 Op == AtomicExpr::AO__hip_atomic_load || 6492 Op == AtomicExpr::AO__opencl_atomic_store || 6493 Op == AtomicExpr::AO__hip_atomic_store) && 6494 Context.AtomicUsesUnsupportedLibcall(AE)) 6495 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 6496 << ((Op == AtomicExpr::AO__c11_atomic_load || 6497 Op == AtomicExpr::AO__opencl_atomic_load || 6498 Op == AtomicExpr::AO__hip_atomic_load) 6499 ? 0 6500 : 1); 6501 6502 if (ValType->isBitIntType()) { 6503 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit); 6504 return ExprError(); 6505 } 6506 6507 return AE; 6508 } 6509 6510 /// checkBuiltinArgument - Given a call to a builtin function, perform 6511 /// normal type-checking on the given argument, updating the call in 6512 /// place. This is useful when a builtin function requires custom 6513 /// type-checking for some of its arguments but not necessarily all of 6514 /// them. 6515 /// 6516 /// Returns true on error. 6517 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 6518 FunctionDecl *Fn = E->getDirectCallee(); 6519 assert(Fn && "builtin call without direct callee!"); 6520 6521 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 6522 InitializedEntity Entity = 6523 InitializedEntity::InitializeParameter(S.Context, Param); 6524 6525 ExprResult Arg = E->getArg(ArgIndex); 6526 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 6527 if (Arg.isInvalid()) 6528 return true; 6529 6530 E->setArg(ArgIndex, Arg.get()); 6531 return false; 6532 } 6533 6534 /// We have a call to a function like __sync_fetch_and_add, which is an 6535 /// overloaded function based on the pointer type of its first argument. 
6536 /// The main BuildCallExpr routines have already promoted the types of 6537 /// arguments because all of these calls are prototyped as void(...). 6538 /// 6539 /// This function goes through and does final semantic checking for these 6540 /// builtins, as well as generating any warnings. 6541 ExprResult 6542 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 6543 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 6544 Expr *Callee = TheCall->getCallee(); 6545 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 6546 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6547 6548 // Ensure that we have at least one argument to do type inference from. 6549 if (TheCall->getNumArgs() < 1) { 6550 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6551 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 6552 return ExprError(); 6553 } 6554 6555 // Inspect the first argument of the atomic builtin. This should always be 6556 // a pointer type, whose element is an integral scalar or pointer type. 6557 // Because it is a pointer type, we don't have to worry about any implicit 6558 // casts here. 6559 // FIXME: We don't allow floating point scalars as input. 6560 Expr *FirstArg = TheCall->getArg(0); 6561 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 6562 if (FirstArgResult.isInvalid()) 6563 return ExprError(); 6564 FirstArg = FirstArgResult.get(); 6565 TheCall->setArg(0, FirstArg); 6566 6567 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 6568 if (!pointerType) { 6569 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 6570 << FirstArg->getType() << FirstArg->getSourceRange(); 6571 return ExprError(); 6572 } 6573 6574 QualType ValType = pointerType->getPointeeType(); 6575 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6576 !ValType->isBlockPointerType()) { 6577 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 6578 << FirstArg->getType() << FirstArg->getSourceRange(); 6579 return ExprError(); 6580 } 6581 6582 if (ValType.isConstQualified()) { 6583 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 6584 << FirstArg->getType() << FirstArg->getSourceRange(); 6585 return ExprError(); 6586 } 6587 6588 switch (ValType.getObjCLifetime()) { 6589 case Qualifiers::OCL_None: 6590 case Qualifiers::OCL_ExplicitNone: 6591 // okay 6592 break; 6593 6594 case Qualifiers::OCL_Weak: 6595 case Qualifiers::OCL_Strong: 6596 case Qualifiers::OCL_Autoreleasing: 6597 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 6598 << ValType << FirstArg->getSourceRange(); 6599 return ExprError(); 6600 } 6601 6602 // Strip any qualifiers off ValType. 6603 ValType = ValType.getUnqualifiedType(); 6604 6605 // The majority of builtins return a value, but a few have special return 6606 // types, so allow them to override appropriately below. 6607 QualType ResultType = ValType; 6608 6609 // We need to figure out which concrete builtin this maps onto. For example, 6610 // __sync_fetch_and_add with a 2 byte object turns into 6611 // __sync_fetch_and_add_2. 
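  // For instance (illustrative; assumes a target where 'short' is 2 bytes):
  //   short *P;  __sync_fetch_and_add(P, 1);
  // is rebuilt below as a call to __sync_fetch_and_add_2.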
6612 #define BUILTIN_ROW(x) \ 6613 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 6614 Builtin::BI##x##_8, Builtin::BI##x##_16 } 6615 6616 static const unsigned BuiltinIndices[][5] = { 6617 BUILTIN_ROW(__sync_fetch_and_add), 6618 BUILTIN_ROW(__sync_fetch_and_sub), 6619 BUILTIN_ROW(__sync_fetch_and_or), 6620 BUILTIN_ROW(__sync_fetch_and_and), 6621 BUILTIN_ROW(__sync_fetch_and_xor), 6622 BUILTIN_ROW(__sync_fetch_and_nand), 6623 6624 BUILTIN_ROW(__sync_add_and_fetch), 6625 BUILTIN_ROW(__sync_sub_and_fetch), 6626 BUILTIN_ROW(__sync_and_and_fetch), 6627 BUILTIN_ROW(__sync_or_and_fetch), 6628 BUILTIN_ROW(__sync_xor_and_fetch), 6629 BUILTIN_ROW(__sync_nand_and_fetch), 6630 6631 BUILTIN_ROW(__sync_val_compare_and_swap), 6632 BUILTIN_ROW(__sync_bool_compare_and_swap), 6633 BUILTIN_ROW(__sync_lock_test_and_set), 6634 BUILTIN_ROW(__sync_lock_release), 6635 BUILTIN_ROW(__sync_swap) 6636 }; 6637 #undef BUILTIN_ROW 6638 6639 // Determine the index of the size. 6640 unsigned SizeIndex; 6641 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 6642 case 1: SizeIndex = 0; break; 6643 case 2: SizeIndex = 1; break; 6644 case 4: SizeIndex = 2; break; 6645 case 8: SizeIndex = 3; break; 6646 case 16: SizeIndex = 4; break; 6647 default: 6648 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 6649 << FirstArg->getType() << FirstArg->getSourceRange(); 6650 return ExprError(); 6651 } 6652 6653 // Each of these builtins has one pointer argument, followed by some number of 6654 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 6655 // that we ignore. Find out which row of BuiltinIndices to read from as well 6656 // as the number of fixed args. 6657 unsigned BuiltinID = FDecl->getBuiltinID(); 6658 unsigned BuiltinIndex, NumFixed = 1; 6659 bool WarnAboutSemanticsChange = false; 6660 switch (BuiltinID) { 6661 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 6662 case Builtin::BI__sync_fetch_and_add: 6663 case Builtin::BI__sync_fetch_and_add_1: 6664 case Builtin::BI__sync_fetch_and_add_2: 6665 case Builtin::BI__sync_fetch_and_add_4: 6666 case Builtin::BI__sync_fetch_and_add_8: 6667 case Builtin::BI__sync_fetch_and_add_16: 6668 BuiltinIndex = 0; 6669 break; 6670 6671 case Builtin::BI__sync_fetch_and_sub: 6672 case Builtin::BI__sync_fetch_and_sub_1: 6673 case Builtin::BI__sync_fetch_and_sub_2: 6674 case Builtin::BI__sync_fetch_and_sub_4: 6675 case Builtin::BI__sync_fetch_and_sub_8: 6676 case Builtin::BI__sync_fetch_and_sub_16: 6677 BuiltinIndex = 1; 6678 break; 6679 6680 case Builtin::BI__sync_fetch_and_or: 6681 case Builtin::BI__sync_fetch_and_or_1: 6682 case Builtin::BI__sync_fetch_and_or_2: 6683 case Builtin::BI__sync_fetch_and_or_4: 6684 case Builtin::BI__sync_fetch_and_or_8: 6685 case Builtin::BI__sync_fetch_and_or_16: 6686 BuiltinIndex = 2; 6687 break; 6688 6689 case Builtin::BI__sync_fetch_and_and: 6690 case Builtin::BI__sync_fetch_and_and_1: 6691 case Builtin::BI__sync_fetch_and_and_2: 6692 case Builtin::BI__sync_fetch_and_and_4: 6693 case Builtin::BI__sync_fetch_and_and_8: 6694 case Builtin::BI__sync_fetch_and_and_16: 6695 BuiltinIndex = 3; 6696 break; 6697 6698 case Builtin::BI__sync_fetch_and_xor: 6699 case Builtin::BI__sync_fetch_and_xor_1: 6700 case Builtin::BI__sync_fetch_and_xor_2: 6701 case Builtin::BI__sync_fetch_and_xor_4: 6702 case Builtin::BI__sync_fetch_and_xor_8: 6703 case Builtin::BI__sync_fetch_and_xor_16: 6704 BuiltinIndex = 4; 6705 break; 6706 6707 case Builtin::BI__sync_fetch_and_nand: 6708 case 
Builtin::BI__sync_fetch_and_nand_1: 6709 case Builtin::BI__sync_fetch_and_nand_2: 6710 case Builtin::BI__sync_fetch_and_nand_4: 6711 case Builtin::BI__sync_fetch_and_nand_8: 6712 case Builtin::BI__sync_fetch_and_nand_16: 6713 BuiltinIndex = 5; 6714 WarnAboutSemanticsChange = true; 6715 break; 6716 6717 case Builtin::BI__sync_add_and_fetch: 6718 case Builtin::BI__sync_add_and_fetch_1: 6719 case Builtin::BI__sync_add_and_fetch_2: 6720 case Builtin::BI__sync_add_and_fetch_4: 6721 case Builtin::BI__sync_add_and_fetch_8: 6722 case Builtin::BI__sync_add_and_fetch_16: 6723 BuiltinIndex = 6; 6724 break; 6725 6726 case Builtin::BI__sync_sub_and_fetch: 6727 case Builtin::BI__sync_sub_and_fetch_1: 6728 case Builtin::BI__sync_sub_and_fetch_2: 6729 case Builtin::BI__sync_sub_and_fetch_4: 6730 case Builtin::BI__sync_sub_and_fetch_8: 6731 case Builtin::BI__sync_sub_and_fetch_16: 6732 BuiltinIndex = 7; 6733 break; 6734 6735 case Builtin::BI__sync_and_and_fetch: 6736 case Builtin::BI__sync_and_and_fetch_1: 6737 case Builtin::BI__sync_and_and_fetch_2: 6738 case Builtin::BI__sync_and_and_fetch_4: 6739 case Builtin::BI__sync_and_and_fetch_8: 6740 case Builtin::BI__sync_and_and_fetch_16: 6741 BuiltinIndex = 8; 6742 break; 6743 6744 case Builtin::BI__sync_or_and_fetch: 6745 case Builtin::BI__sync_or_and_fetch_1: 6746 case Builtin::BI__sync_or_and_fetch_2: 6747 case Builtin::BI__sync_or_and_fetch_4: 6748 case Builtin::BI__sync_or_and_fetch_8: 6749 case Builtin::BI__sync_or_and_fetch_16: 6750 BuiltinIndex = 9; 6751 break; 6752 6753 case Builtin::BI__sync_xor_and_fetch: 6754 case Builtin::BI__sync_xor_and_fetch_1: 6755 case Builtin::BI__sync_xor_and_fetch_2: 6756 case Builtin::BI__sync_xor_and_fetch_4: 6757 case Builtin::BI__sync_xor_and_fetch_8: 6758 case Builtin::BI__sync_xor_and_fetch_16: 6759 BuiltinIndex = 10; 6760 break; 6761 6762 case Builtin::BI__sync_nand_and_fetch: 6763 case Builtin::BI__sync_nand_and_fetch_1: 6764 case Builtin::BI__sync_nand_and_fetch_2: 6765 case Builtin::BI__sync_nand_and_fetch_4: 6766 case Builtin::BI__sync_nand_and_fetch_8: 6767 case Builtin::BI__sync_nand_and_fetch_16: 6768 BuiltinIndex = 11; 6769 WarnAboutSemanticsChange = true; 6770 break; 6771 6772 case Builtin::BI__sync_val_compare_and_swap: 6773 case Builtin::BI__sync_val_compare_and_swap_1: 6774 case Builtin::BI__sync_val_compare_and_swap_2: 6775 case Builtin::BI__sync_val_compare_and_swap_4: 6776 case Builtin::BI__sync_val_compare_and_swap_8: 6777 case Builtin::BI__sync_val_compare_and_swap_16: 6778 BuiltinIndex = 12; 6779 NumFixed = 2; 6780 break; 6781 6782 case Builtin::BI__sync_bool_compare_and_swap: 6783 case Builtin::BI__sync_bool_compare_and_swap_1: 6784 case Builtin::BI__sync_bool_compare_and_swap_2: 6785 case Builtin::BI__sync_bool_compare_and_swap_4: 6786 case Builtin::BI__sync_bool_compare_and_swap_8: 6787 case Builtin::BI__sync_bool_compare_and_swap_16: 6788 BuiltinIndex = 13; 6789 NumFixed = 2; 6790 ResultType = Context.BoolTy; 6791 break; 6792 6793 case Builtin::BI__sync_lock_test_and_set: 6794 case Builtin::BI__sync_lock_test_and_set_1: 6795 case Builtin::BI__sync_lock_test_and_set_2: 6796 case Builtin::BI__sync_lock_test_and_set_4: 6797 case Builtin::BI__sync_lock_test_and_set_8: 6798 case Builtin::BI__sync_lock_test_and_set_16: 6799 BuiltinIndex = 14; 6800 break; 6801 6802 case Builtin::BI__sync_lock_release: 6803 case Builtin::BI__sync_lock_release_1: 6804 case Builtin::BI__sync_lock_release_2: 6805 case Builtin::BI__sync_lock_release_4: 6806 case Builtin::BI__sync_lock_release_8: 6807 case 
Builtin::BI__sync_lock_release_16: 6808 BuiltinIndex = 15; 6809 NumFixed = 0; 6810 ResultType = Context.VoidTy; 6811 break; 6812 6813 case Builtin::BI__sync_swap: 6814 case Builtin::BI__sync_swap_1: 6815 case Builtin::BI__sync_swap_2: 6816 case Builtin::BI__sync_swap_4: 6817 case Builtin::BI__sync_swap_8: 6818 case Builtin::BI__sync_swap_16: 6819 BuiltinIndex = 16; 6820 break; 6821 } 6822 6823 // Now that we know how many fixed arguments we expect, first check that we 6824 // have at least that many. 6825 if (TheCall->getNumArgs() < 1+NumFixed) { 6826 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6827 << 0 << 1 + NumFixed << TheCall->getNumArgs() 6828 << Callee->getSourceRange(); 6829 return ExprError(); 6830 } 6831 6832 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 6833 << Callee->getSourceRange(); 6834 6835 if (WarnAboutSemanticsChange) { 6836 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 6837 << Callee->getSourceRange(); 6838 } 6839 6840 // Get the decl for the concrete builtin from this, we can tell what the 6841 // concrete integer type we should convert to is. 6842 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 6843 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 6844 FunctionDecl *NewBuiltinDecl; 6845 if (NewBuiltinID == BuiltinID) 6846 NewBuiltinDecl = FDecl; 6847 else { 6848 // Perform builtin lookup to avoid redeclaring it. 6849 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 6850 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 6851 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 6852 assert(Res.getFoundDecl()); 6853 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 6854 if (!NewBuiltinDecl) 6855 return ExprError(); 6856 } 6857 6858 // The first argument --- the pointer --- has a fixed type; we 6859 // deduce the types of the rest of the arguments accordingly. Walk 6860 // the remaining arguments, converting them to the deduced value type. 6861 for (unsigned i = 0; i != NumFixed; ++i) { 6862 ExprResult Arg = TheCall->getArg(i+1); 6863 6864 // GCC does an implicit conversion to the pointer or integer ValType. This 6865 // can fail in some cases (1i -> int**), check for this error case now. 6866 // Initialize the argument. 6867 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6868 ValType, /*consume*/ false); 6869 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6870 if (Arg.isInvalid()) 6871 return ExprError(); 6872 6873 // Okay, we have something that *can* be converted to the right type. Check 6874 // to see if there is a potentially weird extension going on here. This can 6875 // happen when you do an atomic operation on something like an char* and 6876 // pass in 42. The 42 gets converted to char. This is even more strange 6877 // for things like 45.123 -> char, etc. 6878 // FIXME: Do this check. 6879 TheCall->setArg(i+1, Arg.get()); 6880 } 6881 6882 // Create a new DeclRefExpr to refer to the new decl. 6883 DeclRefExpr *NewDRE = DeclRefExpr::Create( 6884 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 6885 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 6886 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 6887 6888 // Set the callee in the CallExpr. 6889 // FIXME: This loses syntactic information. 
6890 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 6891 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 6892 CK_BuiltinFnToFnPtr); 6893 TheCall->setCallee(PromotedCall.get()); 6894 6895 // Change the result type of the call to match the original value type. This 6896 // is arbitrary, but the codegen for these builtins ins design to handle it 6897 // gracefully. 6898 TheCall->setType(ResultType); 6899 6900 // Prohibit problematic uses of bit-precise integer types with atomic 6901 // builtins. The arguments would have already been converted to the first 6902 // argument's type, so only need to check the first argument. 6903 const auto *BitIntValType = ValType->getAs<BitIntType>(); 6904 if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) { 6905 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size); 6906 return ExprError(); 6907 } 6908 6909 return TheCallResult; 6910 } 6911 6912 /// SemaBuiltinNontemporalOverloaded - We have a call to 6913 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 6914 /// overloaded function based on the pointer type of its last argument. 6915 /// 6916 /// This function goes through and does final semantic checking for these 6917 /// builtins. 6918 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 6919 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 6920 DeclRefExpr *DRE = 6921 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6922 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6923 unsigned BuiltinID = FDecl->getBuiltinID(); 6924 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 6925 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 6926 "Unexpected nontemporal load/store builtin!"); 6927 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 6928 unsigned numArgs = isStore ? 2 : 1; 6929 6930 // Ensure that we have the proper number of arguments. 6931 if (checkArgCount(*this, TheCall, numArgs)) 6932 return ExprError(); 6933 6934 // Inspect the last argument of the nontemporal builtin. This should always 6935 // be a pointer type, from which we imply the type of the memory access. 6936 // Because it is a pointer type, we don't have to worry about any implicit 6937 // casts here. 6938 Expr *PointerArg = TheCall->getArg(numArgs - 1); 6939 ExprResult PointerArgResult = 6940 DefaultFunctionArrayLvalueConversion(PointerArg); 6941 6942 if (PointerArgResult.isInvalid()) 6943 return ExprError(); 6944 PointerArg = PointerArgResult.get(); 6945 TheCall->setArg(numArgs - 1, PointerArg); 6946 6947 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 6948 if (!pointerType) { 6949 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 6950 << PointerArg->getType() << PointerArg->getSourceRange(); 6951 return ExprError(); 6952 } 6953 6954 QualType ValType = pointerType->getPointeeType(); 6955 6956 // Strip any qualifiers off ValType. 
6957 ValType = ValType.getUnqualifiedType(); 6958 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6959 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 6960 !ValType->isVectorType()) { 6961 Diag(DRE->getBeginLoc(), 6962 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 6963 << PointerArg->getType() << PointerArg->getSourceRange(); 6964 return ExprError(); 6965 } 6966 6967 if (!isStore) { 6968 TheCall->setType(ValType); 6969 return TheCallResult; 6970 } 6971 6972 ExprResult ValArg = TheCall->getArg(0); 6973 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6974 Context, ValType, /*consume*/ false); 6975 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 6976 if (ValArg.isInvalid()) 6977 return ExprError(); 6978 6979 TheCall->setArg(0, ValArg.get()); 6980 TheCall->setType(Context.VoidTy); 6981 return TheCallResult; 6982 } 6983 6984 /// CheckObjCString - Checks that the argument to the builtin 6985 /// CFString constructor is correct 6986 /// Note: It might also make sense to do the UTF-16 conversion here (would 6987 /// simplify the backend). 6988 bool Sema::CheckObjCString(Expr *Arg) { 6989 Arg = Arg->IgnoreParenCasts(); 6990 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 6991 6992 if (!Literal || !Literal->isOrdinary()) { 6993 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 6994 << Arg->getSourceRange(); 6995 return true; 6996 } 6997 6998 if (Literal->containsNonAsciiOrNull()) { 6999 StringRef String = Literal->getString(); 7000 unsigned NumBytes = String.size(); 7001 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 7002 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 7003 llvm::UTF16 *ToPtr = &ToBuf[0]; 7004 7005 llvm::ConversionResult Result = 7006 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 7007 ToPtr + NumBytes, llvm::strictConversion); 7008 // Check for conversion failure. 7009 if (Result != llvm::conversionOK) 7010 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 7011 << Arg->getSourceRange(); 7012 } 7013 return false; 7014 } 7015 7016 /// CheckObjCString - Checks that the format string argument to the os_log() 7017 /// and os_trace() functions is correct, and converts it to const char *. 7018 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 7019 Arg = Arg->IgnoreParenCasts(); 7020 auto *Literal = dyn_cast<StringLiteral>(Arg); 7021 if (!Literal) { 7022 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 7023 Literal = ObjcLiteral->getString(); 7024 } 7025 } 7026 7027 if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) { 7028 return ExprError( 7029 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 7030 << Arg->getSourceRange()); 7031 } 7032 7033 ExprResult Result(Literal); 7034 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 7035 InitializedEntity Entity = 7036 InitializedEntity::InitializeParameter(Context, ResultTy, false); 7037 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 7038 return Result; 7039 } 7040 7041 /// Check that the user is calling the appropriate va_start builtin for the 7042 /// target and calling convention. 
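///
/// Hedged example of a rejected combination (hypothetical function): a System V
/// ABI function may not use the Microsoft va_start flavor, and vice versa.
///
///   __attribute__((sysv_abi)) void f(int n, ...) {
///     __builtin_ms_va_list ap;
///     __builtin_ms_va_start(ap, n);   // diagnosed: wrong ABI for this builtin
///   }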
7043 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 7044 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 7045 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 7046 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 7047 TT.getArch() == llvm::Triple::aarch64_32); 7048 bool IsWindows = TT.isOSWindows(); 7049 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 7050 if (IsX64 || IsAArch64) { 7051 CallingConv CC = CC_C; 7052 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 7053 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 7054 if (IsMSVAStart) { 7055 // Don't allow this in System V ABI functions. 7056 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 7057 return S.Diag(Fn->getBeginLoc(), 7058 diag::err_ms_va_start_used_in_sysv_function); 7059 } else { 7060 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 7061 // On x64 Windows, don't allow this in System V ABI functions. 7062 // (Yes, that means there's no corresponding way to support variadic 7063 // System V ABI functions on Windows.) 7064 if ((IsWindows && CC == CC_X86_64SysV) || 7065 (!IsWindows && CC == CC_Win64)) 7066 return S.Diag(Fn->getBeginLoc(), 7067 diag::err_va_start_used_in_wrong_abi_function) 7068 << !IsWindows; 7069 } 7070 return false; 7071 } 7072 7073 if (IsMSVAStart) 7074 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 7075 return false; 7076 } 7077 7078 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 7079 ParmVarDecl **LastParam = nullptr) { 7080 // Determine whether the current function, block, or obj-c method is variadic 7081 // and get its parameter list. 7082 bool IsVariadic = false; 7083 ArrayRef<ParmVarDecl *> Params; 7084 DeclContext *Caller = S.CurContext; 7085 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 7086 IsVariadic = Block->isVariadic(); 7087 Params = Block->parameters(); 7088 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 7089 IsVariadic = FD->isVariadic(); 7090 Params = FD->parameters(); 7091 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 7092 IsVariadic = MD->isVariadic(); 7093 // FIXME: This isn't correct for methods (results in bogus warning). 7094 Params = MD->parameters(); 7095 } else if (isa<CapturedDecl>(Caller)) { 7096 // We don't support va_start in a CapturedDecl. 7097 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 7098 return true; 7099 } else { 7100 // This must be some other declcontext that parses exprs. 7101 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 7102 return true; 7103 } 7104 7105 if (!IsVariadic) { 7106 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 7107 return true; 7108 } 7109 7110 if (LastParam) 7111 *LastParam = Params.empty() ? nullptr : Params.back(); 7112 7113 return false; 7114 } 7115 7116 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 7117 /// for validity. Emit an error and return true on failure; return false 7118 /// on success. 7119 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 7120 Expr *Fn = TheCall->getCallee(); 7121 7122 if (checkVAStartABI(*this, BuiltinID, Fn)) 7123 return true; 7124 7125 if (checkArgCount(*this, TheCall, 2)) 7126 return true; 7127 7128 // Type-check the first argument normally. 7129 if (checkBuiltinArgument(*this, TheCall, 0)) 7130 return true; 7131 7132 // Check that the current function is variadic, and get its last parameter. 
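  // Illustrative misuses that are rejected or warned about below
  // (hypothetical functions):
  //   void f(int a, int b) { va_list ap; va_start(ap, b); }      // not variadic
  //   void g(int a, int b, ...) { va_list ap; va_start(ap, a); } // 'a' is not
  //                                                              // the last
  //                                                              // named param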
7133 ParmVarDecl *LastParam; 7134 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 7135 return true; 7136 7137 // Verify that the second argument to the builtin is the last argument of the 7138 // current function or method. 7139 bool SecondArgIsLastNamedArgument = false; 7140 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 7141 7142 // These are valid if SecondArgIsLastNamedArgument is false after the next 7143 // block. 7144 QualType Type; 7145 SourceLocation ParamLoc; 7146 bool IsCRegister = false; 7147 7148 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 7149 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 7150 SecondArgIsLastNamedArgument = PV == LastParam; 7151 7152 Type = PV->getType(); 7153 ParamLoc = PV->getLocation(); 7154 IsCRegister = 7155 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 7156 } 7157 } 7158 7159 if (!SecondArgIsLastNamedArgument) 7160 Diag(TheCall->getArg(1)->getBeginLoc(), 7161 diag::warn_second_arg_of_va_start_not_last_named_param); 7162 else if (IsCRegister || Type->isReferenceType() || 7163 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 7164 // Promotable integers are UB, but enumerations need a bit of 7165 // extra checking to see what their promotable type actually is. 7166 if (!Type->isPromotableIntegerType()) 7167 return false; 7168 if (!Type->isEnumeralType()) 7169 return true; 7170 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 7171 return !(ED && 7172 Context.typesAreCompatible(ED->getPromotionType(), Type)); 7173 }()) { 7174 unsigned Reason = 0; 7175 if (Type->isReferenceType()) Reason = 1; 7176 else if (IsCRegister) Reason = 2; 7177 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 7178 Diag(ParamLoc, diag::note_parameter_type) << Type; 7179 } 7180 7181 TheCall->setType(Context.VoidTy); 7182 return false; 7183 } 7184 7185 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 7186 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool { 7187 const LangOptions &LO = getLangOpts(); 7188 7189 if (LO.CPlusPlus) 7190 return Arg->getType() 7191 .getCanonicalType() 7192 .getTypePtr() 7193 ->getPointeeType() 7194 .withoutLocalFastQualifiers() == Context.CharTy; 7195 7196 // In C, allow aliasing through `char *`, this is required for AArch64 at 7197 // least. 7198 return true; 7199 }; 7200 7201 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 7202 // const char *named_addr); 7203 7204 Expr *Func = Call->getCallee(); 7205 7206 if (Call->getNumArgs() < 3) 7207 return Diag(Call->getEndLoc(), 7208 diag::err_typecheck_call_too_few_args_at_least) 7209 << 0 /*function call*/ << 3 << Call->getNumArgs(); 7210 7211 // Type-check the first argument normally. 7212 if (checkBuiltinArgument(*this, Call, 0)) 7213 return true; 7214 7215 // Check that the current function is variadic. 
7216 if (checkVAStartIsInVariadicFunction(*this, Func)) 7217 return true; 7218 7219 // __va_start on Windows does not validate the parameter qualifiers 7220 7221 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 7222 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 7223 7224 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 7225 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 7226 7227 const QualType &ConstCharPtrTy = 7228 Context.getPointerType(Context.CharTy.withConst()); 7229 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1)) 7230 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 7231 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 7232 << 0 /* qualifier difference */ 7233 << 3 /* parameter mismatch */ 7234 << 2 << Arg1->getType() << ConstCharPtrTy; 7235 7236 const QualType SizeTy = Context.getSizeType(); 7237 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 7238 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 7239 << Arg2->getType() << SizeTy << 1 /* different class */ 7240 << 0 /* qualifier difference */ 7241 << 3 /* parameter mismatch */ 7242 << 3 << Arg2->getType() << SizeTy; 7243 7244 return false; 7245 } 7246 7247 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 7248 /// friends. This is declared to take (...), so we have to check everything. 7249 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 7250 if (checkArgCount(*this, TheCall, 2)) 7251 return true; 7252 7253 ExprResult OrigArg0 = TheCall->getArg(0); 7254 ExprResult OrigArg1 = TheCall->getArg(1); 7255 7256 // Do standard promotions between the two arguments, returning their common 7257 // type. 7258 QualType Res = UsualArithmeticConversions( 7259 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison); 7260 if (OrigArg0.isInvalid() || OrigArg1.isInvalid()) 7261 return true; 7262 7263 // Make sure any conversions are pushed back into the call; this is 7264 // type safe since unordered compare builtins are declared as "_Bool 7265 // foo(...)". 7266 TheCall->setArg(0, OrigArg0.get()); 7267 TheCall->setArg(1, OrigArg1.get()); 7268 7269 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent()) 7270 return false; 7271 7272 // If the common type isn't a real floating type, then the arguments were 7273 // invalid for this operation. 7274 if (Res.isNull() || !Res->isRealFloatingType()) 7275 return Diag(OrigArg0.get()->getBeginLoc(), 7276 diag::err_typecheck_call_invalid_ordered_compare) 7277 << OrigArg0.get()->getType() << OrigArg1.get()->getType() 7278 << SourceRange(OrigArg0.get()->getBeginLoc(), 7279 OrigArg1.get()->getEndLoc()); 7280 7281 return false; 7282 } 7283 7284 /// SemaBuiltinSemaBuiltinFPClassification - Handle functions like 7285 /// __builtin_isnan and friends. This is declared to take (...), so we have 7286 /// to check everything. We expect the last argument to be a floating point 7287 /// value. 7288 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { 7289 if (checkArgCount(*this, TheCall, NumArgs)) 7290 return true; 7291 7292 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count 7293 // on all preceding parameters just being int. Try all of those. 
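  // e.g. (illustrative): only the trailing operand must be a real
  // floating-point value; the leading arguments are plain ints:
  //   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
  //                        FP_ZERO, x);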
7294 for (unsigned i = 0; i < NumArgs - 1; ++i) { 7295 Expr *Arg = TheCall->getArg(i); 7296 7297 if (Arg->isTypeDependent()) 7298 return false; 7299 7300 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing); 7301 7302 if (Res.isInvalid()) 7303 return true; 7304 TheCall->setArg(i, Res.get()); 7305 } 7306 7307 Expr *OrigArg = TheCall->getArg(NumArgs-1); 7308 7309 if (OrigArg->isTypeDependent()) 7310 return false; 7311 7312 // Usual Unary Conversions will convert half to float, which we want for 7313 // machines that use fp16 conversion intrinsics. Else, we wnat to leave the 7314 // type how it is, but do normal L->Rvalue conversions. 7315 if (Context.getTargetInfo().useFP16ConversionIntrinsics()) 7316 OrigArg = UsualUnaryConversions(OrigArg).get(); 7317 else 7318 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get(); 7319 TheCall->setArg(NumArgs - 1, OrigArg); 7320 7321 // This operation requires a non-_Complex floating-point number. 7322 if (!OrigArg->getType()->isRealFloatingType()) 7323 return Diag(OrigArg->getBeginLoc(), 7324 diag::err_typecheck_call_invalid_unary_fp) 7325 << OrigArg->getType() << OrigArg->getSourceRange(); 7326 7327 return false; 7328 } 7329 7330 /// Perform semantic analysis for a call to __builtin_complex. 7331 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 7332 if (checkArgCount(*this, TheCall, 2)) 7333 return true; 7334 7335 bool Dependent = false; 7336 for (unsigned I = 0; I != 2; ++I) { 7337 Expr *Arg = TheCall->getArg(I); 7338 QualType T = Arg->getType(); 7339 if (T->isDependentType()) { 7340 Dependent = true; 7341 continue; 7342 } 7343 7344 // Despite supporting _Complex int, GCC requires a real floating point type 7345 // for the operands of __builtin_complex. 7346 if (!T->isRealFloatingType()) { 7347 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 7348 << Arg->getType() << Arg->getSourceRange(); 7349 } 7350 7351 ExprResult Converted = DefaultLvalueConversion(Arg); 7352 if (Converted.isInvalid()) 7353 return true; 7354 TheCall->setArg(I, Converted.get()); 7355 } 7356 7357 if (Dependent) { 7358 TheCall->setType(Context.DependentTy); 7359 return false; 7360 } 7361 7362 Expr *Real = TheCall->getArg(0); 7363 Expr *Imag = TheCall->getArg(1); 7364 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 7365 return Diag(Real->getBeginLoc(), 7366 diag::err_typecheck_call_different_arg_types) 7367 << Real->getType() << Imag->getType() 7368 << Real->getSourceRange() << Imag->getSourceRange(); 7369 } 7370 7371 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 7372 // don't allow this builtin to form those types either. 7373 // FIXME: Should we allow these types? 7374 if (Real->getType()->isFloat16Type()) 7375 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 7376 << "_Float16"; 7377 if (Real->getType()->isHalfType()) 7378 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 7379 << "half"; 7380 7381 TheCall->setType(Context.getComplexType(Real->getType())); 7382 return false; 7383 } 7384 7385 // Customized Sema Checking for VSX builtins that have the following signature: 7386 // vector [...] builtinName(vector [...], vector [...], const int); 7387 // Which takes the same type of vectors (any legal vector type) for the first 7388 // two arguments and takes compile time constant for the third argument. 
7389 // Example builtins are : 7390 // vector double vec_xxpermdi(vector double, vector double, int); 7391 // vector short vec_xxsldwi(vector short, vector short, int); 7392 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 7393 unsigned ExpectedNumArgs = 3; 7394 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 7395 return true; 7396 7397 // Check the third argument is a compile time constant 7398 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 7399 return Diag(TheCall->getBeginLoc(), 7400 diag::err_vsx_builtin_nonconstant_argument) 7401 << 3 /* argument index */ << TheCall->getDirectCallee() 7402 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 7403 TheCall->getArg(2)->getEndLoc()); 7404 7405 QualType Arg1Ty = TheCall->getArg(0)->getType(); 7406 QualType Arg2Ty = TheCall->getArg(1)->getType(); 7407 7408 // Check the type of argument 1 and argument 2 are vectors. 7409 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 7410 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 7411 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 7412 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 7413 << TheCall->getDirectCallee() 7414 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7415 TheCall->getArg(1)->getEndLoc()); 7416 } 7417 7418 // Check the first two arguments are the same type. 7419 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 7420 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 7421 << TheCall->getDirectCallee() 7422 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7423 TheCall->getArg(1)->getEndLoc()); 7424 } 7425 7426 // When default clang type checking is turned off and the customized type 7427 // checking is used, the returning type of the function must be explicitly 7428 // set. Otherwise it is _Bool by default. 7429 TheCall->setType(Arg1Ty); 7430 7431 return false; 7432 } 7433 7434 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 7435 // This is declared to take (...), so we have to check everything. 7436 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 7437 if (TheCall->getNumArgs() < 2) 7438 return ExprError(Diag(TheCall->getEndLoc(), 7439 diag::err_typecheck_call_too_few_args_at_least) 7440 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 7441 << TheCall->getSourceRange()); 7442 7443 // Determine which of the following types of shufflevector we're checking: 7444 // 1) unary, vector mask: (lhs, mask) 7445 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 7446 QualType resType = TheCall->getArg(0)->getType(); 7447 unsigned numElements = 0; 7448 7449 if (!TheCall->getArg(0)->isTypeDependent() && 7450 !TheCall->getArg(1)->isTypeDependent()) { 7451 QualType LHSType = TheCall->getArg(0)->getType(); 7452 QualType RHSType = TheCall->getArg(1)->getType(); 7453 7454 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 7455 return ExprError( 7456 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 7457 << TheCall->getDirectCallee() 7458 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7459 TheCall->getArg(1)->getEndLoc())); 7460 7461 numElements = LHSType->castAs<VectorType>()->getNumElements(); 7462 unsigned numResElements = TheCall->getNumArgs() - 2; 7463 7464 // Check to see if we have a call with 2 vector arguments, the unary shuffle 7465 // with mask. If so, verify that RHS is an integer vector type with the 7466 // same number of elts as lhs. 
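// For illustration (example values, not from the original source), the two
// accepted forms look like:
//   __builtin_shufflevector(a, maskvec)        // unary, vector mask
//   __builtin_shufflevector(a, b, 0, 1, 4, 5)  // binary, constant indices
// where the index values shown assume four-element operands.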
7467 if (TheCall->getNumArgs() == 2) { 7468 if (!RHSType->hasIntegerRepresentation() || 7469 RHSType->castAs<VectorType>()->getNumElements() != numElements) 7470 return ExprError(Diag(TheCall->getBeginLoc(), 7471 diag::err_vec_builtin_incompatible_vector) 7472 << TheCall->getDirectCallee() 7473 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 7474 TheCall->getArg(1)->getEndLoc())); 7475 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 7476 return ExprError(Diag(TheCall->getBeginLoc(), 7477 diag::err_vec_builtin_incompatible_vector) 7478 << TheCall->getDirectCallee() 7479 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7480 TheCall->getArg(1)->getEndLoc())); 7481 } else if (numElements != numResElements) { 7482 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 7483 resType = Context.getVectorType(eltType, numResElements, 7484 VectorType::GenericVector); 7485 } 7486 } 7487 7488 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 7489 if (TheCall->getArg(i)->isTypeDependent() || 7490 TheCall->getArg(i)->isValueDependent()) 7491 continue; 7492 7493 Optional<llvm::APSInt> Result; 7494 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 7495 return ExprError(Diag(TheCall->getBeginLoc(), 7496 diag::err_shufflevector_nonconstant_argument) 7497 << TheCall->getArg(i)->getSourceRange()); 7498 7499 // Allow -1 which will be translated to undef in the IR. 7500 if (Result->isSigned() && Result->isAllOnes()) 7501 continue; 7502 7503 if (Result->getActiveBits() > 64 || 7504 Result->getZExtValue() >= numElements * 2) 7505 return ExprError(Diag(TheCall->getBeginLoc(), 7506 diag::err_shufflevector_argument_too_large) 7507 << TheCall->getArg(i)->getSourceRange()); 7508 } 7509 7510 SmallVector<Expr*, 32> exprs; 7511 7512 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 7513 exprs.push_back(TheCall->getArg(i)); 7514 TheCall->setArg(i, nullptr); 7515 } 7516 7517 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 7518 TheCall->getCallee()->getBeginLoc(), 7519 TheCall->getRParenLoc()); 7520 } 7521 7522 /// SemaConvertVectorExpr - Handle __builtin_convertvector 7523 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 7524 SourceLocation BuiltinLoc, 7525 SourceLocation RParenLoc) { 7526 ExprValueKind VK = VK_PRValue; 7527 ExprObjectKind OK = OK_Ordinary; 7528 QualType DstTy = TInfo->getType(); 7529 QualType SrcTy = E->getType(); 7530 7531 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 7532 return ExprError(Diag(BuiltinLoc, 7533 diag::err_convertvector_non_vector) 7534 << E->getSourceRange()); 7535 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 7536 return ExprError(Diag(BuiltinLoc, 7537 diag::err_convertvector_non_vector_type)); 7538 7539 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 7540 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 7541 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 7542 if (SrcElts != DstElts) 7543 return ExprError(Diag(BuiltinLoc, 7544 diag::err_convertvector_incompatible_vector) 7545 << E->getSourceRange()); 7546 } 7547 7548 return new (Context) 7549 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 7550 } 7551 7552 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 7553 // This is declared to take (const void*, ...) and can take two 7554 // optional constant int args. 
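// Illustrative calls accepted by this check (examples added for clarity):
//   __builtin_prefetch(p);
//   __builtin_prefetch(p, 1);
//   __builtin_prefetch(p, 0, 3);
// The optional arguments must be constants in [0, 1] and [0, 3] respectively,
// as enforced below.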
7555 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 7556 unsigned NumArgs = TheCall->getNumArgs(); 7557 7558 if (NumArgs > 3) 7559 return Diag(TheCall->getEndLoc(), 7560 diag::err_typecheck_call_too_many_args_at_most) 7561 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7562 7563 // Argument 0 is checked for us and the remaining arguments must be 7564 // constant integers. 7565 for (unsigned i = 1; i != NumArgs; ++i) 7566 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 7567 return true; 7568 7569 return false; 7570 } 7571 7572 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 7573 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 7574 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 7575 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 7576 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7577 if (checkArgCount(*this, TheCall, 1)) 7578 return true; 7579 Expr *Arg = TheCall->getArg(0); 7580 if (Arg->isInstantiationDependent()) 7581 return false; 7582 7583 QualType ArgTy = Arg->getType(); 7584 if (!ArgTy->hasFloatingRepresentation()) 7585 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 7586 << ArgTy; 7587 if (Arg->isLValue()) { 7588 ExprResult FirstArg = DefaultLvalueConversion(Arg); 7589 TheCall->setArg(0, FirstArg.get()); 7590 } 7591 TheCall->setType(TheCall->getArg(0)->getType()); 7592 return false; 7593 } 7594 7595 /// SemaBuiltinAssume - Handle __assume (MS Extension). 7596 // __assume does not evaluate its arguments, and should warn if its argument 7597 // has side effects. 7598 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 7599 Expr *Arg = TheCall->getArg(0); 7600 if (Arg->isInstantiationDependent()) return false; 7601 7602 if (Arg->HasSideEffects(Context)) 7603 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 7604 << Arg->getSourceRange() 7605 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 7606 7607 return false; 7608 } 7609 7610 /// Handle __builtin_alloca_with_align. This is declared 7611 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 7612 /// than 8. 7613 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 7614 // The alignment must be a constant integer. 7615 Expr *Arg = TheCall->getArg(1); 7616 7617 // We can't check the value of a dependent argument. 7618 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7619 if (const auto *UE = 7620 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 7621 if (UE->getKind() == UETT_AlignOf || 7622 UE->getKind() == UETT_PreferredAlignOf) 7623 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 7624 << Arg->getSourceRange(); 7625 7626 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 7627 7628 if (!Result.isPowerOf2()) 7629 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7630 << Arg->getSourceRange(); 7631 7632 if (Result < Context.getCharWidth()) 7633 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 7634 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 7635 7636 if (Result > std::numeric_limits<int32_t>::max()) 7637 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 7638 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 7639 } 7640 7641 return false; 7642 } 7643 7644 /// Handle __builtin_assume_aligned. This is declared 7645 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
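// Illustrative calls accepted by this check (examples added for clarity):
//   q = __builtin_assume_aligned(p, 64);
//   q = __builtin_assume_aligned(p, 64, 4);
// The alignment must be a constant power of two; the optional third argument
// is an offset and is converted to size_t below.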
7646 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 7647 unsigned NumArgs = TheCall->getNumArgs(); 7648 7649 if (NumArgs > 3) 7650 return Diag(TheCall->getEndLoc(), 7651 diag::err_typecheck_call_too_many_args_at_most) 7652 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7653 7654 // The alignment must be a constant integer. 7655 Expr *Arg = TheCall->getArg(1); 7656 7657 // We can't check the value of a dependent argument. 7658 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7659 llvm::APSInt Result; 7660 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7661 return true; 7662 7663 if (!Result.isPowerOf2()) 7664 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7665 << Arg->getSourceRange(); 7666 7667 if (Result > Sema::MaximumAlignment) 7668 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 7669 << Arg->getSourceRange() << Sema::MaximumAlignment; 7670 } 7671 7672 if (NumArgs > 2) { 7673 ExprResult Arg(TheCall->getArg(2)); 7674 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 7675 Context.getSizeType(), false); 7676 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7677 if (Arg.isInvalid()) return true; 7678 TheCall->setArg(2, Arg.get()); 7679 } 7680 7681 return false; 7682 } 7683 7684 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 7685 unsigned BuiltinID = 7686 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 7687 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 7688 7689 unsigned NumArgs = TheCall->getNumArgs(); 7690 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 7691 if (NumArgs < NumRequiredArgs) { 7692 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 7693 << 0 /* function call */ << NumRequiredArgs << NumArgs 7694 << TheCall->getSourceRange(); 7695 } 7696 if (NumArgs >= NumRequiredArgs + 0x100) { 7697 return Diag(TheCall->getEndLoc(), 7698 diag::err_typecheck_call_too_many_args_at_most) 7699 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 7700 << TheCall->getSourceRange(); 7701 } 7702 unsigned i = 0; 7703 7704 // For formatting call, check buffer arg. 7705 if (!IsSizeCall) { 7706 ExprResult Arg(TheCall->getArg(i)); 7707 InitializedEntity Entity = InitializedEntity::InitializeParameter( 7708 Context, Context.VoidPtrTy, false); 7709 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7710 if (Arg.isInvalid()) 7711 return true; 7712 TheCall->setArg(i, Arg.get()); 7713 i++; 7714 } 7715 7716 // Check string literal arg. 7717 unsigned FormatIdx = i; 7718 { 7719 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 7720 if (Arg.isInvalid()) 7721 return true; 7722 TheCall->setArg(i, Arg.get()); 7723 i++; 7724 } 7725 7726 // Make sure variadic args are scalar. 7727 unsigned FirstDataArg = i; 7728 while (i < NumArgs) { 7729 ExprResult Arg = DefaultVariadicArgumentPromotion( 7730 TheCall->getArg(i), VariadicFunction, nullptr); 7731 if (Arg.isInvalid()) 7732 return true; 7733 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 7734 if (ArgSize.getQuantity() >= 0x100) { 7735 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 7736 << i << (int)ArgSize.getQuantity() << 0xff 7737 << TheCall->getSourceRange(); 7738 } 7739 TheCall->setArg(i, Arg.get()); 7740 i++; 7741 } 7742 7743 // Check formatting specifiers. NOTE: We're only doing this for the non-size 7744 // call to avoid duplicate diagnostics. 
7745 if (!IsSizeCall) {
7746 llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
7747 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
7748 bool Success = CheckFormatArguments(
7749 Args, FAPK_Variadic, FormatIdx, FirstDataArg, FST_OSLog,
7750 VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
7751 CheckedVarArgs);
7752 if (!Success)
7753 return true;
7754 }
7755
7756 if (IsSizeCall) {
7757 TheCall->setType(Context.getSizeType());
7758 } else {
7759 TheCall->setType(Context.VoidPtrTy);
7760 }
7761 return false;
7762 }
7763
7764 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
7765 /// TheCall is a constant expression.
7766 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
7767 llvm::APSInt &Result) {
7768 Expr *Arg = TheCall->getArg(ArgNum);
7769 DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
7770 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
7771
7772 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
7773
7774 Optional<llvm::APSInt> R;
7775 if (!(R = Arg->getIntegerConstantExpr(Context)))
7776 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
7777 << FDecl->getDeclName() << Arg->getSourceRange();
7778 Result = *R;
7779 return false;
7780 }
7781
7782 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
7783 /// TheCall is a constant expression in the range [Low, High].
7784 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
7785 int Low, int High, bool RangeIsError) {
7786 if (isConstantEvaluated())
7787 return false;
7788 llvm::APSInt Result;
7789
7790 // We can't check the value of a dependent argument.
7791 Expr *Arg = TheCall->getArg(ArgNum);
7792 if (Arg->isTypeDependent() || Arg->isValueDependent())
7793 return false;
7794
7795 // Check constant-ness first.
7796 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
7797 return true;
7798
7799 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
7800 if (RangeIsError)
7801 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
7802 << toString(Result, 10) << Low << High << Arg->getSourceRange();
7803 else
7804 // Defer the warning until we know if the code will be emitted so that
7805 // dead code can ignore this.
7806 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
7807 PDiag(diag::warn_argument_invalid_range)
7808 << toString(Result, 10) << Low << High
7809 << Arg->getSourceRange());
7810 }
7811
7812 return false;
7813 }
7814
7815 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of
7816 /// CallExpr TheCall is a constant expression that is a multiple of Num.
7817 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
7818 unsigned Num) {
7819 llvm::APSInt Result;
7820
7821 // We can't check the value of a dependent argument.
7822 Expr *Arg = TheCall->getArg(ArgNum);
7823 if (Arg->isTypeDependent() || Arg->isValueDependent())
7824 return false;
7825
7826 // Check constant-ness first.
7827 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
7828 return true;
7829
7830 if (Result.getSExtValue() % Num != 0)
7831 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
7832 << Num << Arg->getSourceRange();
7833
7834 return false;
7835 }
7836
7837 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
7838 /// constant expression representing a power of 2.
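// For illustration (example values added for clarity): constants such as 1,
// 2 or 64 satisfy this check, while 0, 3 or 6 are rejected with
// err_argument_not_power_of_2.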
7839 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 7840 llvm::APSInt Result; 7841 7842 // We can't check the value of a dependent argument. 7843 Expr *Arg = TheCall->getArg(ArgNum); 7844 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7845 return false; 7846 7847 // Check constant-ness first. 7848 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7849 return true; 7850 7851 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 7852 // and only if x is a power of 2. 7853 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 7854 return false; 7855 7856 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 7857 << Arg->getSourceRange(); 7858 } 7859 7860 static bool IsShiftedByte(llvm::APSInt Value) { 7861 if (Value.isNegative()) 7862 return false; 7863 7864 // Check if it's a shifted byte, by shifting it down 7865 while (true) { 7866 // If the value fits in the bottom byte, the check passes. 7867 if (Value < 0x100) 7868 return true; 7869 7870 // Otherwise, if the value has _any_ bits in the bottom byte, the check 7871 // fails. 7872 if ((Value & 0xFF) != 0) 7873 return false; 7874 7875 // If the bottom 8 bits are all 0, but something above that is nonzero, 7876 // then shifting the value right by 8 bits won't affect whether it's a 7877 // shifted byte or not. So do that, and go round again. 7878 Value >>= 8; 7879 } 7880 } 7881 7882 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 7883 /// a constant expression representing an arbitrary byte value shifted left by 7884 /// a multiple of 8 bits. 7885 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 7886 unsigned ArgBits) { 7887 llvm::APSInt Result; 7888 7889 // We can't check the value of a dependent argument. 7890 Expr *Arg = TheCall->getArg(ArgNum); 7891 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7892 return false; 7893 7894 // Check constant-ness first. 7895 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7896 return true; 7897 7898 // Truncate to the given size. 7899 Result = Result.getLoBits(ArgBits); 7900 Result.setIsUnsigned(true); 7901 7902 if (IsShiftedByte(Result)) 7903 return false; 7904 7905 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 7906 << Arg->getSourceRange(); 7907 } 7908 7909 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 7910 /// TheCall is a constant expression representing either a shifted byte value, 7911 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 7912 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 7913 /// Arm MVE intrinsics. 7914 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 7915 int ArgNum, 7916 unsigned ArgBits) { 7917 llvm::APSInt Result; 7918 7919 // We can't check the value of a dependent argument. 7920 Expr *Arg = TheCall->getArg(ArgNum); 7921 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7922 return false; 7923 7924 // Check constant-ness first. 7925 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7926 return true; 7927 7928 // Truncate to the given size. 7929 Result = Result.getLoBits(ArgBits); 7930 Result.setIsUnsigned(true); 7931 7932 // Check to see if it's in either of the required forms. 
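// (Illustrative values, added for clarity: 0xAB00 is accepted as a shifted
//  byte, 0x12FF matches the 0x??FF form, and 0xABCD is rejected by both
//  checks.)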
7933 if (IsShiftedByte(Result) || 7934 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 7935 return false; 7936 7937 return Diag(TheCall->getBeginLoc(), 7938 diag::err_argument_not_shifted_byte_or_xxff) 7939 << Arg->getSourceRange(); 7940 } 7941 7942 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 7943 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 7944 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 7945 if (checkArgCount(*this, TheCall, 2)) 7946 return true; 7947 Expr *Arg0 = TheCall->getArg(0); 7948 Expr *Arg1 = TheCall->getArg(1); 7949 7950 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7951 if (FirstArg.isInvalid()) 7952 return true; 7953 QualType FirstArgType = FirstArg.get()->getType(); 7954 if (!FirstArgType->isAnyPointerType()) 7955 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7956 << "first" << FirstArgType << Arg0->getSourceRange(); 7957 TheCall->setArg(0, FirstArg.get()); 7958 7959 ExprResult SecArg = DefaultLvalueConversion(Arg1); 7960 if (SecArg.isInvalid()) 7961 return true; 7962 QualType SecArgType = SecArg.get()->getType(); 7963 if (!SecArgType->isIntegerType()) 7964 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7965 << "second" << SecArgType << Arg1->getSourceRange(); 7966 7967 // Derive the return type from the pointer argument. 7968 TheCall->setType(FirstArgType); 7969 return false; 7970 } 7971 7972 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 7973 if (checkArgCount(*this, TheCall, 2)) 7974 return true; 7975 7976 Expr *Arg0 = TheCall->getArg(0); 7977 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7978 if (FirstArg.isInvalid()) 7979 return true; 7980 QualType FirstArgType = FirstArg.get()->getType(); 7981 if (!FirstArgType->isAnyPointerType()) 7982 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7983 << "first" << FirstArgType << Arg0->getSourceRange(); 7984 TheCall->setArg(0, FirstArg.get()); 7985 7986 // Derive the return type from the pointer argument. 
7987 TheCall->setType(FirstArgType); 7988 7989 // Second arg must be an constant in range [0,15] 7990 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7991 } 7992 7993 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 7994 if (checkArgCount(*this, TheCall, 2)) 7995 return true; 7996 Expr *Arg0 = TheCall->getArg(0); 7997 Expr *Arg1 = TheCall->getArg(1); 7998 7999 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 8000 if (FirstArg.isInvalid()) 8001 return true; 8002 QualType FirstArgType = FirstArg.get()->getType(); 8003 if (!FirstArgType->isAnyPointerType()) 8004 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 8005 << "first" << FirstArgType << Arg0->getSourceRange(); 8006 8007 QualType SecArgType = Arg1->getType(); 8008 if (!SecArgType->isIntegerType()) 8009 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 8010 << "second" << SecArgType << Arg1->getSourceRange(); 8011 TheCall->setType(Context.IntTy); 8012 return false; 8013 } 8014 8015 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 8016 BuiltinID == AArch64::BI__builtin_arm_stg) { 8017 if (checkArgCount(*this, TheCall, 1)) 8018 return true; 8019 Expr *Arg0 = TheCall->getArg(0); 8020 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 8021 if (FirstArg.isInvalid()) 8022 return true; 8023 8024 QualType FirstArgType = FirstArg.get()->getType(); 8025 if (!FirstArgType->isAnyPointerType()) 8026 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 8027 << "first" << FirstArgType << Arg0->getSourceRange(); 8028 TheCall->setArg(0, FirstArg.get()); 8029 8030 // Derive the return type from the pointer argument. 8031 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 8032 TheCall->setType(FirstArgType); 8033 return false; 8034 } 8035 8036 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 8037 Expr *ArgA = TheCall->getArg(0); 8038 Expr *ArgB = TheCall->getArg(1); 8039 8040 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 8041 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 8042 8043 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 8044 return true; 8045 8046 QualType ArgTypeA = ArgExprA.get()->getType(); 8047 QualType ArgTypeB = ArgExprB.get()->getType(); 8048 8049 auto isNull = [&] (Expr *E) -> bool { 8050 return E->isNullPointerConstant( 8051 Context, Expr::NPC_ValueDependentIsNotNull); }; 8052 8053 // argument should be either a pointer or null 8054 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 8055 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 8056 << "first" << ArgTypeA << ArgA->getSourceRange(); 8057 8058 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 8059 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 8060 << "second" << ArgTypeB << ArgB->getSourceRange(); 8061 8062 // Ensure Pointee types are compatible 8063 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 8064 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 8065 QualType pointeeA = ArgTypeA->getPointeeType(); 8066 QualType pointeeB = ArgTypeB->getPointeeType(); 8067 if (!Context.typesAreCompatible( 8068 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 8069 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 8070 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 8071 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 8072 << ArgB->getSourceRange(); 8073 } 8074 } 8075 8076 // at least one argument should be pointer type 8077 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 8078 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 8079 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 8080 8081 if (isNull(ArgA)) // adopt type of the other pointer 8082 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 8083 8084 if (isNull(ArgB)) 8085 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 8086 8087 TheCall->setArg(0, ArgExprA.get()); 8088 TheCall->setArg(1, ArgExprB.get()); 8089 TheCall->setType(Context.LongLongTy); 8090 return false; 8091 } 8092 assert(false && "Unhandled ARM MTE intrinsic"); 8093 return true; 8094 } 8095 8096 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 8097 /// TheCall is an ARM/AArch64 special register string literal. 8098 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 8099 int ArgNum, unsigned ExpectedFieldNum, 8100 bool AllowName) { 8101 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 8102 BuiltinID == ARM::BI__builtin_arm_wsr64 || 8103 BuiltinID == ARM::BI__builtin_arm_rsr || 8104 BuiltinID == ARM::BI__builtin_arm_rsrp || 8105 BuiltinID == ARM::BI__builtin_arm_wsr || 8106 BuiltinID == ARM::BI__builtin_arm_wsrp; 8107 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 8108 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 8109 BuiltinID == AArch64::BI__builtin_arm_rsr || 8110 BuiltinID == AArch64::BI__builtin_arm_rsrp || 8111 BuiltinID == AArch64::BI__builtin_arm_wsr || 8112 BuiltinID == AArch64::BI__builtin_arm_wsrp; 8113 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 8114 8115 // We can't check the value of a dependent argument. 8116 Expr *Arg = TheCall->getArg(ArgNum); 8117 if (Arg->isTypeDependent() || Arg->isValueDependent()) 8118 return false; 8119 8120 // Check if the argument is a string literal. 8121 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 8122 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 8123 << Arg->getSourceRange(); 8124 8125 // Check the type of special register given. 8126 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 8127 SmallVector<StringRef, 6> Fields; 8128 Reg.split(Fields, ":"); 8129 8130 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 8131 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 8132 << Arg->getSourceRange(); 8133 8134 // If the string is the name of a register then we cannot check that it is 8135 // valid here but if the string is of one the forms described in ACLE then we 8136 // can check that the supplied fields are integers and within the valid 8137 // ranges. 8138 if (Fields.size() > 1) { 8139 bool FiveFields = Fields.size() == 5; 8140 8141 bool ValidString = true; 8142 if (IsARMBuiltin) { 8143 ValidString &= Fields[0].startswith_insensitive("cp") || 8144 Fields[0].startswith_insensitive("p"); 8145 if (ValidString) 8146 Fields[0] = Fields[0].drop_front( 8147 Fields[0].startswith_insensitive("cp") ? 2 : 1); 8148 8149 ValidString &= Fields[2].startswith_insensitive("c"); 8150 if (ValidString) 8151 Fields[2] = Fields[2].drop_front(1); 8152 8153 if (FiveFields) { 8154 ValidString &= Fields[3].startswith_insensitive("c"); 8155 if (ValidString) 8156 Fields[3] = Fields[3].drop_front(1); 8157 } 8158 } 8159 8160 SmallVector<int, 5> Ranges; 8161 if (FiveFields) 8162 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 8163 else 8164 Ranges.append({15, 7, 15}); 8165 8166 for (unsigned i=0; i<Fields.size(); ++i) { 8167 int IntField; 8168 ValidString &= !Fields[i].getAsInteger(10, IntField); 8169 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 8170 } 8171 8172 if (!ValidString) 8173 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 8174 << Arg->getSourceRange(); 8175 } else if (IsAArch64Builtin && Fields.size() == 1) { 8176 // If the register name is one of those that appear in the condition below 8177 // and the special register builtin being used is one of the write builtins, 8178 // then we require that the argument provided for writing to the register 8179 // is an integer constant expression. This is because it will be lowered to 8180 // an MSR (immediate) instruction, so we need to know the immediate at 8181 // compile time. 8182 if (TheCall->getNumArgs() != 2) 8183 return false; 8184 8185 std::string RegLower = Reg.lower(); 8186 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 8187 RegLower != "pan" && RegLower != "uao") 8188 return false; 8189 8190 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 8191 } 8192 8193 return false; 8194 } 8195 8196 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 8197 /// Emit an error and return true on failure; return false on success. 8198 /// TypeStr is a string containing the type descriptor of the value returned by 8199 /// the builtin and the descriptors of the expected type of the arguments. 8200 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, 8201 const char *TypeStr) { 8202 8203 assert((TypeStr[0] != '\0') && 8204 "Invalid types in PPC MMA builtin declaration"); 8205 8206 switch (BuiltinID) { 8207 default: 8208 // This function is called in CheckPPCBuiltinFunctionCall where the 8209 // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here 8210 // we are isolating the pair vector memop builtins that can be used with mma 8211 // off so the default case is every builtin that requires mma and paired 8212 // vector memops. 8213 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 8214 diag::err_ppc_builtin_only_on_arch, "10") || 8215 SemaFeatureCheck(*this, TheCall, "mma", 8216 diag::err_ppc_builtin_only_on_arch, "10")) 8217 return true; 8218 break; 8219 case PPC::BI__builtin_vsx_lxvp: 8220 case PPC::BI__builtin_vsx_stxvp: 8221 case PPC::BI__builtin_vsx_assemble_pair: 8222 case PPC::BI__builtin_vsx_disassemble_pair: 8223 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 8224 diag::err_ppc_builtin_only_on_arch, "10")) 8225 return true; 8226 break; 8227 } 8228 8229 unsigned Mask = 0; 8230 unsigned ArgNum = 0; 8231 8232 // The first type in TypeStr is the type of the value returned by the 8233 // builtin. So we first read that type and change the type of TheCall. 8234 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8235 TheCall->setType(type); 8236 8237 while (*TypeStr != '\0') { 8238 Mask = 0; 8239 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8240 if (ArgNum >= TheCall->getNumArgs()) { 8241 ArgNum++; 8242 break; 8243 } 8244 8245 Expr *Arg = TheCall->getArg(ArgNum); 8246 QualType PassedType = Arg->getType(); 8247 QualType StrippedRVType = PassedType.getCanonicalType(); 8248 8249 // Strip Restrict/Volatile qualifiers. 
8250 if (StrippedRVType.isRestrictQualified() || 8251 StrippedRVType.isVolatileQualified()) 8252 StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType(); 8253 8254 // The only case where the argument type and expected type are allowed to 8255 // mismatch is if the argument type is a non-void pointer (or array) and 8256 // expected type is a void pointer. 8257 if (StrippedRVType != ExpectedType) 8258 if (!(ExpectedType->isVoidPointerType() && 8259 (StrippedRVType->isPointerType() || StrippedRVType->isArrayType()))) 8260 return Diag(Arg->getBeginLoc(), 8261 diag::err_typecheck_convert_incompatible) 8262 << PassedType << ExpectedType << 1 << 0 << 0; 8263 8264 // If the value of the Mask is not 0, we have a constraint in the size of 8265 // the integer argument so here we ensure the argument is a constant that 8266 // is in the valid range. 8267 if (Mask != 0 && 8268 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 8269 return true; 8270 8271 ArgNum++; 8272 } 8273 8274 // In case we exited early from the previous loop, there are other types to 8275 // read from TypeStr. So we need to read them all to ensure we have the right 8276 // number of arguments in TheCall and if it is not the case, to display a 8277 // better error message. 8278 while (*TypeStr != '\0') { 8279 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 8280 ArgNum++; 8281 } 8282 if (checkArgCount(*this, TheCall, ArgNum)) 8283 return true; 8284 8285 return false; 8286 } 8287 8288 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 8289 /// This checks that the target supports __builtin_longjmp and 8290 /// that val is a constant 1. 8291 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 8292 if (!Context.getTargetInfo().hasSjLjLowering()) 8293 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 8294 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8295 8296 Expr *Arg = TheCall->getArg(1); 8297 llvm::APSInt Result; 8298 8299 // TODO: This is less than ideal. Overload this to take a value. 8300 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 8301 return true; 8302 8303 if (Result != 1) 8304 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 8305 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 8306 8307 return false; 8308 } 8309 8310 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 8311 /// This checks that the target supports __builtin_setjmp. 8312 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 8313 if (!Context.getTargetInfo().hasSjLjLowering()) 8314 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 8315 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8316 return false; 8317 } 8318 8319 namespace { 8320 8321 class UncoveredArgHandler { 8322 enum { Unknown = -1, AllCovered = -2 }; 8323 8324 signed FirstUncoveredArg = Unknown; 8325 SmallVector<const Expr *, 4> DiagnosticExprs; 8326 8327 public: 8328 UncoveredArgHandler() = default; 8329 8330 bool hasUncoveredArg() const { 8331 return (FirstUncoveredArg >= 0); 8332 } 8333 8334 unsigned getUncoveredArg() const { 8335 assert(hasUncoveredArg() && "no uncovered argument"); 8336 return FirstUncoveredArg; 8337 } 8338 8339 void setAllCovered() { 8340 // A string has been found with all arguments covered, so clear out 8341 // the diagnostics. 
8342 DiagnosticExprs.clear(); 8343 FirstUncoveredArg = AllCovered; 8344 } 8345 8346 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 8347 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 8348 8349 // Don't update if a previous string covers all arguments. 8350 if (FirstUncoveredArg == AllCovered) 8351 return; 8352 8353 // UncoveredArgHandler tracks the highest uncovered argument index 8354 // and with it all the strings that match this index. 8355 if (NewFirstUncoveredArg == FirstUncoveredArg) 8356 DiagnosticExprs.push_back(StrExpr); 8357 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 8358 DiagnosticExprs.clear(); 8359 DiagnosticExprs.push_back(StrExpr); 8360 FirstUncoveredArg = NewFirstUncoveredArg; 8361 } 8362 } 8363 8364 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 8365 }; 8366 8367 enum StringLiteralCheckType { 8368 SLCT_NotALiteral, 8369 SLCT_UncheckedLiteral, 8370 SLCT_CheckedLiteral 8371 }; 8372 8373 } // namespace 8374 8375 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 8376 BinaryOperatorKind BinOpKind, 8377 bool AddendIsRight) { 8378 unsigned BitWidth = Offset.getBitWidth(); 8379 unsigned AddendBitWidth = Addend.getBitWidth(); 8380 // There might be negative interim results. 8381 if (Addend.isUnsigned()) { 8382 Addend = Addend.zext(++AddendBitWidth); 8383 Addend.setIsSigned(true); 8384 } 8385 // Adjust the bit width of the APSInts. 8386 if (AddendBitWidth > BitWidth) { 8387 Offset = Offset.sext(AddendBitWidth); 8388 BitWidth = AddendBitWidth; 8389 } else if (BitWidth > AddendBitWidth) { 8390 Addend = Addend.sext(BitWidth); 8391 } 8392 8393 bool Ov = false; 8394 llvm::APSInt ResOffset = Offset; 8395 if (BinOpKind == BO_Add) 8396 ResOffset = Offset.sadd_ov(Addend, Ov); 8397 else { 8398 assert(AddendIsRight && BinOpKind == BO_Sub && 8399 "operator must be add or sub with addend on the right"); 8400 ResOffset = Offset.ssub_ov(Addend, Ov); 8401 } 8402 8403 // We add an offset to a pointer here so we should support an offset as big as 8404 // possible. 8405 if (Ov) { 8406 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 8407 "index (intermediate) result too big"); 8408 Offset = Offset.sext(2 * BitWidth); 8409 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 8410 return; 8411 } 8412 8413 Offset = ResOffset; 8414 } 8415 8416 namespace { 8417 8418 // This is a wrapper class around StringLiteral to support offsetted string 8419 // literals as format strings. It takes the offset into account when returning 8420 // the string and its length or the source locations to display notes correctly. 
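// For illustration (assumed example): given a call such as
//   printf("%s: %d" + 4, v);
// the wrapper exposes "%d" as the format string while mapping diagnostic
// byte offsets back into the full literal.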
8421 class FormatStringLiteral { 8422 const StringLiteral *FExpr; 8423 int64_t Offset; 8424 8425 public: 8426 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 8427 : FExpr(fexpr), Offset(Offset) {} 8428 8429 StringRef getString() const { 8430 return FExpr->getString().drop_front(Offset); 8431 } 8432 8433 unsigned getByteLength() const { 8434 return FExpr->getByteLength() - getCharByteWidth() * Offset; 8435 } 8436 8437 unsigned getLength() const { return FExpr->getLength() - Offset; } 8438 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 8439 8440 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 8441 8442 QualType getType() const { return FExpr->getType(); } 8443 8444 bool isAscii() const { return FExpr->isOrdinary(); } 8445 bool isWide() const { return FExpr->isWide(); } 8446 bool isUTF8() const { return FExpr->isUTF8(); } 8447 bool isUTF16() const { return FExpr->isUTF16(); } 8448 bool isUTF32() const { return FExpr->isUTF32(); } 8449 bool isPascal() const { return FExpr->isPascal(); } 8450 8451 SourceLocation getLocationOfByte( 8452 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 8453 const TargetInfo &Target, unsigned *StartToken = nullptr, 8454 unsigned *StartTokenByteOffset = nullptr) const { 8455 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 8456 StartToken, StartTokenByteOffset); 8457 } 8458 8459 SourceLocation getBeginLoc() const LLVM_READONLY { 8460 return FExpr->getBeginLoc().getLocWithOffset(Offset); 8461 } 8462 8463 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 8464 }; 8465 8466 } // namespace 8467 8468 static void CheckFormatString( 8469 Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr, 8470 ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK, 8471 unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type, 8472 bool inFunctionCall, Sema::VariadicCallType CallType, 8473 llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg, 8474 bool IgnoreStringsWithoutSpecifiers); 8475 8476 // Determine if an expression is a string literal or constant string. 8477 // If this function returns false on the arguments to a function expecting a 8478 // format string, we will usually need to emit a warning. 8479 // True string literals are then checked by CheckFormatString. 8480 static StringLiteralCheckType 8481 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 8482 Sema::FormatArgumentPassingKind APK, unsigned format_idx, 8483 unsigned firstDataArg, Sema::FormatStringType Type, 8484 Sema::VariadicCallType CallType, bool InFunctionCall, 8485 llvm::SmallBitVector &CheckedVarArgs, 8486 UncoveredArgHandler &UncoveredArg, llvm::APSInt Offset, 8487 bool IgnoreStringsWithoutSpecifiers = false) { 8488 if (S.isConstantEvaluated()) 8489 return SLCT_NotALiteral; 8490 tryAgain: 8491 assert(Offset.isSigned() && "invalid offset"); 8492 8493 if (E->isTypeDependent() || E->isValueDependent()) 8494 return SLCT_NotALiteral; 8495 8496 E = E->IgnoreParenCasts(); 8497 8498 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 8499 // Technically -Wformat-nonliteral does not warn about this case. 8500 // The behavior of printf and friends in this case is implementation 8501 // dependent. Ideally if the format string cannot be null then 8502 // it should have a 'nonnull' attribute in the function prototype. 
8503 return SLCT_UncheckedLiteral; 8504 8505 switch (E->getStmtClass()) { 8506 case Stmt::BinaryConditionalOperatorClass: 8507 case Stmt::ConditionalOperatorClass: { 8508 // The expression is a literal if both sub-expressions were, and it was 8509 // completely checked only if both sub-expressions were checked. 8510 const AbstractConditionalOperator *C = 8511 cast<AbstractConditionalOperator>(E); 8512 8513 // Determine whether it is necessary to check both sub-expressions, for 8514 // example, because the condition expression is a constant that can be 8515 // evaluated at compile time. 8516 bool CheckLeft = true, CheckRight = true; 8517 8518 bool Cond; 8519 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 8520 S.isConstantEvaluated())) { 8521 if (Cond) 8522 CheckRight = false; 8523 else 8524 CheckLeft = false; 8525 } 8526 8527 // We need to maintain the offsets for the right and the left hand side 8528 // separately to check if every possible indexed expression is a valid 8529 // string literal. They might have different offsets for different string 8530 // literals in the end. 8531 StringLiteralCheckType Left; 8532 if (!CheckLeft) 8533 Left = SLCT_UncheckedLiteral; 8534 else { 8535 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, APK, format_idx, 8536 firstDataArg, Type, CallType, InFunctionCall, 8537 CheckedVarArgs, UncoveredArg, Offset, 8538 IgnoreStringsWithoutSpecifiers); 8539 if (Left == SLCT_NotALiteral || !CheckRight) { 8540 return Left; 8541 } 8542 } 8543 8544 StringLiteralCheckType Right = checkFormatStringExpr( 8545 S, C->getFalseExpr(), Args, APK, format_idx, firstDataArg, Type, 8546 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8547 IgnoreStringsWithoutSpecifiers); 8548 8549 return (CheckLeft && Left < Right) ? Left : Right; 8550 } 8551 8552 case Stmt::ImplicitCastExprClass: 8553 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 8554 goto tryAgain; 8555 8556 case Stmt::OpaqueValueExprClass: 8557 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 8558 E = src; 8559 goto tryAgain; 8560 } 8561 return SLCT_NotALiteral; 8562 8563 case Stmt::PredefinedExprClass: 8564 // While __func__, etc., are technically not string literals, they 8565 // cannot contain format specifiers and thus are not a security 8566 // liability. 8567 return SLCT_UncheckedLiteral; 8568 8569 case Stmt::DeclRefExprClass: { 8570 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 8571 8572 // As an exception, do not flag errors for variables binding to 8573 // const string literals. 8574 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 8575 bool isConstant = false; 8576 QualType T = DR->getType(); 8577 8578 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 8579 isConstant = AT->getElementType().isConstant(S.Context); 8580 } else if (const PointerType *PT = T->getAs<PointerType>()) { 8581 isConstant = T.isConstant(S.Context) && 8582 PT->getPointeeType().isConstant(S.Context); 8583 } else if (T->isObjCObjectPointerType()) { 8584 // In ObjC, there is usually no "const ObjectPointer" type, 8585 // so don't check if the pointee type is constant. 
8586 isConstant = T.isConstant(S.Context); 8587 } 8588 8589 if (isConstant) { 8590 if (const Expr *Init = VD->getAnyInitializer()) { 8591 // Look through initializers like const char c[] = { "foo" } 8592 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 8593 if (InitList->isStringLiteralInit()) 8594 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 8595 } 8596 return checkFormatStringExpr( 8597 S, Init, Args, APK, format_idx, firstDataArg, Type, CallType, 8598 /*InFunctionCall*/ false, CheckedVarArgs, UncoveredArg, Offset); 8599 } 8600 } 8601 8602 // When the format argument is an argument of this function, and this 8603 // function also has the format attribute, there are several interactions 8604 // for which there shouldn't be a warning. For instance, when calling 8605 // v*printf from a function that has the printf format attribute, we 8606 // should not emit a warning about using `fmt`, even though it's not 8607 // constant, because the arguments have already been checked for the 8608 // caller of `logmessage`: 8609 // 8610 // __attribute__((format(printf, 1, 2))) 8611 // void logmessage(char const *fmt, ...) { 8612 // va_list ap; 8613 // va_start(ap, fmt); 8614 // vprintf(fmt, ap); /* do not emit a warning about "fmt" */ 8615 // ... 8616 // } 8617 // 8618 // Another interaction that we need to support is calling a variadic 8619 // format function from a format function that has fixed arguments. For 8620 // instance: 8621 // 8622 // __attribute__((format(printf, 1, 2))) 8623 // void logstring(char const *fmt, char const *str) { 8624 // printf(fmt, str); /* do not emit a warning about "fmt" */ 8625 // } 8626 // 8627 // Same (and perhaps more relatably) for the variadic template case: 8628 // 8629 // template<typename... Args> 8630 // __attribute__((format(printf, 1, 2))) 8631 // void log(const char *fmt, Args&&... args) { 8632 // printf(fmt, forward<Args>(args)...); 8633 // /* do not emit a warning about "fmt" */ 8634 // } 8635 // 8636 // Due to implementation difficulty, we only check the format, not the 8637 // format arguments, in all cases. 8638 // 8639 if (const auto *PV = dyn_cast<ParmVarDecl>(VD)) { 8640 if (const auto *D = dyn_cast<Decl>(PV->getDeclContext())) { 8641 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) { 8642 bool IsCXXMember = false; 8643 if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) 8644 IsCXXMember = MD->isInstance(); 8645 8646 bool IsVariadic = false; 8647 if (const FunctionType *FnTy = D->getFunctionType()) 8648 IsVariadic = cast<FunctionProtoType>(FnTy)->isVariadic(); 8649 else if (const auto *BD = dyn_cast<BlockDecl>(D)) 8650 IsVariadic = BD->isVariadic(); 8651 else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D)) 8652 IsVariadic = OMD->isVariadic(); 8653 8654 Sema::FormatStringInfo CallerFSI; 8655 if (Sema::getFormatStringInfo(PVFormat, IsCXXMember, IsVariadic, 8656 &CallerFSI)) { 8657 // We also check if the formats are compatible. 8658 // We can't pass a 'scanf' string to a 'printf' function. 
8659 if (PV->getFunctionScopeIndex() == CallerFSI.FormatIdx && 8660 Type == S.GetFormatStringType(PVFormat)) { 8661 // Lastly, check that argument passing kinds transition in a 8662 // way that makes sense: 8663 // from a caller with FAPK_VAList, allow FAPK_VAList 8664 // from a caller with FAPK_Fixed, allow FAPK_Fixed 8665 // from a caller with FAPK_Fixed, allow FAPK_Variadic 8666 // from a caller with FAPK_Variadic, allow FAPK_VAList 8667 switch (combineFAPK(CallerFSI.ArgPassingKind, APK)) { 8668 case combineFAPK(Sema::FAPK_VAList, Sema::FAPK_VAList): 8669 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Fixed): 8670 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Variadic): 8671 case combineFAPK(Sema::FAPK_Variadic, Sema::FAPK_VAList): 8672 return SLCT_UncheckedLiteral; 8673 } 8674 } 8675 } 8676 } 8677 } 8678 } 8679 } 8680 8681 return SLCT_NotALiteral; 8682 } 8683 8684 case Stmt::CallExprClass: 8685 case Stmt::CXXMemberCallExprClass: { 8686 const CallExpr *CE = cast<CallExpr>(E); 8687 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 8688 bool IsFirst = true; 8689 StringLiteralCheckType CommonResult; 8690 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 8691 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 8692 StringLiteralCheckType Result = checkFormatStringExpr( 8693 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType, 8694 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8695 IgnoreStringsWithoutSpecifiers); 8696 if (IsFirst) { 8697 CommonResult = Result; 8698 IsFirst = false; 8699 } 8700 } 8701 if (!IsFirst) 8702 return CommonResult; 8703 8704 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 8705 unsigned BuiltinID = FD->getBuiltinID(); 8706 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 8707 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 8708 const Expr *Arg = CE->getArg(0); 8709 return checkFormatStringExpr( 8710 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType, 8711 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8712 IgnoreStringsWithoutSpecifiers); 8713 } 8714 } 8715 } 8716 8717 return SLCT_NotALiteral; 8718 } 8719 case Stmt::ObjCMessageExprClass: { 8720 const auto *ME = cast<ObjCMessageExpr>(E); 8721 if (const auto *MD = ME->getMethodDecl()) { 8722 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 8723 // As a special case heuristic, if we're using the method -[NSBundle 8724 // localizedStringForKey:value:table:], ignore any key strings that lack 8725 // format specifiers. The idea is that if the key doesn't have any 8726 // format specifiers then its probably just a key to map to the 8727 // localized strings. If it does have format specifiers though, then its 8728 // likely that the text of the key is the format string in the 8729 // programmer's language, and should be checked. 
8730 const ObjCInterfaceDecl *IFace; 8731 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) && 8732 IFace->getIdentifier()->isStr("NSBundle") && 8733 MD->getSelector().isKeywordSelector( 8734 {"localizedStringForKey", "value", "table"})) { 8735 IgnoreStringsWithoutSpecifiers = true; 8736 } 8737 8738 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 8739 return checkFormatStringExpr( 8740 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType, 8741 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8742 IgnoreStringsWithoutSpecifiers); 8743 } 8744 } 8745 8746 return SLCT_NotALiteral; 8747 } 8748 case Stmt::ObjCStringLiteralClass: 8749 case Stmt::StringLiteralClass: { 8750 const StringLiteral *StrE = nullptr; 8751 8752 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 8753 StrE = ObjCFExpr->getString(); 8754 else 8755 StrE = cast<StringLiteral>(E); 8756 8757 if (StrE) { 8758 if (Offset.isNegative() || Offset > StrE->getLength()) { 8759 // TODO: It would be better to have an explicit warning for out of 8760 // bounds literals. 8761 return SLCT_NotALiteral; 8762 } 8763 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 8764 CheckFormatString(S, &FStr, E, Args, APK, format_idx, firstDataArg, Type, 8765 InFunctionCall, CallType, CheckedVarArgs, UncoveredArg, 8766 IgnoreStringsWithoutSpecifiers); 8767 return SLCT_CheckedLiteral; 8768 } 8769 8770 return SLCT_NotALiteral; 8771 } 8772 case Stmt::BinaryOperatorClass: { 8773 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 8774 8775 // A string literal + an int offset is still a string literal. 8776 if (BinOp->isAdditiveOp()) { 8777 Expr::EvalResult LResult, RResult; 8778 8779 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 8780 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8781 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 8782 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8783 8784 if (LIsInt != RIsInt) { 8785 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 8786 8787 if (LIsInt) { 8788 if (BinOpKind == BO_Add) { 8789 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 8790 E = BinOp->getRHS(); 8791 goto tryAgain; 8792 } 8793 } else { 8794 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 8795 E = BinOp->getLHS(); 8796 goto tryAgain; 8797 } 8798 } 8799 } 8800 8801 return SLCT_NotALiteral; 8802 } 8803 case Stmt::UnaryOperatorClass: { 8804 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 8805 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 8806 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 8807 Expr::EvalResult IndexResult; 8808 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 8809 Expr::SE_NoSideEffects, 8810 S.isConstantEvaluated())) { 8811 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 8812 /*RHS is int*/ true); 8813 E = ASE->getBase(); 8814 goto tryAgain; 8815 } 8816 } 8817 8818 return SLCT_NotALiteral; 8819 } 8820 8821 default: 8822 return SLCT_NotALiteral; 8823 } 8824 } 8825 8826 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 8827 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 8828 .Case("scanf", FST_Scanf) 8829 .Cases("printf", "printf0", FST_Printf) 8830 .Cases("NSString", "CFString", FST_NSString) 8831 .Case("strftime", FST_Strftime) 8832 .Case("strfmon", FST_Strfmon) 8833 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 8834 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 8835 
.Case("os_trace", FST_OSLog) 8836 .Case("os_log", FST_OSLog) 8837 .Default(FST_Unknown); 8838 } 8839 8840 /// CheckFormatArguments - Check calls to printf and scanf (and similar 8841 /// functions) for correct use of format strings. 8842 /// Returns true if a format string has been fully checked. 8843 bool Sema::CheckFormatArguments(const FormatAttr *Format, 8844 ArrayRef<const Expr *> Args, bool IsCXXMember, 8845 VariadicCallType CallType, SourceLocation Loc, 8846 SourceRange Range, 8847 llvm::SmallBitVector &CheckedVarArgs) { 8848 FormatStringInfo FSI; 8849 if (getFormatStringInfo(Format, IsCXXMember, CallType != VariadicDoesNotApply, 8850 &FSI)) 8851 return CheckFormatArguments(Args, FSI.ArgPassingKind, FSI.FormatIdx, 8852 FSI.FirstDataArg, GetFormatStringType(Format), 8853 CallType, Loc, Range, CheckedVarArgs); 8854 return false; 8855 } 8856 8857 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 8858 Sema::FormatArgumentPassingKind APK, 8859 unsigned format_idx, unsigned firstDataArg, 8860 FormatStringType Type, 8861 VariadicCallType CallType, SourceLocation Loc, 8862 SourceRange Range, 8863 llvm::SmallBitVector &CheckedVarArgs) { 8864 // CHECK: printf/scanf-like function is called with no format string. 8865 if (format_idx >= Args.size()) { 8866 Diag(Loc, diag::warn_missing_format_string) << Range; 8867 return false; 8868 } 8869 8870 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 8871 8872 // CHECK: format string is not a string literal. 8873 // 8874 // Dynamically generated format strings are difficult to 8875 // automatically vet at compile time. Requiring that format strings 8876 // are string literals: (1) permits the checking of format strings by 8877 // the compiler and thereby (2) can practically remove the source of 8878 // many format string exploits. 8879 8880 // Format string can be either ObjC string (e.g. @"%d") or 8881 // C string (e.g. "%d") 8882 // ObjC string uses the same format specifiers as C string, so we can use 8883 // the same format string checking logic for both ObjC and C strings. 8884 UncoveredArgHandler UncoveredArg; 8885 StringLiteralCheckType CT = checkFormatStringExpr( 8886 *this, OrigFormatExpr, Args, APK, format_idx, firstDataArg, Type, 8887 CallType, 8888 /*IsFunctionCall*/ true, CheckedVarArgs, UncoveredArg, 8889 /*no string offset*/ llvm::APSInt(64, false) = 0); 8890 8891 // Generate a diagnostic where an uncovered argument is detected. 8892 if (UncoveredArg.hasUncoveredArg()) { 8893 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 8894 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 8895 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 8896 } 8897 8898 if (CT != SLCT_NotALiteral) 8899 // Literal format string found, check done! 8900 return CT == SLCT_CheckedLiteral; 8901 8902 // Strftime is particular as it always uses a single 'time' argument, 8903 // so it is safe to pass a non-literal string. 8904 if (Type == FST_Strftime) 8905 return false; 8906 8907 // Do not emit diag when the string param is a macro expansion and the 8908 // format is either NSString or CFString. This is a hack to prevent 8909 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 8910 // which are usually used in place of NS and CF string literals. 
8911 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 8912 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 8913 return false; 8914 8915 // If there are no arguments specified, warn with -Wformat-security, otherwise 8916 // warn only with -Wformat-nonliteral. 8917 if (Args.size() == firstDataArg) { 8918 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 8919 << OrigFormatExpr->getSourceRange(); 8920 switch (Type) { 8921 default: 8922 break; 8923 case FST_Kprintf: 8924 case FST_FreeBSDKPrintf: 8925 case FST_Printf: 8926 Diag(FormatLoc, diag::note_format_security_fixit) 8927 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 8928 break; 8929 case FST_NSString: 8930 Diag(FormatLoc, diag::note_format_security_fixit) 8931 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 8932 break; 8933 } 8934 } else { 8935 Diag(FormatLoc, diag::warn_format_nonliteral) 8936 << OrigFormatExpr->getSourceRange(); 8937 } 8938 return false; 8939 } 8940 8941 namespace { 8942 8943 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 8944 protected: 8945 Sema &S; 8946 const FormatStringLiteral *FExpr; 8947 const Expr *OrigFormatExpr; 8948 const Sema::FormatStringType FSType; 8949 const unsigned FirstDataArg; 8950 const unsigned NumDataArgs; 8951 const char *Beg; // Start of format string. 8952 const Sema::FormatArgumentPassingKind ArgPassingKind; 8953 ArrayRef<const Expr *> Args; 8954 unsigned FormatIdx; 8955 llvm::SmallBitVector CoveredArgs; 8956 bool usesPositionalArgs = false; 8957 bool atFirstArg = true; 8958 bool inFunctionCall; 8959 Sema::VariadicCallType CallType; 8960 llvm::SmallBitVector &CheckedVarArgs; 8961 UncoveredArgHandler &UncoveredArg; 8962 8963 public: 8964 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 8965 const Expr *origFormatExpr, 8966 const Sema::FormatStringType type, unsigned firstDataArg, 8967 unsigned numDataArgs, const char *beg, 8968 Sema::FormatArgumentPassingKind APK, 8969 ArrayRef<const Expr *> Args, unsigned formatIdx, 8970 bool inFunctionCall, Sema::VariadicCallType callType, 8971 llvm::SmallBitVector &CheckedVarArgs, 8972 UncoveredArgHandler &UncoveredArg) 8973 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 8974 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 8975 ArgPassingKind(APK), Args(Args), FormatIdx(formatIdx), 8976 inFunctionCall(inFunctionCall), CallType(callType), 8977 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 8978 CoveredArgs.resize(numDataArgs); 8979 CoveredArgs.reset(); 8980 } 8981 8982 void DoneProcessing(); 8983 8984 void HandleIncompleteSpecifier(const char *startSpecifier, 8985 unsigned specifierLen) override; 8986 8987 void HandleInvalidLengthModifier( 8988 const analyze_format_string::FormatSpecifier &FS, 8989 const analyze_format_string::ConversionSpecifier &CS, 8990 const char *startSpecifier, unsigned specifierLen, 8991 unsigned DiagID); 8992 8993 void HandleNonStandardLengthModifier( 8994 const analyze_format_string::FormatSpecifier &FS, 8995 const char *startSpecifier, unsigned specifierLen); 8996 8997 void HandleNonStandardConversionSpecifier( 8998 const analyze_format_string::ConversionSpecifier &CS, 8999 const char *startSpecifier, unsigned specifierLen); 9000 9001 void HandlePosition(const char *startPos, unsigned posLen) override; 9002 9003 void HandleInvalidPosition(const char *startSpecifier, 9004 unsigned specifierLen, 9005 analyze_format_string::PositionContext p) override; 9006 9007 void HandleZeroPosition(const char 
*startPos, unsigned posLen) override; 9008 9009 void HandleNullChar(const char *nullCharacter) override; 9010 9011 template <typename Range> 9012 static void 9013 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 9014 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 9015 bool IsStringLocation, Range StringRange, 9016 ArrayRef<FixItHint> Fixit = None); 9017 9018 protected: 9019 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 9020 const char *startSpec, 9021 unsigned specifierLen, 9022 const char *csStart, unsigned csLen); 9023 9024 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 9025 const char *startSpec, 9026 unsigned specifierLen); 9027 9028 SourceRange getFormatStringRange(); 9029 CharSourceRange getSpecifierRange(const char *startSpecifier, 9030 unsigned specifierLen); 9031 SourceLocation getLocationOfByte(const char *x); 9032 9033 const Expr *getDataArg(unsigned i) const; 9034 9035 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 9036 const analyze_format_string::ConversionSpecifier &CS, 9037 const char *startSpecifier, unsigned specifierLen, 9038 unsigned argIndex); 9039 9040 template <typename Range> 9041 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 9042 bool IsStringLocation, Range StringRange, 9043 ArrayRef<FixItHint> Fixit = None); 9044 }; 9045 9046 } // namespace 9047 9048 SourceRange CheckFormatHandler::getFormatStringRange() { 9049 return OrigFormatExpr->getSourceRange(); 9050 } 9051 9052 CharSourceRange CheckFormatHandler:: 9053 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 9054 SourceLocation Start = getLocationOfByte(startSpecifier); 9055 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 9056 9057 // Advance the end SourceLocation by one due to half-open ranges. 9058 End = End.getLocWithOffset(1); 9059 9060 return CharSourceRange::getCharRange(Start, End); 9061 } 9062 9063 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 9064 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 9065 S.getLangOpts(), S.Context.getTargetInfo()); 9066 } 9067 9068 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 9069 unsigned specifierLen){ 9070 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 9071 getLocationOfByte(startSpecifier), 9072 /*IsStringLocation*/true, 9073 getSpecifierRange(startSpecifier, specifierLen)); 9074 } 9075 9076 void CheckFormatHandler::HandleInvalidLengthModifier( 9077 const analyze_format_string::FormatSpecifier &FS, 9078 const analyze_format_string::ConversionSpecifier &CS, 9079 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 9080 using namespace analyze_format_string; 9081 9082 const LengthModifier &LM = FS.getLengthModifier(); 9083 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 9084 9085 // See if we know how to fix this length modifier. 
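  // Illustrative example (editorial addition): a specifier such as "%hf"
  // combines the 'h' length modifier with the 'f' conversion and is reported
  // as nonsensical; when no corrected modifier is known, the else-branch
  // below attaches a fix-it that simply removes the modifier.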
9086 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9087 if (FixedLM) { 9088 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9089 getLocationOfByte(LM.getStart()), 9090 /*IsStringLocation*/true, 9091 getSpecifierRange(startSpecifier, specifierLen)); 9092 9093 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9094 << FixedLM->toString() 9095 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9096 9097 } else { 9098 FixItHint Hint; 9099 if (DiagID == diag::warn_format_nonsensical_length) 9100 Hint = FixItHint::CreateRemoval(LMRange); 9101 9102 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9103 getLocationOfByte(LM.getStart()), 9104 /*IsStringLocation*/true, 9105 getSpecifierRange(startSpecifier, specifierLen), 9106 Hint); 9107 } 9108 } 9109 9110 void CheckFormatHandler::HandleNonStandardLengthModifier( 9111 const analyze_format_string::FormatSpecifier &FS, 9112 const char *startSpecifier, unsigned specifierLen) { 9113 using namespace analyze_format_string; 9114 9115 const LengthModifier &LM = FS.getLengthModifier(); 9116 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 9117 9118 // See if we know how to fix this length modifier. 9119 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9120 if (FixedLM) { 9121 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9122 << LM.toString() << 0, 9123 getLocationOfByte(LM.getStart()), 9124 /*IsStringLocation*/true, 9125 getSpecifierRange(startSpecifier, specifierLen)); 9126 9127 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9128 << FixedLM->toString() 9129 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9130 9131 } else { 9132 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9133 << LM.toString() << 0, 9134 getLocationOfByte(LM.getStart()), 9135 /*IsStringLocation*/true, 9136 getSpecifierRange(startSpecifier, specifierLen)); 9137 } 9138 } 9139 9140 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 9141 const analyze_format_string::ConversionSpecifier &CS, 9142 const char *startSpecifier, unsigned specifierLen) { 9143 using namespace analyze_format_string; 9144 9145 // See if we know how to fix this conversion specifier. 
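  // Illustrative example (editorial addition, assuming the usual mapping of
  // BSD/Apple-style specifiers to their standard forms): a non-standard
  // conversion such as "%D" is diagnosed here, and when a standard
  // equivalent like "%d" is known, a fix-it replacement is added below.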
9146 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 9147 if (FixedCS) { 9148 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9149 << CS.toString() << /*conversion specifier*/1, 9150 getLocationOfByte(CS.getStart()), 9151 /*IsStringLocation*/true, 9152 getSpecifierRange(startSpecifier, specifierLen)); 9153 9154 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 9155 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 9156 << FixedCS->toString() 9157 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 9158 } else { 9159 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9160 << CS.toString() << /*conversion specifier*/1, 9161 getLocationOfByte(CS.getStart()), 9162 /*IsStringLocation*/true, 9163 getSpecifierRange(startSpecifier, specifierLen)); 9164 } 9165 } 9166 9167 void CheckFormatHandler::HandlePosition(const char *startPos, 9168 unsigned posLen) { 9169 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 9170 getLocationOfByte(startPos), 9171 /*IsStringLocation*/true, 9172 getSpecifierRange(startPos, posLen)); 9173 } 9174 9175 void 9176 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 9177 analyze_format_string::PositionContext p) { 9178 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 9179 << (unsigned) p, 9180 getLocationOfByte(startPos), /*IsStringLocation*/true, 9181 getSpecifierRange(startPos, posLen)); 9182 } 9183 9184 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 9185 unsigned posLen) { 9186 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 9187 getLocationOfByte(startPos), 9188 /*IsStringLocation*/true, 9189 getSpecifierRange(startPos, posLen)); 9190 } 9191 9192 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 9193 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 9194 // The presence of a null character is likely an error. 9195 EmitFormatDiagnostic( 9196 S.PDiag(diag::warn_printf_format_string_contains_null_char), 9197 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 9198 getFormatStringRange()); 9199 } 9200 } 9201 9202 // Note that this may return NULL if there was an error parsing or building 9203 // one of the argument expressions. 9204 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 9205 return Args[FirstDataArg + i]; 9206 } 9207 9208 void CheckFormatHandler::DoneProcessing() { 9209 // Does the number of data arguments exceed the number of 9210 // format conversions in the format string? 9211 if (ArgPassingKind != Sema::FAPK_VAList) { 9212 // Find any arguments that weren't covered. 
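  // Illustrative example (editorial addition): for a call like
  //   printf("%d", x, y);
  // only 'x' is covered by a conversion, so 'y' is recorded through
  // UncoveredArg and later reported as warn_printf_data_arg_not_used.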
9213 CoveredArgs.flip(); 9214 signed notCoveredArg = CoveredArgs.find_first(); 9215 if (notCoveredArg >= 0) { 9216 assert((unsigned)notCoveredArg < NumDataArgs); 9217 UncoveredArg.Update(notCoveredArg, OrigFormatExpr); 9218 } else { 9219 UncoveredArg.setAllCovered(); 9220 } 9221 } 9222 } 9223 9224 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall, 9225 const Expr *ArgExpr) { 9226 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 && 9227 "Invalid state"); 9228 9229 if (!ArgExpr) 9230 return; 9231 9232 SourceLocation Loc = ArgExpr->getBeginLoc(); 9233 9234 if (S.getSourceManager().isInSystemMacro(Loc)) 9235 return; 9236 9237 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used); 9238 for (auto E : DiagnosticExprs) 9239 PDiag << E->getSourceRange(); 9240 9241 CheckFormatHandler::EmitFormatDiagnostic( 9242 S, IsFunctionCall, DiagnosticExprs[0], 9243 PDiag, Loc, /*IsStringLocation*/false, 9244 DiagnosticExprs[0]->getSourceRange()); 9245 } 9246 9247 bool 9248 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex, 9249 SourceLocation Loc, 9250 const char *startSpec, 9251 unsigned specifierLen, 9252 const char *csStart, 9253 unsigned csLen) { 9254 bool keepGoing = true; 9255 if (argIndex < NumDataArgs) { 9256 // Consider the argument covered, even though the specifier doesn't 9257 // make sense. 9258 CoveredArgs.set(argIndex); 9259 } 9260 else { 9261 // If argIndex exceeds the number of data arguments we 9262 // don't issue a warning because that is just a cascade of warnings (and 9263 // they may have intended '%%' anyway). We don't want to continue processing 9264 // the format string after this point, however, as we will likely just get 9265 // gibberish when trying to match arguments. 9266 keepGoing = false; 9267 } 9268 9269 StringRef Specifier(csStart, csLen); 9270 9271 // If the specifier is non-printable, it could be the first byte of a UTF-8 9272 // sequence. In that case, print the UTF-8 code point. If not, print the byte 9273 // hex value.
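  // Illustrative example (editorial addition): an unprintable specifier byte
  // such as 0x01 is rendered as "\x01" in the diagnostic below, while a
  // multi-byte UTF-8 sequence such as U+20AC is rendered as "\u20ac".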
9274 std::string CodePointStr; 9275 if (!llvm::sys::locale::isPrint(*csStart)) { 9276 llvm::UTF32 CodePoint; 9277 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 9278 const llvm::UTF8 *E = 9279 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 9280 llvm::ConversionResult Result = 9281 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 9282 9283 if (Result != llvm::conversionOK) { 9284 unsigned char FirstChar = *csStart; 9285 CodePoint = (llvm::UTF32)FirstChar; 9286 } 9287 9288 llvm::raw_string_ostream OS(CodePointStr); 9289 if (CodePoint < 256) 9290 OS << "\\x" << llvm::format("%02x", CodePoint); 9291 else if (CodePoint <= 0xFFFF) 9292 OS << "\\u" << llvm::format("%04x", CodePoint); 9293 else 9294 OS << "\\U" << llvm::format("%08x", CodePoint); 9295 OS.flush(); 9296 Specifier = CodePointStr; 9297 } 9298 9299 EmitFormatDiagnostic( 9300 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 9301 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 9302 9303 return keepGoing; 9304 } 9305 9306 void 9307 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 9308 const char *startSpec, 9309 unsigned specifierLen) { 9310 EmitFormatDiagnostic( 9311 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 9312 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 9313 } 9314 9315 bool 9316 CheckFormatHandler::CheckNumArgs( 9317 const analyze_format_string::FormatSpecifier &FS, 9318 const analyze_format_string::ConversionSpecifier &CS, 9319 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 9320 9321 if (argIndex >= NumDataArgs) { 9322 PartialDiagnostic PDiag = FS.usesPositionalArg() 9323 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 9324 << (argIndex+1) << NumDataArgs) 9325 : S.PDiag(diag::warn_printf_insufficient_data_args); 9326 EmitFormatDiagnostic( 9327 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 9328 getSpecifierRange(startSpecifier, specifierLen)); 9329 9330 // Since more arguments than conversion tokens are given, by extension 9331 // all arguments are covered, so mark this as so. 9332 UncoveredArg.setAllCovered(); 9333 return false; 9334 } 9335 return true; 9336 } 9337 9338 template<typename Range> 9339 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 9340 SourceLocation Loc, 9341 bool IsStringLocation, 9342 Range StringRange, 9343 ArrayRef<FixItHint> FixIt) { 9344 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 9345 Loc, IsStringLocation, StringRange, FixIt); 9346 } 9347 9348 /// If the format string is not within the function call, emit a note 9349 /// so that the function call and string are in diagnostic messages. 9350 /// 9351 /// \param InFunctionCall if true, the format string is within the function 9352 /// call and only one diagnostic message will be produced. Otherwise, an 9353 /// extra note will be emitted pointing to location of the format string. 9354 /// 9355 /// \param ArgumentExpr the expression that is passed as the format string 9356 /// argument in the function call. Used for getting locations when two 9357 /// diagnostics are emitted. 9358 /// 9359 /// \param PDiag the callee should already have provided any strings for the 9360 /// diagnostic message. This function only adds locations and fixits 9361 /// to diagnostics. 9362 /// 9363 /// \param Loc primary location for diagnostic. 
If two diagnostics are 9364 /// required, one will be at Loc and a new SourceLocation will be created for 9365 /// the other one. 9366 /// 9367 /// \param IsStringLocation if true, Loc points to the format string and should be 9368 /// used for the note. Otherwise, Loc points to the argument list and will 9369 /// be used with PDiag. 9370 /// 9371 /// \param StringRange some or all of the string to highlight. This is 9372 /// templated so it can accept either a CharSourceRange or a SourceRange. 9373 /// 9374 /// \param FixIt optional fix it hint for the format string. 9375 template <typename Range> 9376 void CheckFormatHandler::EmitFormatDiagnostic( 9377 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 9378 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 9379 Range StringRange, ArrayRef<FixItHint> FixIt) { 9380 if (InFunctionCall) { 9381 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 9382 D << StringRange; 9383 D << FixIt; 9384 } else { 9385 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 9386 << ArgumentExpr->getSourceRange(); 9387 9388 const Sema::SemaDiagnosticBuilder &Note = 9389 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 9390 diag::note_format_string_defined); 9391 9392 Note << StringRange; 9393 Note << FixIt; 9394 } 9395 } 9396 9397 //===--- CHECK: Printf format string checking ------------------------------===// 9398 9399 namespace { 9400 9401 class CheckPrintfHandler : public CheckFormatHandler { 9402 public: 9403 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 9404 const Expr *origFormatExpr, 9405 const Sema::FormatStringType type, unsigned firstDataArg, 9406 unsigned numDataArgs, bool isObjC, const char *beg, 9407 Sema::FormatArgumentPassingKind APK, 9408 ArrayRef<const Expr *> Args, unsigned formatIdx, 9409 bool inFunctionCall, Sema::VariadicCallType CallType, 9410 llvm::SmallBitVector &CheckedVarArgs, 9411 UncoveredArgHandler &UncoveredArg) 9412 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 9413 numDataArgs, beg, APK, Args, formatIdx, 9414 inFunctionCall, CallType, CheckedVarArgs, 9415 UncoveredArg) {} 9416 9417 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 9418 9419 /// Returns true if '%@' specifiers are allowed in the format string.
9420 bool allowsObjCArg() const { 9421 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 9422 FSType == Sema::FST_OSTrace; 9423 } 9424 9425 bool HandleInvalidPrintfConversionSpecifier( 9426 const analyze_printf::PrintfSpecifier &FS, 9427 const char *startSpecifier, 9428 unsigned specifierLen) override; 9429 9430 void handleInvalidMaskType(StringRef MaskType) override; 9431 9432 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 9433 const char *startSpecifier, unsigned specifierLen, 9434 const TargetInfo &Target) override; 9435 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9436 const char *StartSpecifier, 9437 unsigned SpecifierLen, 9438 const Expr *E); 9439 9440 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 9441 const char *startSpecifier, unsigned specifierLen); 9442 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 9443 const analyze_printf::OptionalAmount &Amt, 9444 unsigned type, 9445 const char *startSpecifier, unsigned specifierLen); 9446 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9447 const analyze_printf::OptionalFlag &flag, 9448 const char *startSpecifier, unsigned specifierLen); 9449 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 9450 const analyze_printf::OptionalFlag &ignoredFlag, 9451 const analyze_printf::OptionalFlag &flag, 9452 const char *startSpecifier, unsigned specifierLen); 9453 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 9454 const Expr *E); 9455 9456 void HandleEmptyObjCModifierFlag(const char *startFlag, 9457 unsigned flagLen) override; 9458 9459 void HandleInvalidObjCModifierFlag(const char *startFlag, 9460 unsigned flagLen) override; 9461 9462 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 9463 const char *flagsEnd, 9464 const char *conversionPosition) 9465 override; 9466 }; 9467 9468 } // namespace 9469 9470 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 9471 const analyze_printf::PrintfSpecifier &FS, 9472 const char *startSpecifier, 9473 unsigned specifierLen) { 9474 const analyze_printf::PrintfConversionSpecifier &CS = 9475 FS.getConversionSpecifier(); 9476 9477 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 9478 getLocationOfByte(CS.getStart()), 9479 startSpecifier, specifierLen, 9480 CS.getStart(), CS.getLength()); 9481 } 9482 9483 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 9484 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 9485 } 9486 9487 bool CheckPrintfHandler::HandleAmount( 9488 const analyze_format_string::OptionalAmount &Amt, unsigned k, 9489 const char *startSpecifier, unsigned specifierLen) { 9490 if (Amt.hasDataArgument()) { 9491 if (ArgPassingKind != Sema::FAPK_VAList) { 9492 unsigned argIndex = Amt.getArgIndex(); 9493 if (argIndex >= NumDataArgs) { 9494 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 9495 << k, 9496 getLocationOfByte(Amt.getStart()), 9497 /*IsStringLocation*/ true, 9498 getSpecifierRange(startSpecifier, specifierLen)); 9499 // Don't do any more checking. We will just emit 9500 // spurious errors. 9501 return false; 9502 } 9503 9504 // Type check the data argument. It should be an 'int'. 9505 // Although not in conformance with C99, we also allow the argument to be 9506 // an 'unsigned int' as that is a reasonably safe case. GCC also 9507 // doesn't emit a warning for that case. 
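  // Illustrative example (editorial addition; names are placeholders):
  //   printf("%*d", width, value);   // fine when 'width' is int or unsigned
  //   printf("%*d", 1.5, value);     // warn_printf_asterisk_wrong_type below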
9508 CoveredArgs.set(argIndex); 9509 const Expr *Arg = getDataArg(argIndex); 9510 if (!Arg) 9511 return false; 9512 9513 QualType T = Arg->getType(); 9514 9515 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 9516 assert(AT.isValid()); 9517 9518 if (!AT.matchesType(S.Context, T)) { 9519 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 9520 << k << AT.getRepresentativeTypeName(S.Context) 9521 << T << Arg->getSourceRange(), 9522 getLocationOfByte(Amt.getStart()), 9523 /*IsStringLocation*/true, 9524 getSpecifierRange(startSpecifier, specifierLen)); 9525 // Don't do any more checking. We will just emit 9526 // spurious errors. 9527 return false; 9528 } 9529 } 9530 } 9531 return true; 9532 } 9533 9534 void CheckPrintfHandler::HandleInvalidAmount( 9535 const analyze_printf::PrintfSpecifier &FS, 9536 const analyze_printf::OptionalAmount &Amt, 9537 unsigned type, 9538 const char *startSpecifier, 9539 unsigned specifierLen) { 9540 const analyze_printf::PrintfConversionSpecifier &CS = 9541 FS.getConversionSpecifier(); 9542 9543 FixItHint fixit = 9544 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 9545 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 9546 Amt.getConstantLength())) 9547 : FixItHint(); 9548 9549 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 9550 << type << CS.toString(), 9551 getLocationOfByte(Amt.getStart()), 9552 /*IsStringLocation*/true, 9553 getSpecifierRange(startSpecifier, specifierLen), 9554 fixit); 9555 } 9556 9557 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9558 const analyze_printf::OptionalFlag &flag, 9559 const char *startSpecifier, 9560 unsigned specifierLen) { 9561 // Warn about pointless flag with a fixit removal. 9562 const analyze_printf::PrintfConversionSpecifier &CS = 9563 FS.getConversionSpecifier(); 9564 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 9565 << flag.toString() << CS.toString(), 9566 getLocationOfByte(flag.getPosition()), 9567 /*IsStringLocation*/true, 9568 getSpecifierRange(startSpecifier, specifierLen), 9569 FixItHint::CreateRemoval( 9570 getSpecifierRange(flag.getPosition(), 1))); 9571 } 9572 9573 void CheckPrintfHandler::HandleIgnoredFlag( 9574 const analyze_printf::PrintfSpecifier &FS, 9575 const analyze_printf::OptionalFlag &ignoredFlag, 9576 const analyze_printf::OptionalFlag &flag, 9577 const char *startSpecifier, 9578 unsigned specifierLen) { 9579 // Warn about ignored flag with a fixit removal. 9580 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 9581 << ignoredFlag.toString() << flag.toString(), 9582 getLocationOfByte(ignoredFlag.getPosition()), 9583 /*IsStringLocation*/true, 9584 getSpecifierRange(startSpecifier, specifierLen), 9585 FixItHint::CreateRemoval( 9586 getSpecifierRange(ignoredFlag.getPosition(), 1))); 9587 } 9588 9589 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 9590 unsigned flagLen) { 9591 // Warn about an empty flag. 9592 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 9593 getLocationOfByte(startFlag), 9594 /*IsStringLocation*/true, 9595 getSpecifierRange(startFlag, flagLen)); 9596 } 9597 9598 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 9599 unsigned flagLen) { 9600 // Warn about an invalid flag. 
auto Range = getSpecifierRange(startFlag, flagLen); 9602 StringRef flag(startFlag, flagLen); 9603 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 9604 getLocationOfByte(startFlag), 9605 /*IsStringLocation*/true, 9606 Range, FixItHint::CreateRemoval(Range)); 9607 } 9608 9609 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 9610 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 9611 // Warn about using '[...]' without a '@' conversion. 9612 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 9613 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 9614 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 9615 getLocationOfByte(conversionPosition), 9616 /*IsStringLocation*/true, 9617 Range, FixItHint::CreateRemoval(Range)); 9618 } 9619 9620 // Determines if the specified type is a C++ class or struct containing 9621 // a member with the specified name and kind (e.g. a CXXMethodDecl named 9622 // "c_str()"). 9623 template<typename MemberKind> 9624 static llvm::SmallPtrSet<MemberKind*, 1> 9625 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 9626 const RecordType *RT = Ty->getAs<RecordType>(); 9627 llvm::SmallPtrSet<MemberKind*, 1> Results; 9628 9629 if (!RT) 9630 return Results; 9631 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 9632 if (!RD || !RD->getDefinition()) 9633 return Results; 9634 9635 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 9636 Sema::LookupMemberName); 9637 R.suppressDiagnostics(); 9638 9639 // We just need to include all members of the right kind turned up by the 9640 // filter, at this point. 9641 if (S.LookupQualifiedName(R, RT->getDecl())) 9642 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 9643 NamedDecl *decl = (*I)->getUnderlyingDecl(); 9644 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 9645 Results.insert(FK); 9646 } 9647 return Results; 9648 } 9649 9650 /// Check if we could call '.c_str()' on an object. 9651 /// 9652 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 9653 /// allow the call, or if it would be ambiguous). 9654 bool Sema::hasCStrMethod(const Expr *E) { 9655 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 9656 9657 MethodSet Results = 9658 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 9659 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 9660 MI != ME; ++MI) 9661 if ((*MI)->getMinRequiredArguments() == 0) 9662 return true; 9663 return false; 9664 } 9665 9666 // Check if a (w)string was passed when a (w)char* was needed, and offer a 9667 // better diagnostic if so. AT is assumed to be valid. 9668 // Returns true when a c_str() conversion method is found. 9669 bool CheckPrintfHandler::checkForCStrMembers( 9670 const analyze_printf::ArgType &AT, const Expr *E) { 9671 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 9672 9673 MethodSet Results = 9674 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 9675 9676 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 9677 MI != ME; ++MI) { 9678 const CXXMethodDecl *Method = *MI; 9679 if (Method->getMinRequiredArguments() == 0 && 9680 AT.matchesType(S.Context, Method->getReturnType())) { 9681 // FIXME: Suggest parens if the expression needs them.
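  // Illustrative example (editorial addition; 'Name' is a placeholder): given
  //   std::string Name; printf("%s", Name);
  // the note emitted below suggests appending ".c_str()", i.e.
  //   printf("%s", Name.c_str());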
9682 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 9683 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 9684 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 9685 return true; 9686 } 9687 } 9688 9689 return false; 9690 } 9691 9692 bool CheckPrintfHandler::HandlePrintfSpecifier( 9693 const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, 9694 unsigned specifierLen, const TargetInfo &Target) { 9695 using namespace analyze_format_string; 9696 using namespace analyze_printf; 9697 9698 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 9699 9700 if (FS.consumesDataArgument()) { 9701 if (atFirstArg) { 9702 atFirstArg = false; 9703 usesPositionalArgs = FS.usesPositionalArg(); 9704 } 9705 else if (usesPositionalArgs != FS.usesPositionalArg()) { 9706 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 9707 startSpecifier, specifierLen); 9708 return false; 9709 } 9710 } 9711 9712 // First check if the field width, precision, and conversion specifier 9713 // have matching data arguments. 9714 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 9715 startSpecifier, specifierLen)) { 9716 return false; 9717 } 9718 9719 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 9720 startSpecifier, specifierLen)) { 9721 return false; 9722 } 9723 9724 if (!CS.consumesDataArgument()) { 9725 // FIXME: Technically specifying a precision or field width here 9726 // makes no sense. Worth issuing a warning at some point. 9727 return true; 9728 } 9729 9730 // Consume the argument. 9731 unsigned argIndex = FS.getArgIndex(); 9732 if (argIndex < NumDataArgs) { 9733 // The check to see if the argIndex is valid will come later. 9734 // We set the bit here because we may exit early from this 9735 // function if we encounter some other error. 9736 CoveredArgs.set(argIndex); 9737 } 9738 9739 // FreeBSD kernel extensions. 9740 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 9741 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 9742 // We need at least two arguments. 9743 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 9744 return false; 9745 9746 // Claim the second argument. 9747 CoveredArgs.set(argIndex + 1); 9748 9749 // Type check the first argument (int for %b, pointer for %D) 9750 const Expr *Ex = getDataArg(argIndex); 9751 const analyze_printf::ArgType &AT = 9752 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 9753 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 9754 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 9755 EmitFormatDiagnostic( 9756 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9757 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 9758 << false << Ex->getSourceRange(), 9759 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9760 getSpecifierRange(startSpecifier, specifierLen)); 9761 9762 // Type check the second argument (char * for both %b and %D) 9763 Ex = getDataArg(argIndex + 1); 9764 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 9765 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 9766 EmitFormatDiagnostic( 9767 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9768 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 9769 << false << Ex->getSourceRange(), 9770 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9771 getSpecifierRange(startSpecifier, specifierLen)); 9772 9773 return true; 9774 } 9775 9776 // Check for using an Objective-C specific conversion specifier 9777 // in a non-ObjC literal. 
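  // Illustrative example (editorial addition): "%@" in a plain printf format
  // string is rejected by the check below; NSString and os_log format strings
  // accept it (see allowsObjCArg(), with a separate scalar-only rule for
  // os_trace further down).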
9778 if (!allowsObjCArg() && CS.isObjCArg()) { 9779 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9780 specifierLen); 9781 } 9782 9783 // %P can only be used with os_log. 9784 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 9785 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9786 specifierLen); 9787 } 9788 9789 // %n is not allowed with os_log. 9790 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 9791 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 9792 getLocationOfByte(CS.getStart()), 9793 /*IsStringLocation*/ false, 9794 getSpecifierRange(startSpecifier, specifierLen)); 9795 9796 return true; 9797 } 9798 9799 // Only scalars are allowed for os_trace. 9800 if (FSType == Sema::FST_OSTrace && 9801 (CS.getKind() == ConversionSpecifier::PArg || 9802 CS.getKind() == ConversionSpecifier::sArg || 9803 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 9804 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9805 specifierLen); 9806 } 9807 9808 // Check for use of public/private annotation outside of os_log(). 9809 if (FSType != Sema::FST_OSLog) { 9810 if (FS.isPublic().isSet()) { 9811 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9812 << "public", 9813 getLocationOfByte(FS.isPublic().getPosition()), 9814 /*IsStringLocation*/ false, 9815 getSpecifierRange(startSpecifier, specifierLen)); 9816 } 9817 if (FS.isPrivate().isSet()) { 9818 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9819 << "private", 9820 getLocationOfByte(FS.isPrivate().getPosition()), 9821 /*IsStringLocation*/ false, 9822 getSpecifierRange(startSpecifier, specifierLen)); 9823 } 9824 } 9825 9826 const llvm::Triple &Triple = Target.getTriple(); 9827 if (CS.getKind() == ConversionSpecifier::nArg && 9828 (Triple.isAndroid() || Triple.isOSFuchsia())) { 9829 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported), 9830 getLocationOfByte(CS.getStart()), 9831 /*IsStringLocation*/ false, 9832 getSpecifierRange(startSpecifier, specifierLen)); 9833 } 9834 9835 // Check for invalid use of field width 9836 if (!FS.hasValidFieldWidth()) { 9837 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 9838 startSpecifier, specifierLen); 9839 } 9840 9841 // Check for invalid use of precision 9842 if (!FS.hasValidPrecision()) { 9843 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 9844 startSpecifier, specifierLen); 9845 } 9846 9847 // Precision is mandatory for %P specifier. 9848 if (CS.getKind() == ConversionSpecifier::PArg && 9849 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 9850 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 9851 getLocationOfByte(startSpecifier), 9852 /*IsStringLocation*/ false, 9853 getSpecifierRange(startSpecifier, specifierLen)); 9854 } 9855 9856 // Check each flag does not conflict with any other component. 
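  // Illustrative examples (editorial addition): "%+s" draws a warning that
  // the '+' flag is nonsensical with the 's' conversion, and "% +d" warns
  // that the ' ' flag is ignored because '+' is also present.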
9857 if (!FS.hasValidThousandsGroupingPrefix()) 9858 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 9859 if (!FS.hasValidLeadingZeros()) 9860 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 9861 if (!FS.hasValidPlusPrefix()) 9862 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 9863 if (!FS.hasValidSpacePrefix()) 9864 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 9865 if (!FS.hasValidAlternativeForm()) 9866 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 9867 if (!FS.hasValidLeftJustified()) 9868 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 9869 9870 // Check that flags are not ignored by another flag 9871 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 9872 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 9873 startSpecifier, specifierLen); 9874 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 9875 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 9876 startSpecifier, specifierLen); 9877 9878 // Check the length modifier is valid with the given conversion specifier. 9879 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9880 S.getLangOpts())) 9881 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9882 diag::warn_format_nonsensical_length); 9883 else if (!FS.hasStandardLengthModifier()) 9884 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9885 else if (!FS.hasStandardLengthConversionCombination()) 9886 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9887 diag::warn_format_non_standard_conversion_spec); 9888 9889 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9890 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9891 9892 // The remaining checks depend on the data arguments. 9893 if (ArgPassingKind == Sema::FAPK_VAList) 9894 return true; 9895 9896 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9897 return false; 9898 9899 const Expr *Arg = getDataArg(argIndex); 9900 if (!Arg) 9901 return true; 9902 9903 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 9904 } 9905 9906 static bool requiresParensToAddCast(const Expr *E) { 9907 // FIXME: We should have a general way to reason about operator 9908 // precedence and whether parens are actually needed here. 9909 // Take care of a few common cases where they aren't. 
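  // Illustrative example (editorial addition; names are placeholders):
  // prefixing a cast to 'foo()' or 'obj.count' needs no extra parentheses,
  // but 'a + b' does, e.g. '(NSInteger)(a + b)'; binary operators fall into
  // the default case below.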
9910 const Expr *Inside = E->IgnoreImpCasts(); 9911 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 9912 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 9913 9914 switch (Inside->getStmtClass()) { 9915 case Stmt::ArraySubscriptExprClass: 9916 case Stmt::CallExprClass: 9917 case Stmt::CharacterLiteralClass: 9918 case Stmt::CXXBoolLiteralExprClass: 9919 case Stmt::DeclRefExprClass: 9920 case Stmt::FloatingLiteralClass: 9921 case Stmt::IntegerLiteralClass: 9922 case Stmt::MemberExprClass: 9923 case Stmt::ObjCArrayLiteralClass: 9924 case Stmt::ObjCBoolLiteralExprClass: 9925 case Stmt::ObjCBoxedExprClass: 9926 case Stmt::ObjCDictionaryLiteralClass: 9927 case Stmt::ObjCEncodeExprClass: 9928 case Stmt::ObjCIvarRefExprClass: 9929 case Stmt::ObjCMessageExprClass: 9930 case Stmt::ObjCPropertyRefExprClass: 9931 case Stmt::ObjCStringLiteralClass: 9932 case Stmt::ObjCSubscriptRefExprClass: 9933 case Stmt::ParenExprClass: 9934 case Stmt::StringLiteralClass: 9935 case Stmt::UnaryOperatorClass: 9936 return false; 9937 default: 9938 return true; 9939 } 9940 } 9941 9942 static std::pair<QualType, StringRef> 9943 shouldNotPrintDirectly(const ASTContext &Context, 9944 QualType IntendedTy, 9945 const Expr *E) { 9946 // Use a 'while' to peel off layers of typedefs. 9947 QualType TyTy = IntendedTy; 9948 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 9949 StringRef Name = UserTy->getDecl()->getName(); 9950 QualType CastTy = llvm::StringSwitch<QualType>(Name) 9951 .Case("CFIndex", Context.getNSIntegerType()) 9952 .Case("NSInteger", Context.getNSIntegerType()) 9953 .Case("NSUInteger", Context.getNSUIntegerType()) 9954 .Case("SInt32", Context.IntTy) 9955 .Case("UInt32", Context.UnsignedIntTy) 9956 .Default(QualType()); 9957 9958 if (!CastTy.isNull()) 9959 return std::make_pair(CastTy, Name); 9960 9961 TyTy = UserTy->desugar(); 9962 } 9963 9964 // Strip parens if necessary. 9965 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 9966 return shouldNotPrintDirectly(Context, 9967 PE->getSubExpr()->getType(), 9968 PE->getSubExpr()); 9969 9970 // If this is a conditional expression, then its result type is constructed 9971 // via usual arithmetic conversions and thus there might be no necessary 9972 // typedef sugar there. Recurse to operands to check for NSInteger & 9973 // Co. usage condition. 9974 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 9975 QualType TrueTy, FalseTy; 9976 StringRef TrueName, FalseName; 9977 9978 std::tie(TrueTy, TrueName) = 9979 shouldNotPrintDirectly(Context, 9980 CO->getTrueExpr()->getType(), 9981 CO->getTrueExpr()); 9982 std::tie(FalseTy, FalseName) = 9983 shouldNotPrintDirectly(Context, 9984 CO->getFalseExpr()->getType(), 9985 CO->getFalseExpr()); 9986 9987 if (TrueTy == FalseTy) 9988 return std::make_pair(TrueTy, TrueName); 9989 else if (TrueTy.isNull()) 9990 return std::make_pair(FalseTy, FalseName); 9991 else if (FalseTy.isNull()) 9992 return std::make_pair(TrueTy, TrueName); 9993 } 9994 9995 return std::make_pair(QualType(), StringRef()); 9996 } 9997 9998 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 9999 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 10000 /// type do not count. 10001 static bool 10002 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 10003 QualType From = ICE->getSubExpr()->getType(); 10004 QualType To = ICE->getType(); 10005 // It's an integer promotion if the destination type is the promoted 10006 // source type. 
10007 if (ICE->getCastKind() == CK_IntegralCast && 10008 From->isPromotableIntegerType() && 10009 S.Context.getPromotedIntegerType(From) == To) 10010 return true; 10011 // Look through vector types, since we do default argument promotion for 10012 // those in OpenCL. 10013 if (const auto *VecTy = From->getAs<ExtVectorType>()) 10014 From = VecTy->getElementType(); 10015 if (const auto *VecTy = To->getAs<ExtVectorType>()) 10016 To = VecTy->getElementType(); 10017 // It's a floating promotion if the source type is a lower rank. 10018 return ICE->getCastKind() == CK_FloatingCast && 10019 S.Context.getFloatingTypeOrder(From, To) < 0; 10020 } 10021 10022 bool 10023 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 10024 const char *StartSpecifier, 10025 unsigned SpecifierLen, 10026 const Expr *E) { 10027 using namespace analyze_format_string; 10028 using namespace analyze_printf; 10029 10030 // Now type check the data expression that matches the 10031 // format specifier. 10032 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 10033 if (!AT.isValid()) 10034 return true; 10035 10036 QualType ExprTy = E->getType(); 10037 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 10038 ExprTy = TET->getUnderlyingExpr()->getType(); 10039 } 10040 10041 // When using the format attribute in C++, you can receive a function or an 10042 // array that will necessarily decay to a pointer when passed to the final 10043 // format consumer. Apply decay before type comparison. 10044 if (ExprTy->canDecayToPointerType()) 10045 ExprTy = S.Context.getDecayedType(ExprTy); 10046 10047 // Diagnose attempts to print a boolean value as a character. Unlike other 10048 // -Wformat diagnostics, this is fine from a type perspective, but it still 10049 // doesn't make sense. 10050 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 10051 E->isKnownToHaveBooleanValue()) { 10052 const CharSourceRange &CSR = 10053 getSpecifierRange(StartSpecifier, SpecifierLen); 10054 SmallString<4> FSString; 10055 llvm::raw_svector_ostream os(FSString); 10056 FS.toString(os); 10057 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 10058 << FSString, 10059 E->getExprLoc(), false, CSR); 10060 return true; 10061 } 10062 10063 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 10064 if (Match == analyze_printf::ArgType::Match) 10065 return true; 10066 10067 // Look through argument promotions for our error message's reported type. 10068 // This includes the integral and floating promotions, but excludes array 10069 // and function pointer decay (seeing that an argument intended to be a 10070 // string has type 'char [6]' is probably more confusing than 'char *') and 10071 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 10072 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 10073 if (isArithmeticArgumentPromotion(S, ICE)) { 10074 E = ICE->getSubExpr(); 10075 ExprTy = E->getType(); 10076 10077 // Check if we didn't match because of an implicit cast from a 'char' 10078 // or 'short' to an 'int'. This is done because printf is a varargs 10079 // function. 
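  // Illustrative example (editorial addition): in
  //   short s = 1; printf("%hd", s);
  // the argument is promoted to 'int' by the variadic call, but re-matching
  // against the unpromoted 'short' below avoids a spurious -Wformat warning.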
10080 if (ICE->getType() == S.Context.IntTy || 10081 ICE->getType() == S.Context.UnsignedIntTy) { 10082 // All further checking is done on the subexpression 10083 const analyze_printf::ArgType::MatchKind ImplicitMatch = 10084 AT.matchesType(S.Context, ExprTy); 10085 if (ImplicitMatch == analyze_printf::ArgType::Match) 10086 return true; 10087 if (ImplicitMatch == ArgType::NoMatchPedantic || 10088 ImplicitMatch == ArgType::NoMatchTypeConfusion) 10089 Match = ImplicitMatch; 10090 } 10091 } 10092 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 10093 // Special case for 'a', which has type 'int' in C. 10094 // Note, however, that we do /not/ want to treat multibyte constants like 10095 // 'MooV' as characters! This form is deprecated but still exists. In 10096 // addition, don't treat expressions as of type 'char' if one byte length 10097 // modifier is provided. 10098 if (ExprTy == S.Context.IntTy && 10099 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 10100 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 10101 ExprTy = S.Context.CharTy; 10102 } 10103 10104 // Look through enums to their underlying type. 10105 bool IsEnum = false; 10106 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 10107 ExprTy = EnumTy->getDecl()->getIntegerType(); 10108 IsEnum = true; 10109 } 10110 10111 // %C in an Objective-C context prints a unichar, not a wchar_t. 10112 // If the argument is an integer of some kind, believe the %C and suggest 10113 // a cast instead of changing the conversion specifier. 10114 QualType IntendedTy = ExprTy; 10115 if (isObjCContext() && 10116 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 10117 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 10118 !ExprTy->isCharType()) { 10119 // 'unichar' is defined as a typedef of unsigned short, but we should 10120 // prefer using the typedef if it is visible. 10121 IntendedTy = S.Context.UnsignedShortTy; 10122 10123 // While we are here, check if the value is an IntegerLiteral that happens 10124 // to be within the valid range. 10125 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 10126 const llvm::APInt &V = IL->getValue(); 10127 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 10128 return true; 10129 } 10130 10131 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 10132 Sema::LookupOrdinaryName); 10133 if (S.LookupName(Result, S.getCurScope())) { 10134 NamedDecl *ND = Result.getFoundDecl(); 10135 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 10136 if (TD->getUnderlyingType() == IntendedTy) 10137 IntendedTy = S.Context.getTypedefType(TD); 10138 } 10139 } 10140 } 10141 10142 // Special-case some of Darwin's platform-independence types by suggesting 10143 // casts to primitive types that are known to be large enough. 10144 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 10145 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 10146 QualType CastTy; 10147 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 10148 if (!CastTy.isNull()) { 10149 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 10150 // (long in ASTContext). Only complain to pedants. 
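  // Illustrative example (editorial addition, assuming a 32-bit Darwin target
  // where NSInteger narrows to 'int'): NSLog(@"%zi items", (NSInteger)count)
  // is only flagged by the pedantic diagnostic selected below, not as a hard
  // type mismatch.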
10151 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 10152 (AT.isSizeT() || AT.isPtrdiffT()) && 10153 AT.matchesType(S.Context, CastTy)) 10154 Match = ArgType::NoMatchPedantic; 10155 IntendedTy = CastTy; 10156 ShouldNotPrintDirectly = true; 10157 } 10158 } 10159 10160 // We may be able to offer a FixItHint if it is a supported type. 10161 PrintfSpecifier fixedFS = FS; 10162 bool Success = 10163 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 10164 10165 if (Success) { 10166 // Get the fix string from the fixed format specifier 10167 SmallString<16> buf; 10168 llvm::raw_svector_ostream os(buf); 10169 fixedFS.toString(os); 10170 10171 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 10172 10173 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 10174 unsigned Diag; 10175 switch (Match) { 10176 case ArgType::Match: llvm_unreachable("expected non-matching"); 10177 case ArgType::NoMatchPedantic: 10178 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10179 break; 10180 case ArgType::NoMatchTypeConfusion: 10181 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10182 break; 10183 case ArgType::NoMatch: 10184 Diag = diag::warn_format_conversion_argument_type_mismatch; 10185 break; 10186 } 10187 10188 // In this case, the specifier is wrong and should be changed to match 10189 // the argument. 10190 EmitFormatDiagnostic(S.PDiag(Diag) 10191 << AT.getRepresentativeTypeName(S.Context) 10192 << IntendedTy << IsEnum << E->getSourceRange(), 10193 E->getBeginLoc(), 10194 /*IsStringLocation*/ false, SpecRange, 10195 FixItHint::CreateReplacement(SpecRange, os.str())); 10196 } else { 10197 // The canonical type for formatting this value is different from the 10198 // actual type of the expression. (This occurs, for example, with Darwin's 10199 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 10200 // should be printed as 'long' for 64-bit compatibility.) 10201 // Rather than emitting a normal format/argument mismatch, we want to 10202 // add a cast to the recommended type (and correct the format string 10203 // if necessary). 10204 SmallString<16> CastBuf; 10205 llvm::raw_svector_ostream CastFix(CastBuf); 10206 CastFix << "("; 10207 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 10208 CastFix << ")"; 10209 10210 SmallVector<FixItHint,4> Hints; 10211 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 10212 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 10213 10214 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 10215 // If there's already a cast present, just replace it. 10216 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 10217 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 10218 10219 } else if (!requiresParensToAddCast(E)) { 10220 // If the expression has high enough precedence, 10221 // just write the C-style cast. 10222 Hints.push_back( 10223 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10224 } else { 10225 // Otherwise, add parens around the expression as well as the cast. 10226 CastFix << "("; 10227 Hints.push_back( 10228 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10229 10230 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 10231 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 10232 } 10233 10234 if (ShouldNotPrintDirectly) { 10235 // The expression has a type that should not be printed directly. 
10236 // We extract the name from the typedef because we don't want to show 10237 // the underlying type in the diagnostic. 10238 StringRef Name; 10239 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 10240 Name = TypedefTy->getDecl()->getName(); 10241 else 10242 Name = CastTyName; 10243 unsigned Diag = Match == ArgType::NoMatchPedantic 10244 ? diag::warn_format_argument_needs_cast_pedantic 10245 : diag::warn_format_argument_needs_cast; 10246 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 10247 << E->getSourceRange(), 10248 E->getBeginLoc(), /*IsStringLocation=*/false, 10249 SpecRange, Hints); 10250 } else { 10251 // In this case, the expression could be printed using a different 10252 // specifier, but we've decided that the specifier is probably correct 10253 // and we should cast instead. Just use the normal warning message. 10254 EmitFormatDiagnostic( 10255 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10256 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 10257 << E->getSourceRange(), 10258 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 10259 } 10260 } 10261 } else { 10262 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 10263 SpecifierLen); 10264 // Since the warning for passing non-POD types to variadic functions 10265 // was deferred until now, we emit a warning for non-POD 10266 // arguments here. 10267 bool EmitTypeMismatch = false; 10268 switch (S.isValidVarArgType(ExprTy)) { 10269 case Sema::VAK_Valid: 10270 case Sema::VAK_ValidInCXX11: { 10271 unsigned Diag; 10272 switch (Match) { 10273 case ArgType::Match: llvm_unreachable("expected non-matching"); 10274 case ArgType::NoMatchPedantic: 10275 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10276 break; 10277 case ArgType::NoMatchTypeConfusion: 10278 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10279 break; 10280 case ArgType::NoMatch: 10281 Diag = diag::warn_format_conversion_argument_type_mismatch; 10282 break; 10283 } 10284 10285 EmitFormatDiagnostic( 10286 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 10287 << IsEnum << CSR << E->getSourceRange(), 10288 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10289 break; 10290 } 10291 case Sema::VAK_Undefined: 10292 case Sema::VAK_MSVCUndefined: 10293 if (CallType == Sema::VariadicDoesNotApply) { 10294 EmitTypeMismatch = true; 10295 } else { 10296 EmitFormatDiagnostic( 10297 S.PDiag(diag::warn_non_pod_vararg_with_format_string) 10298 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 10299 << AT.getRepresentativeTypeName(S.Context) << CSR 10300 << E->getSourceRange(), 10301 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10302 checkForCStrMembers(AT, E); 10303 } 10304 break; 10305 10306 case Sema::VAK_Invalid: 10307 if (CallType == Sema::VariadicDoesNotApply) 10308 EmitTypeMismatch = true; 10309 else if (ExprTy->isObjCObjectType()) 10310 EmitFormatDiagnostic( 10311 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 10312 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 10313 << AT.getRepresentativeTypeName(S.Context) << CSR 10314 << E->getSourceRange(), 10315 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10316 else 10317 // FIXME: If this is an initializer list, suggest removing the braces 10318 // or inserting a cast to the target type. 
10319 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 10320 << isa<InitListExpr>(E) << ExprTy << CallType 10321 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 10322 break; 10323 } 10324 10325 if (EmitTypeMismatch) { 10326 // The function is not variadic, so we do not generate warnings about 10327 // being allowed to pass that object as a variadic argument. Instead, 10328 // since there are inherently no printf specifiers for types which cannot 10329 // be passed as variadic arguments, emit a plain old specifier mismatch 10330 // argument. 10331 EmitFormatDiagnostic( 10332 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10333 << AT.getRepresentativeTypeName(S.Context) << ExprTy << false 10334 << E->getSourceRange(), 10335 E->getBeginLoc(), false, CSR); 10336 } 10337 10338 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 10339 "format string specifier index out of range"); 10340 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 10341 } 10342 10343 return true; 10344 } 10345 10346 //===--- CHECK: Scanf format string checking ------------------------------===// 10347 10348 namespace { 10349 10350 class CheckScanfHandler : public CheckFormatHandler { 10351 public: 10352 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 10353 const Expr *origFormatExpr, Sema::FormatStringType type, 10354 unsigned firstDataArg, unsigned numDataArgs, 10355 const char *beg, Sema::FormatArgumentPassingKind APK, 10356 ArrayRef<const Expr *> Args, unsigned formatIdx, 10357 bool inFunctionCall, Sema::VariadicCallType CallType, 10358 llvm::SmallBitVector &CheckedVarArgs, 10359 UncoveredArgHandler &UncoveredArg) 10360 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 10361 numDataArgs, beg, APK, Args, formatIdx, 10362 inFunctionCall, CallType, CheckedVarArgs, 10363 UncoveredArg) {} 10364 10365 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 10366 const char *startSpecifier, 10367 unsigned specifierLen) override; 10368 10369 bool HandleInvalidScanfConversionSpecifier( 10370 const analyze_scanf::ScanfSpecifier &FS, 10371 const char *startSpecifier, 10372 unsigned specifierLen) override; 10373 10374 void HandleIncompleteScanList(const char *start, const char *end) override; 10375 }; 10376 10377 } // namespace 10378 10379 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 10380 const char *end) { 10381 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 10382 getLocationOfByte(end), /*IsStringLocation*/true, 10383 getSpecifierRange(start, end - start)); 10384 } 10385 10386 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 10387 const analyze_scanf::ScanfSpecifier &FS, 10388 const char *startSpecifier, 10389 unsigned specifierLen) { 10390 const analyze_scanf::ScanfConversionSpecifier &CS = 10391 FS.getConversionSpecifier(); 10392 10393 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 10394 getLocationOfByte(CS.getStart()), 10395 startSpecifier, specifierLen, 10396 CS.getStart(), CS.getLength()); 10397 } 10398 10399 bool CheckScanfHandler::HandleScanfSpecifier( 10400 const analyze_scanf::ScanfSpecifier &FS, 10401 const char *startSpecifier, 10402 unsigned specifierLen) { 10403 using namespace analyze_scanf; 10404 using namespace analyze_format_string; 10405 10406 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 10407 10408 // Handle case where '%' and '*' don't consume an argument. 
These shouldn't 10409 // be used to decide if we are using positional arguments consistently. 10410 if (FS.consumesDataArgument()) { 10411 if (atFirstArg) { 10412 atFirstArg = false; 10413 usesPositionalArgs = FS.usesPositionalArg(); 10414 } 10415 else if (usesPositionalArgs != FS.usesPositionalArg()) { 10416 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 10417 startSpecifier, specifierLen); 10418 return false; 10419 } 10420 } 10421 10422 // Check if the field width is non-zero. 10423 const OptionalAmount &Amt = FS.getFieldWidth(); 10424 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 10425 if (Amt.getConstantAmount() == 0) { 10426 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 10427 Amt.getConstantLength()); 10428 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 10429 getLocationOfByte(Amt.getStart()), 10430 /*IsStringLocation*/true, R, 10431 FixItHint::CreateRemoval(R)); 10432 } 10433 } 10434 10435 if (!FS.consumesDataArgument()) { 10436 // FIXME: Technically specifying a precision or field width here 10437 // makes no sense. Worth issuing a warning at some point. 10438 return true; 10439 } 10440 10441 // Consume the argument. 10442 unsigned argIndex = FS.getArgIndex(); 10443 if (argIndex < NumDataArgs) { 10444 // The check to see if the argIndex is valid will come later. 10445 // We set the bit here because we may exit early from this 10446 // function if we encounter some other error. 10447 CoveredArgs.set(argIndex); 10448 } 10449 10450 // Check that the length modifier is valid with the given conversion specifier. 10451 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 10452 S.getLangOpts())) 10453 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10454 diag::warn_format_nonsensical_length); 10455 else if (!FS.hasStandardLengthModifier()) 10456 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 10457 else if (!FS.hasStandardLengthConversionCombination()) 10458 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 10459 diag::warn_format_non_standard_conversion_spec); 10460 10461 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 10462 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 10463 10464 // The remaining checks depend on the data arguments. 10465 if (ArgPassingKind == Sema::FAPK_VAList) 10466 return true; 10467 10468 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 10469 return false; 10470 10471 // Check that the argument type matches the format specifier. 10472 const Expr *Ex = getDataArg(argIndex); 10473 if (!Ex) 10474 return true; 10475 10476 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 10477 10478 if (!AT.isValid()) { 10479 return true; 10480 } 10481 10482 analyze_format_string::ArgType::MatchKind Match = 10483 AT.matchesType(S.Context, Ex->getType()); 10484 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 10485 if (Match == analyze_format_string::ArgType::Match) 10486 return true; 10487 10488 ScanfSpecifier fixedFS = FS; 10489 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 10490 S.getLangOpts(), S.Context); 10491 10492 unsigned Diag = 10493 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 10494 : diag::warn_format_conversion_argument_type_mismatch; 10495 10496 if (Success) { 10497 // Get the fix string from the fixed format specifier.
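// For example (illustrative), if the specifier was '%d' but the matching
// argument is a 'long *', the fixed specifier prints as '%ld'.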
10498 SmallString<128> buf; 10499 llvm::raw_svector_ostream os(buf); 10500 fixedFS.toString(os); 10501 10502 EmitFormatDiagnostic( 10503 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 10504 << Ex->getType() << false << Ex->getSourceRange(), 10505 Ex->getBeginLoc(), 10506 /*IsStringLocation*/ false, 10507 getSpecifierRange(startSpecifier, specifierLen), 10508 FixItHint::CreateReplacement( 10509 getSpecifierRange(startSpecifier, specifierLen), os.str())); 10510 } else { 10511 EmitFormatDiagnostic(S.PDiag(Diag) 10512 << AT.getRepresentativeTypeName(S.Context) 10513 << Ex->getType() << false << Ex->getSourceRange(), 10514 Ex->getBeginLoc(), 10515 /*IsStringLocation*/ false, 10516 getSpecifierRange(startSpecifier, specifierLen)); 10517 } 10518 10519 return true; 10520 } 10521 10522 static void CheckFormatString( 10523 Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr, 10524 ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK, 10525 unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type, 10526 bool inFunctionCall, Sema::VariadicCallType CallType, 10527 llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg, 10528 bool IgnoreStringsWithoutSpecifiers) { 10529 // CHECK: is the format string a wide literal? 10530 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 10531 CheckFormatHandler::EmitFormatDiagnostic( 10532 S, inFunctionCall, Args[format_idx], 10533 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 10534 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 10535 return; 10536 } 10537 10538 // Str - The format string. NOTE: this is NOT null-terminated! 10539 StringRef StrRef = FExpr->getString(); 10540 const char *Str = StrRef.data(); 10541 // Account for cases where the string literal is truncated in a declaration. 10542 const ConstantArrayType *T = 10543 S.Context.getAsConstantArrayType(FExpr->getType()); 10544 assert(T && "String literal not of constant array type!"); 10545 size_t TypeSize = T->getSize().getZExtValue(); 10546 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 10547 const unsigned numDataArgs = Args.size() - firstDataArg; 10548 10549 if (IgnoreStringsWithoutSpecifiers && 10550 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 10551 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 10552 return; 10553 10554 // Emit a warning if the string literal is truncated and does not contain an 10555 // embedded null character. 10556 if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) { 10557 CheckFormatHandler::EmitFormatDiagnostic( 10558 S, inFunctionCall, Args[format_idx], 10559 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 10560 FExpr->getBeginLoc(), 10561 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 10562 return; 10563 } 10564 10565 // CHECK: empty format string? 
10566 if (StrLen == 0 && numDataArgs > 0) { 10567 CheckFormatHandler::EmitFormatDiagnostic( 10568 S, inFunctionCall, Args[format_idx], 10569 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(), 10570 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 10571 return; 10572 } 10573 10574 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString || 10575 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog || 10576 Type == Sema::FST_OSTrace) { 10577 CheckPrintfHandler H( 10578 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs, 10579 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, APK, 10580 Args, format_idx, inFunctionCall, CallType, CheckedVarArgs, 10581 UncoveredArg); 10582 10583 if (!analyze_format_string::ParsePrintfString( 10584 H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo(), 10585 Type == Sema::FST_FreeBSDKPrintf)) 10586 H.DoneProcessing(); 10587 } else if (Type == Sema::FST_Scanf) { 10588 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg, 10589 numDataArgs, Str, APK, Args, format_idx, inFunctionCall, 10590 CallType, CheckedVarArgs, UncoveredArg); 10591 10592 if (!analyze_format_string::ParseScanfString( 10593 H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 10594 H.DoneProcessing(); 10595 } // TODO: handle other formats 10596 } 10597 10598 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) { 10599 // Str - The format string. NOTE: this is NOT null-terminated! 10600 StringRef StrRef = FExpr->getString(); 10601 const char *Str = StrRef.data(); 10602 // Account for cases where the string literal is truncated in a declaration. 10603 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType()); 10604 assert(T && "String literal not of constant array type!"); 10605 size_t TypeSize = T->getSize().getZExtValue(); 10606 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 10607 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen, 10608 getLangOpts(), 10609 Context.getTargetInfo()); 10610 } 10611 10612 //===--- CHECK: Warn on use of wrong absolute value function. -------------===// 10613 10614 // Returns the related absolute value function that is larger, or 0 if one 10615 // does not exist.
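// For example, __builtin_abs -> __builtin_labs -> __builtin_llabs, and
// __builtin_llabs has no larger sibling, so it maps to 0.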
10616 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 10617 switch (AbsFunction) { 10618 default: 10619 return 0; 10620 10621 case Builtin::BI__builtin_abs: 10622 return Builtin::BI__builtin_labs; 10623 case Builtin::BI__builtin_labs: 10624 return Builtin::BI__builtin_llabs; 10625 case Builtin::BI__builtin_llabs: 10626 return 0; 10627 10628 case Builtin::BI__builtin_fabsf: 10629 return Builtin::BI__builtin_fabs; 10630 case Builtin::BI__builtin_fabs: 10631 return Builtin::BI__builtin_fabsl; 10632 case Builtin::BI__builtin_fabsl: 10633 return 0; 10634 10635 case Builtin::BI__builtin_cabsf: 10636 return Builtin::BI__builtin_cabs; 10637 case Builtin::BI__builtin_cabs: 10638 return Builtin::BI__builtin_cabsl; 10639 case Builtin::BI__builtin_cabsl: 10640 return 0; 10641 10642 case Builtin::BIabs: 10643 return Builtin::BIlabs; 10644 case Builtin::BIlabs: 10645 return Builtin::BIllabs; 10646 case Builtin::BIllabs: 10647 return 0; 10648 10649 case Builtin::BIfabsf: 10650 return Builtin::BIfabs; 10651 case Builtin::BIfabs: 10652 return Builtin::BIfabsl; 10653 case Builtin::BIfabsl: 10654 return 0; 10655 10656 case Builtin::BIcabsf: 10657 return Builtin::BIcabs; 10658 case Builtin::BIcabs: 10659 return Builtin::BIcabsl; 10660 case Builtin::BIcabsl: 10661 return 0; 10662 } 10663 } 10664 10665 // Returns the argument type of the absolute value function. 10666 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 10667 unsigned AbsType) { 10668 if (AbsType == 0) 10669 return QualType(); 10670 10671 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 10672 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 10673 if (Error != ASTContext::GE_None) 10674 return QualType(); 10675 10676 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 10677 if (!FT) 10678 return QualType(); 10679 10680 if (FT->getNumParams() != 1) 10681 return QualType(); 10682 10683 return FT->getParamType(0); 10684 } 10685 10686 // Returns the best absolute value function, or zero, based on type and 10687 // current absolute value function. 10688 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 10689 unsigned AbsFunctionKind) { 10690 unsigned BestKind = 0; 10691 uint64_t ArgSize = Context.getTypeSize(ArgType); 10692 for (unsigned Kind = AbsFunctionKind; Kind != 0; 10693 Kind = getLargerAbsoluteValueFunction(Kind)) { 10694 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 10695 if (Context.getTypeSize(ParamType) >= ArgSize) { 10696 if (BestKind == 0) 10697 BestKind = Kind; 10698 else if (Context.hasSameType(ParamType, ArgType)) { 10699 BestKind = Kind; 10700 break; 10701 } 10702 } 10703 } 10704 return BestKind; 10705 } 10706 10707 enum AbsoluteValueKind { 10708 AVK_Integer, 10709 AVK_Floating, 10710 AVK_Complex 10711 }; 10712 10713 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 10714 if (T->isIntegralOrEnumerationType()) 10715 return AVK_Integer; 10716 if (T->isRealFloatingType()) 10717 return AVK_Floating; 10718 if (T->isAnyComplexType()) 10719 return AVK_Complex; 10720 10721 llvm_unreachable("Type not integer, floating, or complex"); 10722 } 10723 10724 // Changes the absolute value function to a different type. Preserves whether 10725 // the function is a builtin. 
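// For example, with AVK_Floating the integer variants map to __builtin_fabsf
// (or fabsf for the library functions); getBestAbsFunction then widens the
// result to fit the argument type.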
10726 static unsigned changeAbsFunction(unsigned AbsKind, 10727 AbsoluteValueKind ValueKind) { 10728 switch (ValueKind) { 10729 case AVK_Integer: 10730 switch (AbsKind) { 10731 default: 10732 return 0; 10733 case Builtin::BI__builtin_fabsf: 10734 case Builtin::BI__builtin_fabs: 10735 case Builtin::BI__builtin_fabsl: 10736 case Builtin::BI__builtin_cabsf: 10737 case Builtin::BI__builtin_cabs: 10738 case Builtin::BI__builtin_cabsl: 10739 return Builtin::BI__builtin_abs; 10740 case Builtin::BIfabsf: 10741 case Builtin::BIfabs: 10742 case Builtin::BIfabsl: 10743 case Builtin::BIcabsf: 10744 case Builtin::BIcabs: 10745 case Builtin::BIcabsl: 10746 return Builtin::BIabs; 10747 } 10748 case AVK_Floating: 10749 switch (AbsKind) { 10750 default: 10751 return 0; 10752 case Builtin::BI__builtin_abs: 10753 case Builtin::BI__builtin_labs: 10754 case Builtin::BI__builtin_llabs: 10755 case Builtin::BI__builtin_cabsf: 10756 case Builtin::BI__builtin_cabs: 10757 case Builtin::BI__builtin_cabsl: 10758 return Builtin::BI__builtin_fabsf; 10759 case Builtin::BIabs: 10760 case Builtin::BIlabs: 10761 case Builtin::BIllabs: 10762 case Builtin::BIcabsf: 10763 case Builtin::BIcabs: 10764 case Builtin::BIcabsl: 10765 return Builtin::BIfabsf; 10766 } 10767 case AVK_Complex: 10768 switch (AbsKind) { 10769 default: 10770 return 0; 10771 case Builtin::BI__builtin_abs: 10772 case Builtin::BI__builtin_labs: 10773 case Builtin::BI__builtin_llabs: 10774 case Builtin::BI__builtin_fabsf: 10775 case Builtin::BI__builtin_fabs: 10776 case Builtin::BI__builtin_fabsl: 10777 return Builtin::BI__builtin_cabsf; 10778 case Builtin::BIabs: 10779 case Builtin::BIlabs: 10780 case Builtin::BIllabs: 10781 case Builtin::BIfabsf: 10782 case Builtin::BIfabs: 10783 case Builtin::BIfabsl: 10784 return Builtin::BIcabsf; 10785 } 10786 } 10787 llvm_unreachable("Unable to convert function"); 10788 } 10789 10790 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 10791 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 10792 if (!FnInfo) 10793 return 0; 10794 10795 switch (FDecl->getBuiltinID()) { 10796 default: 10797 return 0; 10798 case Builtin::BI__builtin_abs: 10799 case Builtin::BI__builtin_fabs: 10800 case Builtin::BI__builtin_fabsf: 10801 case Builtin::BI__builtin_fabsl: 10802 case Builtin::BI__builtin_labs: 10803 case Builtin::BI__builtin_llabs: 10804 case Builtin::BI__builtin_cabs: 10805 case Builtin::BI__builtin_cabsf: 10806 case Builtin::BI__builtin_cabsl: 10807 case Builtin::BIabs: 10808 case Builtin::BIlabs: 10809 case Builtin::BIllabs: 10810 case Builtin::BIfabs: 10811 case Builtin::BIfabsf: 10812 case Builtin::BIfabsl: 10813 case Builtin::BIcabs: 10814 case Builtin::BIcabsf: 10815 case Builtin::BIcabsl: 10816 return FDecl->getBuiltinID(); 10817 } 10818 llvm_unreachable("Unknown Builtin type"); 10819 } 10820 10821 // If the replacement is valid, emit a note with replacement function. 10822 // Additionally, suggest including the proper header if not already included. 
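// In C++ the suggested replacement for real-valued arguments is std::abs (from
// <cstdlib> for integer arguments, <cmath> for floating-point ones); otherwise
// the best-fitting C library or builtin function is named.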
10823 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 10824 unsigned AbsKind, QualType ArgType) { 10825 bool EmitHeaderHint = true; 10826 const char *HeaderName = nullptr; 10827 const char *FunctionName = nullptr; 10828 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 10829 FunctionName = "std::abs"; 10830 if (ArgType->isIntegralOrEnumerationType()) { 10831 HeaderName = "cstdlib"; 10832 } else if (ArgType->isRealFloatingType()) { 10833 HeaderName = "cmath"; 10834 } else { 10835 llvm_unreachable("Invalid Type"); 10836 } 10837 10838 // Lookup all std::abs 10839 if (NamespaceDecl *Std = S.getStdNamespace()) { 10840 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 10841 R.suppressDiagnostics(); 10842 S.LookupQualifiedName(R, Std); 10843 10844 for (const auto *I : R) { 10845 const FunctionDecl *FDecl = nullptr; 10846 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 10847 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 10848 } else { 10849 FDecl = dyn_cast<FunctionDecl>(I); 10850 } 10851 if (!FDecl) 10852 continue; 10853 10854 // Found std::abs(), check that they are the right ones. 10855 if (FDecl->getNumParams() != 1) 10856 continue; 10857 10858 // Check that the parameter type can handle the argument. 10859 QualType ParamType = FDecl->getParamDecl(0)->getType(); 10860 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 10861 S.Context.getTypeSize(ArgType) <= 10862 S.Context.getTypeSize(ParamType)) { 10863 // Found a function, don't need the header hint. 10864 EmitHeaderHint = false; 10865 break; 10866 } 10867 } 10868 } 10869 } else { 10870 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 10871 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 10872 10873 if (HeaderName) { 10874 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 10875 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 10876 R.suppressDiagnostics(); 10877 S.LookupName(R, S.getCurScope()); 10878 10879 if (R.isSingleResult()) { 10880 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 10881 if (FD && FD->getBuiltinID() == AbsKind) { 10882 EmitHeaderHint = false; 10883 } else { 10884 return; 10885 } 10886 } else if (!R.empty()) { 10887 return; 10888 } 10889 } 10890 } 10891 10892 S.Diag(Loc, diag::note_replace_abs_function) 10893 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 10894 10895 if (!HeaderName) 10896 return; 10897 10898 if (!EmitHeaderHint) 10899 return; 10900 10901 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 10902 << FunctionName; 10903 } 10904 10905 template <std::size_t StrLen> 10906 static bool IsStdFunction(const FunctionDecl *FDecl, 10907 const char (&Str)[StrLen]) { 10908 if (!FDecl) 10909 return false; 10910 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 10911 return false; 10912 if (!FDecl->isInStdNamespace()) 10913 return false; 10914 10915 return true; 10916 } 10917 10918 // Warn when using the wrong abs() function. 
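// For example (illustrative):
//   double d = ...;
//   if (abs(d) > 1.0)  // integer abs() truncates; fabs()/std::abs is suggested
//     ...
// Unsigned arguments and pointer arguments get their own dedicated warnings
// below.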
10919 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 10920 const FunctionDecl *FDecl) { 10921 if (Call->getNumArgs() != 1) 10922 return; 10923 10924 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 10925 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 10926 if (AbsKind == 0 && !IsStdAbs) 10927 return; 10928 10929 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10930 QualType ParamType = Call->getArg(0)->getType(); 10931 10932 // Unsigned types cannot be negative. Suggest removing the absolute value 10933 // function call. 10934 if (ArgType->isUnsignedIntegerType()) { 10935 const char *FunctionName = 10936 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 10937 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 10938 Diag(Call->getExprLoc(), diag::note_remove_abs) 10939 << FunctionName 10940 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 10941 return; 10942 } 10943 10944 // Taking the absolute value of a pointer is very suspicious; the user probably 10945 // wanted to index into an array, dereference a pointer, call a function, etc. 10946 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 10947 unsigned DiagType = 0; 10948 if (ArgType->isFunctionType()) 10949 DiagType = 1; 10950 else if (ArgType->isArrayType()) 10951 DiagType = 2; 10952 10953 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 10954 return; 10955 } 10956 10957 // std::abs has overloads which prevent most of the absolute value problems 10958 // from occurring. 10959 if (IsStdAbs) 10960 return; 10961 10962 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 10963 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 10964 10965 // The argument and parameter are the same kind. Check if they are the right 10966 // size. 10967 if (ArgValueKind == ParamValueKind) { 10968 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 10969 return; 10970 10971 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 10972 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 10973 << FDecl << ArgType << ParamType; 10974 10975 if (NewAbsKind == 0) 10976 return; 10977 10978 emitReplacement(*this, Call->getExprLoc(), 10979 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10980 return; 10981 } 10982 10983 // ArgValueKind != ParamValueKind 10984 // The wrong type of absolute value function was used. Attempt to find the 10985 // proper one. 10986 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 10987 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 10988 if (NewAbsKind == 0) 10989 return; 10990 10991 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 10992 << FDecl << ParamValueKind << ArgValueKind; 10993 10994 emitReplacement(*this, Call->getExprLoc(), 10995 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10996 } 10997 10998 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===// 10999 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 11000 const FunctionDecl *FDecl) { 11001 if (!Call || !FDecl) return; 11002 11003 // Ignore template specializations and macros.
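// For example, the warning fires on 'std::max(0u, n)' written directly in the
// source, but not when the zero literal comes from a macro expansion.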
11004 if (inTemplateInstantiation()) return; 11005 if (Call->getExprLoc().isMacroID()) return; 11006 11007 // Only care about the one template argument, two function parameter std::max 11008 if (Call->getNumArgs() != 2) return; 11009 if (!IsStdFunction(FDecl, "max")) return; 11010 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 11011 if (!ArgList) return; 11012 if (ArgList->size() != 1) return; 11013 11014 // Check that template type argument is unsigned integer. 11015 const auto& TA = ArgList->get(0); 11016 if (TA.getKind() != TemplateArgument::Type) return; 11017 QualType ArgType = TA.getAsType(); 11018 if (!ArgType->isUnsignedIntegerType()) return; 11019 11020 // See if either argument is a literal zero. 11021 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 11022 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 11023 if (!MTE) return false; 11024 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 11025 if (!Num) return false; 11026 if (Num->getValue() != 0) return false; 11027 return true; 11028 }; 11029 11030 const Expr *FirstArg = Call->getArg(0); 11031 const Expr *SecondArg = Call->getArg(1); 11032 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 11033 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 11034 11035 // Only warn when exactly one argument is zero. 11036 if (IsFirstArgZero == IsSecondArgZero) return; 11037 11038 SourceRange FirstRange = FirstArg->getSourceRange(); 11039 SourceRange SecondRange = SecondArg->getSourceRange(); 11040 11041 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 11042 11043 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 11044 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 11045 11046 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 11047 SourceRange RemovalRange; 11048 if (IsFirstArgZero) { 11049 RemovalRange = SourceRange(FirstRange.getBegin(), 11050 SecondRange.getBegin().getLocWithOffset(-1)); 11051 } else { 11052 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 11053 SecondRange.getEnd()); 11054 } 11055 11056 Diag(Call->getExprLoc(), diag::note_remove_max_call) 11057 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 11058 << FixItHint::CreateRemoval(RemovalRange); 11059 } 11060 11061 //===--- CHECK: Standard memory functions ---------------------------------===// 11062 11063 /// Takes the expression passed to the size_t parameter of functions 11064 /// such as memcmp, strncat, etc and warns if it's a comparison. 11065 /// 11066 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 
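/// As written, the size argument is the boolean result of `sizeof(a) > 0`;
/// the intended call is `if (memcmp(&a, &b, sizeof(a)) > 0)`.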
11067 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 11068 IdentifierInfo *FnName, 11069 SourceLocation FnLoc, 11070 SourceLocation RParenLoc) { 11071 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 11072 if (!Size) 11073 return false; 11074 11075 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 11076 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 11077 return false; 11078 11079 SourceRange SizeRange = Size->getSourceRange(); 11080 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 11081 << SizeRange << FnName; 11082 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 11083 << FnName 11084 << FixItHint::CreateInsertion( 11085 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 11086 << FixItHint::CreateRemoval(RParenLoc); 11087 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 11088 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 11089 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 11090 ")"); 11091 11092 return true; 11093 } 11094 11095 /// Determine whether the given type is or contains a dynamic class type 11096 /// (e.g., whether it has a vtable). 11097 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 11098 bool &IsContained) { 11099 // Look through array types while ignoring qualifiers. 11100 const Type *Ty = T->getBaseElementTypeUnsafe(); 11101 IsContained = false; 11102 11103 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 11104 RD = RD ? RD->getDefinition() : nullptr; 11105 if (!RD || RD->isInvalidDecl()) 11106 return nullptr; 11107 11108 if (RD->isDynamicClass()) 11109 return RD; 11110 11111 // Check all the fields. If any bases were dynamic, the class is dynamic. 11112 // It's impossible for a class to transitively contain itself by value, so 11113 // infinite recursion is impossible. 11114 for (auto *FD : RD->fields()) { 11115 bool SubContained; 11116 if (const CXXRecordDecl *ContainedRD = 11117 getContainedDynamicClass(FD->getType(), SubContained)) { 11118 IsContained = true; 11119 return ContainedRD; 11120 } 11121 } 11122 11123 return nullptr; 11124 } 11125 11126 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 11127 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 11128 if (Unary->getKind() == UETT_SizeOf) 11129 return Unary; 11130 return nullptr; 11131 } 11132 11133 /// If E is a sizeof expression, returns its argument expression, 11134 /// otherwise returns NULL. 11135 static const Expr *getSizeOfExprArg(const Expr *E) { 11136 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 11137 if (!SizeOf->isArgumentType()) 11138 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 11139 return nullptr; 11140 } 11141 11142 /// If E is a sizeof expression, returns its argument type. 
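/// For `sizeof(expr)` that is the type of the expression; for `sizeof(Type)`
/// it is the named type itself.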
11143 static QualType getSizeOfArgType(const Expr *E) { 11144 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 11145 return SizeOf->getTypeOfArgument(); 11146 return QualType(); 11147 } 11148 11149 namespace { 11150 11151 struct SearchNonTrivialToInitializeField 11152 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 11153 using Super = 11154 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 11155 11156 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 11157 11158 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 11159 SourceLocation SL) { 11160 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 11161 asDerived().visitArray(PDIK, AT, SL); 11162 return; 11163 } 11164 11165 Super::visitWithKind(PDIK, FT, SL); 11166 } 11167 11168 void visitARCStrong(QualType FT, SourceLocation SL) { 11169 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 11170 } 11171 void visitARCWeak(QualType FT, SourceLocation SL) { 11172 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 11173 } 11174 void visitStruct(QualType FT, SourceLocation SL) { 11175 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 11176 visit(FD->getType(), FD->getLocation()); 11177 } 11178 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 11179 const ArrayType *AT, SourceLocation SL) { 11180 visit(getContext().getBaseElementType(AT), SL); 11181 } 11182 void visitTrivial(QualType FT, SourceLocation SL) {} 11183 11184 static void diag(QualType RT, const Expr *E, Sema &S) { 11185 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 11186 } 11187 11188 ASTContext &getContext() { return S.getASTContext(); } 11189 11190 const Expr *E; 11191 Sema &S; 11192 }; 11193 11194 struct SearchNonTrivialToCopyField 11195 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 11196 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 11197 11198 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 11199 11200 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 11201 SourceLocation SL) { 11202 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 11203 asDerived().visitArray(PCK, AT, SL); 11204 return; 11205 } 11206 11207 Super::visitWithKind(PCK, FT, SL); 11208 } 11209 11210 void visitARCStrong(QualType FT, SourceLocation SL) { 11211 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 11212 } 11213 void visitARCWeak(QualType FT, SourceLocation SL) { 11214 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 11215 } 11216 void visitStruct(QualType FT, SourceLocation SL) { 11217 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 11218 visit(FD->getType(), FD->getLocation()); 11219 } 11220 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 11221 SourceLocation SL) { 11222 visit(getContext().getBaseElementType(AT), SL); 11223 } 11224 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 11225 SourceLocation SL) {} 11226 void visitTrivial(QualType FT, SourceLocation SL) {} 11227 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 11228 11229 static void diag(QualType RT, const Expr *E, Sema &S) { 11230 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 11231 } 11232 11233 ASTContext &getContext() { return S.getASTContext(); } 11234 11235 const Expr *E; 11236 Sema &S; 11237 
}; 11238 11239 } 11240 11241 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 11242 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 11243 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 11244 11245 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 11246 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 11247 return false; 11248 11249 return doesExprLikelyComputeSize(BO->getLHS()) || 11250 doesExprLikelyComputeSize(BO->getRHS()); 11251 } 11252 11253 return getAsSizeOfExpr(SizeofExpr) != nullptr; 11254 } 11255 11256 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 11257 /// 11258 /// \code 11259 /// #define MACRO 0 11260 /// foo(MACRO); 11261 /// foo(0); 11262 /// \endcode 11263 /// 11264 /// This should return true for the first call to foo, but not for the second 11265 /// (regardless of whether foo is a macro or function). 11266 static bool isArgumentExpandedFromMacro(SourceManager &SM, 11267 SourceLocation CallLoc, 11268 SourceLocation ArgLoc) { 11269 if (!CallLoc.isMacroID()) 11270 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 11271 11272 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 11273 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 11274 } 11275 11276 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 11277 /// last two arguments transposed. 11278 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 11279 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 11280 return; 11281 11282 const Expr *SizeArg = 11283 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 11284 11285 auto isLiteralZero = [](const Expr *E) { 11286 return (isa<IntegerLiteral>(E) && 11287 cast<IntegerLiteral>(E)->getValue() == 0) || 11288 (isa<CharacterLiteral>(E) && 11289 cast<CharacterLiteral>(E)->getValue() == 0); 11290 }; 11291 11292 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 11293 SourceLocation CallLoc = Call->getRParenLoc(); 11294 SourceManager &SM = S.getSourceManager(); 11295 if (isLiteralZero(SizeArg) && 11296 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 11297 11298 SourceLocation DiagLoc = SizeArg->getExprLoc(); 11299 11300 // Some platforms #define bzero to __builtin_memset. See if this is the 11301 // case, and if so, emit a better diagnostic. 11302 if (BId == Builtin::BIbzero || 11303 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 11304 CallLoc, SM, S.getLangOpts()) == "bzero")) { 11305 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 11306 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 11307 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 11308 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 11309 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 11310 } 11311 return; 11312 } 11313 11314 // If the second argument to a memset is a sizeof expression and the third 11315 // isn't, this is also likely an error. This should catch 11316 // 'memset(buf, sizeof(buf), 0xff)'. 
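// (the fill value and size are transposed; the intended call is
// 'memset(buf, 0xff, sizeof(buf))').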
11317 if (BId == Builtin::BImemset && 11318 doesExprLikelyComputeSize(Call->getArg(1)) && 11319 !doesExprLikelyComputeSize(Call->getArg(2))) { 11320 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 11321 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 11322 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 11323 return; 11324 } 11325 } 11326 11327 /// Check for dangerous or invalid arguments to memset(). 11328 /// 11329 /// This issues warnings on known problematic, dangerous or unspecified 11330 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 11331 /// function calls. 11332 /// 11333 /// \param Call The call expression to diagnose. 11334 void Sema::CheckMemaccessArguments(const CallExpr *Call, 11335 unsigned BId, 11336 IdentifierInfo *FnName) { 11337 assert(BId != 0); 11338 11339 // It is possible to have a non-standard definition of memset. Validate 11340 // we have enough arguments, and if not, abort further checking. 11341 unsigned ExpectedNumArgs = 11342 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 11343 if (Call->getNumArgs() < ExpectedNumArgs) 11344 return; 11345 11346 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 11347 BId == Builtin::BIstrndup ? 1 : 2); 11348 unsigned LenArg = 11349 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 11350 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 11351 11352 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 11353 Call->getBeginLoc(), Call->getRParenLoc())) 11354 return; 11355 11356 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 11357 CheckMemaccessSize(*this, BId, Call); 11358 11359 // We have special checking when the length is a sizeof expression. 11360 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 11361 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 11362 llvm::FoldingSetNodeID SizeOfArgID; 11363 11364 // Although widely used, 'bzero' is not a standard function. Be more strict 11365 // with the argument types before allowing diagnostics and only allow the 11366 // form bzero(ptr, sizeof(...)). 11367 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 11368 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 11369 return; 11370 11371 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 11372 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 11373 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 11374 11375 QualType DestTy = Dest->getType(); 11376 QualType PointeeTy; 11377 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 11378 PointeeTy = DestPtrTy->getPointeeType(); 11379 11380 // Never warn about void type pointers. This can be used to suppress 11381 // false positives. 11382 if (PointeeTy->isVoidType()) 11383 continue; 11384 11385 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 11386 // actually comparing the expressions for equality. Because computing the 11387 // expression IDs can be expensive, we only do this if the diagnostic is 11388 // enabled. 11389 if (SizeOfArg && 11390 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 11391 SizeOfArg->getExprLoc())) { 11392 // We only compute IDs for expressions if the warning is enabled, and 11393 // cache the sizeof arg's ID. 
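// A default-constructed FoldingSetNodeID serves as the "not yet computed"
// sentinel for the cached ID.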
11394 if (SizeOfArgID == llvm::FoldingSetNodeID()) 11395 SizeOfArg->Profile(SizeOfArgID, Context, true); 11396 llvm::FoldingSetNodeID DestID; 11397 Dest->Profile(DestID, Context, true); 11398 if (DestID == SizeOfArgID) { 11399 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 11400 // over sizeof(src) as well. 11401 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 11402 StringRef ReadableName = FnName->getName(); 11403 11404 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 11405 if (UnaryOp->getOpcode() == UO_AddrOf) 11406 ActionIdx = 1; // If it's an address-of operator, just remove it. 11407 if (!PointeeTy->isIncompleteType() && 11408 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 11409 ActionIdx = 2; // If the pointee's size is sizeof(char), 11410 // suggest an explicit length. 11411 11412 // If the function is defined as a builtin macro, do not show macro 11413 // expansion. 11414 SourceLocation SL = SizeOfArg->getExprLoc(); 11415 SourceRange DSR = Dest->getSourceRange(); 11416 SourceRange SSR = SizeOfArg->getSourceRange(); 11417 SourceManager &SM = getSourceManager(); 11418 11419 if (SM.isMacroArgExpansion(SL)) { 11420 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 11421 SL = SM.getSpellingLoc(SL); 11422 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 11423 SM.getSpellingLoc(DSR.getEnd())); 11424 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 11425 SM.getSpellingLoc(SSR.getEnd())); 11426 } 11427 11428 DiagRuntimeBehavior(SL, SizeOfArg, 11429 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 11430 << ReadableName 11431 << PointeeTy 11432 << DestTy 11433 << DSR 11434 << SSR); 11435 DiagRuntimeBehavior(SL, SizeOfArg, 11436 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 11437 << ActionIdx 11438 << SSR); 11439 11440 break; 11441 } 11442 } 11443 11444 // Also check for cases where the sizeof argument is the exact same 11445 // type as the memory argument, and where it points to a user-defined 11446 // record type. 11447 if (SizeOfArgTy != QualType()) { 11448 if (PointeeTy->isRecordType() && 11449 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 11450 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 11451 PDiag(diag::warn_sizeof_pointer_type_memaccess) 11452 << FnName << SizeOfArgTy << ArgIdx 11453 << PointeeTy << Dest->getSourceRange() 11454 << LenExpr->getSourceRange()); 11455 break; 11456 } 11457 } 11458 } else if (DestTy->isArrayType()) { 11459 PointeeTy = DestTy; 11460 } 11461 11462 if (PointeeTy == QualType()) 11463 continue; 11464 11465 // Always complain about dynamic classes. 11466 bool IsContained; 11467 if (const CXXRecordDecl *ContainedRD = 11468 getContainedDynamicClass(PointeeTy, IsContained)) { 11469 11470 unsigned OperationType = 0; 11471 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 11472 // "overwritten" if we're warning about the destination for any call 11473 // but memcmp; otherwise a verb appropriate to the call. 11474 if (ArgIdx != 0 || IsCmp) { 11475 if (BId == Builtin::BImemcpy) 11476 OperationType = 1; 11477 else if (BId == Builtin::BImemmove) 11478 OperationType = 2; 11479 else if (IsCmp) 11480 OperationType = 3; 11481 } 11482 11483 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11484 PDiag(diag::warn_dyn_class_memaccess) 11485 << (IsCmp ?
ArgIdx + 2 : ArgIdx) << FnName 11486 << IsContained << ContainedRD << OperationType 11487 << Call->getCallee()->getSourceRange()); 11488 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 11489 BId != Builtin::BImemset) 11490 DiagRuntimeBehavior( 11491 Dest->getExprLoc(), Dest, 11492 PDiag(diag::warn_arc_object_memaccess) 11493 << ArgIdx << FnName << PointeeTy 11494 << Call->getCallee()->getSourceRange()); 11495 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 11496 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 11497 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 11498 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11499 PDiag(diag::warn_cstruct_memaccess) 11500 << ArgIdx << FnName << PointeeTy << 0); 11501 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 11502 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 11503 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 11504 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 11505 PDiag(diag::warn_cstruct_memaccess) 11506 << ArgIdx << FnName << PointeeTy << 1); 11507 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 11508 } else { 11509 continue; 11510 } 11511 } else 11512 continue; 11513 11514 DiagRuntimeBehavior( 11515 Dest->getExprLoc(), Dest, 11516 PDiag(diag::note_bad_memaccess_silence) 11517 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 11518 break; 11519 } 11520 } 11521 11522 // A little helper routine: ignore addition and subtraction of integer literals. 11523 // This intentionally does not ignore all integer constant expressions because 11524 // we don't want to remove sizeof(). 11525 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 11526 Ex = Ex->IgnoreParenCasts(); 11527 11528 while (true) { 11529 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 11530 if (!BO || !BO->isAdditiveOp()) 11531 break; 11532 11533 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 11534 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 11535 11536 if (isa<IntegerLiteral>(RHS)) 11537 Ex = LHS; 11538 else if (isa<IntegerLiteral>(LHS)) 11539 Ex = RHS; 11540 else 11541 break; 11542 } 11543 11544 return Ex; 11545 } 11546 11547 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 11548 ASTContext &Context) { 11549 // Only handle constant-sized or VLAs, but not flexible members. 11550 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 11551 // Only issue the FIXIT for arrays of size > 1. 11552 if (CAT->getSize().getSExtValue() <= 1) 11553 return false; 11554 } else if (!Ty->isVariableArrayType()) { 11555 return false; 11556 } 11557 return true; 11558 } 11559 11560 // Warn if the user has made the 'size' argument to strlcpy or strlcat 11561 // be the size of the source, instead of the destination. 
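// For example, 'strlcpy(dst, src, sizeof(src))' and
// 'strlcpy(dst, src, strlen(src))' are flagged; when the destination is a
// constant-size array, a 'sizeof(dst)' fix-it is suggested.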
11562 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 11563 IdentifierInfo *FnName) { 11564 11565 // Don't crash if the user has the wrong number of arguments 11566 unsigned NumArgs = Call->getNumArgs(); 11567 if ((NumArgs != 3) && (NumArgs != 4)) 11568 return; 11569 11570 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 11571 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 11572 const Expr *CompareWithSrc = nullptr; 11573 11574 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 11575 Call->getBeginLoc(), Call->getRParenLoc())) 11576 return; 11577 11578 // Look for 'strlcpy(dst, x, sizeof(x))' 11579 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 11580 CompareWithSrc = Ex; 11581 else { 11582 // Look for 'strlcpy(dst, x, strlen(x))' 11583 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 11584 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 11585 SizeCall->getNumArgs() == 1) 11586 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 11587 } 11588 } 11589 11590 if (!CompareWithSrc) 11591 return; 11592 11593 // Determine if the argument to sizeof/strlen is equal to the source 11594 // argument. In principle there's all kinds of things you could do 11595 // here, for instance creating an == expression and evaluating it with 11596 // EvaluateAsBooleanCondition, but this uses a more direct technique: 11597 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 11598 if (!SrcArgDRE) 11599 return; 11600 11601 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 11602 if (!CompareWithSrcDRE || 11603 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 11604 return; 11605 11606 const Expr *OriginalSizeArg = Call->getArg(2); 11607 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 11608 << OriginalSizeArg->getSourceRange() << FnName; 11609 11610 // Output a FIXIT hint if the destination is an array (rather than a 11611 // pointer to an array). This could be enhanced to handle some 11612 // pointers if we know the actual size, like if DstArg is 'array+2' 11613 // we could say 'sizeof(array)-2'. 11614 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 11615 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 11616 return; 11617 11618 SmallString<128> sizeString; 11619 llvm::raw_svector_ostream OS(sizeString); 11620 OS << "sizeof("; 11621 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11622 OS << ")"; 11623 11624 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 11625 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 11626 OS.str()); 11627 } 11628 11629 /// Check if two expressions refer to the same declaration. 11630 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 11631 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 11632 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 11633 return D1->getDecl() == D2->getDecl(); 11634 return false; 11635 } 11636 11637 static const Expr *getStrlenExprArg(const Expr *E) { 11638 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 11639 const FunctionDecl *FD = CE->getDirectCallee(); 11640 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 11641 return nullptr; 11642 return CE->getArg(0)->IgnoreParenCasts(); 11643 } 11644 return nullptr; 11645 } 11646 11647 // Warn on anti-patterns as the 'size' argument to strncat. 
11648 // The correct size argument should look like the following: 11649 // strncat(dst, src, sizeof(dst) - strlen(dst) - 1); 11650 void Sema::CheckStrncatArguments(const CallExpr *CE, 11651 IdentifierInfo *FnName) { 11652 // Don't crash if the user has the wrong number of arguments. 11653 if (CE->getNumArgs() < 3) 11654 return; 11655 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 11656 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 11657 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 11658 11659 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 11660 CE->getRParenLoc())) 11661 return; 11662 11663 // Identify common expressions that are wrongly used as the size argument 11664 // to strncat and may lead to buffer overflows. 11665 unsigned PatternType = 0; 11666 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 11667 // - sizeof(dst) 11668 if (referToTheSameDecl(SizeOfArg, DstArg)) 11669 PatternType = 1; 11670 // - sizeof(src) 11671 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 11672 PatternType = 2; 11673 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 11674 if (BE->getOpcode() == BO_Sub) { 11675 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 11676 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 11677 // - sizeof(dst) - strlen(dst) 11678 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 11679 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 11680 PatternType = 1; 11681 // - sizeof(src) - (anything) 11682 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 11683 PatternType = 2; 11684 } 11685 } 11686 11687 if (PatternType == 0) 11688 return; 11689 11690 // Generate the diagnostic. 11691 SourceLocation SL = LenArg->getBeginLoc(); 11692 SourceRange SR = LenArg->getSourceRange(); 11693 SourceManager &SM = getSourceManager(); 11694 11695 // If the function is defined as a builtin macro, do not show macro expansion. 11696 if (SM.isMacroArgExpansion(SL)) { 11697 SL = SM.getSpellingLoc(SL); 11698 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 11699 SM.getSpellingLoc(SR.getEnd())); 11700 } 11701 11702 // Check if the destination is an array (rather than a pointer to an array).
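// Only for a known-size array can the 'sizeof(dst) - strlen(dst) - 1' fix-it
// below be suggested; otherwise we just point at the suspicious size argument.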
11703 QualType DstTy = DstArg->getType(); 11704 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 11705 Context); 11706 if (!isKnownSizeArray) { 11707 if (PatternType == 1) 11708 Diag(SL, diag::warn_strncat_wrong_size) << SR; 11709 else 11710 Diag(SL, diag::warn_strncat_src_size) << SR; 11711 return; 11712 } 11713 11714 if (PatternType == 1) 11715 Diag(SL, diag::warn_strncat_large_size) << SR; 11716 else 11717 Diag(SL, diag::warn_strncat_src_size) << SR; 11718 11719 SmallString<128> sizeString; 11720 llvm::raw_svector_ostream OS(sizeString); 11721 OS << "sizeof("; 11722 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11723 OS << ") - "; 11724 OS << "strlen("; 11725 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11726 OS << ") - 1"; 11727 11728 Diag(SL, diag::note_strncat_wrong_size) 11729 << FixItHint::CreateReplacement(SR, OS.str()); 11730 } 11731 11732 namespace { 11733 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 11734 const UnaryOperator *UnaryExpr, const Decl *D) { 11735 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 11736 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 11737 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 11738 return; 11739 } 11740 } 11741 11742 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 11743 const UnaryOperator *UnaryExpr) { 11744 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 11745 const Decl *D = Lvalue->getDecl(); 11746 if (isa<DeclaratorDecl>(D)) 11747 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 11748 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 11749 } 11750 11751 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 11752 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 11753 Lvalue->getMemberDecl()); 11754 } 11755 11756 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 11757 const UnaryOperator *UnaryExpr) { 11758 const auto *Lambda = dyn_cast<LambdaExpr>( 11759 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 11760 if (!Lambda) 11761 return; 11762 11763 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 11764 << CalleeName << 2 /*object: lambda expression*/; 11765 } 11766 11767 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 11768 const DeclRefExpr *Lvalue) { 11769 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 11770 if (Var == nullptr) 11771 return; 11772 11773 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 11774 << CalleeName << 0 /*object: */ << Var; 11775 } 11776 11777 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 11778 const CastExpr *Cast) { 11779 SmallString<128> SizeString; 11780 llvm::raw_svector_ostream OS(SizeString); 11781 11782 clang::CastKind Kind = Cast->getCastKind(); 11783 if (Kind == clang::CK_BitCast && 11784 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 11785 return; 11786 if (Kind == clang::CK_IntegralToPointer && 11787 !isa<IntegerLiteral>( 11788 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 11789 return; 11790 11791 switch (Cast->getCastKind()) { 11792 case clang::CK_BitCast: 11793 case clang::CK_IntegralToPointer: 11794 case clang::CK_FunctionToPointerDecay: 11795 OS << '\''; 11796 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 11797 OS << '\''; 11798 break; 11799 default: 11800 return; 11801 } 11802 11803 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 
11804 << CalleeName << 0 /*object: */ << OS.str(); 11805 } 11806 } // namespace 11807 11808 /// Alerts the user that they are attempting to free a non-malloc'd object. 11809 void Sema::CheckFreeArguments(const CallExpr *E) { 11810 const std::string CalleeName = 11811 cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 11812 11813 { // Prefer something that doesn't involve a cast to make things simpler. 11814 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 11815 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 11816 switch (UnaryExpr->getOpcode()) { 11817 case UnaryOperator::Opcode::UO_AddrOf: 11818 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 11819 case UnaryOperator::Opcode::UO_Plus: 11820 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 11821 default: 11822 break; 11823 } 11824 11825 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 11826 if (Lvalue->getType()->isArrayType()) 11827 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 11828 11829 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 11830 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 11831 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 11832 return; 11833 } 11834 11835 if (isa<BlockExpr>(Arg)) { 11836 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 11837 << CalleeName << 1 /*object: block*/; 11838 return; 11839 } 11840 } 11841 // Maybe the cast was important; check after the other cases. 11842 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 11843 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 11844 } 11845 11846 void 11847 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 11848 SourceLocation ReturnLoc, 11849 bool isObjCMethod, 11850 const AttrVec *Attrs, 11851 const FunctionDecl *FD) { 11852 // Check if the return value is null but should not be. 11853 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 11854 (!isObjCMethod && isNonNullType(Context, lhsType))) && 11855 CheckNonNullExpr(*this, RetValExp)) 11856 Diag(ReturnLoc, diag::warn_null_ret) 11857 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 11858 11859 // C++11 [basic.stc.dynamic.allocation]p4: 11860 // If an allocation function declared with a non-throwing 11861 // exception-specification fails to allocate storage, it shall return 11862 // a null pointer. Any other allocation function that fails to allocate 11863 // storage shall indicate failure only by throwing an exception [...] 11864 if (FD) { 11865 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 11866 if (Op == OO_New || Op == OO_Array_New) { 11867 const FunctionProtoType *Proto 11868 = FD->getType()->castAs<FunctionProtoType>(); 11869 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 11870 CheckNonNullExpr(*this, RetValExp)) 11871 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 11872 << FD << getLangOpts().CPlusPlus11; 11873 } 11874 } 11875 11876 // PPC MMA non-pointer types are not allowed as a return type. Checking the type 11877 // here prevents the user from using a PPC MMA type as a trailing return type. 11878 if (Context.getTargetInfo().getTriple().isPPC64()) 11879 CheckPPCMMAType(RetValExp->getType(), ReturnLoc); 11880 } 11881 11882 /// Check for comparisons of floating-point values using == and !=. Issue a 11883 /// warning if the comparison is not likely to do what the programmer intended.
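/// For example, '(float)x == 0.1' is always false when the literal is not
/// exactly representable in 'float', and a general 'a == b' on floating-point
/// operands is diagnosed under -Wfloat-equal unless it is a self-compare, one
/// side is an exactly-representable literal, or one side is a builtin call.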
11884 void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS, 11885 BinaryOperatorKind Opcode) { 11886 if (!BinaryOperator::isEqualityOp(Opcode)) 11887 return; 11888 11889 // Match and capture subexpressions such as "(float) X == 0.1". 11890 FloatingLiteral *FPLiteral; 11891 CastExpr *FPCast; 11892 auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) { 11893 FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens()); 11894 FPCast = dyn_cast<CastExpr>(R->IgnoreParens()); 11895 return FPLiteral && FPCast; 11896 }; 11897 11898 if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) { 11899 auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>(); 11900 auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>(); 11901 if (SourceTy && TargetTy && SourceTy->isFloatingPoint() && 11902 TargetTy->isFloatingPoint()) { 11903 bool Lossy; 11904 llvm::APFloat TargetC = FPLiteral->getValue(); 11905 TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)), 11906 llvm::APFloat::rmNearestTiesToEven, &Lossy); 11907 if (Lossy) { 11908 // If the literal cannot be represented in the source type, then a 11909 // check for == is always false and check for != is always true. 11910 Diag(Loc, diag::warn_float_compare_literal) 11911 << (Opcode == BO_EQ) << QualType(SourceTy, 0) 11912 << LHS->getSourceRange() << RHS->getSourceRange(); 11913 return; 11914 } 11915 } 11916 } 11917 11918 // Match a more general floating-point equality comparison (-Wfloat-equal). 11919 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 11920 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 11921 11922 // Special case: check for x == x (which is OK). 11923 // Do not emit warnings for such cases. 11924 if (auto *DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 11925 if (auto *DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 11926 if (DRL->getDecl() == DRR->getDecl()) 11927 return; 11928 11929 // Special case: check for comparisons against literals that can be exactly 11930 // represented by APFloat. In such cases, do not emit a warning. This 11931 // is a heuristic: often comparison against such literals are used to 11932 // detect if a value in a variable has not changed. This clearly can 11933 // lead to false negatives. 11934 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 11935 if (FLL->isExact()) 11936 return; 11937 } else 11938 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 11939 if (FLR->isExact()) 11940 return; 11941 11942 // Check for comparisons with builtin types. 11943 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 11944 if (CL->getBuiltinCallee()) 11945 return; 11946 11947 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 11948 if (CR->getBuiltinCallee()) 11949 return; 11950 11951 // Emit the diagnostic. 11952 Diag(Loc, diag::warn_floatingpoint_eq) 11953 << LHS->getSourceRange() << RHS->getSourceRange(); 11954 } 11955 11956 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 11957 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 11958 11959 namespace { 11960 11961 /// Structure recording the 'active' range of an integer-valued 11962 /// expression. 11963 struct IntRange { 11964 /// The number of bits active in the int. Note that this includes exactly one 11965 /// sign bit if !NonNegative. 11966 unsigned Width; 11967 11968 /// True if the int is known not to have negative values. 
If so, all leading 11969 /// bits before Width are known zero, otherwise they are known to be the 11970 /// same as the MSB within Width. 11971 bool NonNegative; 11972 11973 IntRange(unsigned Width, bool NonNegative) 11974 : Width(Width), NonNegative(NonNegative) {} 11975 11976 /// Number of bits excluding the sign bit. 11977 unsigned valueBits() const { 11978 return NonNegative ? Width : Width - 1; 11979 } 11980 11981 /// Returns the range of the bool type. 11982 static IntRange forBoolType() { 11983 return IntRange(1, true); 11984 } 11985 11986 /// Returns the range of an opaque value of the given integral type. 11987 static IntRange forValueOfType(ASTContext &C, QualType T) { 11988 return forValueOfCanonicalType(C, 11989 T->getCanonicalTypeInternal().getTypePtr()); 11990 } 11991 11992 /// Returns the range of an opaque value of a canonical integral type. 11993 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 11994 assert(T->isCanonicalUnqualified()); 11995 11996 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11997 T = VT->getElementType().getTypePtr(); 11998 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11999 T = CT->getElementType().getTypePtr(); 12000 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 12001 T = AT->getValueType().getTypePtr(); 12002 12003 if (!C.getLangOpts().CPlusPlus) { 12004 // For enum types in C code, use the underlying datatype. 12005 if (const EnumType *ET = dyn_cast<EnumType>(T)) 12006 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 12007 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 12008 // For enum types in C++, use the known bit width of the enumerators. 12009 EnumDecl *Enum = ET->getDecl(); 12010 // In C++11, enums can have a fixed underlying type. Use this type to 12011 // compute the range. 12012 if (Enum->isFixed()) { 12013 return IntRange(C.getIntWidth(QualType(T, 0)), 12014 !ET->isSignedIntegerOrEnumerationType()); 12015 } 12016 12017 unsigned NumPositive = Enum->getNumPositiveBits(); 12018 unsigned NumNegative = Enum->getNumNegativeBits(); 12019 12020 if (NumNegative == 0) 12021 return IntRange(NumPositive, true/*NonNegative*/); 12022 else 12023 return IntRange(std::max(NumPositive + 1, NumNegative), 12024 false/*NonNegative*/); 12025 } 12026 12027 if (const auto *EIT = dyn_cast<BitIntType>(T)) 12028 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 12029 12030 const BuiltinType *BT = cast<BuiltinType>(T); 12031 assert(BT->isInteger()); 12032 12033 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 12034 } 12035 12036 /// Returns the "target" range of a canonical integral type, i.e. 12037 /// the range of values expressible in the type. 12038 /// 12039 /// This matches forValueOfCanonicalType except that enums have the 12040 /// full range of their type, not the range of their enumerators. 
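  /// For example, given 'enum E { A, B, C }' in C++, forValueOfCanonicalType
  /// returns a 2-bit non-negative range covering just the enumerators, while
  /// this returns the full width of the enum's underlying integer type.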
12041 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 12042 assert(T->isCanonicalUnqualified()); 12043 12044 if (const VectorType *VT = dyn_cast<VectorType>(T)) 12045 T = VT->getElementType().getTypePtr(); 12046 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 12047 T = CT->getElementType().getTypePtr(); 12048 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 12049 T = AT->getValueType().getTypePtr(); 12050 if (const EnumType *ET = dyn_cast<EnumType>(T)) 12051 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 12052 12053 if (const auto *EIT = dyn_cast<BitIntType>(T)) 12054 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 12055 12056 const BuiltinType *BT = cast<BuiltinType>(T); 12057 assert(BT->isInteger()); 12058 12059 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 12060 } 12061 12062 /// Returns the supremum of two ranges: i.e. their conservative merge. 12063 static IntRange join(IntRange L, IntRange R) { 12064 bool Unsigned = L.NonNegative && R.NonNegative; 12065 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 12066 L.NonNegative && R.NonNegative); 12067 } 12068 12069 /// Return the range of a bitwise-AND of the two ranges. 12070 static IntRange bit_and(IntRange L, IntRange R) { 12071 unsigned Bits = std::max(L.Width, R.Width); 12072 bool NonNegative = false; 12073 if (L.NonNegative) { 12074 Bits = std::min(Bits, L.Width); 12075 NonNegative = true; 12076 } 12077 if (R.NonNegative) { 12078 Bits = std::min(Bits, R.Width); 12079 NonNegative = true; 12080 } 12081 return IntRange(Bits, NonNegative); 12082 } 12083 12084 /// Return the range of a sum of the two ranges. 12085 static IntRange sum(IntRange L, IntRange R) { 12086 bool Unsigned = L.NonNegative && R.NonNegative; 12087 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 12088 Unsigned); 12089 } 12090 12091 /// Return the range of a difference of the two ranges. 12092 static IntRange difference(IntRange L, IntRange R) { 12093 // We need a 1-bit-wider range if: 12094 // 1) LHS can be negative: least value can be reduced. 12095 // 2) RHS can be negative: greatest value can be increased. 12096 bool CanWiden = !L.NonNegative || !R.NonNegative; 12097 bool Unsigned = L.NonNegative && R.Width == 0; 12098 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 12099 !Unsigned, 12100 Unsigned); 12101 } 12102 12103 /// Return the range of a product of the two ranges. 12104 static IntRange product(IntRange L, IntRange R) { 12105 // If both LHS and RHS can be negative, we can form 12106 // -2^L * -2^R = 2^(L + R) 12107 // which requires L + R + 1 value bits to represent. 12108 bool CanWiden = !L.NonNegative && !R.NonNegative; 12109 bool Unsigned = L.NonNegative && R.NonNegative; 12110 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 12111 Unsigned); 12112 } 12113 12114 /// Return the range of a remainder operation between the two ranges. 12115 static IntRange rem(IntRange L, IntRange R) { 12116 // The result of a remainder can't be larger than the result of 12117 // either side. The sign of the result is the sign of the LHS. 
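    // For example, with non-negative operands the result of x % y is bounded
    // by both x and y, so it fits in the narrower of the two value widths.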
12118 bool Unsigned = L.NonNegative; 12119 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 12120 Unsigned); 12121 } 12122 }; 12123 12124 } // namespace 12125 12126 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 12127 unsigned MaxWidth) { 12128 if (value.isSigned() && value.isNegative()) 12129 return IntRange(value.getMinSignedBits(), false); 12130 12131 if (value.getBitWidth() > MaxWidth) 12132 value = value.trunc(MaxWidth); 12133 12134 // isNonNegative() just checks the sign bit without considering 12135 // signedness. 12136 return IntRange(value.getActiveBits(), true); 12137 } 12138 12139 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 12140 unsigned MaxWidth) { 12141 if (result.isInt()) 12142 return GetValueRange(C, result.getInt(), MaxWidth); 12143 12144 if (result.isVector()) { 12145 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 12146 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 12147 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 12148 R = IntRange::join(R, El); 12149 } 12150 return R; 12151 } 12152 12153 if (result.isComplexInt()) { 12154 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 12155 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 12156 return IntRange::join(R, I); 12157 } 12158 12159 // This can happen with lossless casts to intptr_t of "based" lvalues. 12160 // Assume it might use arbitrary bits. 12161 // FIXME: The only reason we need to pass the type in here is to get 12162 // the sign right on this one case. It would be nice if APValue 12163 // preserved this. 12164 assert(result.isLValue() || result.isAddrLabelDiff()); 12165 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 12166 } 12167 12168 static QualType GetExprType(const Expr *E) { 12169 QualType Ty = E->getType(); 12170 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 12171 Ty = AtomicRHS->getValueType(); 12172 return Ty; 12173 } 12174 12175 /// Pseudo-evaluate the given integer expression, estimating the 12176 /// range of values it might take. 12177 /// 12178 /// \param MaxWidth The width to which the value will be truncated. 12179 /// \param Approximate If \c true, return a likely range for the result: in 12180 /// particular, assume that arithmetic on narrower types doesn't leave 12181 /// those types. If \c false, return a range including all possible 12182 /// result values. 12183 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 12184 bool InConstantContext, bool Approximate) { 12185 E = E->IgnoreParens(); 12186 12187 // Try a full evaluation first. 12188 Expr::EvalResult result; 12189 if (E->EvaluateAsRValue(result, C, InConstantContext)) 12190 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 12191 12192 // I think we only want to look through implicit casts here; if the 12193 // user has an explicit widening cast, we should treat the value as 12194 // being of the new, wider type. 
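  // For example, 'c + 1' with a 'char' c still uses the narrow 8-bit range of
  // 'c' despite the implicit promotion to 'int', whereas '(long long)c + 1'
  // is treated as having the full range of 'long long'.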
12195   if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
12196     if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
12197       return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
12198                           Approximate);
12199
12200     IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));
12201
12202     bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
12203                          CE->getCastKind() == CK_BooleanToSignedIntegral;
12204
12205     // Assume that non-integer casts can span the full range of the type.
12206     if (!isIntegerCast)
12207       return OutputTypeRange;
12208
12209     IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
12210                                      std::min(MaxWidth, OutputTypeRange.Width),
12211                                      InConstantContext, Approximate);
12212
12213     // Bail out if the subexpr's range is as wide as the cast type.
12214     if (SubRange.Width >= OutputTypeRange.Width)
12215       return OutputTypeRange;
12216
12217     // Otherwise, we take the smaller width, and we're non-negative if
12218     // either the output type or the subexpr is.
12219     return IntRange(SubRange.Width,
12220                     SubRange.NonNegative || OutputTypeRange.NonNegative);
12221   }
12222
12223   if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
12224     // If we can fold the condition, just take that operand.
12225     bool CondResult;
12226     if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
12227       return GetExprRange(C,
12228                           CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
12229                           MaxWidth, InConstantContext, Approximate);
12230
12231     // Otherwise, conservatively merge.
12232     // GetExprRange requires an integer expression, but a throw expression
12233     // results in a void type.
12234     Expr *E = CO->getTrueExpr();
12235     IntRange L = E->getType()->isVoidType()
12236                      ? IntRange{0, true}
12237                      : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
12238     E = CO->getFalseExpr();
12239     IntRange R = E->getType()->isVoidType()
12240                      ? IntRange{0, true}
12241                      : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
12242     return IntRange::join(L, R);
12243   }
12244
12245   if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
12246     IntRange (*Combine)(IntRange, IntRange) = IntRange::join;
12247
12248     switch (BO->getOpcode()) {
12249     case BO_Cmp:
12250       llvm_unreachable("builtin <=> should have class type");
12251
12252     // Boolean-valued operations are single-bit and positive.
12253     case BO_LAnd:
12254     case BO_LOr:
12255     case BO_LT:
12256     case BO_GT:
12257     case BO_LE:
12258     case BO_GE:
12259     case BO_EQ:
12260     case BO_NE:
12261       return IntRange::forBoolType();
12262
12263     // The type of the assignments is the type of the LHS, so the RHS
12264     // is not necessarily the same type.
12265     case BO_MulAssign:
12266     case BO_DivAssign:
12267     case BO_RemAssign:
12268     case BO_AddAssign:
12269     case BO_SubAssign:
12270     case BO_XorAssign:
12271     case BO_OrAssign:
12272       // TODO: bitfields?
12273       return IntRange::forValueOfType(C, GetExprType(E));
12274
12275     // Simple assignments just pass through the RHS, which will have
12276     // been coerced to the LHS type.
12277     case BO_Assign:
12278       // TODO: bitfields?
12279       return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
12280                           Approximate);
12281
12282     // Operations with opaque sources are black-listed.
12283     case BO_PtrMemD:
12284     case BO_PtrMemI:
12285       return IntRange::forValueOfType(C, GetExprType(E));
12286
12287     // Bitwise-and uses the *infimum* of the two source ranges.
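    // For example, 'x & 0xFF' fits in 8 non-negative bits regardless of how
    // wide or signed 'x' is, because the constant operand caps the result.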
12288 case BO_And: 12289 case BO_AndAssign: 12290 Combine = IntRange::bit_and; 12291 break; 12292 12293 // Left shift gets black-listed based on a judgement call. 12294 case BO_Shl: 12295 // ...except that we want to treat '1 << (blah)' as logically 12296 // positive. It's an important idiom. 12297 if (IntegerLiteral *I 12298 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 12299 if (I->getValue() == 1) { 12300 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 12301 return IntRange(R.Width, /*NonNegative*/ true); 12302 } 12303 } 12304 LLVM_FALLTHROUGH; 12305 12306 case BO_ShlAssign: 12307 return IntRange::forValueOfType(C, GetExprType(E)); 12308 12309 // Right shift by a constant can narrow its left argument. 12310 case BO_Shr: 12311 case BO_ShrAssign: { 12312 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 12313 Approximate); 12314 12315 // If the shift amount is a positive constant, drop the width by 12316 // that much. 12317 if (Optional<llvm::APSInt> shift = 12318 BO->getRHS()->getIntegerConstantExpr(C)) { 12319 if (shift->isNonNegative()) { 12320 unsigned zext = shift->getZExtValue(); 12321 if (zext >= L.Width) 12322 L.Width = (L.NonNegative ? 0 : 1); 12323 else 12324 L.Width -= zext; 12325 } 12326 } 12327 12328 return L; 12329 } 12330 12331 // Comma acts as its right operand. 12332 case BO_Comma: 12333 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 12334 Approximate); 12335 12336 case BO_Add: 12337 if (!Approximate) 12338 Combine = IntRange::sum; 12339 break; 12340 12341 case BO_Sub: 12342 if (BO->getLHS()->getType()->isPointerType()) 12343 return IntRange::forValueOfType(C, GetExprType(E)); 12344 if (!Approximate) 12345 Combine = IntRange::difference; 12346 break; 12347 12348 case BO_Mul: 12349 if (!Approximate) 12350 Combine = IntRange::product; 12351 break; 12352 12353 // The width of a division result is mostly determined by the size 12354 // of the LHS. 12355 case BO_Div: { 12356 // Don't 'pre-truncate' the operands. 12357 unsigned opWidth = C.getIntWidth(GetExprType(E)); 12358 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 12359 Approximate); 12360 12361 // If the divisor is constant, use that. 12362 if (Optional<llvm::APSInt> divisor = 12363 BO->getRHS()->getIntegerConstantExpr(C)) { 12364 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 12365 if (log2 >= L.Width) 12366 L.Width = (L.NonNegative ? 0 : 1); 12367 else 12368 L.Width = std::min(L.Width - log2, MaxWidth); 12369 return L; 12370 } 12371 12372 // Otherwise, just use the LHS's width. 12373 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 12374 // could be -1. 12375 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 12376 Approximate); 12377 return IntRange(L.Width, L.NonNegative && R.NonNegative); 12378 } 12379 12380 case BO_Rem: 12381 Combine = IntRange::rem; 12382 break; 12383 12384 // The default behavior is okay for these. 12385 case BO_Xor: 12386 case BO_Or: 12387 break; 12388 } 12389 12390 // Combine the two ranges, but limit the result to the type in which we 12391 // performed the computation. 
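    // For example, for 'intA + intB' the operands are pseudo-evaluated at the
    // width of 'int', since the addition wraps in 'int' regardless of how
    // wide the enclosing expression may be.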
12392 QualType T = GetExprType(E); 12393 unsigned opWidth = C.getIntWidth(T); 12394 IntRange L = 12395 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 12396 IntRange R = 12397 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 12398 IntRange C = Combine(L, R); 12399 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 12400 C.Width = std::min(C.Width, MaxWidth); 12401 return C; 12402 } 12403 12404 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 12405 switch (UO->getOpcode()) { 12406 // Boolean-valued operations are white-listed. 12407 case UO_LNot: 12408 return IntRange::forBoolType(); 12409 12410 // Operations with opaque sources are black-listed. 12411 case UO_Deref: 12412 case UO_AddrOf: // should be impossible 12413 return IntRange::forValueOfType(C, GetExprType(E)); 12414 12415 default: 12416 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 12417 Approximate); 12418 } 12419 } 12420 12421 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 12422 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 12423 Approximate); 12424 12425 if (const auto *BitField = E->getSourceBitField()) 12426 return IntRange(BitField->getBitWidthValue(C), 12427 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 12428 12429 return IntRange::forValueOfType(C, GetExprType(E)); 12430 } 12431 12432 static IntRange GetExprRange(ASTContext &C, const Expr *E, 12433 bool InConstantContext, bool Approximate) { 12434 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 12435 Approximate); 12436 } 12437 12438 /// Checks whether the given value, which currently has the given 12439 /// source semantics, has the same value when coerced through the 12440 /// target semantics. 12441 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 12442 const llvm::fltSemantics &Src, 12443 const llvm::fltSemantics &Tgt) { 12444 llvm::APFloat truncated = value; 12445 12446 bool ignored; 12447 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 12448 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 12449 12450 return truncated.bitwiseIsEqual(value); 12451 } 12452 12453 /// Checks whether the given value, which currently has the given 12454 /// source semantics, has the same value when coerced through the 12455 /// target semantics. 12456 /// 12457 /// The value might be a vector of floats (or a complex number). 12458 static bool IsSameFloatAfterCast(const APValue &value, 12459 const llvm::fltSemantics &Src, 12460 const llvm::fltSemantics &Tgt) { 12461 if (value.isFloat()) 12462 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 12463 12464 if (value.isVector()) { 12465 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 12466 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 12467 return false; 12468 return true; 12469 } 12470 12471 assert(value.isComplexFloat()); 12472 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 12473 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 12474 } 12475 12476 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 12477 bool IsListInit = false); 12478 12479 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 12480 // Suppress cases where we are comparing against an enum constant. 
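  // For example, 'x == MyEnum::Last' or 'len <= BUFFER_SIZE' should not be
  // reported as tautological just because the constant happens to sit at the
  // boundary of the other operand's range.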
12481 if (const DeclRefExpr *DR = 12482 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 12483 if (isa<EnumConstantDecl>(DR->getDecl())) 12484 return true; 12485 12486 // Suppress cases where the value is expanded from a macro, unless that macro 12487 // is how a language represents a boolean literal. This is the case in both C 12488 // and Objective-C. 12489 SourceLocation BeginLoc = E->getBeginLoc(); 12490 if (BeginLoc.isMacroID()) { 12491 StringRef MacroName = Lexer::getImmediateMacroName( 12492 BeginLoc, S.getSourceManager(), S.getLangOpts()); 12493 return MacroName != "YES" && MacroName != "NO" && 12494 MacroName != "true" && MacroName != "false"; 12495 } 12496 12497 return false; 12498 } 12499 12500 static bool isKnownToHaveUnsignedValue(Expr *E) { 12501 return E->getType()->isIntegerType() && 12502 (!E->getType()->isSignedIntegerType() || 12503 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 12504 } 12505 12506 namespace { 12507 /// The promoted range of values of a type. In general this has the 12508 /// following structure: 12509 /// 12510 /// |-----------| . . . |-----------| 12511 /// ^ ^ ^ ^ 12512 /// Min HoleMin HoleMax Max 12513 /// 12514 /// ... where there is only a hole if a signed type is promoted to unsigned 12515 /// (in which case Min and Max are the smallest and largest representable 12516 /// values). 12517 struct PromotedRange { 12518 // Min, or HoleMax if there is a hole. 12519 llvm::APSInt PromotedMin; 12520 // Max, or HoleMin if there is a hole. 12521 llvm::APSInt PromotedMax; 12522 12523 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 12524 if (R.Width == 0) 12525 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 12526 else if (R.Width >= BitWidth && !Unsigned) { 12527 // Promotion made the type *narrower*. This happens when promoting 12528 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 12529 // Treat all values of 'signed int' as being in range for now. 12530 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 12531 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 12532 } else { 12533 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 12534 .extOrTrunc(BitWidth); 12535 PromotedMin.setIsUnsigned(Unsigned); 12536 12537 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 12538 .extOrTrunc(BitWidth); 12539 PromotedMax.setIsUnsigned(Unsigned); 12540 } 12541 } 12542 12543 // Determine whether this range is contiguous (has no hole). 12544 bool isContiguous() const { return PromotedMin <= PromotedMax; } 12545 12546 // Where a constant value is within the range. 
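  // For example, a constant lying below the entire promoted range compares as
  // 'Less' (LE | LT | NE): with the constant on the left, '<', '<=' and '!='
  // always hold, which is what constantValue() below decodes.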
12547 enum ComparisonResult { 12548 LT = 0x1, 12549 LE = 0x2, 12550 GT = 0x4, 12551 GE = 0x8, 12552 EQ = 0x10, 12553 NE = 0x20, 12554 InRangeFlag = 0x40, 12555 12556 Less = LE | LT | NE, 12557 Min = LE | InRangeFlag, 12558 InRange = InRangeFlag, 12559 Max = GE | InRangeFlag, 12560 Greater = GE | GT | NE, 12561 12562 OnlyValue = LE | GE | EQ | InRangeFlag, 12563 InHole = NE 12564 }; 12565 12566 ComparisonResult compare(const llvm::APSInt &Value) const { 12567 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 12568 Value.isUnsigned() == PromotedMin.isUnsigned()); 12569 if (!isContiguous()) { 12570 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 12571 if (Value.isMinValue()) return Min; 12572 if (Value.isMaxValue()) return Max; 12573 if (Value >= PromotedMin) return InRange; 12574 if (Value <= PromotedMax) return InRange; 12575 return InHole; 12576 } 12577 12578 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 12579 case -1: return Less; 12580 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 12581 case 1: 12582 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 12583 case -1: return InRange; 12584 case 0: return Max; 12585 case 1: return Greater; 12586 } 12587 } 12588 12589 llvm_unreachable("impossible compare result"); 12590 } 12591 12592 static llvm::Optional<StringRef> 12593 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 12594 if (Op == BO_Cmp) { 12595 ComparisonResult LTFlag = LT, GTFlag = GT; 12596 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 12597 12598 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 12599 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 12600 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 12601 return llvm::None; 12602 } 12603 12604 ComparisonResult TrueFlag, FalseFlag; 12605 if (Op == BO_EQ) { 12606 TrueFlag = EQ; 12607 FalseFlag = NE; 12608 } else if (Op == BO_NE) { 12609 TrueFlag = NE; 12610 FalseFlag = EQ; 12611 } else { 12612 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 12613 TrueFlag = LT; 12614 FalseFlag = GE; 12615 } else { 12616 TrueFlag = GT; 12617 FalseFlag = LE; 12618 } 12619 if (Op == BO_GE || Op == BO_LE) 12620 std::swap(TrueFlag, FalseFlag); 12621 } 12622 if (R & TrueFlag) 12623 return StringRef("true"); 12624 if (R & FalseFlag) 12625 return StringRef("false"); 12626 return llvm::None; 12627 } 12628 }; 12629 } 12630 12631 static bool HasEnumType(Expr *E) { 12632 // Strip off implicit integral promotions. 12633 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 12634 if (ICE->getCastKind() != CK_IntegralCast && 12635 ICE->getCastKind() != CK_NoOp) 12636 break; 12637 E = ICE->getSubExpr(); 12638 } 12639 12640 return E->getType()->isEnumeralType(); 12641 } 12642 12643 static int classifyConstantValue(Expr *Constant) { 12644 // The values of this enumeration are used in the diagnostics 12645 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 12646 enum ConstantValueKind { 12647 Miscellaneous = 0, 12648 LiteralTrue, 12649 LiteralFalse 12650 }; 12651 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 12652 return BL->getValue() ? 
ConstantValueKind::LiteralTrue
12653                         : ConstantValueKind::LiteralFalse;
12654   return ConstantValueKind::Miscellaneous;
12655 }
12656
12657 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
12658                                         Expr *Constant, Expr *Other,
12659                                         const llvm::APSInt &Value,
12660                                         bool RhsConstant) {
12661   if (S.inTemplateInstantiation())
12662     return false;
12663
12664   Expr *OriginalOther = Other;
12665
12666   Constant = Constant->IgnoreParenImpCasts();
12667   Other = Other->IgnoreParenImpCasts();
12668
12669   // Suppress warnings on tautological comparisons between values of the same
12670   // enumeration type. There are only two ways we could warn on this:
12671   //  - If the constant is outside the range of representable values of
12672   //    the enumeration. In such a case, we should warn about the cast
12673   //    to enumeration type, not about the comparison.
12674   //  - If the constant is the maximum / minimum in-range value. For an
12675   //    enumeration type, such comparisons can be meaningful and useful.
12676   if (Constant->getType()->isEnumeralType() &&
12677       S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
12678     return false;
12679
12680   IntRange OtherValueRange = GetExprRange(
12681       S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);
12682
12683   QualType OtherT = Other->getType();
12684   if (const auto *AT = OtherT->getAs<AtomicType>())
12685     OtherT = AT->getValueType();
12686   IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);
12687
12688   // Special case for ObjC BOOL on targets where it's a typedef for a signed char
12689   // (namely, macOS). FIXME: IntRange::forValueOfType should do this.
12690   bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
12691                               S.NSAPIObj->isObjCBOOLType(OtherT) &&
12692                               OtherT->isSpecificBuiltinType(BuiltinType::SChar);
12693
12694   // Whether we're treating Other as being a bool because of the form of the
12695   // expression despite it having another type (typically 'int' in C).
12696   bool OtherIsBooleanDespiteType =
12697       !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
12698   if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
12699     OtherTypeRange = OtherValueRange = IntRange::forBoolType();
12700
12701   // Check if all values in the range of possible values of this expression
12702   // lead to the same comparison outcome.
12703   PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
12704                                         Value.isUnsigned());
12705   auto Cmp = OtherPromotedValueRange.compare(Value);
12706   auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
12707   if (!Result)
12708     return false;
12709
12710   // Also consider the range determined by the type alone. This allows us to
12711   // classify the warning under the proper diagnostic group.
12712   bool TautologicalTypeCompare = false;
12713   {
12714     PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
12715                                          Value.isUnsigned());
12716     auto TypeCmp = OtherPromotedTypeRange.compare(Value);
12717     if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
12718                                                        RhsConstant)) {
12719       TautologicalTypeCompare = true;
12720       Cmp = TypeCmp;
12721       Result = TypeResult;
12722     }
12723   }
12724
12725   // Don't warn if the non-constant operand actually always evaluates to the
12726   // same value.
12727   if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
12728     return false;
12729
12730   // Suppress the diagnostic for an in-range comparison if the constant comes
12731   // from a macro or enumerator.
We don't want to diagnose 12732 // 12733 // some_long_value <= INT_MAX 12734 // 12735 // when sizeof(int) == sizeof(long). 12736 bool InRange = Cmp & PromotedRange::InRangeFlag; 12737 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 12738 return false; 12739 12740 // A comparison of an unsigned bit-field against 0 is really a type problem, 12741 // even though at the type level the bit-field might promote to 'signed int'. 12742 if (Other->refersToBitField() && InRange && Value == 0 && 12743 Other->getType()->isUnsignedIntegerOrEnumerationType()) 12744 TautologicalTypeCompare = true; 12745 12746 // If this is a comparison to an enum constant, include that 12747 // constant in the diagnostic. 12748 const EnumConstantDecl *ED = nullptr; 12749 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 12750 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 12751 12752 // Should be enough for uint128 (39 decimal digits) 12753 SmallString<64> PrettySourceValue; 12754 llvm::raw_svector_ostream OS(PrettySourceValue); 12755 if (ED) { 12756 OS << '\'' << *ED << "' (" << Value << ")"; 12757 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 12758 Constant->IgnoreParenImpCasts())) { 12759 OS << (BL->getValue() ? "YES" : "NO"); 12760 } else { 12761 OS << Value; 12762 } 12763 12764 if (!TautologicalTypeCompare) { 12765 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 12766 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 12767 << E->getOpcodeStr() << OS.str() << *Result 12768 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12769 return true; 12770 } 12771 12772 if (IsObjCSignedCharBool) { 12773 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12774 S.PDiag(diag::warn_tautological_compare_objc_bool) 12775 << OS.str() << *Result); 12776 return true; 12777 } 12778 12779 // FIXME: We use a somewhat different formatting for the in-range cases and 12780 // cases involving boolean values for historical reasons. We should pick a 12781 // consistent way of presenting these diagnostics. 12782 if (!InRange || Other->isKnownToHaveBooleanValue()) { 12783 12784 S.DiagRuntimeBehavior( 12785 E->getOperatorLoc(), E, 12786 S.PDiag(!InRange ? diag::warn_out_of_range_compare 12787 : diag::warn_tautological_bool_compare) 12788 << OS.str() << classifyConstantValue(Constant) << OtherT 12789 << OtherIsBooleanDespiteType << *Result 12790 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 12791 } else { 12792 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 12793 unsigned Diag = 12794 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 12795 ? (HasEnumType(OriginalOther) 12796 ? diag::warn_unsigned_enum_always_true_comparison 12797 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 12798 : diag::warn_unsigned_always_true_comparison) 12799 : diag::warn_tautological_constant_compare; 12800 12801 S.Diag(E->getOperatorLoc(), Diag) 12802 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 12803 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12804 } 12805 12806 return true; 12807 } 12808 12809 /// Analyze the operands of the given comparison. Implements the 12810 /// fallback case from AnalyzeComparison. 12811 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 12812 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12813 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12814 } 12815 12816 /// Implements -Wsign-compare. 
12817 /// 12818 /// \param E the binary operator to check for warnings 12819 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 12820 // The type the comparison is being performed in. 12821 QualType T = E->getLHS()->getType(); 12822 12823 // Only analyze comparison operators where both sides have been converted to 12824 // the same type. 12825 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 12826 return AnalyzeImpConvsInComparison(S, E); 12827 12828 // Don't analyze value-dependent comparisons directly. 12829 if (E->isValueDependent()) 12830 return AnalyzeImpConvsInComparison(S, E); 12831 12832 Expr *LHS = E->getLHS(); 12833 Expr *RHS = E->getRHS(); 12834 12835 if (T->isIntegralType(S.Context)) { 12836 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 12837 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 12838 12839 // We don't care about expressions whose result is a constant. 12840 if (RHSValue && LHSValue) 12841 return AnalyzeImpConvsInComparison(S, E); 12842 12843 // We only care about expressions where just one side is literal 12844 if ((bool)RHSValue ^ (bool)LHSValue) { 12845 // Is the constant on the RHS or LHS? 12846 const bool RhsConstant = (bool)RHSValue; 12847 Expr *Const = RhsConstant ? RHS : LHS; 12848 Expr *Other = RhsConstant ? LHS : RHS; 12849 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 12850 12851 // Check whether an integer constant comparison results in a value 12852 // of 'true' or 'false'. 12853 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 12854 return AnalyzeImpConvsInComparison(S, E); 12855 } 12856 } 12857 12858 if (!T->hasUnsignedIntegerRepresentation()) { 12859 // We don't do anything special if this isn't an unsigned integral 12860 // comparison: we're only interested in integral comparisons, and 12861 // signed comparisons only happen in cases we don't care to warn about. 12862 return AnalyzeImpConvsInComparison(S, E); 12863 } 12864 12865 LHS = LHS->IgnoreParenImpCasts(); 12866 RHS = RHS->IgnoreParenImpCasts(); 12867 12868 if (!S.getLangOpts().CPlusPlus) { 12869 // Avoid warning about comparison of integers with different signs when 12870 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 12871 // the type of `E`. 12872 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 12873 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12874 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 12875 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12876 } 12877 12878 // Check to see if one of the (unmodified) operands is of different 12879 // signedness. 12880 Expr *signedOperand, *unsignedOperand; 12881 if (LHS->getType()->hasSignedIntegerRepresentation()) { 12882 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 12883 "unsigned comparison between two signed integer expressions?"); 12884 signedOperand = LHS; 12885 unsignedOperand = RHS; 12886 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 12887 signedOperand = RHS; 12888 unsignedOperand = LHS; 12889 } else { 12890 return AnalyzeImpConvsInComparison(S, E); 12891 } 12892 12893 // Otherwise, calculate the effective range of the signed operand. 12894 IntRange signedRange = GetExprRange( 12895 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 12896 12897 // Go ahead and analyze implicit conversions in the operands. Note 12898 // that we skip the implicit conversions on both sides. 
12899   AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
12900   AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
12901
12902   // If the signed range is non-negative, -Wsign-compare won't fire.
12903   if (signedRange.NonNegative)
12904     return;
12905
12906   // For (in)equality comparisons, if the unsigned operand is a
12907   // constant which cannot collide with an overflowed signed operand,
12908   // then reinterpreting the signed operand as unsigned will not
12909   // change the result of the comparison.
12910   if (E->isEqualityOp()) {
12911     unsigned comparisonWidth = S.Context.getIntWidth(T);
12912     IntRange unsignedRange =
12913         GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
12914                      /*Approximate*/ true);
12915
12916     // We should never be unable to prove that the unsigned operand is
12917     // non-negative.
12918     assert(unsignedRange.NonNegative && "unsigned range includes negative?");
12919
12920     if (unsignedRange.Width < comparisonWidth)
12921       return;
12922   }
12923
12924   S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
12925                         S.PDiag(diag::warn_mixed_sign_comparison)
12926                             << LHS->getType() << RHS->getType()
12927                             << LHS->getSourceRange() << RHS->getSourceRange());
12928 }
12929
12930 /// Analyzes an attempt to assign the given value to a bitfield.
12931 ///
12932 /// Returns true if there was something fishy about the attempt.
12933 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
12934                                       SourceLocation InitLoc) {
12935   assert(Bitfield->isBitField());
12936   if (Bitfield->isInvalidDecl())
12937     return false;
12938
12939   // White-list bool bitfields.
12940   QualType BitfieldType = Bitfield->getType();
12941   if (BitfieldType->isBooleanType())
12942     return false;
12943
12944   if (BitfieldType->isEnumeralType()) {
12945     EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
12946     // If the underlying enum type was not explicitly specified as an unsigned
12947     // type and the enum contains only positive values, MSVC++ will cause an
12948     // inconsistency by storing this as a signed type.
12949     if (S.getLangOpts().CPlusPlus11 &&
12950         !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
12951         BitfieldEnumDecl->getNumPositiveBits() > 0 &&
12952         BitfieldEnumDecl->getNumNegativeBits() == 0) {
12953       S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
12954           << BitfieldEnumDecl;
12955     }
12956   }
12957
12958   if (Bitfield->getType()->isBooleanType())
12959     return false;
12960
12961   // Ignore value- or type-dependent expressions.
12962   if (Bitfield->getBitWidth()->isValueDependent() ||
12963       Bitfield->getBitWidth()->isTypeDependent() ||
12964       Init->isValueDependent() ||
12965       Init->isTypeDependent())
12966     return false;
12967
12968   Expr *OriginalInit = Init->IgnoreParenImpCasts();
12969   unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
12970
12971   Expr::EvalResult Result;
12972   if (!OriginalInit->EvaluateAsInt(Result, S.Context,
12973                                    Expr::SE_AllowSideEffects)) {
12974     // The RHS is not constant. If the RHS has an enum type, make sure the
12975     // bitfield is wide enough to hold all the values of the enum without
12976     // truncation.
12977     if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
12978       EnumDecl *ED = EnumTy->getDecl();
12979       bool SignedBitfield = BitfieldType->isSignedIntegerType();
12980
12981       // Enum types are implicitly signed on Windows, so check if there are any
12982       // negative enumerators to see if the enum was intended to be signed or
12983       // not.
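      // For example, 'enum E { A, B, C, D };' needs two positive bits; storing
      // 'D' in a signed two-bit bit-field reads back as a negative value, so
      // the checks below flag the surprising sign change.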
12984 bool SignedEnum = ED->getNumNegativeBits() > 0; 12985 12986 // Check for surprising sign changes when assigning enum values to a 12987 // bitfield of different signedness. If the bitfield is signed and we 12988 // have exactly the right number of bits to store this unsigned enum, 12989 // suggest changing the enum to an unsigned type. This typically happens 12990 // on Windows where unfixed enums always use an underlying type of 'int'. 12991 unsigned DiagID = 0; 12992 if (SignedEnum && !SignedBitfield) { 12993 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 12994 } else if (SignedBitfield && !SignedEnum && 12995 ED->getNumPositiveBits() == FieldWidth) { 12996 DiagID = diag::warn_signed_bitfield_enum_conversion; 12997 } 12998 12999 if (DiagID) { 13000 S.Diag(InitLoc, DiagID) << Bitfield << ED; 13001 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 13002 SourceRange TypeRange = 13003 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 13004 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 13005 << SignedEnum << TypeRange; 13006 } 13007 13008 // Compute the required bitwidth. If the enum has negative values, we need 13009 // one more bit than the normal number of positive bits to represent the 13010 // sign bit. 13011 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 13012 ED->getNumNegativeBits()) 13013 : ED->getNumPositiveBits(); 13014 13015 // Check the bitwidth. 13016 if (BitsNeeded > FieldWidth) { 13017 Expr *WidthExpr = Bitfield->getBitWidth(); 13018 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 13019 << Bitfield << ED; 13020 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 13021 << BitsNeeded << ED << WidthExpr->getSourceRange(); 13022 } 13023 } 13024 13025 return false; 13026 } 13027 13028 llvm::APSInt Value = Result.Val.getInt(); 13029 13030 unsigned OriginalWidth = Value.getBitWidth(); 13031 13032 if (!Value.isSigned() || Value.isNegative()) 13033 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 13034 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 13035 OriginalWidth = Value.getMinSignedBits(); 13036 13037 if (OriginalWidth <= FieldWidth) 13038 return false; 13039 13040 // Compute the value which the bitfield will contain. 13041 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 13042 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 13043 13044 // Check whether the stored value is equal to the original value. 13045 TruncatedValue = TruncatedValue.extend(OriginalWidth); 13046 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 13047 return false; 13048 13049 // Special-case bitfields of width 1: booleans are naturally 0/1, and 13050 // therefore don't strictly fit into a signed bitfield of width 1. 13051 if (FieldWidth == 1 && Value == 1) 13052 return false; 13053 13054 std::string PrettyValue = toString(Value, 10); 13055 std::string PrettyTrunc = toString(TruncatedValue, 10); 13056 13057 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 13058 << PrettyValue << PrettyTrunc << OriginalInit->getType() 13059 << Init->getSourceRange(); 13060 13061 return true; 13062 } 13063 13064 /// Analyze the given simple or compound assignment for warning-worthy 13065 /// operations. 13066 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 13067 // Just recurse on the LHS. 
13068 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 13069 13070 // We want to recurse on the RHS as normal unless we're assigning to 13071 // a bitfield. 13072 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 13073 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 13074 E->getOperatorLoc())) { 13075 // Recurse, ignoring any implicit conversions on the RHS. 13076 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 13077 E->getOperatorLoc()); 13078 } 13079 } 13080 13081 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 13082 13083 // Diagnose implicitly sequentially-consistent atomic assignment. 13084 if (E->getLHS()->getType()->isAtomicType()) 13085 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 13086 } 13087 13088 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 13089 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 13090 SourceLocation CContext, unsigned diag, 13091 bool pruneControlFlow = false) { 13092 if (pruneControlFlow) { 13093 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13094 S.PDiag(diag) 13095 << SourceType << T << E->getSourceRange() 13096 << SourceRange(CContext)); 13097 return; 13098 } 13099 S.Diag(E->getExprLoc(), diag) 13100 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 13101 } 13102 13103 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 13104 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 13105 SourceLocation CContext, 13106 unsigned diag, bool pruneControlFlow = false) { 13107 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 13108 } 13109 13110 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 13111 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 13112 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 13113 } 13114 13115 static void adornObjCBoolConversionDiagWithTernaryFixit( 13116 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 13117 Expr *Ignored = SourceExpr->IgnoreImplicit(); 13118 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 13119 Ignored = OVE->getSourceExpr(); 13120 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 13121 isa<BinaryOperator>(Ignored) || 13122 isa<CXXOperatorCallExpr>(Ignored); 13123 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 13124 if (NeedsParens) 13125 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 13126 << FixItHint::CreateInsertion(EndLoc, ")"); 13127 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 13128 } 13129 13130 /// Diagnose an implicit cast from a floating point value to an integer value. 
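/// For example, 'int i = 1.9;' silently truncates the value to 1; the warning
/// emitted below spells out both the source and the truncated target value.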
13131 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 13132 SourceLocation CContext) { 13133 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 13134 const bool PruneWarnings = S.inTemplateInstantiation(); 13135 13136 Expr *InnerE = E->IgnoreParenImpCasts(); 13137 // We also want to warn on, e.g., "int i = -1.234" 13138 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 13139 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 13140 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 13141 13142 const bool IsLiteral = 13143 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 13144 13145 llvm::APFloat Value(0.0); 13146 bool IsConstant = 13147 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 13148 if (!IsConstant) { 13149 if (isObjCSignedCharBool(S, T)) { 13150 return adornObjCBoolConversionDiagWithTernaryFixit( 13151 S, E, 13152 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 13153 << E->getType()); 13154 } 13155 13156 return DiagnoseImpCast(S, E, T, CContext, 13157 diag::warn_impcast_float_integer, PruneWarnings); 13158 } 13159 13160 bool isExact = false; 13161 13162 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 13163 T->hasUnsignedIntegerRepresentation()); 13164 llvm::APFloat::opStatus Result = Value.convertToInteger( 13165 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 13166 13167 // FIXME: Force the precision of the source value down so we don't print 13168 // digits which are usually useless (we don't really care here if we 13169 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 13170 // would automatically print the shortest representation, but it's a bit 13171 // tricky to implement. 13172 SmallString<16> PrettySourceValue; 13173 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 13174 precision = (precision * 59 + 195) / 196; 13175 Value.toString(PrettySourceValue, precision); 13176 13177 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 13178 return adornObjCBoolConversionDiagWithTernaryFixit( 13179 S, E, 13180 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 13181 << PrettySourceValue); 13182 } 13183 13184 if (Result == llvm::APFloat::opOK && isExact) { 13185 if (IsLiteral) return; 13186 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 13187 PruneWarnings); 13188 } 13189 13190 // Conversion of a floating-point value to a non-bool integer where the 13191 // integral part cannot be represented by the integer type is undefined. 13192 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 13193 return DiagnoseImpCast( 13194 S, E, T, CContext, 13195 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 13196 : diag::warn_impcast_float_to_integer_out_of_range, 13197 PruneWarnings); 13198 13199 unsigned DiagID = 0; 13200 if (IsLiteral) { 13201 // Warn on floating point literal to integer. 13202 DiagID = diag::warn_impcast_literal_float_to_integer; 13203 } else if (IntegerValue == 0) { 13204 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 13205 return DiagnoseImpCast(S, E, T, CContext, 13206 diag::warn_impcast_float_integer, PruneWarnings); 13207 } 13208 // Warn on non-zero to zero conversion. 
13209 DiagID = diag::warn_impcast_float_to_integer_zero; 13210 } else { 13211 if (IntegerValue.isUnsigned()) { 13212 if (!IntegerValue.isMaxValue()) { 13213 return DiagnoseImpCast(S, E, T, CContext, 13214 diag::warn_impcast_float_integer, PruneWarnings); 13215 } 13216 } else { // IntegerValue.isSigned() 13217 if (!IntegerValue.isMaxSignedValue() && 13218 !IntegerValue.isMinSignedValue()) { 13219 return DiagnoseImpCast(S, E, T, CContext, 13220 diag::warn_impcast_float_integer, PruneWarnings); 13221 } 13222 } 13223 // Warn on evaluatable floating point expression to integer conversion. 13224 DiagID = diag::warn_impcast_float_to_integer; 13225 } 13226 13227 SmallString<16> PrettyTargetValue; 13228 if (IsBool) 13229 PrettyTargetValue = Value.isZero() ? "false" : "true"; 13230 else 13231 IntegerValue.toString(PrettyTargetValue); 13232 13233 if (PruneWarnings) { 13234 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13235 S.PDiag(DiagID) 13236 << E->getType() << T.getUnqualifiedType() 13237 << PrettySourceValue << PrettyTargetValue 13238 << E->getSourceRange() << SourceRange(CContext)); 13239 } else { 13240 S.Diag(E->getExprLoc(), DiagID) 13241 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 13242 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 13243 } 13244 } 13245 13246 /// Analyze the given compound assignment for the possible losing of 13247 /// floating-point precision. 13248 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 13249 assert(isa<CompoundAssignOperator>(E) && 13250 "Must be compound assignment operation"); 13251 // Recurse on the LHS and RHS in here 13252 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 13253 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 13254 13255 if (E->getLHS()->getType()->isAtomicType()) 13256 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 13257 13258 // Now check the outermost expression 13259 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 13260 const auto *RBT = cast<CompoundAssignOperator>(E) 13261 ->getComputationResultType() 13262 ->getAs<BuiltinType>(); 13263 13264 // The below checks assume source is floating point. 13265 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 13266 13267 // If source is floating point but target is an integer. 13268 if (ResultBT->isInteger()) 13269 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 13270 E->getExprLoc(), diag::warn_impcast_float_integer); 13271 13272 if (!ResultBT->isFloatingPoint()) 13273 return; 13274 13275 // If both source and target are floating points, warn about losing precision. 13276 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13277 QualType(ResultBT, 0), QualType(RBT, 0)); 13278 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 13279 // warn about dropping FP rank. 
13280 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 13281 diag::warn_impcast_float_result_precision); 13282 } 13283 13284 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 13285 IntRange Range) { 13286 if (!Range.Width) return "0"; 13287 13288 llvm::APSInt ValueInRange = Value; 13289 ValueInRange.setIsSigned(!Range.NonNegative); 13290 ValueInRange = ValueInRange.trunc(Range.Width); 13291 return toString(ValueInRange, 10); 13292 } 13293 13294 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 13295 if (!isa<ImplicitCastExpr>(Ex)) 13296 return false; 13297 13298 Expr *InnerE = Ex->IgnoreParenImpCasts(); 13299 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 13300 const Type *Source = 13301 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 13302 if (Target->isDependentType()) 13303 return false; 13304 13305 const BuiltinType *FloatCandidateBT = 13306 dyn_cast<BuiltinType>(ToBool ? Source : Target); 13307 const Type *BoolCandidateType = ToBool ? Target : Source; 13308 13309 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 13310 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 13311 } 13312 13313 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 13314 SourceLocation CC) { 13315 unsigned NumArgs = TheCall->getNumArgs(); 13316 for (unsigned i = 0; i < NumArgs; ++i) { 13317 Expr *CurrA = TheCall->getArg(i); 13318 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 13319 continue; 13320 13321 bool IsSwapped = ((i > 0) && 13322 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 13323 IsSwapped |= ((i < (NumArgs - 1)) && 13324 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 13325 if (IsSwapped) { 13326 // Warn on this floating-point to bool conversion. 13327 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 13328 CurrA->getType(), CC, 13329 diag::warn_impcast_floating_point_to_bool); 13330 } 13331 } 13332 } 13333 13334 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 13335 SourceLocation CC) { 13336 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 13337 E->getExprLoc())) 13338 return; 13339 13340 // Don't warn on functions which have return type nullptr_t. 13341 if (isa<CallExpr>(E)) 13342 return; 13343 13344 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 13345 const Expr::NullPointerConstantKind NullKind = 13346 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 13347 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 13348 return; 13349 13350 // Return if target type is a safe conversion. 13351 if (T->isAnyPointerType() || T->isBlockPointerType() || 13352 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 13353 return; 13354 13355 SourceLocation Loc = E->getSourceRange().getBegin(); 13356 13357 // Venture through the macro stacks to get to the source of macro arguments. 13358 // The new location is a better location than the complete location that was 13359 // passed in. 13360 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 13361 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 13362 13363 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
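  // For example, for 'int x = NULL;' the warning should point at the 'NULL'
  // written by the user rather than at '__null' inside the macro body.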
13364 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 13365 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 13366 Loc, S.SourceMgr, S.getLangOpts()); 13367 if (MacroName == "NULL") 13368 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 13369 } 13370 13371 // Only warn if the null and context location are in the same macro expansion. 13372 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 13373 return; 13374 13375 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 13376 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 13377 << FixItHint::CreateReplacement(Loc, 13378 S.getFixItZeroLiteralForType(T, Loc)); 13379 } 13380 13381 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13382 ObjCArrayLiteral *ArrayLiteral); 13383 13384 static void 13385 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 13386 ObjCDictionaryLiteral *DictionaryLiteral); 13387 13388 /// Check a single element within a collection literal against the 13389 /// target element type. 13390 static void checkObjCCollectionLiteralElement(Sema &S, 13391 QualType TargetElementType, 13392 Expr *Element, 13393 unsigned ElementKind) { 13394 // Skip a bitcast to 'id' or qualified 'id'. 13395 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 13396 if (ICE->getCastKind() == CK_BitCast && 13397 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 13398 Element = ICE->getSubExpr(); 13399 } 13400 13401 QualType ElementType = Element->getType(); 13402 ExprResult ElementResult(Element); 13403 if (ElementType->getAs<ObjCObjectPointerType>() && 13404 S.CheckSingleAssignmentConstraints(TargetElementType, 13405 ElementResult, 13406 false, false) 13407 != Sema::Compatible) { 13408 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 13409 << ElementType << ElementKind << TargetElementType 13410 << Element->getSourceRange(); 13411 } 13412 13413 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 13414 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 13415 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 13416 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 13417 } 13418 13419 /// Check an Objective-C array literal being converted to the given 13420 /// target type. 13421 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13422 ObjCArrayLiteral *ArrayLiteral) { 13423 if (!S.NSArrayDecl) 13424 return; 13425 13426 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13427 if (!TargetObjCPtr) 13428 return; 13429 13430 if (TargetObjCPtr->isUnspecialized() || 13431 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13432 != S.NSArrayDecl->getCanonicalDecl()) 13433 return; 13434 13435 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13436 if (TypeArgs.size() != 1) 13437 return; 13438 13439 QualType TargetElementType = TypeArgs[0]; 13440 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 13441 checkObjCCollectionLiteralElement(S, TargetElementType, 13442 ArrayLiteral->getElement(I), 13443 0); 13444 } 13445 } 13446 13447 /// Check an Objective-C dictionary literal being converted to the given 13448 /// target type. 
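/// For example, with a target of 'NSDictionary<NSString *, NSNumber *> *',
/// each key in the literal is checked against NSString and each value against
/// NSNumber, so '@{ @1 : @"one" }' has both its key and its value flagged.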
13449 static void
13450 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
13451 ObjCDictionaryLiteral *DictionaryLiteral) {
13452 if (!S.NSDictionaryDecl)
13453 return;
13454
13455 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
13456 if (!TargetObjCPtr)
13457 return;
13458
13459 if (TargetObjCPtr->isUnspecialized() ||
13460 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
13461 != S.NSDictionaryDecl->getCanonicalDecl())
13462 return;
13463
13464 auto TypeArgs = TargetObjCPtr->getTypeArgs();
13465 if (TypeArgs.size() != 2)
13466 return;
13467
13468 QualType TargetKeyType = TypeArgs[0];
13469 QualType TargetObjectType = TypeArgs[1];
13470 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
13471 auto Element = DictionaryLiteral->getKeyValueElement(I);
13472 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
13473 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
13474 }
13475 }
13476
13477 // Helper function to filter out cases for the same-width constant conversion
13478 // warning. Don't warn on char array initialization or for non-decimal values.
13479 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
13480 SourceLocation CC) {
13481 // If initializing from a constant, and the constant starts with '0',
13482 // then it is binary, octal, or hexadecimal. Allow these constants
13483 // to fill all the bits, even if there is a sign change.
13484 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
13485 const char FirstLiteralCharacter =
13486 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
13487 if (FirstLiteralCharacter == '0')
13488 return false;
13489 }
13490
13491 // If the CC location points to a '{', and the type is char, then assume
13492 // it is an array initialization.
13493 if (CC.isValid() && T->isCharType()) {
13494 const char FirstContextCharacter =
13495 S.getSourceManager().getCharacterData(CC)[0];
13496 if (FirstContextCharacter == '{')
13497 return false;
13498 }
13499
13500 return true;
13501 }
13502
13503 static const IntegerLiteral *getIntegerLiteral(Expr *E) {
13504 const auto *IL = dyn_cast<IntegerLiteral>(E);
13505 if (!IL) {
13506 if (auto *UO = dyn_cast<UnaryOperator>(E)) {
13507 if (UO->getOpcode() == UO_Minus)
13508 return dyn_cast<IntegerLiteral>(UO->getSubExpr());
13509 }
13510 }
13511
13512 return IL;
13513 }
13514
13515 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
13516 E = E->IgnoreParenImpCasts();
13517 SourceLocation ExprLoc = E->getExprLoc();
13518
13519 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
13520 BinaryOperator::Opcode Opc = BO->getOpcode();
13521 Expr::EvalResult Result;
13522 // Do not diagnose unsigned shifts.
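    // Hypothetical examples of what the shift handling below reports when the
    // result is used as a boolean (illustrations only, not from this file):
    //     if (0 << N) ...   // result is always false
    //     if (1 << 4) ...   // constant result, always true
    //     if (X << 2) ...   // signed shift used in a bool context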
13523 if (Opc == BO_Shl) {
13524 const auto *LHS = getIntegerLiteral(BO->getLHS());
13525 const auto *RHS = getIntegerLiteral(BO->getRHS());
13526 if (LHS && LHS->getValue() == 0)
13527 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
13528 else if (!E->isValueDependent() && LHS && RHS &&
13529 RHS->getValue().isNonNegative() &&
13530 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
13531 S.Diag(ExprLoc, diag::warn_left_shift_always)
13532 << (Result.Val.getInt() != 0);
13533 else if (E->getType()->isSignedIntegerType())
13534 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
13535 }
13536 }
13537
13538 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
13539 const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
13540 const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
13541 if (!LHS || !RHS)
13542 return;
13543 if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
13544 (RHS->getValue() == 0 || RHS->getValue() == 1))
13545 // Do not diagnose common idioms.
13546 return;
13547 if (LHS->getValue() != 0 && RHS->getValue() != 0)
13548 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
13549 }
13550 }
13551
13552 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
13553 SourceLocation CC,
13554 bool *ICContext = nullptr,
13555 bool IsListInit = false) {
13556 if (E->isTypeDependent() || E->isValueDependent()) return;
13557
13558 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
13559 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
13560 if (Source == Target) return;
13561 if (Target->isDependentType()) return;
13562
13563 // If the conversion context location is invalid, don't complain. We also
13564 // don't want to emit a warning if the issue occurs from the expansion of
13565 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
13566 // delay this check as long as possible. Once we detect we are in that
13567 // scenario, we just return.
13568 if (CC.isInvalid())
13569 return;
13570
13571 if (Source->isAtomicType())
13572 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
13573
13574 // Diagnose implicit casts to bool.
13575 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
13576 if (isa<StringLiteral>(E))
13577 // Warn on string literal to bool. Checks for string literals in
13578 // logical-and expressions, for instance, assert(0 && "error here"),
13579 // are prevented by a check in AnalyzeImplicitConversions().
13580 return DiagnoseImpCast(S, E, T, CC,
13581 diag::warn_impcast_string_literal_to_bool);
13582 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
13583 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
13584 // This covers the literal expressions that evaluate to Objective-C
13585 // objects.
13586 return DiagnoseImpCast(S, E, T, CC,
13587 diag::warn_impcast_objective_c_literal_to_bool);
13588 }
13589 if (Source->isPointerType() || Source->canDecayToPointerType()) {
13590 // Warn on pointer to bool conversion that is always true.
13591 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
13592 SourceRange(CC));
13593 }
13594 }
13595
13596 // If we're converting a constant to an ObjC BOOL on a platform where BOOL
13597 // is a typedef for signed char (macOS), then that constant value has to be 1
13598 // or 0.
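  // For orientation, a hypothetical example on such a platform (not part of
  // the original source):
  //     BOOL Wrong = 2;   // neither YES (1) nor NO (0), so it is diagnosed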
13599 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 13600 Expr::EvalResult Result; 13601 if (E->EvaluateAsInt(Result, S.getASTContext(), 13602 Expr::SE_AllowSideEffects)) { 13603 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 13604 adornObjCBoolConversionDiagWithTernaryFixit( 13605 S, E, 13606 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 13607 << toString(Result.Val.getInt(), 10)); 13608 } 13609 return; 13610 } 13611 } 13612 13613 // Check implicit casts from Objective-C collection literals to specialized 13614 // collection types, e.g., NSArray<NSString *> *. 13615 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 13616 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 13617 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 13618 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 13619 13620 // Strip vector types. 13621 if (isa<VectorType>(Source)) { 13622 if (Target->isVLSTBuiltinType() && 13623 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 13624 QualType(Source, 0)) || 13625 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 13626 QualType(Source, 0)))) 13627 return; 13628 13629 if (!isa<VectorType>(Target)) { 13630 if (S.SourceMgr.isInSystemMacro(CC)) 13631 return; 13632 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 13633 } 13634 13635 // If the vector cast is cast between two vectors of the same size, it is 13636 // a bitcast, not a conversion. 13637 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13638 return; 13639 13640 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 13641 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 13642 } 13643 if (auto VecTy = dyn_cast<VectorType>(Target)) 13644 Target = VecTy->getElementType().getTypePtr(); 13645 13646 // Strip complex types. 13647 if (isa<ComplexType>(Source)) { 13648 if (!isa<ComplexType>(Target)) { 13649 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 13650 return; 13651 13652 return DiagnoseImpCast(S, E, T, CC, 13653 S.getLangOpts().CPlusPlus 13654 ? diag::err_impcast_complex_scalar 13655 : diag::warn_impcast_complex_scalar); 13656 } 13657 13658 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 13659 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 13660 } 13661 13662 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 13663 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 13664 13665 // Strip SVE vector types 13666 if (SourceBT && SourceBT->isVLSTBuiltinType()) { 13667 // Need the original target type for vector type checks 13668 const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr(); 13669 // Handle conversion from scalable to fixed when msve-vector-bits is 13670 // specified 13671 if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0), 13672 QualType(Source, 0)) || 13673 S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0), 13674 QualType(Source, 0))) 13675 return; 13676 13677 // If the vector cast is cast between two vectors of the same size, it is 13678 // a bitcast, not a conversion. 13679 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13680 return; 13681 13682 Source = SourceBT->getSveEltType(S.Context).getTypePtr(); 13683 } 13684 13685 if (TargetBT && TargetBT->isVLSTBuiltinType()) 13686 Target = TargetBT->getSveEltType(S.Context).getTypePtr(); 13687 13688 // If the source is floating point... 
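  // Hypothetical examples of the cases handled in this branch, assuming
  // `double D` and `float F` are in scope (these lines are illustrations
  // only and do not appear in the original file):
  //     float F2 = D;       // double -> float: possible precision loss
  //     double D2 = F + F;  // float -> double promotion
  //     int I = 3.14;       // floating point -> integer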
13689 if (SourceBT && SourceBT->isFloatingPoint()) {
13690 // ...and the target is floating point...
13691 if (TargetBT && TargetBT->isFloatingPoint()) {
13692 // ...then warn if we're dropping FP rank.
13693
13694 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
13695 QualType(SourceBT, 0), QualType(TargetBT, 0));
13696 if (Order > 0) {
13697 // Don't warn about float constants that are precisely
13698 // representable in the target type.
13699 Expr::EvalResult result;
13700 if (E->EvaluateAsRValue(result, S.Context)) {
13701 // Value might be a float, a float vector, or a float complex.
13702 if (IsSameFloatAfterCast(result.Val,
13703 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
13704 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
13705 return;
13706 }
13707
13708 if (S.SourceMgr.isInSystemMacro(CC))
13709 return;
13710
13711 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
13712 }
13713 // ... or possibly if we're increasing rank, too
13714 else if (Order < 0) {
13715 if (S.SourceMgr.isInSystemMacro(CC))
13716 return;
13717
13718 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
13719 }
13720 return;
13721 }
13722
13723 // If the target is integral, always warn.
13724 if (TargetBT && TargetBT->isInteger()) {
13725 if (S.SourceMgr.isInSystemMacro(CC))
13726 return;
13727
13728 DiagnoseFloatingImpCast(S, E, T, CC);
13729 }
13730
13731 // Detect the case where a call result is converted from floating-point
13732 // to bool, and the final argument to the call is converted from bool, to
13733 // discover this typo:
13734 //
13735 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
13736 //
13737 // FIXME: This is an incredibly special case; is there some more general
13738 // way to detect this class of misplaced-parentheses bug?
13739 if (Target->isBooleanType() && isa<CallExpr>(E)) {
13740 // Check last argument of function call to see if it is an
13741 // implicit cast from a type matching the type the result
13742 // is being cast to.
13743 CallExpr *CEx = cast<CallExpr>(E);
13744 if (unsigned NumArgs = CEx->getNumArgs()) {
13745 Expr *LastA = CEx->getArg(NumArgs - 1);
13746 Expr *InnerE = LastA->IgnoreParenImpCasts();
13747 if (isa<ImplicitCastExpr>(LastA) &&
13748 InnerE->getType()->isBooleanType()) {
13749 // Warn on this floating-point to bool conversion.
13750 DiagnoseImpCast(S, E, T, CC,
13751 diag::warn_impcast_floating_point_to_bool);
13752 }
13753 }
13754 }
13755 return;
13756 }
13757
13758 // Valid casts involving fixed point types should be accounted for here.
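  // Sketch of inputs this block is meant to catch, assuming the Embedded-C
  // fixed-point extension is enabled (illustrations only, not from this file):
  //     _Fract F = 2.0k;   // _Accum constant 2.0 cannot fit in _Fract
  //     _Fract G = 2;      // integer 2 likewise overflows _Fract
  // Both would be reported via warn_impcast_fixed_point_range.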
13759 if (Source->isFixedPointType()) { 13760 if (Target->isUnsaturatedFixedPointType()) { 13761 Expr::EvalResult Result; 13762 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 13763 S.isConstantEvaluated())) { 13764 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 13765 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 13766 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 13767 if (Value > MaxVal || Value < MinVal) { 13768 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13769 S.PDiag(diag::warn_impcast_fixed_point_range) 13770 << Value.toString() << T 13771 << E->getSourceRange() 13772 << clang::SourceRange(CC)); 13773 return; 13774 } 13775 } 13776 } else if (Target->isIntegerType()) { 13777 Expr::EvalResult Result; 13778 if (!S.isConstantEvaluated() && 13779 E->EvaluateAsFixedPoint(Result, S.Context, 13780 Expr::SE_AllowSideEffects)) { 13781 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 13782 13783 bool Overflowed; 13784 llvm::APSInt IntResult = FXResult.convertToInt( 13785 S.Context.getIntWidth(T), 13786 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 13787 13788 if (Overflowed) { 13789 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13790 S.PDiag(diag::warn_impcast_fixed_point_range) 13791 << FXResult.toString() << T 13792 << E->getSourceRange() 13793 << clang::SourceRange(CC)); 13794 return; 13795 } 13796 } 13797 } 13798 } else if (Target->isUnsaturatedFixedPointType()) { 13799 if (Source->isIntegerType()) { 13800 Expr::EvalResult Result; 13801 if (!S.isConstantEvaluated() && 13802 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 13803 llvm::APSInt Value = Result.Val.getInt(); 13804 13805 bool Overflowed; 13806 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 13807 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 13808 13809 if (Overflowed) { 13810 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13811 S.PDiag(diag::warn_impcast_fixed_point_range) 13812 << toString(Value, /*Radix=*/10) << T 13813 << E->getSourceRange() 13814 << clang::SourceRange(CC)); 13815 return; 13816 } 13817 } 13818 } 13819 } 13820 13821 // If we are casting an integer type to a floating point type without 13822 // initialization-list syntax, we might lose accuracy if the floating 13823 // point type has a narrower significand than the integer type. 13824 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 13825 TargetBT->isFloatingType() && !IsListInit) { 13826 // Determine the number of precision bits in the source integer type. 13827 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 13828 /*Approximate*/ true); 13829 unsigned int SourcePrecision = SourceRange.Width; 13830 13831 // Determine the number of precision bits in the 13832 // target floating point type. 13833 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 13834 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13835 13836 if (SourcePrecision > 0 && TargetPrecision > 0 && 13837 SourcePrecision > TargetPrecision) { 13838 13839 if (Optional<llvm::APSInt> SourceInt = 13840 E->getIntegerConstantExpr(S.Context)) { 13841 // If the source integer is a constant, convert it to the target 13842 // floating point type. Issue a warning if the value changes 13843 // during the whole conversion. 
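          // Hypothetical example (not from the original source): on a typical
          // target, `float F = 16777217;` is flagged here because 16777217
          // (2^24 + 1) rounds to 16777216.0 when converted to IEEE single
          // precision.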
13844 llvm::APFloat TargetFloatValue( 13845 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13846 llvm::APFloat::opStatus ConversionStatus = 13847 TargetFloatValue.convertFromAPInt( 13848 *SourceInt, SourceBT->isSignedInteger(), 13849 llvm::APFloat::rmNearestTiesToEven); 13850 13851 if (ConversionStatus != llvm::APFloat::opOK) { 13852 SmallString<32> PrettySourceValue; 13853 SourceInt->toString(PrettySourceValue, 10); 13854 SmallString<32> PrettyTargetValue; 13855 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 13856 13857 S.DiagRuntimeBehavior( 13858 E->getExprLoc(), E, 13859 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 13860 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13861 << E->getSourceRange() << clang::SourceRange(CC)); 13862 } 13863 } else { 13864 // Otherwise, the implicit conversion may lose precision. 13865 DiagnoseImpCast(S, E, T, CC, 13866 diag::warn_impcast_integer_float_precision); 13867 } 13868 } 13869 } 13870 13871 DiagnoseNullConversion(S, E, T, CC); 13872 13873 S.DiscardMisalignedMemberAddress(Target, E); 13874 13875 if (Target->isBooleanType()) 13876 DiagnoseIntInBoolContext(S, E); 13877 13878 if (!Source->isIntegerType() || !Target->isIntegerType()) 13879 return; 13880 13881 // TODO: remove this early return once the false positives for constant->bool 13882 // in templates, macros, etc, are reduced or removed. 13883 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 13884 return; 13885 13886 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 13887 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 13888 return adornObjCBoolConversionDiagWithTernaryFixit( 13889 S, E, 13890 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 13891 << E->getType()); 13892 } 13893 13894 IntRange SourceTypeRange = 13895 IntRange::forTargetOfCanonicalType(S.Context, Source); 13896 IntRange LikelySourceRange = 13897 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 13898 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 13899 13900 if (LikelySourceRange.Width > TargetRange.Width) { 13901 // If the source is a constant, use a default-on diagnostic. 13902 // TODO: this should happen for bitfield stores, too. 13903 Expr::EvalResult Result; 13904 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 13905 S.isConstantEvaluated())) { 13906 llvm::APSInt Value(32); 13907 Value = Result.Val.getInt(); 13908 13909 if (S.SourceMgr.isInSystemMacro(CC)) 13910 return; 13911 13912 std::string PrettySourceValue = toString(Value, 10); 13913 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13914 13915 S.DiagRuntimeBehavior( 13916 E->getExprLoc(), E, 13917 S.PDiag(diag::warn_impcast_integer_precision_constant) 13918 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13919 << E->getSourceRange() << SourceRange(CC)); 13920 return; 13921 } 13922 13923 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
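  // For orientation, a hypothetical LP64 example of the 64-to-32 case below
  // (not part of the original file):
  //     long N = ...;  int I = N;   // truncates on LP64 targets
  // which is reported as warn_impcast_integer_64_32 under -Wshorten-64-to-32.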
13924 if (S.SourceMgr.isInSystemMacro(CC))
13925 return;
13926
13927 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
13928 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
13929 /* pruneControlFlow */ true);
13930 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
13931 }
13932
13933 if (TargetRange.Width > SourceTypeRange.Width) {
13934 if (auto *UO = dyn_cast<UnaryOperator>(E))
13935 if (UO->getOpcode() == UO_Minus)
13936 if (Source->isUnsignedIntegerType()) {
13937 if (Target->isUnsignedIntegerType())
13938 return DiagnoseImpCast(S, E, T, CC,
13939 diag::warn_impcast_high_order_zero_bits);
13940 if (Target->isSignedIntegerType())
13941 return DiagnoseImpCast(S, E, T, CC,
13942 diag::warn_impcast_nonnegative_result);
13943 }
13944 }
13945
13946 if (TargetRange.Width == LikelySourceRange.Width &&
13947 !TargetRange.NonNegative && LikelySourceRange.NonNegative &&
13948 Source->isSignedIntegerType()) {
13949 // When doing a signed-to-signed conversion, warn if the positive source
13950 // value needs exactly the full width of the target type, since the value
13951 // stored in the target will then be negative.
13952
13953 Expr::EvalResult Result;
13954 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
13955 !S.SourceMgr.isInSystemMacro(CC)) {
13956 llvm::APSInt Value = Result.Val.getInt();
13957 if (isSameWidthConstantConversion(S, E, T, CC)) {
13958 std::string PrettySourceValue = toString(Value, 10);
13959 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
13960
13961 S.DiagRuntimeBehavior(
13962 E->getExprLoc(), E,
13963 S.PDiag(diag::warn_impcast_integer_precision_constant)
13964 << PrettySourceValue << PrettyTargetValue << E->getType() << T
13965 << E->getSourceRange() << SourceRange(CC));
13966 return;
13967 }
13968 }
13969
13970 // Fall through for non-constants to give a sign conversion warning.
13971 }
13972
13973 if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) &&
13974 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
13975 (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
13976 LikelySourceRange.Width == TargetRange.Width))) {
13977 if (S.SourceMgr.isInSystemMacro(CC))
13978 return;
13979
13980 unsigned DiagID = diag::warn_impcast_integer_sign;
13981
13982 // Traditionally, gcc has warned about this under -Wsign-compare.
13983 // We also want to warn about it in -Wconversion.
13984 // So if -Wconversion is off, use a completely identical diagnostic
13985 // in the sign-compare group.
13986 // The conditional-checking code will
13987 if (ICContext) {
13988 DiagID = diag::warn_impcast_integer_sign_conditional;
13989 *ICContext = true;
13990 }
13991
13992 return DiagnoseImpCast(S, E, T, CC, DiagID);
13993 }
13994
13995 // Diagnose conversions between different enumeration types.
13996 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
13997 // type, to give us better diagnostics.
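  // Hypothetical illustration (not from the original source):
  //     enum Color { Red };  enum Fruit { Apple };
  //     enum Fruit F = Red;   // conversion between different enum types
  // In C, `Red` is given type `enum Color` here so the diagnostic can name
  // both enumerations.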
13998 QualType SourceType = E->getType(); 13999 if (!S.getLangOpts().CPlusPlus) { 14000 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 14001 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 14002 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 14003 SourceType = S.Context.getTypeDeclType(Enum); 14004 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 14005 } 14006 } 14007 14008 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 14009 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 14010 if (SourceEnum->getDecl()->hasNameForLinkage() && 14011 TargetEnum->getDecl()->hasNameForLinkage() && 14012 SourceEnum != TargetEnum) { 14013 if (S.SourceMgr.isInSystemMacro(CC)) 14014 return; 14015 14016 return DiagnoseImpCast(S, E, SourceType, T, CC, 14017 diag::warn_impcast_different_enum_types); 14018 } 14019 } 14020 14021 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 14022 SourceLocation CC, QualType T); 14023 14024 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 14025 SourceLocation CC, bool &ICContext) { 14026 E = E->IgnoreParenImpCasts(); 14027 14028 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 14029 return CheckConditionalOperator(S, CO, CC, T); 14030 14031 AnalyzeImplicitConversions(S, E, CC); 14032 if (E->getType() != T) 14033 return CheckImplicitConversion(S, E, T, CC, &ICContext); 14034 } 14035 14036 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 14037 SourceLocation CC, QualType T) { 14038 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 14039 14040 Expr *TrueExpr = E->getTrueExpr(); 14041 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 14042 TrueExpr = BCO->getCommon(); 14043 14044 bool Suspicious = false; 14045 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 14046 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 14047 14048 if (T->isBooleanType()) 14049 DiagnoseIntInBoolContext(S, E); 14050 14051 // If -Wconversion would have warned about either of the candidates 14052 // for a signedness conversion to the context type... 14053 if (!Suspicious) return; 14054 14055 // ...but it's currently ignored... 14056 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 14057 return; 14058 14059 // ...then check whether it would have warned about either of the 14060 // candidates for a signedness conversion to the condition type. 14061 if (E->getType() == T) return; 14062 14063 Suspicious = false; 14064 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 14065 E->getType(), CC, &Suspicious); 14066 if (!Suspicious) 14067 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 14068 E->getType(), CC, &Suspicious); 14069 } 14070 14071 /// Check conversion of given expression to boolean. 14072 /// Input argument E is a logical expression. 14073 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 14074 if (S.getLangOpts().Bool) 14075 return; 14076 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 14077 return; 14078 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 14079 } 14080 14081 namespace { 14082 struct AnalyzeImplicitConversionsWorkItem { 14083 Expr *E; 14084 SourceLocation CC; 14085 bool IsListInit; 14086 }; 14087 } 14088 14089 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 14090 /// that should be visited are added to WorkList. 
14091 static void AnalyzeImplicitConversions(
14092 Sema &S, AnalyzeImplicitConversionsWorkItem Item,
14093 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
14094 Expr *OrigE = Item.E;
14095 SourceLocation CC = Item.CC;
14096
14097 QualType T = OrigE->getType();
14098 Expr *E = OrigE->IgnoreParenImpCasts();
14099
14100 // Propagate whether we are in a C++ list initialization expression.
14101 // If so, we do not issue warnings for implicit int-float conversion
14102 // precision loss, because C++11 narrowing already handles it.
14103 bool IsListInit = Item.IsListInit ||
14104 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);
14105
14106 if (E->isTypeDependent() || E->isValueDependent())
14107 return;
14108
14109 Expr *SourceExpr = E;
14110 // Examine, but don't traverse into the source expression of an
14111 // OpaqueValueExpr, since it may have multiple parents and we don't want to
14112 // emit duplicate diagnostics. It's fine to examine the form or attempt to
14113 // evaluate it in the context of checking the specific conversion to T though.
14114 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
14115 if (auto *Src = OVE->getSourceExpr())
14116 SourceExpr = Src;
14117
14118 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
14119 if (UO->getOpcode() == UO_Not &&
14120 UO->getSubExpr()->isKnownToHaveBooleanValue())
14121 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
14122 << OrigE->getSourceRange() << T->isBooleanType()
14123 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");
14124
14125 if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr))
14126 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
14127 BO->getLHS()->isKnownToHaveBooleanValue() &&
14128 BO->getRHS()->isKnownToHaveBooleanValue() &&
14129 BO->getLHS()->HasSideEffects(S.Context) &&
14130 BO->getRHS()->HasSideEffects(S.Context)) {
14131 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
14132 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
14133 << FixItHint::CreateReplacement(
14134 BO->getOperatorLoc(),
14135 (BO->getOpcode() == BO_And ? "&&" : "||"));
14136 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
14137 }
14138
14139 // For conditional operators, we analyze the arguments as if they
14140 // were being fed directly into the output.
14141 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
14142 CheckConditionalOperator(S, CO, CC, T);
14143 return;
14144 }
14145
14146 // Check implicit argument conversions for function calls.
14147 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
14148 CheckImplicitArgumentConversions(S, Call, CC);
14149
14150 // Go ahead and check any implicit conversions we might have skipped.
14151 // The non-canonical typecheck is just an optimization;
14152 // CheckImplicitConversion will filter out dead implicit conversions.
14153 if (SourceExpr->getType() != T)
14154 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);
14155
14156 // Now continue drilling into this expression.
14157
14158 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
14159 // The bound subexpressions in a PseudoObjectExpr are not reachable
14160 // as transitive children.
14161 // FIXME: Use a more uniform representation for this.
14162 for (auto *SE : POE->semantics())
14163 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
14164 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
14165 }
14166
14167 // Skip past explicit casts.
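  // Hypothetical example for the explicit-cast handling below (not from the
  // original file):
  //     _Atomic(int) A;  int I = (int)A;
  // The cast still performs an implicitly sequentially-consistent atomic
  // load, which is what warn_atomic_implicit_seq_cst points out.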
14168 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 14169 E = CE->getSubExpr()->IgnoreParenImpCasts(); 14170 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 14171 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 14172 WorkList.push_back({E, CC, IsListInit}); 14173 return; 14174 } 14175 14176 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 14177 // Do a somewhat different check with comparison operators. 14178 if (BO->isComparisonOp()) 14179 return AnalyzeComparison(S, BO); 14180 14181 // And with simple assignments. 14182 if (BO->getOpcode() == BO_Assign) 14183 return AnalyzeAssignment(S, BO); 14184 // And with compound assignments. 14185 if (BO->isAssignmentOp()) 14186 return AnalyzeCompoundAssignment(S, BO); 14187 } 14188 14189 // These break the otherwise-useful invariant below. Fortunately, 14190 // we don't really need to recurse into them, because any internal 14191 // expressions should have been analyzed already when they were 14192 // built into statements. 14193 if (isa<StmtExpr>(E)) return; 14194 14195 // Don't descend into unevaluated contexts. 14196 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 14197 14198 // Now just recurse over the expression's children. 14199 CC = E->getExprLoc(); 14200 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 14201 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 14202 for (Stmt *SubStmt : E->children()) { 14203 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 14204 if (!ChildExpr) 14205 continue; 14206 14207 if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E)) 14208 if (ChildExpr == CSE->getOperand()) 14209 // Do not recurse over a CoroutineSuspendExpr's operand. 14210 // The operand is also a subexpression of getCommonExpr(), and 14211 // recursing into it directly would produce duplicate diagnostics. 14212 continue; 14213 14214 if (IsLogicalAndOperator && 14215 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 14216 // Ignore checking string literals that are in logical and operators. 14217 // This is a common pattern for asserts. 14218 continue; 14219 WorkList.push_back({ChildExpr, CC, IsListInit}); 14220 } 14221 14222 if (BO && BO->isLogicalOp()) { 14223 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 14224 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 14225 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 14226 14227 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 14228 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 14229 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 14230 } 14231 14232 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 14233 if (U->getOpcode() == UO_LNot) { 14234 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 14235 } else if (U->getOpcode() != UO_AddrOf) { 14236 if (U->getSubExpr()->getType()->isAtomicType()) 14237 S.Diag(U->getSubExpr()->getBeginLoc(), 14238 diag::warn_atomic_implicit_seq_cst); 14239 } 14240 } 14241 } 14242 14243 /// AnalyzeImplicitConversions - Find and report any interesting 14244 /// implicit conversions in the given expression. There are a couple 14245 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 
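/// For orientation, hypothetical examples of conversions this analysis flags
/// (neither line appears in the original file):
///     unsigned U = -1;    // flagged under -Wsign-conversion
///     short S = I + I;    // flagged under -Wconversion when I is an int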
14246 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
14247 bool IsListInit/*= false*/) {
14248 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
14249 WorkList.push_back({OrigE, CC, IsListInit});
14250 while (!WorkList.empty())
14251 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
14252 }
14253
14254 /// Diagnose integer type and any valid implicit conversion to it.
14255 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
14256 // Taking into account implicit conversions,
14257 // allow any integer.
14258 if (!E->getType()->isIntegerType()) {
14259 S.Diag(E->getBeginLoc(),
14260 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
14261 return true;
14262 }
14263 // Potentially emit standard warnings for implicit conversions if enabled
14264 // using -Wconversion.
14265 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
14266 return false;
14267 }
14268
14269 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
14270 // Returns true when emitting a warning about taking the address of a reference.
14271 static bool CheckForReference(Sema &SemaRef, const Expr *E,
14272 const PartialDiagnostic &PD) {
14273 E = E->IgnoreParenImpCasts();
14274
14275 const FunctionDecl *FD = nullptr;
14276
14277 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
14278 if (!DRE->getDecl()->getType()->isReferenceType())
14279 return false;
14280 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
14281 if (!M->getMemberDecl()->getType()->isReferenceType())
14282 return false;
14283 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
14284 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
14285 return false;
14286 FD = Call->getDirectCallee();
14287 } else {
14288 return false;
14289 }
14290
14291 SemaRef.Diag(E->getExprLoc(), PD);
14292
14293 // If possible, point to location of function.
14294 if (FD) {
14295 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
14296 }
14297
14298 return true;
14299 }
14300
14301 // Returns true if the SourceLocation is expanded from any macro body.
14302 // Returns false if the SourceLocation is invalid, is not in a macro
14303 // expansion, or is expanded from a top-level macro argument.
14304 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
14305 if (Loc.isInvalid())
14306 return false;
14307
14308 while (Loc.isMacroID()) {
14309 if (SM.isMacroBodyExpansion(Loc))
14310 return true;
14311 Loc = SM.getImmediateMacroCallerLoc(Loc);
14312 }
14313
14314 return false;
14315 }
14316
14317 /// Diagnose pointers that are always non-null.
14318 /// \param E the expression containing the pointer
14319 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
14320 /// compared to a null pointer
14321 /// \param IsEqual True when the comparison is equal to a null pointer
14322 /// \param Range Extra SourceRange to highlight in the diagnostic
14323 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
14324 Expr::NullPointerConstantKind NullKind,
14325 bool IsEqual, SourceRange Range) {
14326 if (!E)
14327 return;
14328
14329 // Don't warn inside macros.
14330 if (E->getExprLoc().isMacroID()) { 14331 const SourceManager &SM = getSourceManager(); 14332 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 14333 IsInAnyMacroBody(SM, Range.getBegin())) 14334 return; 14335 } 14336 E = E->IgnoreImpCasts(); 14337 14338 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 14339 14340 if (isa<CXXThisExpr>(E)) { 14341 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 14342 : diag::warn_this_bool_conversion; 14343 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 14344 return; 14345 } 14346 14347 bool IsAddressOf = false; 14348 14349 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 14350 if (UO->getOpcode() != UO_AddrOf) 14351 return; 14352 IsAddressOf = true; 14353 E = UO->getSubExpr(); 14354 } 14355 14356 if (IsAddressOf) { 14357 unsigned DiagID = IsCompare 14358 ? diag::warn_address_of_reference_null_compare 14359 : diag::warn_address_of_reference_bool_conversion; 14360 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 14361 << IsEqual; 14362 if (CheckForReference(*this, E, PD)) { 14363 return; 14364 } 14365 } 14366 14367 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 14368 bool IsParam = isa<NonNullAttr>(NonnullAttr); 14369 std::string Str; 14370 llvm::raw_string_ostream S(Str); 14371 E->printPretty(S, nullptr, getPrintingPolicy()); 14372 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 14373 : diag::warn_cast_nonnull_to_bool; 14374 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 14375 << E->getSourceRange() << Range << IsEqual; 14376 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 14377 }; 14378 14379 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 14380 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 14381 if (auto *Callee = Call->getDirectCallee()) { 14382 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 14383 ComplainAboutNonnullParamOrCall(A); 14384 return; 14385 } 14386 } 14387 } 14388 14389 // Expect to find a single Decl. Skip anything more complicated. 14390 ValueDecl *D = nullptr; 14391 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 14392 D = R->getDecl(); 14393 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 14394 D = M->getMemberDecl(); 14395 } 14396 14397 // Weak Decls can be null. 14398 if (!D || D->isWeak()) 14399 return; 14400 14401 // Check for parameter decl with nonnull attribute 14402 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 14403 if (getCurFunction() && 14404 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 14405 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 14406 ComplainAboutNonnullParamOrCall(A); 14407 return; 14408 } 14409 14410 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 14411 // Skip function template not specialized yet. 
14412 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
14413 return;
14414 auto ParamIter = llvm::find(FD->parameters(), PV);
14415 assert(ParamIter != FD->param_end());
14416 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
14417
14418 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
14419 if (!NonNull->args_size()) {
14420 ComplainAboutNonnullParamOrCall(NonNull);
14421 return;
14422 }
14423
14424 for (const ParamIdx &ArgNo : NonNull->args()) {
14425 if (ArgNo.getASTIndex() == ParamNo) {
14426 ComplainAboutNonnullParamOrCall(NonNull);
14427 return;
14428 }
14429 }
14430 }
14431 }
14432 }
14433 }
14434
14435 QualType T = D->getType();
14436 const bool IsArray = T->isArrayType();
14437 const bool IsFunction = T->isFunctionType();
14438
14439 // Address of function is used to silence the function warning.
14440 if (IsAddressOf && IsFunction) {
14441 return;
14442 }
14443
14444 // Found nothing.
14445 if (!IsAddressOf && !IsFunction && !IsArray)
14446 return;
14447
14448 // Pretty print the expression for the diagnostic.
14449 std::string Str;
14450 llvm::raw_string_ostream S(Str);
14451 E->printPretty(S, nullptr, getPrintingPolicy());
14452
14453 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
14454 : diag::warn_impcast_pointer_to_bool;
14455 enum {
14456 AddressOf,
14457 FunctionPointer,
14458 ArrayPointer
14459 } DiagType;
14460 if (IsAddressOf)
14461 DiagType = AddressOf;
14462 else if (IsFunction)
14463 DiagType = FunctionPointer;
14464 else if (IsArray)
14465 DiagType = ArrayPointer;
14466 else
14467 llvm_unreachable("Could not determine diagnostic.");
14468 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
14469 << Range << IsEqual;
14470
14471 if (!IsFunction)
14472 return;
14473
14474 // Suggest '&' to silence the function warning.
14475 Diag(E->getExprLoc(), diag::note_function_warning_silence)
14476 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
14477
14478 // Check to see if '()' fixit should be emitted.
14479 QualType ReturnType;
14480 UnresolvedSet<4> NonTemplateOverloads;
14481 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
14482 if (ReturnType.isNull())
14483 return;
14484
14485 if (IsCompare) {
14486 // There are two cases here. If there is a null constant, only suggest
14487 // for a pointer return type. If the null is 0, then suggest if the return
14488 // type is a pointer or an integer type.
14489 if (!ReturnType->isPointerType()) {
14490 if (NullKind == Expr::NPCK_ZeroExpression ||
14491 NullKind == Expr::NPCK_ZeroLiteral) {
14492 if (!ReturnType->isIntegerType())
14493 return;
14494 } else {
14495 return;
14496 }
14497 }
14498 } else { // !IsCompare
14499 // For function to bool, only suggest if the function pointer has bool
14500 // return type.
14501 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
14502 return;
14503 }
14504 Diag(E->getExprLoc(), diag::note_function_to_function_call)
14505 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
14506 }
14507
14508 /// Diagnoses "dangerous" implicit conversions within the given
14509 /// expression (which is a full expression). Implements -Wconversion
14510 /// and -Wsign-compare.
14511 ///
14512 /// \param CC the "context" location of the implicit conversion, i.e.
14513 /// the location of the syntactic entity requiring the implicit
14514 /// conversion
14515 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
14516 // Don't diagnose in unevaluated contexts.
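  // (e.g. the operand of `sizeof`, `decltype`, or `noexcept`).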
14517 if (isUnevaluatedContext()) 14518 return; 14519 14520 // Don't diagnose for value- or type-dependent expressions. 14521 if (E->isTypeDependent() || E->isValueDependent()) 14522 return; 14523 14524 // Check for array bounds violations in cases where the check isn't triggered 14525 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 14526 // ArraySubscriptExpr is on the RHS of a variable initialization. 14527 CheckArrayAccess(E); 14528 14529 // This is not the right CC for (e.g.) a variable initialization. 14530 AnalyzeImplicitConversions(*this, E, CC); 14531 } 14532 14533 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 14534 /// Input argument E is a logical expression. 14535 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 14536 ::CheckBoolLikeConversion(*this, E, CC); 14537 } 14538 14539 /// Diagnose when expression is an integer constant expression and its evaluation 14540 /// results in integer overflow 14541 void Sema::CheckForIntOverflow (Expr *E) { 14542 // Use a work list to deal with nested struct initializers. 14543 SmallVector<Expr *, 2> Exprs(1, E); 14544 14545 do { 14546 Expr *OriginalE = Exprs.pop_back_val(); 14547 Expr *E = OriginalE->IgnoreParenCasts(); 14548 14549 if (isa<BinaryOperator>(E)) { 14550 E->EvaluateForOverflow(Context); 14551 continue; 14552 } 14553 14554 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 14555 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 14556 else if (isa<ObjCBoxedExpr>(OriginalE)) 14557 E->EvaluateForOverflow(Context); 14558 else if (auto Call = dyn_cast<CallExpr>(E)) 14559 Exprs.append(Call->arg_begin(), Call->arg_end()); 14560 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 14561 Exprs.append(Message->arg_begin(), Message->arg_end()); 14562 } while (!Exprs.empty()); 14563 } 14564 14565 namespace { 14566 14567 /// Visitor for expressions which looks for unsequenced operations on the 14568 /// same object. 14569 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 14570 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 14571 14572 /// A tree of sequenced regions within an expression. Two regions are 14573 /// unsequenced if one is an ancestor or a descendent of the other. When we 14574 /// finish processing an expression with sequencing, such as a comma 14575 /// expression, we fold its tree nodes into its parent, since they are 14576 /// unsequenced with respect to nodes we will visit later. 14577 class SequenceTree { 14578 struct Value { 14579 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 14580 unsigned Parent : 31; 14581 unsigned Merged : 1; 14582 }; 14583 SmallVector<Value, 8> Values; 14584 14585 public: 14586 /// A region within an expression which may be sequenced with respect 14587 /// to some other region. 14588 class Seq { 14589 friend class SequenceTree; 14590 14591 unsigned Index; 14592 14593 explicit Seq(unsigned N) : Index(N) {} 14594 14595 public: 14596 Seq() : Index(0) {} 14597 }; 14598 14599 SequenceTree() { Values.push_back(Value(0)); } 14600 Seq root() const { return Seq(0); } 14601 14602 /// Create a new sequence of operations, which is an unsequenced 14603 /// subset of \p Parent. This sequence of operations is sequenced with 14604 /// respect to other children of \p Parent. 14605 Seq allocate(Seq Parent) { 14606 Values.push_back(Value(Parent.Index)); 14607 return Seq(Values.size() - 1); 14608 } 14609 14610 /// Merge a sequence of operations into its parent. 
14611 void merge(Seq S) { 14612 Values[S.Index].Merged = true; 14613 } 14614 14615 /// Determine whether two operations are unsequenced. This operation 14616 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 14617 /// should have been merged into its parent as appropriate. 14618 bool isUnsequenced(Seq Cur, Seq Old) { 14619 unsigned C = representative(Cur.Index); 14620 unsigned Target = representative(Old.Index); 14621 while (C >= Target) { 14622 if (C == Target) 14623 return true; 14624 C = Values[C].Parent; 14625 } 14626 return false; 14627 } 14628 14629 private: 14630 /// Pick a representative for a sequence. 14631 unsigned representative(unsigned K) { 14632 if (Values[K].Merged) 14633 // Perform path compression as we go. 14634 return Values[K].Parent = representative(Values[K].Parent); 14635 return K; 14636 } 14637 }; 14638 14639 /// An object for which we can track unsequenced uses. 14640 using Object = const NamedDecl *; 14641 14642 /// Different flavors of object usage which we track. We only track the 14643 /// least-sequenced usage of each kind. 14644 enum UsageKind { 14645 /// A read of an object. Multiple unsequenced reads are OK. 14646 UK_Use, 14647 14648 /// A modification of an object which is sequenced before the value 14649 /// computation of the expression, such as ++n in C++. 14650 UK_ModAsValue, 14651 14652 /// A modification of an object which is not sequenced before the value 14653 /// computation of the expression, such as n++. 14654 UK_ModAsSideEffect, 14655 14656 UK_Count = UK_ModAsSideEffect + 1 14657 }; 14658 14659 /// Bundle together a sequencing region and the expression corresponding 14660 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 14661 struct Usage { 14662 const Expr *UsageExpr; 14663 SequenceTree::Seq Seq; 14664 14665 Usage() : UsageExpr(nullptr) {} 14666 }; 14667 14668 struct UsageInfo { 14669 Usage Uses[UK_Count]; 14670 14671 /// Have we issued a diagnostic for this object already? 14672 bool Diagnosed; 14673 14674 UsageInfo() : Diagnosed(false) {} 14675 }; 14676 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 14677 14678 Sema &SemaRef; 14679 14680 /// Sequenced regions within the expression. 14681 SequenceTree Tree; 14682 14683 /// Declaration modifications and references which we have seen. 14684 UsageInfoMap UsageMap; 14685 14686 /// The region we are currently within. 14687 SequenceTree::Seq Region; 14688 14689 /// Filled in with declarations which were modified as a side-effect 14690 /// (that is, post-increment operations). 14691 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 14692 14693 /// Expressions to check later. We defer checking these to reduce 14694 /// stack usage. 14695 SmallVectorImpl<const Expr *> &WorkList; 14696 14697 /// RAII object wrapping the visitation of a sequenced subexpression of an 14698 /// expression. At the end of this process, the side-effects of the evaluation 14699 /// become sequenced with respect to the value computation of the result, so 14700 /// we downgrade any UK_ModAsSideEffect within the evaluation to 14701 /// UK_ModAsValue. 
14702 struct SequencedSubexpression { 14703 SequencedSubexpression(SequenceChecker &Self) 14704 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 14705 Self.ModAsSideEffect = &ModAsSideEffect; 14706 } 14707 14708 ~SequencedSubexpression() { 14709 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 14710 // Add a new usage with usage kind UK_ModAsValue, and then restore 14711 // the previous usage with UK_ModAsSideEffect (thus clearing it if 14712 // the previous one was empty). 14713 UsageInfo &UI = Self.UsageMap[M.first]; 14714 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 14715 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 14716 SideEffectUsage = M.second; 14717 } 14718 Self.ModAsSideEffect = OldModAsSideEffect; 14719 } 14720 14721 SequenceChecker &Self; 14722 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 14723 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 14724 }; 14725 14726 /// RAII object wrapping the visitation of a subexpression which we might 14727 /// choose to evaluate as a constant. If any subexpression is evaluated and 14728 /// found to be non-constant, this allows us to suppress the evaluation of 14729 /// the outer expression. 14730 class EvaluationTracker { 14731 public: 14732 EvaluationTracker(SequenceChecker &Self) 14733 : Self(Self), Prev(Self.EvalTracker) { 14734 Self.EvalTracker = this; 14735 } 14736 14737 ~EvaluationTracker() { 14738 Self.EvalTracker = Prev; 14739 if (Prev) 14740 Prev->EvalOK &= EvalOK; 14741 } 14742 14743 bool evaluate(const Expr *E, bool &Result) { 14744 if (!EvalOK || E->isValueDependent()) 14745 return false; 14746 EvalOK = E->EvaluateAsBooleanCondition( 14747 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 14748 return EvalOK; 14749 } 14750 14751 private: 14752 SequenceChecker &Self; 14753 EvaluationTracker *Prev; 14754 bool EvalOK = true; 14755 } *EvalTracker = nullptr; 14756 14757 /// Find the object which is produced by the specified expression, 14758 /// if any. 14759 Object getObject(const Expr *E, bool Mod) const { 14760 E = E->IgnoreParenCasts(); 14761 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 14762 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 14763 return getObject(UO->getSubExpr(), Mod); 14764 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 14765 if (BO->getOpcode() == BO_Comma) 14766 return getObject(BO->getRHS(), Mod); 14767 if (Mod && BO->isAssignmentOp()) 14768 return getObject(BO->getLHS(), Mod); 14769 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 14770 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 14771 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 14772 return ME->getMemberDecl(); 14773 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 14774 // FIXME: If this is a reference, map through to its value. 14775 return DRE->getDecl(); 14776 return nullptr; 14777 } 14778 14779 /// Note that an object \p O was modified or used by an expression 14780 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for 14781 /// the object \p O as obtained via the \p UsageMap. 14782 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) { 14783 // Get the old usage for the given object and usage kind. 
14784 Usage &U = UI.Uses[UK];
14785 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
14786 // If we have a modification as side effect and are in a sequenced
14787 // subexpression, save the old Usage so that we can restore it later
14788 // in SequencedSubexpression::~SequencedSubexpression.
14789 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
14790 ModAsSideEffect->push_back(std::make_pair(O, U));
14791 // Then record the new usage with the current sequencing region.
14792 U.UsageExpr = UsageExpr;
14793 U.Seq = Region;
14794 }
14795 }
14796
14797 /// Check whether a modification or use of an object \p O in an expression
14798 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
14799 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
14800 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
14801 /// usage and false when we are checking for a mod-use unsequenced usage.
14802 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
14803 UsageKind OtherKind, bool IsModMod) {
14804 if (UI.Diagnosed)
14805 return;
14806
14807 const Usage &U = UI.Uses[OtherKind];
14808 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
14809 return;
14810
14811 const Expr *Mod = U.UsageExpr;
14812 const Expr *ModOrUse = UsageExpr;
14813 if (OtherKind == UK_Use)
14814 std::swap(Mod, ModOrUse);
14815
14816 SemaRef.DiagRuntimeBehavior(
14817 Mod->getExprLoc(), {Mod, ModOrUse},
14818 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
14819 : diag::warn_unsequenced_mod_use)
14820 << O << SourceRange(ModOrUse->getExprLoc()));
14821 UI.Diagnosed = true;
14822 }
14823
14824 // A note on note{Pre, Post}{Use, Mod}:
14825 //
14826 // (It helps to follow the algorithm with an expression such as
14827 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
14828 // operations before C++17 and both are well-defined in C++17).
14829 //
14830 // When visiting a node which uses/modifies an object we first call notePreUse
14831 // or notePreMod before visiting its sub-expression(s). At this point the
14832 // children of the current node have not yet been visited and so the eventual
14833 // uses/modifications resulting from the children of the current node have not
14834 // been recorded yet.
14835 //
14836 // We then visit the children of the current node. After that notePostUse or
14837 // notePostMod is called. These will 1) detect an unsequenced modification
14838 // as side effect (as in "k++ + k") and 2) add a new usage with the
14839 // appropriate usage kind.
14840 //
14841 // We also have to be careful that some operations sequence modifications as
14842 // side effects as well (for example: || or ,). To account for this we wrap
14843 // the visitation of such a sub-expression (for example: the LHS of || or ,)
14844 // with SequencedSubexpression. SequencedSubexpression is an RAII object
14845 // which records usages which are modifications as side effects, and then
14846 // downgrades them (or more accurately restores the previous usage which was a
14847 // modification as side effect) when exiting the scope of the sequenced
14848 // subexpression.
14849
14850 void notePreUse(Object O, const Expr *UseExpr) {
14851 UsageInfo &UI = UsageMap[O];
14852 // Uses conflict with other modifications.
14853 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 14854 } 14855 14856 void notePostUse(Object O, const Expr *UseExpr) { 14857 UsageInfo &UI = UsageMap[O]; 14858 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 14859 /*IsModMod=*/false); 14860 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 14861 } 14862 14863 void notePreMod(Object O, const Expr *ModExpr) { 14864 UsageInfo &UI = UsageMap[O]; 14865 // Modifications conflict with other modifications and with uses. 14866 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 14867 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 14868 } 14869 14870 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 14871 UsageInfo &UI = UsageMap[O]; 14872 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 14873 /*IsModMod=*/true); 14874 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 14875 } 14876 14877 public: 14878 SequenceChecker(Sema &S, const Expr *E, 14879 SmallVectorImpl<const Expr *> &WorkList) 14880 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 14881 Visit(E); 14882 // Silence a -Wunused-private-field since WorkList is now unused. 14883 // TODO: Evaluate if it can be used, and if not remove it. 14884 (void)this->WorkList; 14885 } 14886 14887 void VisitStmt(const Stmt *S) { 14888 // Skip all statements which aren't expressions for now. 14889 } 14890 14891 void VisitExpr(const Expr *E) { 14892 // By default, just recurse to evaluated subexpressions. 14893 Base::VisitStmt(E); 14894 } 14895 14896 void VisitCastExpr(const CastExpr *E) { 14897 Object O = Object(); 14898 if (E->getCastKind() == CK_LValueToRValue) 14899 O = getObject(E->getSubExpr(), false); 14900 14901 if (O) 14902 notePreUse(O, E); 14903 VisitExpr(E); 14904 if (O) 14905 notePostUse(O, E); 14906 } 14907 14908 void VisitSequencedExpressions(const Expr *SequencedBefore, 14909 const Expr *SequencedAfter) { 14910 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 14911 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 14912 SequenceTree::Seq OldRegion = Region; 14913 14914 { 14915 SequencedSubexpression SeqBefore(*this); 14916 Region = BeforeRegion; 14917 Visit(SequencedBefore); 14918 } 14919 14920 Region = AfterRegion; 14921 Visit(SequencedAfter); 14922 14923 Region = OldRegion; 14924 14925 Tree.merge(BeforeRegion); 14926 Tree.merge(AfterRegion); 14927 } 14928 14929 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 14930 // C++17 [expr.sub]p1: 14931 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 14932 // expression E1 is sequenced before the expression E2. 14933 if (SemaRef.getLangOpts().CPlusPlus17) 14934 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 14935 else { 14936 Visit(ASE->getLHS()); 14937 Visit(ASE->getRHS()); 14938 } 14939 } 14940 14941 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14942 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14943 void VisitBinPtrMem(const BinaryOperator *BO) { 14944 // C++17 [expr.mptr.oper]p4: 14945 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 14946 // the expression E1 is sequenced before the expression E2. 
14947 if (SemaRef.getLangOpts().CPlusPlus17) 14948 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14949 else { 14950 Visit(BO->getLHS()); 14951 Visit(BO->getRHS()); 14952 } 14953 } 14954 14955 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14956 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14957 void VisitBinShlShr(const BinaryOperator *BO) { 14958 // C++17 [expr.shift]p4: 14959 // The expression E1 is sequenced before the expression E2. 14960 if (SemaRef.getLangOpts().CPlusPlus17) 14961 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14962 else { 14963 Visit(BO->getLHS()); 14964 Visit(BO->getRHS()); 14965 } 14966 } 14967 14968 void VisitBinComma(const BinaryOperator *BO) { 14969 // C++11 [expr.comma]p1: 14970 // Every value computation and side effect associated with the left 14971 // expression is sequenced before every value computation and side 14972 // effect associated with the right expression. 14973 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14974 } 14975 14976 void VisitBinAssign(const BinaryOperator *BO) { 14977 SequenceTree::Seq RHSRegion; 14978 SequenceTree::Seq LHSRegion; 14979 if (SemaRef.getLangOpts().CPlusPlus17) { 14980 RHSRegion = Tree.allocate(Region); 14981 LHSRegion = Tree.allocate(Region); 14982 } else { 14983 RHSRegion = Region; 14984 LHSRegion = Region; 14985 } 14986 SequenceTree::Seq OldRegion = Region; 14987 14988 // C++11 [expr.ass]p1: 14989 // [...] the assignment is sequenced after the value computation 14990 // of the right and left operands, [...] 14991 // 14992 // so check it before inspecting the operands and update the 14993 // map afterwards. 14994 Object O = getObject(BO->getLHS(), /*Mod=*/true); 14995 if (O) 14996 notePreMod(O, BO); 14997 14998 if (SemaRef.getLangOpts().CPlusPlus17) { 14999 // C++17 [expr.ass]p1: 15000 // [...] The right operand is sequenced before the left operand. [...] 15001 { 15002 SequencedSubexpression SeqBefore(*this); 15003 Region = RHSRegion; 15004 Visit(BO->getRHS()); 15005 } 15006 15007 Region = LHSRegion; 15008 Visit(BO->getLHS()); 15009 15010 if (O && isa<CompoundAssignOperator>(BO)) 15011 notePostUse(O, BO); 15012 15013 } else { 15014 // C++11 does not specify any sequencing between the LHS and RHS. 15015 Region = LHSRegion; 15016 Visit(BO->getLHS()); 15017 15018 if (O && isa<CompoundAssignOperator>(BO)) 15019 notePostUse(O, BO); 15020 15021 Region = RHSRegion; 15022 Visit(BO->getRHS()); 15023 } 15024 15025 // C++11 [expr.ass]p1: 15026 // the assignment is sequenced [...] before the value computation of the 15027 // assignment expression. 15028 // C11 6.5.16/3 has no such rule. 15029 Region = OldRegion; 15030 if (O) 15031 notePostMod(O, BO, 15032 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 15033 : UK_ModAsSideEffect); 15034 if (SemaRef.getLangOpts().CPlusPlus17) { 15035 Tree.merge(RHSRegion); 15036 Tree.merge(LHSRegion); 15037 } 15038 } 15039 15040 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 15041 VisitBinAssign(CAO); 15042 } 15043 15044 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 15045 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 15046 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 15047 Object O = getObject(UO->getSubExpr(), true); 15048 if (!O) 15049 return VisitExpr(UO); 15050 15051 notePreMod(O, UO); 15052 Visit(UO->getSubExpr()); 15053 // C++11 [expr.pre.incr]p1: 15054 // the expression ++x is equivalent to x+=1 15055 notePostMod(O, UO, 15056 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 15057 : UK_ModAsSideEffect); 15058 } 15059 15060 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 15061 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 15062 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 15063 Object O = getObject(UO->getSubExpr(), true); 15064 if (!O) 15065 return VisitExpr(UO); 15066 15067 notePreMod(O, UO); 15068 Visit(UO->getSubExpr()); 15069 notePostMod(O, UO, UK_ModAsSideEffect); 15070 } 15071 15072 void VisitBinLOr(const BinaryOperator *BO) { 15073 // C++11 [expr.log.or]p2: 15074 // If the second expression is evaluated, every value computation and 15075 // side effect associated with the first expression is sequenced before 15076 // every value computation and side effect associated with the 15077 // second expression. 15078 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 15079 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 15080 SequenceTree::Seq OldRegion = Region; 15081 15082 EvaluationTracker Eval(*this); 15083 { 15084 SequencedSubexpression Sequenced(*this); 15085 Region = LHSRegion; 15086 Visit(BO->getLHS()); 15087 } 15088 15089 // C++11 [expr.log.or]p1: 15090 // [...] the second operand is not evaluated if the first operand 15091 // evaluates to true. 15092 bool EvalResult = false; 15093 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 15094 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 15095 if (ShouldVisitRHS) { 15096 Region = RHSRegion; 15097 Visit(BO->getRHS()); 15098 } 15099 15100 Region = OldRegion; 15101 Tree.merge(LHSRegion); 15102 Tree.merge(RHSRegion); 15103 } 15104 15105 void VisitBinLAnd(const BinaryOperator *BO) { 15106 // C++11 [expr.log.and]p2: 15107 // If the second expression is evaluated, every value computation and 15108 // side effect associated with the first expression is sequenced before 15109 // every value computation and side effect associated with the 15110 // second expression. 15111 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 15112 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 15113 SequenceTree::Seq OldRegion = Region; 15114 15115 EvaluationTracker Eval(*this); 15116 { 15117 SequencedSubexpression Sequenced(*this); 15118 Region = LHSRegion; 15119 Visit(BO->getLHS()); 15120 } 15121 15122 // C++11 [expr.log.and]p1: 15123 // [...] the second operand is not evaluated if the first operand is false. 
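// So try to constant-fold the left operand first: when it is known to be
// false, the right operand is skipped entirely, and something like
// "0 && (i++ + i++)" (an illustrative example) is not diagnosed because the
// unsequenced increments are never evaluated.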
15124 bool EvalResult = false;
15125 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
15126 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
15127 if (ShouldVisitRHS) {
15128 Region = RHSRegion;
15129 Visit(BO->getRHS());
15130 }
15131
15132 Region = OldRegion;
15133 Tree.merge(LHSRegion);
15134 Tree.merge(RHSRegion);
15135 }
15136
15137 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
15138 // C++11 [expr.cond]p1:
15139 // [...] Every value computation and side effect associated with the first
15140 // expression is sequenced before every value computation and side effect
15141 // associated with the second or third expression.
15142 SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
15143
15144 // No sequencing is specified between the true and false expressions.
15145 // However, since exactly one of the two is going to be evaluated, we can
15146 // consider them to be sequenced. This is needed to avoid warning on
15147 // something like "x ? y += 1 : y += 2;" in the case where we will visit
15148 // both the true and false expressions because we can't evaluate x.
15149 // This will still allow us to detect an expression like (pre C++17)
15150 // "(x ? y += 1 : y += 2) = y".
15151 //
15152 // We don't wrap the visitation of the true and false expressions with
15153 // SequencedSubexpression because we don't want to downgrade modifications
15154 // as side effect in the true and false expressions after the visitation
15155 // is done. (For example, in the expression "(x ? y++ : y++) + y" we should
15156 // not warn between the two "y++", but we should warn between the "y++"
15157 // and the "y".)
15158 SequenceTree::Seq TrueRegion = Tree.allocate(Region);
15159 SequenceTree::Seq FalseRegion = Tree.allocate(Region);
15160 SequenceTree::Seq OldRegion = Region;
15161
15162 EvaluationTracker Eval(*this);
15163 {
15164 SequencedSubexpression Sequenced(*this);
15165 Region = ConditionRegion;
15166 Visit(CO->getCond());
15167 }
15168
15169 // C++11 [expr.cond]p1:
15170 // [...] The first expression is contextually converted to bool (Clause 4).
15171 // It is evaluated and if it is true, the result of the conditional
15172 // expression is the value of the second expression, otherwise that of the
15173 // third expression. Only one of the second and third expressions is
15174 // evaluated. [...]
15175 bool EvalResult = false;
15176 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
15177 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
15178 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
15179 if (ShouldVisitTrueExpr) {
15180 Region = TrueRegion;
15181 Visit(CO->getTrueExpr());
15182 }
15183 if (ShouldVisitFalseExpr) {
15184 Region = FalseRegion;
15185 Visit(CO->getFalseExpr());
15186 }
15187
15188 Region = OldRegion;
15189 Tree.merge(ConditionRegion);
15190 Tree.merge(TrueRegion);
15191 Tree.merge(FalseRegion);
15192 }
15193
15194 void VisitCallExpr(const CallExpr *CE) {
15195 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
15196
15197 if (CE->isUnevaluatedBuiltinCall(Context))
15198 return;
15199
15200 // C++11 [intro.execution]p15:
15201 // When calling a function [...], every value computation and side effect
15202 // associated with any argument expression, or with the postfix expression
15203 // designating the called function, is sequenced before execution of every
15204 // expression or statement in the body of the function [and thus before
15205 // the value computation of its result].
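// Modelling this, the call is wrapped in a SequencedSubexpression below: a
// modification made while evaluating the callee or an argument (illustrative
// example: the "i++" in "i = f(i++)") is sequenced before the call's value
// and is therefore not reported against the enclosing assignment.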
15206 SequencedSubexpression Sequenced(*this); 15207 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 15208 // C++17 [expr.call]p5 15209 // The postfix-expression is sequenced before each expression in the 15210 // expression-list and any default argument. [...] 15211 SequenceTree::Seq CalleeRegion; 15212 SequenceTree::Seq OtherRegion; 15213 if (SemaRef.getLangOpts().CPlusPlus17) { 15214 CalleeRegion = Tree.allocate(Region); 15215 OtherRegion = Tree.allocate(Region); 15216 } else { 15217 CalleeRegion = Region; 15218 OtherRegion = Region; 15219 } 15220 SequenceTree::Seq OldRegion = Region; 15221 15222 // Visit the callee expression first. 15223 Region = CalleeRegion; 15224 if (SemaRef.getLangOpts().CPlusPlus17) { 15225 SequencedSubexpression Sequenced(*this); 15226 Visit(CE->getCallee()); 15227 } else { 15228 Visit(CE->getCallee()); 15229 } 15230 15231 // Then visit the argument expressions. 15232 Region = OtherRegion; 15233 for (const Expr *Argument : CE->arguments()) 15234 Visit(Argument); 15235 15236 Region = OldRegion; 15237 if (SemaRef.getLangOpts().CPlusPlus17) { 15238 Tree.merge(CalleeRegion); 15239 Tree.merge(OtherRegion); 15240 } 15241 }); 15242 } 15243 15244 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 15245 // C++17 [over.match.oper]p2: 15246 // [...] the operator notation is first transformed to the equivalent 15247 // function-call notation as summarized in Table 12 (where @ denotes one 15248 // of the operators covered in the specified subclause). However, the 15249 // operands are sequenced in the order prescribed for the built-in 15250 // operator (Clause 8). 15251 // 15252 // From the above only overloaded binary operators and overloaded call 15253 // operators have sequencing rules in C++17 that we need to handle 15254 // separately. 15255 if (!SemaRef.getLangOpts().CPlusPlus17 || 15256 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 15257 return VisitCallExpr(CXXOCE); 15258 15259 enum { 15260 NoSequencing, 15261 LHSBeforeRHS, 15262 RHSBeforeLHS, 15263 LHSBeforeRest 15264 } SequencingKind; 15265 switch (CXXOCE->getOperator()) { 15266 case OO_Equal: 15267 case OO_PlusEqual: 15268 case OO_MinusEqual: 15269 case OO_StarEqual: 15270 case OO_SlashEqual: 15271 case OO_PercentEqual: 15272 case OO_CaretEqual: 15273 case OO_AmpEqual: 15274 case OO_PipeEqual: 15275 case OO_LessLessEqual: 15276 case OO_GreaterGreaterEqual: 15277 SequencingKind = RHSBeforeLHS; 15278 break; 15279 15280 case OO_LessLess: 15281 case OO_GreaterGreater: 15282 case OO_AmpAmp: 15283 case OO_PipePipe: 15284 case OO_Comma: 15285 case OO_ArrowStar: 15286 case OO_Subscript: 15287 SequencingKind = LHSBeforeRHS; 15288 break; 15289 15290 case OO_Call: 15291 SequencingKind = LHSBeforeRest; 15292 break; 15293 15294 default: 15295 SequencingKind = NoSequencing; 15296 break; 15297 } 15298 15299 if (SequencingKind == NoSequencing) 15300 return VisitCallExpr(CXXOCE); 15301 15302 // This is a call, so all subexpressions are sequenced before the result. 
15303 SequencedSubexpression Sequenced(*this); 15304 15305 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 15306 assert(SemaRef.getLangOpts().CPlusPlus17 && 15307 "Should only get there with C++17 and above!"); 15308 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 15309 "Should only get there with an overloaded binary operator" 15310 " or an overloaded call operator!"); 15311 15312 if (SequencingKind == LHSBeforeRest) { 15313 assert(CXXOCE->getOperator() == OO_Call && 15314 "We should only have an overloaded call operator here!"); 15315 15316 // This is very similar to VisitCallExpr, except that we only have the 15317 // C++17 case. The postfix-expression is the first argument of the 15318 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 15319 // are in the following arguments. 15320 // 15321 // Note that we intentionally do not visit the callee expression since 15322 // it is just a decayed reference to a function. 15323 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 15324 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 15325 SequenceTree::Seq OldRegion = Region; 15326 15327 assert(CXXOCE->getNumArgs() >= 1 && 15328 "An overloaded call operator must have at least one argument" 15329 " for the postfix-expression!"); 15330 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 15331 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 15332 CXXOCE->getNumArgs() - 1); 15333 15334 // Visit the postfix-expression first. 15335 { 15336 Region = PostfixExprRegion; 15337 SequencedSubexpression Sequenced(*this); 15338 Visit(PostfixExpr); 15339 } 15340 15341 // Then visit the argument expressions. 15342 Region = ArgsRegion; 15343 for (const Expr *Arg : Args) 15344 Visit(Arg); 15345 15346 Region = OldRegion; 15347 Tree.merge(PostfixExprRegion); 15348 Tree.merge(ArgsRegion); 15349 } else { 15350 assert(CXXOCE->getNumArgs() == 2 && 15351 "Should only have two arguments here!"); 15352 assert((SequencingKind == LHSBeforeRHS || 15353 SequencingKind == RHSBeforeLHS) && 15354 "Unexpected sequencing kind!"); 15355 15356 // We do not visit the callee expression since it is just a decayed 15357 // reference to a function. 15358 const Expr *E1 = CXXOCE->getArg(0); 15359 const Expr *E2 = CXXOCE->getArg(1); 15360 if (SequencingKind == RHSBeforeLHS) 15361 std::swap(E1, E2); 15362 15363 return VisitSequencedExpressions(E1, E2); 15364 } 15365 }); 15366 } 15367 15368 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 15369 // This is a call, so all subexpressions are sequenced before the result. 15370 SequencedSubexpression Sequenced(*this); 15371 15372 if (!CCE->isListInitialization()) 15373 return VisitExpr(CCE); 15374 15375 // In C++11, list initializations are sequenced. 15376 SmallVector<SequenceTree::Seq, 32> Elts; 15377 SequenceTree::Seq Parent = Region; 15378 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 15379 E = CCE->arg_end(); 15380 I != E; ++I) { 15381 Region = Tree.allocate(Parent); 15382 Elts.push_back(Region); 15383 Visit(*I); 15384 } 15385 15386 // Forget that the initializers are sequenced. 15387 Region = Parent; 15388 for (unsigned I = 0; I < Elts.size(); ++I) 15389 Tree.merge(Elts[I]); 15390 } 15391 15392 void VisitInitListExpr(const InitListExpr *ILE) { 15393 if (!SemaRef.getLangOpts().CPlusPlus11) 15394 return VisitExpr(ILE); 15395 15396 // In C++11, list initializations are sequenced. 
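// For example (illustrative), "int a[2] = { i++, i++ };" is not diagnosed in
// C++11 and later because each element initializer is sequenced before the
// next one; the per-element regions allocated below model that.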
15397 SmallVector<SequenceTree::Seq, 32> Elts; 15398 SequenceTree::Seq Parent = Region; 15399 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 15400 const Expr *E = ILE->getInit(I); 15401 if (!E) 15402 continue; 15403 Region = Tree.allocate(Parent); 15404 Elts.push_back(Region); 15405 Visit(E); 15406 } 15407 15408 // Forget that the initializers are sequenced. 15409 Region = Parent; 15410 for (unsigned I = 0; I < Elts.size(); ++I) 15411 Tree.merge(Elts[I]); 15412 } 15413 }; 15414 15415 } // namespace 15416 15417 void Sema::CheckUnsequencedOperations(const Expr *E) { 15418 SmallVector<const Expr *, 8> WorkList; 15419 WorkList.push_back(E); 15420 while (!WorkList.empty()) { 15421 const Expr *Item = WorkList.pop_back_val(); 15422 SequenceChecker(*this, Item, WorkList); 15423 } 15424 } 15425 15426 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 15427 bool IsConstexpr) { 15428 llvm::SaveAndRestore<bool> ConstantContext( 15429 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 15430 CheckImplicitConversions(E, CheckLoc); 15431 if (!E->isInstantiationDependent()) 15432 CheckUnsequencedOperations(E); 15433 if (!IsConstexpr && !E->isValueDependent()) 15434 CheckForIntOverflow(E); 15435 DiagnoseMisalignedMembers(); 15436 } 15437 15438 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 15439 FieldDecl *BitField, 15440 Expr *Init) { 15441 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 15442 } 15443 15444 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 15445 SourceLocation Loc) { 15446 if (!PType->isVariablyModifiedType()) 15447 return; 15448 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 15449 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 15450 return; 15451 } 15452 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 15453 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 15454 return; 15455 } 15456 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 15457 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 15458 return; 15459 } 15460 15461 const ArrayType *AT = S.Context.getAsArrayType(PType); 15462 if (!AT) 15463 return; 15464 15465 if (AT->getSizeModifier() != ArrayType::Star) { 15466 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 15467 return; 15468 } 15469 15470 S.Diag(Loc, diag::err_array_star_in_function_definition); 15471 } 15472 15473 /// CheckParmsForFunctionDef - Check that the parameters of the given 15474 /// function are appropriate for the definition of a function. This 15475 /// takes care of any checks that cannot be performed on the 15476 /// declaration itself, e.g., that the types of each of the function 15477 /// parameters are complete. 15478 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 15479 bool CheckParameterNames) { 15480 bool HasInvalidParm = false; 15481 for (ParmVarDecl *Param : Parameters) { 15482 // C99 6.7.5.3p4: the parameters in a parameter type list in a 15483 // function declarator that is part of a function definition of 15484 // that function shall not have incomplete type. 15485 // 15486 // This is also C++ [dcl.fct]p6. 
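// For example (illustrative), "struct S; void f(S s) {}" is rejected here
// because the parameter's type must be complete in a function definition.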
15487 if (!Param->isInvalidDecl() && 15488 RequireCompleteType(Param->getLocation(), Param->getType(), 15489 diag::err_typecheck_decl_incomplete_type)) { 15490 Param->setInvalidDecl(); 15491 HasInvalidParm = true; 15492 } 15493 15494 // C99 6.9.1p5: If the declarator includes a parameter type list, the 15495 // declaration of each parameter shall include an identifier. 15496 if (CheckParameterNames && Param->getIdentifier() == nullptr && 15497 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 15498 // Diagnose this as an extension in C17 and earlier. 15499 if (!getLangOpts().C2x) 15500 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 15501 } 15502 15503 // C99 6.7.5.3p12: 15504 // If the function declarator is not part of a definition of that 15505 // function, parameters may have incomplete type and may use the [*] 15506 // notation in their sequences of declarator specifiers to specify 15507 // variable length array types. 15508 QualType PType = Param->getOriginalType(); 15509 // FIXME: This diagnostic should point the '[*]' if source-location 15510 // information is added for it. 15511 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 15512 15513 // If the parameter is a c++ class type and it has to be destructed in the 15514 // callee function, declare the destructor so that it can be called by the 15515 // callee function. Do not perform any direct access check on the dtor here. 15516 if (!Param->isInvalidDecl()) { 15517 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 15518 if (!ClassDecl->isInvalidDecl() && 15519 !ClassDecl->hasIrrelevantDestructor() && 15520 !ClassDecl->isDependentContext() && 15521 ClassDecl->isParamDestroyedInCallee()) { 15522 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 15523 MarkFunctionReferenced(Param->getLocation(), Destructor); 15524 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 15525 } 15526 } 15527 } 15528 15529 // Parameters with the pass_object_size attribute only need to be marked 15530 // constant at function definitions. Because we lack information about 15531 // whether we're on a declaration or definition when we're instantiating the 15532 // attribute, we need to check for constness here. 15533 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 15534 if (!Param->getType().isConstQualified()) 15535 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 15536 << Attr->getSpelling() << 1; 15537 15538 // Check for parameter names shadowing fields from the class. 15539 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 15540 // The owning context for the parameter should be the function, but we 15541 // want to see if this function's declaration context is a record. 15542 DeclContext *DC = Param->getDeclContext(); 15543 if (DC && DC->isFunctionOrMethod()) { 15544 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 15545 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 15546 RD, /*DeclIsField*/ false); 15547 } 15548 } 15549 } 15550 15551 return HasInvalidParm; 15552 } 15553 15554 Optional<std::pair<CharUnits, CharUnits>> 15555 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 15556 15557 /// Compute the alignment and offset of the base class object given the 15558 /// derived-to-base cast expression and the alignment and offset of the derived 15559 /// class object. 
15560 static std::pair<CharUnits, CharUnits> 15561 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 15562 CharUnits BaseAlignment, CharUnits Offset, 15563 ASTContext &Ctx) { 15564 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 15565 ++PathI) { 15566 const CXXBaseSpecifier *Base = *PathI; 15567 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 15568 if (Base->isVirtual()) { 15569 // The complete object may have a lower alignment than the non-virtual 15570 // alignment of the base, in which case the base may be misaligned. Choose 15571 // the smaller of the non-virtual alignment and BaseAlignment, which is a 15572 // conservative lower bound of the complete object alignment. 15573 CharUnits NonVirtualAlignment = 15574 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 15575 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 15576 Offset = CharUnits::Zero(); 15577 } else { 15578 const ASTRecordLayout &RL = 15579 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 15580 Offset += RL.getBaseClassOffset(BaseDecl); 15581 } 15582 DerivedType = Base->getType(); 15583 } 15584 15585 return std::make_pair(BaseAlignment, Offset); 15586 } 15587 15588 /// Compute the alignment and offset of a binary additive operator. 15589 static Optional<std::pair<CharUnits, CharUnits>> 15590 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 15591 bool IsSub, ASTContext &Ctx) { 15592 QualType PointeeType = PtrE->getType()->getPointeeType(); 15593 15594 if (!PointeeType->isConstantSizeType()) 15595 return llvm::None; 15596 15597 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 15598 15599 if (!P) 15600 return llvm::None; 15601 15602 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 15603 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 15604 CharUnits Offset = EltSize * IdxRes->getExtValue(); 15605 if (IsSub) 15606 Offset = -Offset; 15607 return std::make_pair(P->first, P->second + Offset); 15608 } 15609 15610 // If the integer expression isn't a constant expression, compute the lower 15611 // bound of the alignment using the alignment and offset of the pointer 15612 // expression and the element size. 15613 return std::make_pair( 15614 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 15615 CharUnits::Zero()); 15616 } 15617 15618 /// This helper function takes an lvalue expression and returns the alignment of 15619 /// a VarDecl and a constant offset from the VarDecl. 
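/// For example (illustrative), given "struct S { char c; int i; } s;", the
/// lvalue "s.i" yields the alignment used for the declaration of 's' together
/// with the offset of the field 'i' within 'S'.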
15620 Optional<std::pair<CharUnits, CharUnits>> 15621 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 15622 E = E->IgnoreParens(); 15623 switch (E->getStmtClass()) { 15624 default: 15625 break; 15626 case Stmt::CStyleCastExprClass: 15627 case Stmt::CXXStaticCastExprClass: 15628 case Stmt::ImplicitCastExprClass: { 15629 auto *CE = cast<CastExpr>(E); 15630 const Expr *From = CE->getSubExpr(); 15631 switch (CE->getCastKind()) { 15632 default: 15633 break; 15634 case CK_NoOp: 15635 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15636 case CK_UncheckedDerivedToBase: 15637 case CK_DerivedToBase: { 15638 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15639 if (!P) 15640 break; 15641 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 15642 P->second, Ctx); 15643 } 15644 } 15645 break; 15646 } 15647 case Stmt::ArraySubscriptExprClass: { 15648 auto *ASE = cast<ArraySubscriptExpr>(E); 15649 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 15650 false, Ctx); 15651 } 15652 case Stmt::DeclRefExprClass: { 15653 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 15654 // FIXME: If VD is captured by copy or is an escaping __block variable, 15655 // use the alignment of VD's type. 15656 if (!VD->getType()->isReferenceType()) 15657 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 15658 if (VD->hasInit()) 15659 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 15660 } 15661 break; 15662 } 15663 case Stmt::MemberExprClass: { 15664 auto *ME = cast<MemberExpr>(E); 15665 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 15666 if (!FD || FD->getType()->isReferenceType() || 15667 FD->getParent()->isInvalidDecl()) 15668 break; 15669 Optional<std::pair<CharUnits, CharUnits>> P; 15670 if (ME->isArrow()) 15671 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 15672 else 15673 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 15674 if (!P) 15675 break; 15676 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 15677 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 15678 return std::make_pair(P->first, 15679 P->second + CharUnits::fromQuantity(Offset)); 15680 } 15681 case Stmt::UnaryOperatorClass: { 15682 auto *UO = cast<UnaryOperator>(E); 15683 switch (UO->getOpcode()) { 15684 default: 15685 break; 15686 case UO_Deref: 15687 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 15688 } 15689 break; 15690 } 15691 case Stmt::BinaryOperatorClass: { 15692 auto *BO = cast<BinaryOperator>(E); 15693 auto Opcode = BO->getOpcode(); 15694 switch (Opcode) { 15695 default: 15696 break; 15697 case BO_Comma: 15698 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 15699 } 15700 break; 15701 } 15702 } 15703 return llvm::None; 15704 } 15705 15706 /// This helper function takes a pointer expression and returns the alignment of 15707 /// a VarDecl and a constant offset from the VarDecl. 
15708 Optional<std::pair<CharUnits, CharUnits>> 15709 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 15710 E = E->IgnoreParens(); 15711 switch (E->getStmtClass()) { 15712 default: 15713 break; 15714 case Stmt::CStyleCastExprClass: 15715 case Stmt::CXXStaticCastExprClass: 15716 case Stmt::ImplicitCastExprClass: { 15717 auto *CE = cast<CastExpr>(E); 15718 const Expr *From = CE->getSubExpr(); 15719 switch (CE->getCastKind()) { 15720 default: 15721 break; 15722 case CK_NoOp: 15723 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15724 case CK_ArrayToPointerDecay: 15725 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15726 case CK_UncheckedDerivedToBase: 15727 case CK_DerivedToBase: { 15728 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15729 if (!P) 15730 break; 15731 return getDerivedToBaseAlignmentAndOffset( 15732 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 15733 } 15734 } 15735 break; 15736 } 15737 case Stmt::CXXThisExprClass: { 15738 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 15739 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 15740 return std::make_pair(Alignment, CharUnits::Zero()); 15741 } 15742 case Stmt::UnaryOperatorClass: { 15743 auto *UO = cast<UnaryOperator>(E); 15744 if (UO->getOpcode() == UO_AddrOf) 15745 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 15746 break; 15747 } 15748 case Stmt::BinaryOperatorClass: { 15749 auto *BO = cast<BinaryOperator>(E); 15750 auto Opcode = BO->getOpcode(); 15751 switch (Opcode) { 15752 default: 15753 break; 15754 case BO_Add: 15755 case BO_Sub: { 15756 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 15757 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 15758 std::swap(LHS, RHS); 15759 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 15760 Ctx); 15761 } 15762 case BO_Comma: 15763 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 15764 } 15765 break; 15766 } 15767 } 15768 return llvm::None; 15769 } 15770 15771 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 15772 // See if we can compute the alignment of a VarDecl and an offset from it. 15773 Optional<std::pair<CharUnits, CharUnits>> P = 15774 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 15775 15776 if (P) 15777 return P->first.alignmentAtOffset(P->second); 15778 15779 // If that failed, return the type's alignment. 15780 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 15781 } 15782 15783 /// CheckCastAlign - Implements -Wcast-align, which warns when a 15784 /// pointer cast increases the alignment requirements. 15785 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 15786 // This is actually a lot of work to potentially be doing on every 15787 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 15788 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 15789 return; 15790 15791 // Ignore dependent types. 15792 if (T->isDependentType() || Op->getType()->isDependentType()) 15793 return; 15794 15795 // Require that the destination be a pointer type. 15796 const PointerType *DestPtr = T->getAs<PointerType>(); 15797 if (!DestPtr) return; 15798 15799 // If the destination has alignment 1, we're done. 
15800 QualType DestPointee = DestPtr->getPointeeType(); 15801 if (DestPointee->isIncompleteType()) return; 15802 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 15803 if (DestAlign.isOne()) return; 15804 15805 // Require that the source be a pointer type. 15806 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 15807 if (!SrcPtr) return; 15808 QualType SrcPointee = SrcPtr->getPointeeType(); 15809 15810 // Explicitly allow casts from cv void*. We already implicitly 15811 // allowed casts to cv void*, since they have alignment 1. 15812 // Also allow casts involving incomplete types, which implicitly 15813 // includes 'void'. 15814 if (SrcPointee->isIncompleteType()) return; 15815 15816 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 15817 15818 if (SrcAlign >= DestAlign) return; 15819 15820 Diag(TRange.getBegin(), diag::warn_cast_align) 15821 << Op->getType() << T 15822 << static_cast<unsigned>(SrcAlign.getQuantity()) 15823 << static_cast<unsigned>(DestAlign.getQuantity()) 15824 << TRange << Op->getSourceRange(); 15825 } 15826 15827 /// Check whether this array fits the idiom of a size-one tail padded 15828 /// array member of a struct. 15829 /// 15830 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 15831 /// commonly used to emulate flexible arrays in C89 code. 15832 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 15833 const NamedDecl *ND, 15834 unsigned StrictFlexArraysLevel) { 15835 if (!ND) 15836 return false; 15837 15838 if (StrictFlexArraysLevel >= 2 && Size != 0) 15839 return false; 15840 15841 if (StrictFlexArraysLevel == 1 && Size.ule(1)) 15842 return false; 15843 15844 // FIXME: While the default -fstrict-flex-arrays=0 permits Size>1 trailing 15845 // arrays to be treated as flexible-array-members, we still emit diagnostics 15846 // as if they are not. Pending further discussion... 15847 if (StrictFlexArraysLevel == 0 && Size != 1) 15848 return false; 15849 15850 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 15851 if (!FD) 15852 return false; 15853 15854 // Don't consider sizes resulting from macro expansions or template argument 15855 // substitution to form C89 tail-padded arrays. 15856 15857 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 15858 while (TInfo) { 15859 TypeLoc TL = TInfo->getTypeLoc(); 15860 // Look through typedefs. 15861 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 15862 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 15863 TInfo = TDL->getTypeSourceInfo(); 15864 continue; 15865 } 15866 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 15867 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 15868 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 15869 return false; 15870 } 15871 break; 15872 } 15873 15874 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 15875 if (!RD) 15876 return false; 15877 if (RD->isUnion()) 15878 return false; 15879 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 15880 if (!CRD->isStandardLayout()) 15881 return false; 15882 } 15883 15884 // See if this is the last field decl in the record. 15885 const Decl *D = FD; 15886 while ((D = D->getNextDeclInContext())) 15887 if (isa<FieldDecl>(D)) 15888 return false; 15889 return true; 15890 } 15891 15892 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 15893 const ArraySubscriptExpr *ASE, 15894 bool AllowOnePastEnd, bool IndexNegated) { 15895 // Already diagnosed by the constant evaluator. 
15896 if (isConstantEvaluated()) 15897 return; 15898 15899 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 15900 if (IndexExpr->isValueDependent()) 15901 return; 15902 15903 const Type *EffectiveType = 15904 BaseExpr->getType()->getPointeeOrArrayElementType(); 15905 BaseExpr = BaseExpr->IgnoreParenCasts(); 15906 const ConstantArrayType *ArrayTy = 15907 Context.getAsConstantArrayType(BaseExpr->getType()); 15908 15909 const Type *BaseType = 15910 ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr(); 15911 bool IsUnboundedArray = (BaseType == nullptr); 15912 if (EffectiveType->isDependentType() || 15913 (!IsUnboundedArray && BaseType->isDependentType())) 15914 return; 15915 15916 Expr::EvalResult Result; 15917 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 15918 return; 15919 15920 llvm::APSInt index = Result.Val.getInt(); 15921 if (IndexNegated) { 15922 index.setIsUnsigned(false); 15923 index = -index; 15924 } 15925 15926 const NamedDecl *ND = nullptr; 15927 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15928 ND = DRE->getDecl(); 15929 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 15930 ND = ME->getMemberDecl(); 15931 15932 if (IsUnboundedArray) { 15933 if (EffectiveType->isFunctionType()) 15934 return; 15935 if (index.isUnsigned() || !index.isNegative()) { 15936 const auto &ASTC = getASTContext(); 15937 unsigned AddrBits = 15938 ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace( 15939 EffectiveType->getCanonicalTypeInternal())); 15940 if (index.getBitWidth() < AddrBits) 15941 index = index.zext(AddrBits); 15942 Optional<CharUnits> ElemCharUnits = 15943 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 15944 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 15945 // pointer) bounds-checking isn't meaningful. 15946 if (!ElemCharUnits) 15947 return; 15948 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 15949 // If index has more active bits than address space, we already know 15950 // we have a bounds violation to warn about. Otherwise, compute 15951 // address of (index + 1)th element, and warn about bounds violation 15952 // only if that address exceeds address space. 15953 if (index.getActiveBits() <= AddrBits) { 15954 bool Overflow; 15955 llvm::APInt Product(index); 15956 Product += 1; 15957 Product = Product.umul_ov(ElemBytes, Overflow); 15958 if (!Overflow && Product.getActiveBits() <= AddrBits) 15959 return; 15960 } 15961 15962 // Need to compute max possible elements in address space, since that 15963 // is included in diag message. 15964 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 15965 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 15966 MaxElems += 1; 15967 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 15968 MaxElems = MaxElems.udiv(ElemBytes); 15969 15970 unsigned DiagID = 15971 ASE ? 
diag::warn_array_index_exceeds_max_addressable_bounds 15972 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 15973 15974 // Diag message shows element size in bits and in "bytes" (platform- 15975 // dependent CharUnits) 15976 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15977 PDiag(DiagID) 15978 << toString(index, 10, true) << AddrBits 15979 << (unsigned)ASTC.toBits(*ElemCharUnits) 15980 << toString(ElemBytes, 10, false) 15981 << toString(MaxElems, 10, false) 15982 << (unsigned)MaxElems.getLimitedValue(~0U) 15983 << IndexExpr->getSourceRange()); 15984 15985 if (!ND) { 15986 // Try harder to find a NamedDecl to point at in the note. 15987 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15988 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15989 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15990 ND = DRE->getDecl(); 15991 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15992 ND = ME->getMemberDecl(); 15993 } 15994 15995 if (ND) 15996 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15997 PDiag(diag::note_array_declared_here) << ND); 15998 } 15999 return; 16000 } 16001 16002 if (index.isUnsigned() || !index.isNegative()) { 16003 // It is possible that the type of the base expression after 16004 // IgnoreParenCasts is incomplete, even though the type of the base 16005 // expression before IgnoreParenCasts is complete (see PR39746 for an 16006 // example). In this case we have no information about whether the array 16007 // access exceeds the array bounds. However we can still diagnose an array 16008 // access which precedes the array bounds. 16009 // 16010 // FIXME: this check should be redundant with the IsUnboundedArray check 16011 // above. 16012 if (BaseType->isIncompleteType()) 16013 return; 16014 16015 // FIXME: this check should belong to the IsTailPaddedMemberArray call 16016 // below. 16017 llvm::APInt size = ArrayTy->getSize(); 16018 if (!size.isStrictlyPositive()) 16019 return; 16020 16021 if (BaseType != EffectiveType) { 16022 // Make sure we're comparing apples to apples when comparing index to size 16023 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 16024 uint64_t array_typesize = Context.getTypeSize(BaseType); 16025 // Handle ptrarith_typesize being zero, such as when casting to void* 16026 if (!ptrarith_typesize) ptrarith_typesize = 1; 16027 if (ptrarith_typesize != array_typesize) { 16028 // There's a cast to a different size type involved 16029 uint64_t ratio = array_typesize / ptrarith_typesize; 16030 // TODO: Be smarter about handling cases where array_typesize is not a 16031 // multiple of ptrarith_typesize 16032 if (ptrarith_typesize * ratio == array_typesize) 16033 size *= llvm::APInt(size.getBitWidth(), ratio); 16034 } 16035 } 16036 16037 if (size.getBitWidth() > index.getBitWidth()) 16038 index = index.zext(size.getBitWidth()); 16039 else if (size.getBitWidth() < index.getBitWidth()) 16040 size = size.zext(index.getBitWidth()); 16041 16042 // For array subscripting the index must be less than size, but for pointer 16043 // arithmetic also allow the index (offset) to be equal to size since 16044 // computing the next address after the end of the array is legal and 16045 // commonly done e.g. in C++ iterators and range-based for loops. 16046 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 16047 return; 16048 16049 // Also don't warn for Flexible Array Member emulation. 
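// A typical instance of that idiom (illustrative) is
//   struct S { int count; char data[1]; };
// where the trailing one-element array stands in for a flexible array member;
// with the default -fstrict-flex-arrays setting, indexing past data[0] on
// such a struct is not diagnosed.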
16050 const unsigned StrictFlexArraysLevel = getLangOpts().StrictFlexArrays; 16051 if (IsTailPaddedMemberArray(*this, size, ND, StrictFlexArraysLevel)) 16052 return; 16053 16054 // Suppress the warning if the subscript expression (as identified by the 16055 // ']' location) and the index expression are both from macro expansions 16056 // within a system header. 16057 if (ASE) { 16058 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 16059 ASE->getRBracketLoc()); 16060 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 16061 SourceLocation IndexLoc = 16062 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 16063 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 16064 return; 16065 } 16066 } 16067 16068 unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds 16069 : diag::warn_ptr_arith_exceeds_bounds; 16070 16071 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 16072 PDiag(DiagID) << toString(index, 10, true) 16073 << toString(size, 10, true) 16074 << (unsigned)size.getLimitedValue(~0U) 16075 << IndexExpr->getSourceRange()); 16076 } else { 16077 unsigned DiagID = diag::warn_array_index_precedes_bounds; 16078 if (!ASE) { 16079 DiagID = diag::warn_ptr_arith_precedes_bounds; 16080 if (index.isNegative()) index = -index; 16081 } 16082 16083 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 16084 PDiag(DiagID) << toString(index, 10, true) 16085 << IndexExpr->getSourceRange()); 16086 } 16087 16088 if (!ND) { 16089 // Try harder to find a NamedDecl to point at in the note. 16090 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 16091 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 16092 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 16093 ND = DRE->getDecl(); 16094 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 16095 ND = ME->getMemberDecl(); 16096 } 16097 16098 if (ND) 16099 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 16100 PDiag(diag::note_array_declared_here) << ND); 16101 } 16102 16103 void Sema::CheckArrayAccess(const Expr *expr) { 16104 int AllowOnePastEnd = 0; 16105 while (expr) { 16106 expr = expr->IgnoreParenImpCasts(); 16107 switch (expr->getStmtClass()) { 16108 case Stmt::ArraySubscriptExprClass: { 16109 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 16110 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 16111 AllowOnePastEnd > 0); 16112 expr = ASE->getBase(); 16113 break; 16114 } 16115 case Stmt::MemberExprClass: { 16116 expr = cast<MemberExpr>(expr)->getBase(); 16117 break; 16118 } 16119 case Stmt::OMPArraySectionExprClass: { 16120 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 16121 if (ASE->getLowerBound()) 16122 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 16123 /*ASE=*/nullptr, AllowOnePastEnd > 0); 16124 return; 16125 } 16126 case Stmt::UnaryOperatorClass: { 16127 // Only unwrap the * and & unary operators 16128 const UnaryOperator *UO = cast<UnaryOperator>(expr); 16129 expr = UO->getSubExpr(); 16130 switch (UO->getOpcode()) { 16131 case UO_AddrOf: 16132 AllowOnePastEnd++; 16133 break; 16134 case UO_Deref: 16135 AllowOnePastEnd--; 16136 break; 16137 default: 16138 return; 16139 } 16140 break; 16141 } 16142 case Stmt::ConditionalOperatorClass: { 16143 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 16144 if (const Expr *lhs = cond->getLHS()) 16145 CheckArrayAccess(lhs); 16146 if (const Expr *rhs = cond->getRHS()) 16147 CheckArrayAccess(rhs); 16148 return; 16149 } 16150 case Stmt::CXXOperatorCallExprClass: { 16151 const auto *OCE = 
cast<CXXOperatorCallExpr>(expr); 16152 for (const auto *Arg : OCE->arguments()) 16153 CheckArrayAccess(Arg); 16154 return; 16155 } 16156 default: 16157 return; 16158 } 16159 } 16160 } 16161 16162 //===--- CHECK: Objective-C retain cycles ----------------------------------// 16163 16164 namespace { 16165 16166 struct RetainCycleOwner { 16167 VarDecl *Variable = nullptr; 16168 SourceRange Range; 16169 SourceLocation Loc; 16170 bool Indirect = false; 16171 16172 RetainCycleOwner() = default; 16173 16174 void setLocsFrom(Expr *e) { 16175 Loc = e->getExprLoc(); 16176 Range = e->getSourceRange(); 16177 } 16178 }; 16179 16180 } // namespace 16181 16182 /// Consider whether capturing the given variable can possibly lead to 16183 /// a retain cycle. 16184 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 16185 // In ARC, it's captured strongly iff the variable has __strong 16186 // lifetime. In MRR, it's captured strongly if the variable is 16187 // __block and has an appropriate type. 16188 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 16189 return false; 16190 16191 owner.Variable = var; 16192 if (ref) 16193 owner.setLocsFrom(ref); 16194 return true; 16195 } 16196 16197 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 16198 while (true) { 16199 e = e->IgnoreParens(); 16200 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 16201 switch (cast->getCastKind()) { 16202 case CK_BitCast: 16203 case CK_LValueBitCast: 16204 case CK_LValueToRValue: 16205 case CK_ARCReclaimReturnedObject: 16206 e = cast->getSubExpr(); 16207 continue; 16208 16209 default: 16210 return false; 16211 } 16212 } 16213 16214 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 16215 ObjCIvarDecl *ivar = ref->getDecl(); 16216 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 16217 return false; 16218 16219 // Try to find a retain cycle in the base. 16220 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 16221 return false; 16222 16223 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 16224 owner.Indirect = true; 16225 return true; 16226 } 16227 16228 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 16229 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 16230 if (!var) return false; 16231 return considerVariable(var, ref, owner); 16232 } 16233 16234 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 16235 if (member->isArrow()) return false; 16236 16237 // Don't count this as an indirect ownership. 16238 e = member->getBase(); 16239 continue; 16240 } 16241 16242 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 16243 // Only pay attention to pseudo-objects on property references. 
16244 ObjCPropertyRefExpr *pre 16245 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 16246 ->IgnoreParens()); 16247 if (!pre) return false; 16248 if (pre->isImplicitProperty()) return false; 16249 ObjCPropertyDecl *property = pre->getExplicitProperty(); 16250 if (!property->isRetaining() && 16251 !(property->getPropertyIvarDecl() && 16252 property->getPropertyIvarDecl()->getType() 16253 .getObjCLifetime() == Qualifiers::OCL_Strong)) 16254 return false; 16255 16256 owner.Indirect = true; 16257 if (pre->isSuperReceiver()) { 16258 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 16259 if (!owner.Variable) 16260 return false; 16261 owner.Loc = pre->getLocation(); 16262 owner.Range = pre->getSourceRange(); 16263 return true; 16264 } 16265 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 16266 ->getSourceExpr()); 16267 continue; 16268 } 16269 16270 // Array ivars? 16271 16272 return false; 16273 } 16274 } 16275 16276 namespace { 16277 16278 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 16279 ASTContext &Context; 16280 VarDecl *Variable; 16281 Expr *Capturer = nullptr; 16282 bool VarWillBeReased = false; 16283 16284 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 16285 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 16286 Context(Context), Variable(variable) {} 16287 16288 void VisitDeclRefExpr(DeclRefExpr *ref) { 16289 if (ref->getDecl() == Variable && !Capturer) 16290 Capturer = ref; 16291 } 16292 16293 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 16294 if (Capturer) return; 16295 Visit(ref->getBase()); 16296 if (Capturer && ref->isFreeIvar()) 16297 Capturer = ref; 16298 } 16299 16300 void VisitBlockExpr(BlockExpr *block) { 16301 // Look inside nested blocks 16302 if (block->getBlockDecl()->capturesVariable(Variable)) 16303 Visit(block->getBlockDecl()->getBody()); 16304 } 16305 16306 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 16307 if (Capturer) return; 16308 if (OVE->getSourceExpr()) 16309 Visit(OVE->getSourceExpr()); 16310 } 16311 16312 void VisitBinaryOperator(BinaryOperator *BinOp) { 16313 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 16314 return; 16315 Expr *LHS = BinOp->getLHS(); 16316 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 16317 if (DRE->getDecl() != Variable) 16318 return; 16319 if (Expr *RHS = BinOp->getRHS()) { 16320 RHS = RHS->IgnoreParenCasts(); 16321 Optional<llvm::APSInt> Value; 16322 VarWillBeReased = 16323 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 16324 *Value == 0); 16325 } 16326 } 16327 } 16328 }; 16329 16330 } // namespace 16331 16332 /// Check whether the given argument is a block which captures a 16333 /// variable. 16334 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 16335 assert(owner.Variable && owner.Loc.isValid()); 16336 16337 e = e->IgnoreParenCasts(); 16338 16339 // Look through [^{...} copy] and Block_copy(^{...}). 
16340 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 16341 Selector Cmd = ME->getSelector(); 16342 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 16343 e = ME->getInstanceReceiver(); 16344 if (!e) 16345 return nullptr; 16346 e = e->IgnoreParenCasts(); 16347 } 16348 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 16349 if (CE->getNumArgs() == 1) { 16350 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 16351 if (Fn) { 16352 const IdentifierInfo *FnI = Fn->getIdentifier(); 16353 if (FnI && FnI->isStr("_Block_copy")) { 16354 e = CE->getArg(0)->IgnoreParenCasts(); 16355 } 16356 } 16357 } 16358 } 16359 16360 BlockExpr *block = dyn_cast<BlockExpr>(e); 16361 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 16362 return nullptr; 16363 16364 FindCaptureVisitor visitor(S.Context, owner.Variable); 16365 visitor.Visit(block->getBlockDecl()->getBody()); 16366 return visitor.VarWillBeReased ? nullptr : visitor.Capturer; 16367 } 16368 16369 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 16370 RetainCycleOwner &owner) { 16371 assert(capturer); 16372 assert(owner.Variable && owner.Loc.isValid()); 16373 16374 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 16375 << owner.Variable << capturer->getSourceRange(); 16376 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 16377 << owner.Indirect << owner.Range; 16378 } 16379 16380 /// Check for a keyword selector that starts with the word 'add' or 16381 /// 'set'. 16382 static bool isSetterLikeSelector(Selector sel) { 16383 if (sel.isUnarySelector()) return false; 16384 16385 StringRef str = sel.getNameForSlot(0); 16386 while (!str.empty() && str.front() == '_') str = str.substr(1); 16387 if (str.startswith("set")) 16388 str = str.substr(3); 16389 else if (str.startswith("add")) { 16390 // Specially allow 'addOperationWithBlock:'. 
16391 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 16392 return false; 16393 str = str.substr(3); 16394 } 16395 else 16396 return false; 16397 16398 if (str.empty()) return true; 16399 return !isLowercase(str.front()); 16400 } 16401 16402 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 16403 ObjCMessageExpr *Message) { 16404 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 16405 Message->getReceiverInterface(), 16406 NSAPI::ClassId_NSMutableArray); 16407 if (!IsMutableArray) { 16408 return None; 16409 } 16410 16411 Selector Sel = Message->getSelector(); 16412 16413 Optional<NSAPI::NSArrayMethodKind> MKOpt = 16414 S.NSAPIObj->getNSArrayMethodKind(Sel); 16415 if (!MKOpt) { 16416 return None; 16417 } 16418 16419 NSAPI::NSArrayMethodKind MK = *MKOpt; 16420 16421 switch (MK) { 16422 case NSAPI::NSMutableArr_addObject: 16423 case NSAPI::NSMutableArr_insertObjectAtIndex: 16424 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 16425 return 0; 16426 case NSAPI::NSMutableArr_replaceObjectAtIndex: 16427 return 1; 16428 16429 default: 16430 return None; 16431 } 16432 16433 return None; 16434 } 16435 16436 static 16437 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 16438 ObjCMessageExpr *Message) { 16439 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 16440 Message->getReceiverInterface(), 16441 NSAPI::ClassId_NSMutableDictionary); 16442 if (!IsMutableDictionary) { 16443 return None; 16444 } 16445 16446 Selector Sel = Message->getSelector(); 16447 16448 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 16449 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 16450 if (!MKOpt) { 16451 return None; 16452 } 16453 16454 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 16455 16456 switch (MK) { 16457 case NSAPI::NSMutableDict_setObjectForKey: 16458 case NSAPI::NSMutableDict_setValueForKey: 16459 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 16460 return 0; 16461 16462 default: 16463 return None; 16464 } 16465 16466 return None; 16467 } 16468 16469 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 16470 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 16471 Message->getReceiverInterface(), 16472 NSAPI::ClassId_NSMutableSet); 16473 16474 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 16475 Message->getReceiverInterface(), 16476 NSAPI::ClassId_NSMutableOrderedSet); 16477 if (!IsMutableSet && !IsMutableOrderedSet) { 16478 return None; 16479 } 16480 16481 Selector Sel = Message->getSelector(); 16482 16483 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 16484 if (!MKOpt) { 16485 return None; 16486 } 16487 16488 NSAPI::NSSetMethodKind MK = *MKOpt; 16489 16490 switch (MK) { 16491 case NSAPI::NSMutableSet_addObject: 16492 case NSAPI::NSOrderedSet_setObjectAtIndex: 16493 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 16494 case NSAPI::NSOrderedSet_insertObjectAtIndex: 16495 return 0; 16496 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 16497 return 1; 16498 } 16499 16500 return None; 16501 } 16502 16503 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 16504 if (!Message->isInstanceMessage()) { 16505 return; 16506 } 16507 16508 Optional<int> ArgOpt; 16509 16510 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 16511 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 16512 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 16513 return; 16514 } 16515 16516 int ArgIndex = *ArgOpt; 16517 16518 Expr *Arg = 
Message->getArg(ArgIndex)->IgnoreImpCasts(); 16519 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 16520 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 16521 } 16522 16523 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 16524 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 16525 if (ArgRE->isObjCSelfExpr()) { 16526 Diag(Message->getSourceRange().getBegin(), 16527 diag::warn_objc_circular_container) 16528 << ArgRE->getDecl() << StringRef("'super'"); 16529 } 16530 } 16531 } else { 16532 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 16533 16534 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 16535 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 16536 } 16537 16538 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 16539 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 16540 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 16541 ValueDecl *Decl = ReceiverRE->getDecl(); 16542 Diag(Message->getSourceRange().getBegin(), 16543 diag::warn_objc_circular_container) 16544 << Decl << Decl; 16545 if (!ArgRE->isObjCSelfExpr()) { 16546 Diag(Decl->getLocation(), 16547 diag::note_objc_circular_container_declared_here) 16548 << Decl; 16549 } 16550 } 16551 } 16552 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 16553 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 16554 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 16555 ObjCIvarDecl *Decl = IvarRE->getDecl(); 16556 Diag(Message->getSourceRange().getBegin(), 16557 diag::warn_objc_circular_container) 16558 << Decl << Decl; 16559 Diag(Decl->getLocation(), 16560 diag::note_objc_circular_container_declared_here) 16561 << Decl; 16562 } 16563 } 16564 } 16565 } 16566 } 16567 16568 /// Check a message send to see if it's likely to cause a retain cycle. 16569 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 16570 // Only check instance methods whose selector looks like a setter. 16571 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 16572 return; 16573 16574 // Try to find a variable that the receiver is strongly owned by. 16575 RetainCycleOwner owner; 16576 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 16577 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 16578 return; 16579 } else { 16580 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 16581 owner.Variable = getCurMethodDecl()->getSelfDecl(); 16582 owner.Loc = msg->getSuperLoc(); 16583 owner.Range = msg->getSuperLoc(); 16584 } 16585 16586 // Check whether the receiver is captured by any of the arguments. 16587 const ObjCMethodDecl *MD = msg->getMethodDecl(); 16588 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 16589 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 16590 // noescape blocks should not be retained by the method. 16591 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 16592 continue; 16593 return diagnoseRetainCycle(*this, capturer, owner); 16594 } 16595 } 16596 } 16597 16598 /// Check a property assign to see if it's likely to cause a retain cycle. 
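/// A typical case (illustrative) is assigning a block that strongly captures
/// the receiver to one of the receiver's own properties, e.g.
///   self.handler = ^{ [self doSomething]; };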
16599 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 16600 RetainCycleOwner owner; 16601 if (!findRetainCycleOwner(*this, receiver, owner)) 16602 return; 16603 16604 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 16605 diagnoseRetainCycle(*this, capturer, owner); 16606 } 16607 16608 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 16609 RetainCycleOwner Owner; 16610 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 16611 return; 16612 16613 // Because we don't have an expression for the variable, we have to set the 16614 // location explicitly here. 16615 Owner.Loc = Var->getLocation(); 16616 Owner.Range = Var->getSourceRange(); 16617 16618 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 16619 diagnoseRetainCycle(*this, Capturer, Owner); 16620 } 16621 16622 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 16623 Expr *RHS, bool isProperty) { 16624 // Check if RHS is an Objective-C object literal, which also can get 16625 // immediately zapped in a weak reference. Note that we explicitly 16626 // allow ObjCStringLiterals, since those are designed to never really die. 16627 RHS = RHS->IgnoreParenImpCasts(); 16628 16629 // This enum needs to match with the 'select' in 16630 // warn_objc_arc_literal_assign (off-by-1). 16631 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 16632 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 16633 return false; 16634 16635 S.Diag(Loc, diag::warn_arc_literal_assign) 16636 << (unsigned) Kind 16637 << (isProperty ? 0 : 1) 16638 << RHS->getSourceRange(); 16639 16640 return true; 16641 } 16642 16643 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 16644 Qualifiers::ObjCLifetime LT, 16645 Expr *RHS, bool isProperty) { 16646 // Strip off any implicit cast added to get to the one ARC-specific. 16647 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16648 if (cast->getCastKind() == CK_ARCConsumeObject) { 16649 S.Diag(Loc, diag::warn_arc_retained_assign) 16650 << (LT == Qualifiers::OCL_ExplicitNone) 16651 << (isProperty ? 0 : 1) 16652 << RHS->getSourceRange(); 16653 return true; 16654 } 16655 RHS = cast->getSubExpr(); 16656 } 16657 16658 if (LT == Qualifiers::OCL_Weak && 16659 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 16660 return true; 16661 16662 return false; 16663 } 16664 16665 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 16666 QualType LHS, Expr *RHS) { 16667 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 16668 16669 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 16670 return false; 16671 16672 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 16673 return true; 16674 16675 return false; 16676 } 16677 16678 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 16679 Expr *LHS, Expr *RHS) { 16680 QualType LHSType; 16681 // PropertyRef on LHS type need be directly obtained from 16682 // its declaration as it has a PseudoType. 
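// To illustrate (a hypothetical Objective-C snippet, not taken from this file): for a
// declaration like
//   @property (weak) id delegate;
// the expression 'obj.delegate' only has a pseudo-object type, so the __weak lifetime
// has to be read off the ObjCPropertyDecl below rather than off the LHS expression's type.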
16683 ObjCPropertyRefExpr *PRE 16684 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 16685 if (PRE && !PRE->isImplicitProperty()) { 16686 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16687 if (PD) 16688 LHSType = PD->getType(); 16689 } 16690 16691 if (LHSType.isNull()) 16692 LHSType = LHS->getType(); 16693 16694 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 16695 16696 if (LT == Qualifiers::OCL_Weak) { 16697 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 16698 getCurFunction()->markSafeWeakUse(LHS); 16699 } 16700 16701 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 16702 return; 16703 16704 // FIXME. Check for other life times. 16705 if (LT != Qualifiers::OCL_None) 16706 return; 16707 16708 if (PRE) { 16709 if (PRE->isImplicitProperty()) 16710 return; 16711 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16712 if (!PD) 16713 return; 16714 16715 unsigned Attributes = PD->getPropertyAttributes(); 16716 if (Attributes & ObjCPropertyAttribute::kind_assign) { 16717 // when 'assign' attribute was not explicitly specified 16718 // by user, ignore it and rely on property type itself 16719 // for lifetime info. 16720 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 16721 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 16722 LHSType->isObjCRetainableType()) 16723 return; 16724 16725 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16726 if (cast->getCastKind() == CK_ARCConsumeObject) { 16727 Diag(Loc, diag::warn_arc_retained_property_assign) 16728 << RHS->getSourceRange(); 16729 return; 16730 } 16731 RHS = cast->getSubExpr(); 16732 } 16733 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 16734 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 16735 return; 16736 } 16737 } 16738 } 16739 16740 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 16741 16742 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 16743 SourceLocation StmtLoc, 16744 const NullStmt *Body) { 16745 // Do not warn if the body is a macro that expands to nothing, e.g: 16746 // 16747 // #define CALL(x) 16748 // if (condition) 16749 // CALL(0); 16750 if (Body->hasLeadingEmptyMacro()) 16751 return false; 16752 16753 // Get line numbers of statement and body. 16754 bool StmtLineInvalid; 16755 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 16756 &StmtLineInvalid); 16757 if (StmtLineInvalid) 16758 return false; 16759 16760 bool BodyLineInvalid; 16761 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 16762 &BodyLineInvalid); 16763 if (BodyLineInvalid) 16764 return false; 16765 16766 // Warn if null statement and body are on the same line. 16767 if (StmtLine != BodyLine) 16768 return false; 16769 16770 return true; 16771 } 16772 16773 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 16774 const Stmt *Body, 16775 unsigned DiagID) { 16776 // Since this is a syntactic check, don't emit diagnostic for template 16777 // instantiations, this just adds noise. 16778 if (CurrentInstantiationScope) 16779 return; 16780 16781 // The body should be a null statement. 16782 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16783 if (!NBody) 16784 return; 16785 16786 // Do the usual checks. 
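// As a hedged illustration (hypothetical code), the same-line requirement enforced by
// ShouldDiagnoseEmptyStmtBody means this fires on:
//   if (cond); foo();        // the ';' silently becomes the 'if' body
// but stays quiet when the ';' sits alone on the following line.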
16787 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16788 return; 16789 16790 Diag(NBody->getSemiLoc(), DiagID); 16791 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16792 } 16793
16794 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 16795 const Stmt *PossibleBody) { 16796 assert(!CurrentInstantiationScope); // Ensured by caller 16797 16798 SourceLocation StmtLoc; 16799 const Stmt *Body; 16800 unsigned DiagID;
16801 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 16802 StmtLoc = FS->getRParenLoc(); 16803 Body = FS->getBody(); 16804 DiagID = diag::warn_empty_for_body; 16805 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 16806 StmtLoc = WS->getRParenLoc(); 16807 Body = WS->getBody(); 16808 DiagID = diag::warn_empty_while_body; 16809 } else 16810 return; // Neither `for' nor `while'. 16811
16812 // The body should be a null statement. 16813 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16814 if (!NBody) 16815 return; 16816
16817 // Skip expensive checks if diagnostic is disabled. 16818 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 16819 return; 16820
16821 // Do the usual checks. 16822 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16823 return; 16824
16825 // `for(...);' and `while(...);' are popular idioms, so in order to keep 16826 // noise level low, emit diagnostics only if for/while is followed by a 16827 // CompoundStmt, e.g.: 16828 // for (int i = 0; i < n; i++); 16829 // { 16830 // a(i); 16831 // } 16832 // or if for/while is followed by a statement with more indentation 16833 // than for/while itself: 16834 // for (int i = 0; i < n; i++); 16835 // a(i);
16836 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 16837 if (!ProbableTypo) { 16838 bool BodyColInvalid; 16839 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 16840 PossibleBody->getBeginLoc(), &BodyColInvalid); 16841 if (BodyColInvalid) 16842 return; 16843
16844 bool StmtColInvalid; 16845 unsigned StmtCol = 16846 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 16847 if (StmtColInvalid) 16848 return; 16849
16850 if (BodyCol > StmtCol) 16851 ProbableTypo = true; 16852 } 16853
16854 if (ProbableTypo) { 16855 Diag(NBody->getSemiLoc(), DiagID); 16856 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16857 } 16858 } 16859
16860 //===--- CHECK: Warn on self move with std::move. -------------------------===// 16861
16862 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 16863 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 16864 SourceLocation OpLoc) { 16865 if (Diags.isIgnored(diag::warn_self_move, OpLoc)) 16866 return; 16867
16868 if (inTemplateInstantiation()) 16869 return; 16870
16871 // Strip parens and casts away. 16872 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 16873 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 16874
16875 // Check for a call expression 16876 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 16877 if (!CE || CE->getNumArgs() != 1) 16878 return; 16879
16880 // Check for a call to std::move 16881 if (!CE->isCallToStdMove()) 16882 return; 16883
16884 // Get argument from std::move 16885 RHSExpr = CE->getArg(0); 16886
16887 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 16888 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 16889
16890 // Two DeclRefExpr's, check that the decls are the same.
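// For example (hypothetical user code), the plain-variable case handled just below
// warns on:
//   std::vector<int> v;
//   v = std::move(v);   // -Wself-move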
16891 if (LHSDeclRef && RHSDeclRef) { 16892 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16893 return; 16894 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16895 RHSDeclRef->getDecl()->getCanonicalDecl()) 16896 return; 16897 16898 auto D = Diag(OpLoc, diag::warn_self_move) 16899 << LHSExpr->getType() << LHSExpr->getSourceRange() 16900 << RHSExpr->getSourceRange(); 16901 if (const FieldDecl *F = 16902 getSelfAssignmentClassMemberCandidate(RHSDeclRef->getDecl())) 16903 D << 1 << F 16904 << FixItHint::CreateInsertion(LHSDeclRef->getBeginLoc(), "this->"); 16905 else 16906 D << 0; 16907 return; 16908 } 16909 16910 // Member variables require a different approach to check for self moves. 16911 // MemberExpr's are the same if every nested MemberExpr refers to the same 16912 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 16913 // the base Expr's are CXXThisExpr's. 16914 const Expr *LHSBase = LHSExpr; 16915 const Expr *RHSBase = RHSExpr; 16916 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 16917 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 16918 if (!LHSME || !RHSME) 16919 return; 16920 16921 while (LHSME && RHSME) { 16922 if (LHSME->getMemberDecl()->getCanonicalDecl() != 16923 RHSME->getMemberDecl()->getCanonicalDecl()) 16924 return; 16925 16926 LHSBase = LHSME->getBase(); 16927 RHSBase = RHSME->getBase(); 16928 LHSME = dyn_cast<MemberExpr>(LHSBase); 16929 RHSME = dyn_cast<MemberExpr>(RHSBase); 16930 } 16931 16932 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 16933 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 16934 if (LHSDeclRef && RHSDeclRef) { 16935 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16936 return; 16937 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16938 RHSDeclRef->getDecl()->getCanonicalDecl()) 16939 return; 16940 16941 Diag(OpLoc, diag::warn_self_move) 16942 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange() 16943 << RHSExpr->getSourceRange(); 16944 return; 16945 } 16946 16947 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 16948 Diag(OpLoc, diag::warn_self_move) 16949 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange() 16950 << RHSExpr->getSourceRange(); 16951 } 16952 16953 //===--- Layout compatibility ----------------------------------------------// 16954 16955 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 16956 16957 /// Check if two enumeration types are layout-compatible. 16958 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 16959 // C++11 [dcl.enum] p8: 16960 // Two enumeration types are layout-compatible if they have the same 16961 // underlying type. 16962 return ED1->isComplete() && ED2->isComplete() && 16963 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 16964 } 16965 16966 /// Check if two fields are layout-compatible. 16967 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 16968 FieldDecl *Field2) { 16969 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 16970 return false; 16971 16972 if (Field1->isBitField() != Field2->isBitField()) 16973 return false; 16974 16975 if (Field1->isBitField()) { 16976 // Make sure that the bit-fields are the same length. 16977 unsigned Bits1 = Field1->getBitWidthValue(C); 16978 unsigned Bits2 = Field2->getBitWidthValue(C); 16979 16980 if (Bits1 != Bits2) 16981 return false; 16982 } 16983 16984 return true; 16985 } 16986 16987 /// Check if two standard-layout structs are layout-compatible. 
16988 /// (C++11 [class.mem] p17) 16989 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 16990 RecordDecl *RD2) { 16991 // If both records are C++ classes, check that base classes match. 16992 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 16993 // If one of records is a CXXRecordDecl we are in C++ mode, 16994 // thus the other one is a CXXRecordDecl, too. 16995 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 16996 // Check number of base classes. 16997 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 16998 return false; 16999 17000 // Check the base classes. 17001 for (CXXRecordDecl::base_class_const_iterator 17002 Base1 = D1CXX->bases_begin(), 17003 BaseEnd1 = D1CXX->bases_end(), 17004 Base2 = D2CXX->bases_begin(); 17005 Base1 != BaseEnd1; 17006 ++Base1, ++Base2) { 17007 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 17008 return false; 17009 } 17010 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 17011 // If only RD2 is a C++ class, it should have zero base classes. 17012 if (D2CXX->getNumBases() > 0) 17013 return false; 17014 } 17015 17016 // Check the fields. 17017 RecordDecl::field_iterator Field2 = RD2->field_begin(), 17018 Field2End = RD2->field_end(), 17019 Field1 = RD1->field_begin(), 17020 Field1End = RD1->field_end(); 17021 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 17022 if (!isLayoutCompatible(C, *Field1, *Field2)) 17023 return false; 17024 } 17025 if (Field1 != Field1End || Field2 != Field2End) 17026 return false; 17027 17028 return true; 17029 } 17030 17031 /// Check if two standard-layout unions are layout-compatible. 17032 /// (C++11 [class.mem] p18) 17033 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 17034 RecordDecl *RD2) { 17035 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 17036 for (auto *Field2 : RD2->fields()) 17037 UnmatchedFields.insert(Field2); 17038 17039 for (auto *Field1 : RD1->fields()) { 17040 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 17041 I = UnmatchedFields.begin(), 17042 E = UnmatchedFields.end(); 17043 17044 for ( ; I != E; ++I) { 17045 if (isLayoutCompatible(C, Field1, *I)) { 17046 bool Result = UnmatchedFields.erase(*I); 17047 (void) Result; 17048 assert(Result); 17049 break; 17050 } 17051 } 17052 if (I == E) 17053 return false; 17054 } 17055 17056 return UnmatchedFields.empty(); 17057 } 17058 17059 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 17060 RecordDecl *RD2) { 17061 if (RD1->isUnion() != RD2->isUnion()) 17062 return false; 17063 17064 if (RD1->isUnion()) 17065 return isLayoutCompatibleUnion(C, RD1, RD2); 17066 else 17067 return isLayoutCompatibleStruct(C, RD1, RD2); 17068 } 17069 17070 /// Check if two types are layout-compatible in C++11 sense. 17071 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 17072 if (T1.isNull() || T2.isNull()) 17073 return false; 17074 17075 // C++11 [basic.types] p11: 17076 // If two types T1 and T2 are the same type, then T1 and T2 are 17077 // layout-compatible types. 
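// Beyond that identical-type case, the record/enum helpers above accept, for instance
// (a hypothetical example):
//   struct A { int i; char c; };
//   struct B { int j; char d; };   // layout-compatible with A
// whereas reordering the members would break compatibility.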
17078 if (C.hasSameType(T1, T2)) 17079 return true; 17080
17081 T1 = T1.getCanonicalType().getUnqualifiedType(); 17082 T2 = T2.getCanonicalType().getUnqualifiedType(); 17083
17084 const Type::TypeClass TC1 = T1->getTypeClass(); 17085 const Type::TypeClass TC2 = T2->getTypeClass(); 17086
17087 if (TC1 != TC2) 17088 return false; 17089
17090 if (TC1 == Type::Enum) { 17091 return isLayoutCompatible(C, 17092 cast<EnumType>(T1)->getDecl(), 17093 cast<EnumType>(T2)->getDecl()); 17094 } else if (TC1 == Type::Record) { 17095 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 17096 return false; 17097
17098 return isLayoutCompatible(C, 17099 cast<RecordType>(T1)->getDecl(), 17100 cast<RecordType>(T2)->getDecl()); 17101 } 17102
17103 return false; 17104 } 17105
17106 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 17107
17108 /// Given a type tag expression find the type tag itself. 17109 /// 17110 /// \param TypeExpr Type tag expression, as it appears in user's code. 17111 /// 17112 /// \param VD Declaration of an identifier that appears in a type tag. 17113 /// 17114 /// \param MagicValue Type tag magic value. 17115 /// 17116 /// \param isConstantEvaluated whether the evaluation should be performed in 17117 /// constant context.
17119 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 17120 const ValueDecl **VD, uint64_t *MagicValue, 17121 bool isConstantEvaluated) { 17122 while(true) { 17123 if (!TypeExpr) 17124 return false; 17125
17126 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 17127
17128 switch (TypeExpr->getStmtClass()) { 17129 case Stmt::UnaryOperatorClass: { 17130 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 17131 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 17132 TypeExpr = UO->getSubExpr(); 17133 continue; 17134 } 17135 return false; 17136 } 17137
17138 case Stmt::DeclRefExprClass: { 17139 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 17140 *VD = DRE->getDecl(); 17141 return true; 17142 } 17143
17144 case Stmt::IntegerLiteralClass: { 17145 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 17146 llvm::APInt MagicValueAPInt = IL->getValue(); 17147 if (MagicValueAPInt.getActiveBits() <= 64) { 17148 *MagicValue = MagicValueAPInt.getZExtValue(); 17149 return true; 17150 } else 17151 return false; 17152 } 17153
17154 case Stmt::BinaryConditionalOperatorClass: 17155 case Stmt::ConditionalOperatorClass: { 17156 const AbstractConditionalOperator *ACO = 17157 cast<AbstractConditionalOperator>(TypeExpr); 17158 bool Result; 17159 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 17160 isConstantEvaluated)) { 17161 if (Result) 17162 TypeExpr = ACO->getTrueExpr(); 17163 else 17164 TypeExpr = ACO->getFalseExpr(); 17165 continue; 17166 } 17167 return false; 17168 } 17169
17170 case Stmt::BinaryOperatorClass: { 17171 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 17172 if (BO->getOpcode() == BO_Comma) { 17173 TypeExpr = BO->getRHS(); 17174 continue; 17175 } 17176 return false; 17177 } 17178
17179 default: 17180 return false; 17181 } 17182 } 17183 } 17184
17185 /// Retrieve the C type corresponding to type tag TypeExpr. 17186 /// 17187 /// \param TypeExpr Expression that specifies a type tag. 17188 /// 17189 /// \param MagicValues Registered magic values. 17190 /// 17191 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 17192 /// kind. 17193 /// 17194 /// \param TypeInfo Information about the corresponding C type.
17195 /// 17196 /// \param isConstantEvaluated whether the evaluation should be performed in 17197 /// constant context. 17198 /// 17199 /// \returns true if the corresponding C type was found.
17200 static bool GetMatchingCType( 17201 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 17202 const ASTContext &Ctx, 17203 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 17204 *MagicValues, 17205 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 17206 bool isConstantEvaluated) { 17207 FoundWrongKind = false; 17208
17209 // Variable declaration that has the type_tag_for_datatype attribute. 17210 const ValueDecl *VD = nullptr; 17211
17212 uint64_t MagicValue; 17213
17214 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 17215 return false; 17216
17217 if (VD) { 17218 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 17219 if (I->getArgumentKind() != ArgumentKind) { 17220 FoundWrongKind = true; 17221 return false; 17222 } 17223 TypeInfo.Type = I->getMatchingCType(); 17224 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 17225 TypeInfo.MustBeNull = I->getMustBeNull(); 17226 return true; 17227 } 17228 return false; 17229 } 17230
17231 if (!MagicValues) 17232 return false; 17233
17234 llvm::DenseMap<Sema::TypeTagMagicValue, 17235 Sema::TypeTagData>::const_iterator I = 17236 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 17237 if (I == MagicValues->end()) 17238 return false; 17239
17240 TypeInfo = I->second; 17241 return true; 17242 } 17243
17244 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 17245 uint64_t MagicValue, QualType Type, 17246 bool LayoutCompatible, 17247 bool MustBeNull) { 17248 if (!TypeTagForDatatypeMagicValues) 17249 TypeTagForDatatypeMagicValues.reset( 17250 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 17251
17252 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 17253 (*TypeTagForDatatypeMagicValues)[Magic] = 17254 TypeTagData(Type, LayoutCompatible, MustBeNull); 17255 } 17256
17257 static bool IsSameCharType(QualType T1, QualType T2) { 17258 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 17259 if (!BT1) 17260 return false; 17261
17262 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 17263 if (!BT2) 17264 return false; 17265
17266 BuiltinType::Kind T1Kind = BT1->getKind(); 17267 BuiltinType::Kind T2Kind = BT2->getKind(); 17268
17269 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 17270 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 17271 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 17272 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 17273 } 17274
17275 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 17276 const ArrayRef<const Expr *> ExprArgs, 17277 SourceLocation CallSiteLoc) { 17278 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 17279 bool IsPointerAttr = Attr->getIsPointer(); 17280
17281 // Retrieve the argument representing the 'type_tag'.
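// As a rough sketch of the usage being checked (hypothetical declarations modeled on
// the attribute documentation, not part of this file):
//   static const int mpi_int_tag
//       __attribute__((type_tag_for_datatype(mpi, int)));
//   void mpi_send(void *buf, int tag)
//       __attribute__((pointer_with_type_tag(mpi, 1, 2)));
// Here argument 2 ('tag') carries the type tag and argument 1 ('buf') is the pointer
// whose pointee type must match.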
17282 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 17283 if (TypeTagIdxAST >= ExprArgs.size()) { 17284 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 17285 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 17286 return; 17287 } 17288 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 17289 bool FoundWrongKind; 17290 TypeTagData TypeInfo; 17291 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 17292 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 17293 TypeInfo, isConstantEvaluated())) { 17294 if (FoundWrongKind) 17295 Diag(TypeTagExpr->getExprLoc(), 17296 diag::warn_type_tag_for_datatype_wrong_kind) 17297 << TypeTagExpr->getSourceRange(); 17298 return; 17299 } 17300 17301 // Retrieve the argument representing the 'arg_idx'. 17302 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 17303 if (ArgumentIdxAST >= ExprArgs.size()) { 17304 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 17305 << 1 << Attr->getArgumentIdx().getSourceIndex(); 17306 return; 17307 } 17308 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 17309 if (IsPointerAttr) { 17310 // Skip implicit cast of pointer to `void *' (as a function argument). 17311 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 17312 if (ICE->getType()->isVoidPointerType() && 17313 ICE->getCastKind() == CK_BitCast) 17314 ArgumentExpr = ICE->getSubExpr(); 17315 } 17316 QualType ArgumentType = ArgumentExpr->getType(); 17317 17318 // Passing a `void*' pointer shouldn't trigger a warning. 17319 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 17320 return; 17321 17322 if (TypeInfo.MustBeNull) { 17323 // Type tag with matching void type requires a null pointer. 17324 if (!ArgumentExpr->isNullPointerConstant(Context, 17325 Expr::NPC_ValueDependentIsNotNull)) { 17326 Diag(ArgumentExpr->getExprLoc(), 17327 diag::warn_type_safety_null_pointer_required) 17328 << ArgumentKind->getName() 17329 << ArgumentExpr->getSourceRange() 17330 << TypeTagExpr->getSourceRange(); 17331 } 17332 return; 17333 } 17334 17335 QualType RequiredType = TypeInfo.Type; 17336 if (IsPointerAttr) 17337 RequiredType = Context.getPointerType(RequiredType); 17338 17339 bool mismatch = false; 17340 if (!TypeInfo.LayoutCompatible) { 17341 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 17342 17343 // C++11 [basic.fundamental] p1: 17344 // Plain char, signed char, and unsigned char are three distinct types. 17345 // 17346 // But we treat plain `char' as equivalent to `signed char' or `unsigned 17347 // char' depending on the current char signedness mode. 
17348 if (mismatch) 17349 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 17350 RequiredType->getPointeeType())) || 17351 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 17352 mismatch = false; 17353 } else 17354 if (IsPointerAttr) 17355 mismatch = !isLayoutCompatible(Context, 17356 ArgumentType->getPointeeType(), 17357 RequiredType->getPointeeType()); 17358 else 17359 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 17360 17361 if (mismatch) 17362 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 17363 << ArgumentType << ArgumentKind 17364 << TypeInfo.LayoutCompatible << RequiredType 17365 << ArgumentExpr->getSourceRange() 17366 << TypeTagExpr->getSourceRange(); 17367 } 17368 17369 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 17370 CharUnits Alignment) { 17371 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 17372 } 17373 17374 void Sema::DiagnoseMisalignedMembers() { 17375 for (MisalignedMember &m : MisalignedMembers) { 17376 const NamedDecl *ND = m.RD; 17377 if (ND->getName().empty()) { 17378 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 17379 ND = TD; 17380 } 17381 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 17382 << m.MD << ND << m.E->getSourceRange(); 17383 } 17384 MisalignedMembers.clear(); 17385 } 17386 17387 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 17388 E = E->IgnoreParens(); 17389 if (!T->isPointerType() && !T->isIntegerType()) 17390 return; 17391 if (isa<UnaryOperator>(E) && 17392 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 17393 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 17394 if (isa<MemberExpr>(Op)) { 17395 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 17396 if (MA != MisalignedMembers.end() && 17397 (T->isIntegerType() || 17398 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 17399 Context.getTypeAlignInChars( 17400 T->getPointeeType()) <= MA->Alignment)))) 17401 MisalignedMembers.erase(MA); 17402 } 17403 } 17404 } 17405 17406 void Sema::RefersToMemberWithReducedAlignment( 17407 Expr *E, 17408 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 17409 Action) { 17410 const auto *ME = dyn_cast<MemberExpr>(E); 17411 if (!ME) 17412 return; 17413 17414 // No need to check expressions with an __unaligned-qualified type. 17415 if (E->getType().getQualifiers().hasUnaligned()) 17416 return; 17417 17418 // For a chain of MemberExpr like "a.b.c.d" this list 17419 // will keep FieldDecl's like [d, c, b]. 17420 SmallVector<FieldDecl *, 4> ReverseMemberChain; 17421 const MemberExpr *TopME = nullptr; 17422 bool AnyIsPacked = false; 17423 do { 17424 QualType BaseType = ME->getBase()->getType(); 17425 if (BaseType->isDependentType()) 17426 return; 17427 if (ME->isArrow()) 17428 BaseType = BaseType->getPointeeType(); 17429 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 17430 if (RD->isInvalidDecl()) 17431 return; 17432 17433 ValueDecl *MD = ME->getMemberDecl(); 17434 auto *FD = dyn_cast<FieldDecl>(MD); 17435 // We do not care about non-data members. 
17436 if (!FD || FD->isInvalidDecl()) 17437 return; 17438
17439 AnyIsPacked = 17440 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>()); 17441 ReverseMemberChain.push_back(FD); 17442
17443 TopME = ME; 17444 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens()); 17445 } while (ME); 17446 assert(TopME && "We did not compute a topmost MemberExpr!"); 17447
17448 // Not in the scope of this diagnostic. 17449 if (!AnyIsPacked) 17450 return; 17451
17452 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts(); 17453 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase); 17454 // TODO: The innermost base of the member expression may be too complicated. 17455 // For now, just disregard these cases. This is left for future 17456 // improvement. 17457 if (!DRE && !isa<CXXThisExpr>(TopBase)) 17458 return; 17459
17460 // Alignment expected by the whole expression. 17461 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType()); 17462
17463 // No need to do anything else with this case. 17464 if (ExpectedAlignment.isOne()) 17465 return; 17466
17467 // Synthesize offset of the whole access. 17468 CharUnits Offset; 17469 for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain)) 17470 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD)); 17471
17472 // Compute the CompleteObjectAlignment as the alignment of the whole chain. 17473 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars( 17474 ReverseMemberChain.back()->getParent()->getTypeForDecl()); 17475
17476 // The base expression of the innermost MemberExpr may give 17477 // stronger guarantees than the class containing the member. 17478 if (DRE && !TopME->isArrow()) { 17479 const ValueDecl *VD = DRE->getDecl(); 17480 if (!VD->getType()->isReferenceType()) 17481 CompleteObjectAlignment = 17482 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD)); 17483 } 17484
17485 // Check if the synthesized offset fulfills the alignment. 17486 if (Offset % ExpectedAlignment != 0 || 17487 // It may fulfill the offset, but the effective alignment may still be 17488 // lower than the expected expression alignment. 17489 CompleteObjectAlignment < ExpectedAlignment) { 17490 // If this happens, we want to determine a sensible culprit for it. 17491 // Intuitively, watching the chain of member expressions from right to 17492 // left, we start with the required alignment (as required by the field 17493 // type) but some packed attribute in that chain has reduced the alignment. 17494 // It may happen that another packed structure increases it again. But if 17495 // we are here, that increase has not been enough. So pointing at the first 17496 // FieldDecl that is either packed itself or whose RecordDecl is packed 17497 // seems reasonable.
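// A small sketch of the situation (hypothetical types):
//   struct __attribute__((packed)) Inner { char c; int i; };
//   struct Outer { char pad; Inner in; } o;
//   int *p = &o.in.i;   // -Waddress-of-packed-member, culprit is 'i'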
17498 FieldDecl *FD = nullptr; 17499 CharUnits Alignment; 17500 for (FieldDecl *FDI : ReverseMemberChain) { 17501 if (FDI->hasAttr<PackedAttr>() || 17502 FDI->getParent()->hasAttr<PackedAttr>()) { 17503 FD = FDI; 17504 Alignment = std::min( 17505 Context.getTypeAlignInChars(FD->getType()), 17506 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 17507 break; 17508 } 17509 } 17510 assert(FD && "We did not find a packed FieldDecl!"); 17511 Action(E, FD->getParent(), FD, Alignment); 17512 } 17513 } 17514 17515 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 17516 using namespace std::placeholders; 17517 17518 RefersToMemberWithReducedAlignment( 17519 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 17520 _2, _3, _4)); 17521 } 17522 17523 // Check if \p Ty is a valid type for the elementwise math builtins. If it is 17524 // not a valid type, emit an error message and return true. Otherwise return 17525 // false. 17526 static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc, 17527 QualType Ty) { 17528 if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) { 17529 S.Diag(Loc, diag::err_builtin_invalid_arg_type) 17530 << 1 << /* vector, integer or float ty*/ 0 << Ty; 17531 return true; 17532 } 17533 return false; 17534 } 17535 17536 bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) { 17537 if (checkArgCount(*this, TheCall, 1)) 17538 return true; 17539 17540 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 17541 if (A.isInvalid()) 17542 return true; 17543 17544 TheCall->setArg(0, A.get()); 17545 QualType TyA = A.get()->getType(); 17546 17547 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 17548 return true; 17549 17550 TheCall->setType(TyA); 17551 return false; 17552 } 17553 17554 bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) { 17555 if (checkArgCount(*this, TheCall, 2)) 17556 return true; 17557 17558 ExprResult A = TheCall->getArg(0); 17559 ExprResult B = TheCall->getArg(1); 17560 // Do standard promotions between the two arguments, returning their common 17561 // type. 
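// For instance (a hypothetical call to one of the two-argument elementwise builtins,
// such as __builtin_elementwise_max):
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 m = __builtin_elementwise_max(a, b);  // a and b converted to a common float4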
17562 QualType Res = 17563 UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison); 17564 if (A.isInvalid() || B.isInvalid()) 17565 return true; 17566 17567 QualType TyA = A.get()->getType(); 17568 QualType TyB = B.get()->getType(); 17569 17570 if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType()) 17571 return Diag(A.get()->getBeginLoc(), 17572 diag::err_typecheck_call_different_arg_types) 17573 << TyA << TyB; 17574 17575 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 17576 return true; 17577 17578 TheCall->setArg(0, A.get()); 17579 TheCall->setArg(1, B.get()); 17580 TheCall->setType(Res); 17581 return false; 17582 } 17583 17584 bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) { 17585 if (checkArgCount(*this, TheCall, 1)) 17586 return true; 17587 17588 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 17589 if (A.isInvalid()) 17590 return true; 17591 17592 TheCall->setArg(0, A.get()); 17593 return false; 17594 } 17595 17596 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 17597 ExprResult CallResult) { 17598 if (checkArgCount(*this, TheCall, 1)) 17599 return ExprError(); 17600 17601 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 17602 if (MatrixArg.isInvalid()) 17603 return MatrixArg; 17604 Expr *Matrix = MatrixArg.get(); 17605 17606 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 17607 if (!MType) { 17608 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17609 << 1 << /* matrix ty*/ 1 << Matrix->getType(); 17610 return ExprError(); 17611 } 17612 17613 // Create returned matrix type by swapping rows and columns of the argument 17614 // matrix type. 17615 QualType ResultType = Context.getConstantMatrixType( 17616 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 17617 17618 // Change the return type to the type of the returned matrix. 17619 TheCall->setType(ResultType); 17620 17621 // Update call argument to use the possibly converted matrix argument. 17622 TheCall->setArg(0, Matrix); 17623 return CallResult; 17624 } 17625 17626 // Get and verify the matrix dimensions. 17627 static llvm::Optional<unsigned> 17628 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 17629 SourceLocation ErrorPos; 17630 Optional<llvm::APSInt> Value = 17631 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 17632 if (!Value) { 17633 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 17634 << Name; 17635 return {}; 17636 } 17637 uint64_t Dim = Value->getZExtValue(); 17638 if (!ConstantMatrixType::isDimensionValid(Dim)) { 17639 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 17640 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 17641 return {}; 17642 } 17643 return Dim; 17644 } 17645 17646 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 17647 ExprResult CallResult) { 17648 if (!getLangOpts().MatrixTypes) { 17649 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 17650 return ExprError(); 17651 } 17652 17653 if (checkArgCount(*this, TheCall, 4)) 17654 return ExprError(); 17655 17656 unsigned PtrArgIdx = 0; 17657 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17658 Expr *RowsExpr = TheCall->getArg(1); 17659 Expr *ColumnsExpr = TheCall->getArg(2); 17660 Expr *StrideExpr = TheCall->getArg(3); 17661 17662 bool ArgError = false; 17663 17664 // Check pointer argument. 
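// Sketch of a typical call (hypothetical, requires -fenable-matrix):
//   typedef float fx4x4 __attribute__((matrix_type(4, 4)));
//   fx4x4 m = __builtin_matrix_column_major_load(p, 4, 4, /*stride=*/4);
// The element type of 'p' becomes the element type of the result matrix, which is why
// the pointer argument is validated first.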
17665 { 17666 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 17667 if (PtrConv.isInvalid()) 17668 return PtrConv; 17669 PtrExpr = PtrConv.get(); 17670 TheCall->setArg(0, PtrExpr); 17671 if (PtrExpr->isTypeDependent()) { 17672 TheCall->setType(Context.DependentTy); 17673 return TheCall; 17674 } 17675 } 17676
17677 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 17678 QualType ElementTy; 17679 if (!PtrTy) { 17680 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17681 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 17682 ArgError = true; 17683 } else { 17684 ElementTy = PtrTy->getPointeeType().getUnqualifiedType(); 17685
17686 if (!ConstantMatrixType::isValidElementType(ElementTy)) { 17687 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17688 << PtrArgIdx + 1 << /* pointer to element ty*/ 2 17689 << PtrExpr->getType(); 17690 ArgError = true; 17691 } 17692 } 17693
17694 // Apply default Lvalue conversions and convert the expression to size_t. 17695 auto ApplyArgumentConversions = [this](Expr *E) { 17696 ExprResult Conv = DefaultLvalueConversion(E); 17697 if (Conv.isInvalid()) 17698 return Conv; 17699
17700 return tryConvertExprToType(Conv.get(), Context.getSizeType()); 17701 }; 17702
17703 // Apply conversion to row and column expressions. 17704 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr); 17705 if (!RowsConv.isInvalid()) { 17706 RowsExpr = RowsConv.get(); 17707 TheCall->setArg(1, RowsExpr); 17708 } else 17709 RowsExpr = nullptr; 17710
17711 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr); 17712 if (!ColumnsConv.isInvalid()) { 17713 ColumnsExpr = ColumnsConv.get(); 17714 TheCall->setArg(2, ColumnsExpr); 17715 } else 17716 ColumnsExpr = nullptr; 17717
17718 // If any part of the result matrix type is still pending, just use 17719 // Context.DependentTy until all parts are resolved. 17720 if ((RowsExpr && RowsExpr->isTypeDependent()) || 17721 (ColumnsExpr && ColumnsExpr->isTypeDependent())) { 17722 TheCall->setType(Context.DependentTy); 17723 return CallResult; 17724 } 17725
17726 // Check row and column dimensions. 17727 llvm::Optional<unsigned> MaybeRows; 17728 if (RowsExpr) 17729 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this); 17730
17731 llvm::Optional<unsigned> MaybeColumns; 17732 if (ColumnsExpr) 17733 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this); 17734
17735 // Check stride argument.
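// E.g. (hypothetical): with MaybeRows == 3, a constant stride of 2 is rejected below,
// since the stride must be at least the number of rows of the loaded matrix.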
17736 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 17737 if (StrideConv.isInvalid()) 17738 return ExprError(); 17739 StrideExpr = StrideConv.get(); 17740 TheCall->setArg(3, StrideExpr); 17741 17742 if (MaybeRows) { 17743 if (Optional<llvm::APSInt> Value = 17744 StrideExpr->getIntegerConstantExpr(Context)) { 17745 uint64_t Stride = Value->getZExtValue(); 17746 if (Stride < *MaybeRows) { 17747 Diag(StrideExpr->getBeginLoc(), 17748 diag::err_builtin_matrix_stride_too_small); 17749 ArgError = true; 17750 } 17751 } 17752 } 17753 17754 if (ArgError || !MaybeRows || !MaybeColumns) 17755 return ExprError(); 17756 17757 TheCall->setType( 17758 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 17759 return CallResult; 17760 } 17761 17762 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 17763 ExprResult CallResult) { 17764 if (checkArgCount(*this, TheCall, 3)) 17765 return ExprError(); 17766 17767 unsigned PtrArgIdx = 1; 17768 Expr *MatrixExpr = TheCall->getArg(0); 17769 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17770 Expr *StrideExpr = TheCall->getArg(2); 17771 17772 bool ArgError = false; 17773 17774 { 17775 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 17776 if (MatrixConv.isInvalid()) 17777 return MatrixConv; 17778 MatrixExpr = MatrixConv.get(); 17779 TheCall->setArg(0, MatrixExpr); 17780 } 17781 if (MatrixExpr->isTypeDependent()) { 17782 TheCall->setType(Context.DependentTy); 17783 return TheCall; 17784 } 17785 17786 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 17787 if (!MatrixTy) { 17788 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17789 << 1 << /*matrix ty */ 1 << MatrixExpr->getType(); 17790 ArgError = true; 17791 } 17792 17793 { 17794 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 17795 if (PtrConv.isInvalid()) 17796 return PtrConv; 17797 PtrExpr = PtrConv.get(); 17798 TheCall->setArg(1, PtrExpr); 17799 if (PtrExpr->isTypeDependent()) { 17800 TheCall->setType(Context.DependentTy); 17801 return TheCall; 17802 } 17803 } 17804 17805 // Check pointer argument. 17806 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 17807 if (!PtrTy) { 17808 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17809 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 17810 ArgError = true; 17811 } else { 17812 QualType ElementTy = PtrTy->getPointeeType(); 17813 if (ElementTy.isConstQualified()) { 17814 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 17815 ArgError = true; 17816 } 17817 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 17818 if (MatrixTy && 17819 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 17820 Diag(PtrExpr->getBeginLoc(), 17821 diag::err_builtin_matrix_pointer_arg_mismatch) 17822 << ElementTy << MatrixTy->getElementType(); 17823 ArgError = true; 17824 } 17825 } 17826 17827 // Apply default Lvalue conversions and convert the stride expression to 17828 // size_t. 17829 { 17830 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 17831 if (StrideConv.isInvalid()) 17832 return StrideConv; 17833 17834 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 17835 if (StrideConv.isInvalid()) 17836 return StrideConv; 17837 StrideExpr = StrideConv.get(); 17838 TheCall->setArg(2, StrideExpr); 17839 } 17840 17841 // Check stride argument. 
17842 if (MatrixTy) { 17843 if (Optional<llvm::APSInt> Value = 17844 StrideExpr->getIntegerConstantExpr(Context)) { 17845 uint64_t Stride = Value->getZExtValue(); 17846 if (Stride < MatrixTy->getNumRows()) { 17847 Diag(StrideExpr->getBeginLoc(), 17848 diag::err_builtin_matrix_stride_too_small); 17849 ArgError = true; 17850 } 17851 } 17852 } 17853 17854 if (ArgError) 17855 return ExprError(); 17856 17857 return CallResult; 17858 } 17859 17860 /// \brief Enforce the bounds of a TCB 17861 /// CheckTCBEnforcement - Enforces that every function in a named TCB only 17862 /// directly calls other functions in the same TCB as marked by the enforce_tcb 17863 /// and enforce_tcb_leaf attributes. 17864 void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc, 17865 const NamedDecl *Callee) { 17866 const NamedDecl *Caller = getCurFunctionOrMethodDecl(); 17867 17868 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>()) 17869 return; 17870 17871 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 17872 // all TCBs the callee is a part of. 17873 llvm::StringSet<> CalleeTCBs; 17874 for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>()) 17875 CalleeTCBs.insert(A->getTCBName()); 17876 for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>()) 17877 CalleeTCBs.insert(A->getTCBName()); 17878 17879 // Go through the TCBs the caller is a part of and emit warnings if Caller 17880 // is in a TCB that the Callee is not. 17881 for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) { 17882 StringRef CallerTCB = A->getTCBName(); 17883 if (CalleeTCBs.count(CallerTCB) == 0) { 17884 this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation) 17885 << Callee << CallerTCB; 17886 } 17887 } 17888 } 17889
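// Illustrative example of the rule enforced above (hypothetical functions):
//   void tcb_fn(void)  __attribute__((enforce_tcb("net")));
//   void leaf_fn(void) __attribute__((enforce_tcb_leaf("net")));
//   void outside(void);
// Within tcb_fn(), calling leaf_fn() is allowed, while calling outside() triggers
// warn_tcb_enforcement_violation.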